vhost: flush batched enqueue descs directly
[dpdk.git] / lib / librte_vhost / virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static bool
35 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
36 {
37         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
38 }
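
An aside on the parity check above: virtio-net virtqueues alternate, with queue 2*k being the k-th RX queue (the one the host enqueues into) and queue 2*k + 1 the k-th TX queue. A minimal illustrative helper, not part of this file:

#include <stdbool.h>
#include <stdint.h>

/* even indices are RX queues from the host's point of view */
bool
queue_is_rx(uint32_t idx)
{
	return (idx & 1) == 0;
}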
39
40 static __rte_always_inline void
41 do_flush_shadow_used_ring_split(struct virtio_net *dev,
42                         struct vhost_virtqueue *vq,
43                         uint16_t to, uint16_t from, uint16_t size)
44 {
45         rte_memcpy(&vq->used->ring[to],
46                         &vq->shadow_used_split[from],
47                         size * sizeof(struct vring_used_elem));
48         vhost_log_cache_used_vring(dev, vq,
49                         offsetof(struct vring_used, ring[to]),
50                         size * sizeof(struct vring_used_elem));
51 }
52
53 static __rte_always_inline void
54 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
55 {
56         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
57
58         if (used_idx + vq->shadow_used_idx <= vq->size) {
59                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
60                                           vq->shadow_used_idx);
61         } else {
62                 uint16_t size;
63
64                 /* update the used ring interval [used_idx, vq->size) */
65                 size = vq->size - used_idx;
66                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
67
68                 /* update the remaining, wrapped-around interval
69                  * [0, shadow_used_idx - size)
70                  */
71                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
72                                           vq->shadow_used_idx - size);
71         }
72         vq->last_used_idx += vq->shadow_used_idx;
73
74         rte_smp_wmb();
75
76         vhost_log_cache_sync(dev, vq);
77
78         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
79         vq->shadow_used_idx = 0;
80         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
81                 sizeof(vq->used->idx));
82 }
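
The split flush above writes the shadow batch into the used ring in at most two contiguous chunks when the batch wraps past the ring end. A small standalone sketch of that arithmetic, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

static void
split_ring_copy(uint16_t used_idx, uint16_t n, uint16_t size)
{
	if (used_idx + n <= size) {
		printf("single chunk: ring[%u..%u)\n",
		       (unsigned)used_idx, (unsigned)(used_idx + n));
	} else {
		uint16_t first = size - used_idx;

		printf("chunk 1: ring[%u..%u)\n",
		       (unsigned)used_idx, (unsigned)size);
		printf("chunk 2: ring[0..%u)\n", (unsigned)(n - first));
	}
}

int
main(void)
{
	split_ring_copy(250, 10, 256);	/* prints [250..256) then [0..4) */
	return 0;
}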
83
84 static __rte_always_inline void
85 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
86                          uint16_t desc_idx, uint32_t len)
87 {
88         uint16_t i = vq->shadow_used_idx++;
89
90         vq->shadow_used_split[i].id  = desc_idx;
91         vq->shadow_used_split[i].len = len;
92 }
93
94 static __rte_always_inline void
95 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
96                                   struct vhost_virtqueue *vq)
97 {
98         int i;
99         uint16_t used_idx = vq->last_used_idx;
100         uint16_t head_idx = vq->last_used_idx;
101         uint16_t head_flags = 0;
102
103         /* Split loop in two to save memory barriers */
104         for (i = 0; i < vq->shadow_used_idx; i++) {
105                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
106                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
107
108                 used_idx += vq->shadow_used_packed[i].count;
109                 if (used_idx >= vq->size)
110                         used_idx -= vq->size;
111         }
112
113         rte_smp_wmb();
114
115         for (i = 0; i < vq->shadow_used_idx; i++) {
116                 uint16_t flags;
117
118                 if (vq->shadow_used_packed[i].len)
119                         flags = VRING_DESC_F_WRITE;
120                 else
121                         flags = 0;
122
123                 if (vq->used_wrap_counter) {
124                         flags |= VRING_DESC_F_USED;
125                         flags |= VRING_DESC_F_AVAIL;
126                 } else {
127                         flags &= ~VRING_DESC_F_USED;
128                         flags &= ~VRING_DESC_F_AVAIL;
129                 }
130
131                 if (i > 0) {
132                         vq->desc_packed[vq->last_used_idx].flags = flags;
133
134                         vhost_log_cache_used_vring(dev, vq,
135                                         vq->last_used_idx *
136                                         sizeof(struct vring_packed_desc),
137                                         sizeof(struct vring_packed_desc));
138                 } else {
139                         head_idx = vq->last_used_idx;
140                         head_flags = flags;
141                 }
142
143                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
144         }
145
146         vq->desc_packed[head_idx].flags = head_flags;
147
148         vhost_log_cache_used_vring(dev, vq,
149                                 head_idx *
150                                 sizeof(struct vring_packed_desc),
151                                 sizeof(struct vring_packed_desc));
152
153         vq->shadow_used_idx = 0;
154         vhost_log_cache_sync(dev, vq);
155 }
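
For packed rings, a descriptor is handed back to the driver by setting its AVAIL/USED bits to match the device's used wrap counter, and WRITE is kept only when data was actually written. The head descriptor's flags are stored last so the driver never observes a half-published chain. A hedged, standalone restatement of the flag computation; the real macros and flag values live in vhost.h and the virtio spec:

#include <stdbool.h>
#include <stdint.h>

#define F_WRITE	(1u << 1)	/* VRING_DESC_F_WRITE */
#define F_AVAIL	(1u << 7)	/* VRING_DESC_F_AVAIL */
#define F_USED	(1u << 15)	/* VRING_DESC_F_USED */

uint16_t
packed_used_flags(bool used_wrap_counter, uint32_t written_len)
{
	uint16_t flags = written_len ? F_WRITE : 0;

	if (used_wrap_counter)
		flags |= F_AVAIL | F_USED;

	return flags;
}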
156
157 static __rte_always_inline void
158 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
159                                  struct vhost_virtqueue *vq,
160                                  uint64_t *lens,
161                                  uint16_t *ids)
162 {
163         uint16_t i;
164         uint16_t flags;
165
166         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
167
168         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
169                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
170                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
171         }
172
173         rte_smp_wmb();
174
175         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
176                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
177
178         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
179                                    sizeof(struct vring_packed_desc),
180                                    sizeof(struct vring_packed_desc) *
181                                    PACKED_BATCH_SIZE);
182         vhost_log_cache_sync(dev, vq);
183
184         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
185 }
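
This is the fast path the commit title refers to: a full batch of descriptors is marked used directly, skipping the shadow ring. Correctness hinges on the write barrier between the id/len stores and the flags stores. A standalone sketch of that ordering contract, using a C11 release fence in place of rte_smp_wmb(); types and names are illustrative:

#include <stdatomic.h>
#include <stdint.h>

struct demo_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

void
publish_used_batch(struct demo_desc *d, const uint16_t *ids,
		   const uint64_t *lens, int n, uint16_t used_flags)
{
	int i;

	for (i = 0; i < n; i++) {
		d[i].id = ids[i];
		d[i].len = (uint32_t)lens[i];
	}

	/* make id/len visible before any flags store, ~ rte_smp_wmb() */
	atomic_thread_fence(memory_order_release);

	for (i = 0; i < n; i++)
		d[i].flags = used_flags;
}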
186
187 static __rte_always_inline void
188 flush_shadow_used_ring_packed(struct virtio_net *dev,
189                         struct vhost_virtqueue *vq)
190 {
191         int i;
192         uint16_t used_idx = vq->last_used_idx;
193         uint16_t head_idx = vq->last_used_idx;
194         uint16_t head_flags = 0;
195
196         /* Split loop in two to save memory barriers */
197         for (i = 0; i < vq->shadow_used_idx; i++) {
198                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
199                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
200
201                 used_idx += vq->shadow_used_packed[i].count;
202                 if (used_idx >= vq->size)
203                         used_idx -= vq->size;
204         }
205
206         for (i = 0; i < vq->shadow_used_idx; i++) {
207                 uint16_t flags;
208
209                 if (vq->shadow_used_packed[i].len)
210                         flags = VRING_DESC_F_WRITE;
211                 else
212                         flags = 0;
213
214                 if (vq->used_wrap_counter) {
215                         flags |= VRING_DESC_F_USED;
216                         flags |= VRING_DESC_F_AVAIL;
217                 } else {
218                         flags &= ~VRING_DESC_F_USED;
219                         flags &= ~VRING_DESC_F_AVAIL;
220                 }
221
222                 if (i > 0) {
223                         vq->desc_packed[vq->last_used_idx].flags = flags;
224
225                         vhost_log_cache_used_vring(dev, vq,
226                                         vq->last_used_idx *
227                                         sizeof(struct vring_packed_desc),
228                                         sizeof(struct vring_packed_desc));
229                 } else {
230                         head_idx = vq->last_used_idx;
231                         head_flags = flags;
232                 }
233
234                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
235         }
236
237         __atomic_store_n(&vq->desc_packed[head_idx].flags, head_flags,
238                          __ATOMIC_RELEASE);
239
240         vhost_log_cache_used_vring(dev, vq,
241                                 head_idx *
242                                 sizeof(struct vring_packed_desc),
243                                 sizeof(struct vring_packed_desc));
244
245         vq->shadow_used_idx = 0;
246         vhost_log_cache_sync(dev, vq);
247 }
248
249 static __rte_always_inline void
250 update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
251                          uint16_t desc_idx, uint32_t len, uint16_t count)
252 {
253         uint16_t i = vq->shadow_used_idx++;
254
255         vq->shadow_used_packed[i].id  = desc_idx;
256         vq->shadow_used_packed[i].len = len;
257         vq->shadow_used_packed[i].count = count;
258 }
259
260 static inline void
261 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
262 {
263         struct batch_copy_elem *elem = vq->batch_copy_elems;
264         uint16_t count = vq->batch_copy_nb_elems;
265         int i;
266
267         for (i = 0; i < count; i++) {
268                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
269                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
270                                            elem[i].len);
271                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
272         }
273
274         vq->batch_copy_nb_elems = 0;
275 }
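
Small copies are not performed inline in copy_mbuf_to_desc(); they are queued in vq->batch_copy_elems and replayed here in one tight loop, together with the dirty-page logging, before the used ring is flushed. A simplified standalone sketch of that deferred-copy pattern, with illustrative structures:

#include <stdint.h>
#include <string.h>

struct copy_elem {
	void *dst;
	const void *src;
	uint32_t len;
};

void
flush_deferred_copies(struct copy_elem *elems, uint16_t *nb_elems)
{
	uint16_t i;

	for (i = 0; i < *nb_elems; i++)
		memcpy(elems[i].dst, elems[i].src, elems[i].len);

	*nb_elems = 0;
}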
276
277 static inline void
278 do_data_copy_dequeue(struct vhost_virtqueue *vq)
279 {
280         struct batch_copy_elem *elem = vq->batch_copy_elems;
281         uint16_t count = vq->batch_copy_nb_elems;
282         int i;
283
284         for (i = 0; i < count; i++)
285                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
286
287         vq->batch_copy_nb_elems = 0;
288 }
289
290 static __rte_always_inline void
291 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
292                                    struct vhost_virtqueue *vq,
293                                    uint32_t len[],
294                                    uint16_t id[],
295                                    uint16_t count[],
296                                    uint16_t num_buffers)
297 {
298         uint16_t i;
299         for (i = 0; i < num_buffers; i++) {
300                 /* keep enqueue shadow flushes aligned with the batch size */
301                 if (!vq->shadow_used_idx)
302                         vq->shadow_aligned_idx = vq->last_used_idx &
303                                 PACKED_BATCH_MASK;
304                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
305                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
306                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
307                 vq->shadow_aligned_idx += count[i];
308                 vq->shadow_used_idx++;
309         }
310
311         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
312                 do_data_copy_enqueue(dev, vq);
313                 vhost_flush_enqueue_shadow_packed(dev, vq);
314         }
315 }
316
317 /* avoid unnecessary write operations, to lessen cache pressure */
318 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
319         if ((var) != (val))                     \
320                 (var) = (val);                  \
321 } while (0)
322
323 static __rte_always_inline void
324 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
325 {
326         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
327
328         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
329                 csum_l4 |= PKT_TX_TCP_CKSUM;
330
331         if (csum_l4) {
332                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
333                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
334
335                 switch (csum_l4) {
336                 case PKT_TX_TCP_CKSUM:
337                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
338                                                 cksum));
339                         break;
340                 case PKT_TX_UDP_CKSUM:
341                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
342                                                 dgram_cksum));
343                         break;
344                 case PKT_TX_SCTP_CKSUM:
345                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
346                                                 cksum));
347                         break;
348                 }
349         } else {
350                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
351                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
352                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
353         }
354
355         /* IP cksum verification cannot be bypassed, so calculate it here */
356         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
357                 struct rte_ipv4_hdr *ipv4_hdr;
358
359                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
360                                                    m_buf->l2_len);
361                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
362         }
363
364         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
365                 if (m_buf->ol_flags & PKT_TX_IPV4)
366                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
367                 else
368                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
369                 net_hdr->gso_size = m_buf->tso_segsz;
370                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
371                                         + m_buf->l4_len;
372         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
373                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
374                 net_hdr->gso_size = m_buf->tso_segsz;
375                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
376                         m_buf->l4_len;
377         } else {
378                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
379                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
380                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
381         }
382 }
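
The translation above only works if the application filled in the mbuf offload metadata before enqueueing. A hedged example of preparing an IPv4/TCP mbuf so that the function emits VIRTIO_NET_HDR_F_NEEDS_CSUM with the TCP checksum offset; the packet layout (no VLAN, 20-byte IPv4 header) is an assumption:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

void
request_tcp_csum_offload(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);	/* no IP options assumed */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}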
383
384 static __rte_always_inline int
385 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
386                 struct buf_vector *buf_vec, uint16_t *vec_idx,
387                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
388 {
389         uint16_t vec_id = *vec_idx;
390
391         while (desc_len) {
392                 uint64_t desc_addr;
393                 uint64_t desc_chunck_len = desc_len;
394
395                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
396                         return -1;
397
398                 desc_addr = vhost_iova_to_vva(dev, vq,
399                                 desc_iova,
400                                 &desc_chunck_len,
401                                 perm);
402                 if (unlikely(!desc_addr))
403                         return -1;
404
405                 rte_prefetch0((void *)(uintptr_t)desc_addr);
406
407                 buf_vec[vec_id].buf_iova = desc_iova;
408                 buf_vec[vec_id].buf_addr = desc_addr;
409                 buf_vec[vec_id].buf_len  = desc_chunck_len;
410
411                 desc_len -= desc_chunck_len;
412                 desc_iova += desc_chunck_len;
413                 vec_id++;
414         }
415         *vec_idx = vec_id;
416
417         return 0;
418 }
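
Because a guest-contiguous descriptor may map to several host-virtual chunks, each call can append more than one buf_vec entry. A consumer therefore walks the vector and treats the chunks as one logical buffer, as sketched below with simplified stand-in types:

#include <stdint.h>
#include <string.h>

struct chunk {
	void *addr;
	uint32_t len;
};

uint32_t
gather_chunks(void *dst, const struct chunk *vec, uint16_t nr_vec)
{
	uint32_t off = 0;
	uint16_t i;

	for (i = 0; i < nr_vec; i++) {
		memcpy((char *)dst + off, vec[i].addr, vec[i].len);
		off += vec[i].len;
	}

	return off;	/* total logical buffer length */
}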
419
420 static __rte_always_inline int
421 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
422                          uint32_t avail_idx, uint16_t *vec_idx,
423                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
424                          uint32_t *desc_chain_len, uint8_t perm)
425 {
426         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
427         uint16_t vec_id = *vec_idx;
428         uint32_t len    = 0;
429         uint64_t dlen;
430         uint32_t nr_descs = vq->size;
431         uint32_t cnt    = 0;
432         struct vring_desc *descs = vq->desc;
433         struct vring_desc *idesc = NULL;
434
435         if (unlikely(idx >= vq->size))
436                 return -1;
437
438         *desc_chain_head = idx;
439
440         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
441                 dlen = vq->desc[idx].len;
442                 nr_descs = dlen / sizeof(struct vring_desc);
443                 if (unlikely(nr_descs > vq->size))
444                         return -1;
445
446                 descs = (struct vring_desc *)(uintptr_t)
447                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
448                                                 &dlen,
449                                                 VHOST_ACCESS_RO);
450                 if (unlikely(!descs))
451                         return -1;
452
453                 if (unlikely(dlen < vq->desc[idx].len)) {
454                         /*
455                          * The indirect desc table is not contiguous
456                          * in process VA space, we have to copy it.
457                          */
458                         idesc = vhost_alloc_copy_ind_table(dev, vq,
459                                         vq->desc[idx].addr, vq->desc[idx].len);
460                         if (unlikely(!idesc))
461                                 return -1;
462
463                         descs = idesc;
464                 }
465
466                 idx = 0;
467         }
468
469         while (1) {
470                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
471                         free_ind_table(idesc);
472                         return -1;
473                 }
474
475                 len += descs[idx].len;
476
477                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
478                                                 descs[idx].addr, descs[idx].len,
479                                                 perm))) {
480                         free_ind_table(idesc);
481                         return -1;
482                 }
483
484                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
485                         break;
486
487                 idx = descs[idx].next;
488         }
489
490         *desc_chain_len = len;
491         *vec_idx = vec_id;
492
493         if (unlikely(!!idesc))
494                 free_ind_table(idesc);
495
496         return 0;
497 }
498
499 /*
500  * Returns -1 on fail, 0 on success
501  */
502 static inline int
503 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
504                                 uint32_t size, struct buf_vector *buf_vec,
505                                 uint16_t *num_buffers, uint16_t avail_head,
506                                 uint16_t *nr_vec)
507 {
508         uint16_t cur_idx;
509         uint16_t vec_idx = 0;
510         uint16_t max_tries, tries = 0;
511
512         uint16_t head_idx = 0;
513         uint32_t len = 0;
514
515         *num_buffers = 0;
516         cur_idx  = vq->last_avail_idx;
517
518         if (rxvq_is_mergeable(dev))
519                 max_tries = vq->size - 1;
520         else
521                 max_tries = 1;
522
523         while (size > 0) {
524                 if (unlikely(cur_idx == avail_head))
525                         return -1;
526                 /*
527                  * if we tried all available ring items, and still
528                  * can't get enough buf, it means something abnormal
529                  * happened.
530                  */
531                 if (unlikely(++tries > max_tries))
532                         return -1;
533
534                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
535                                                 &vec_idx, buf_vec,
536                                                 &head_idx, &len,
537                                                 VHOST_ACCESS_RW) < 0))
538                         return -1;
539                 len = RTE_MIN(len, size);
540                 update_shadow_used_ring_split(vq, head_idx, len);
541                 size -= len;
542
543                 cur_idx++;
544                 *num_buffers += 1;
545         }
546
547         *nr_vec = vec_idx;
548
549         return 0;
550 }
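
With mergeable RX buffers negotiated, one packet may be spread over many descriptor chains (up to vq->size - 1 tries); without them, the packet must fit in a single chain. A toy calculation of how many receive buffers a packet consumes, which is the value that ends up in the num_buffers field of the mergeable header (illustrative only):

#include <stdint.h>

uint16_t
count_rx_buffers(uint32_t pkt_len, const uint32_t *buf_lens, uint16_t nr_bufs)
{
	uint16_t n = 0;
	uint32_t left = pkt_len;

	while (left > 0 && n < nr_bufs) {
		uint32_t take = buf_lens[n] < left ? buf_lens[n] : left;

		left -= take;
		n++;
	}

	return n;
}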
551
552 static __rte_always_inline int
553 fill_vec_buf_packed_indirect(struct virtio_net *dev,
554                         struct vhost_virtqueue *vq,
555                         struct vring_packed_desc *desc, uint16_t *vec_idx,
556                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
557 {
558         uint16_t i;
559         uint32_t nr_descs;
560         uint16_t vec_id = *vec_idx;
561         uint64_t dlen;
562         struct vring_packed_desc *descs, *idescs = NULL;
563
564         dlen = desc->len;
565         descs = (struct vring_packed_desc *)(uintptr_t)
566                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
567         if (unlikely(!descs))
568                 return -1;
569
570         if (unlikely(dlen < desc->len)) {
571                 /*
572                  * The indirect desc table is not contiguous
573                  * in process VA space, we have to copy it.
574                  */
575                 idescs = vhost_alloc_copy_ind_table(dev,
576                                 vq, desc->addr, desc->len);
577                 if (unlikely(!idescs))
578                         return -1;
579
580                 descs = idescs;
581         }
582
583         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
584         if (unlikely(nr_descs >= vq->size)) {
585                 free_ind_table(idescs);
586                 return -1;
587         }
588
589         for (i = 0; i < nr_descs; i++) {
590                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
591                         free_ind_table(idescs);
592                         return -1;
593                 }
594
595                 *len += descs[i].len;
596                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
597                                                 descs[i].addr, descs[i].len,
598                                                 perm)))
599                         return -1;
600         }
601         *vec_idx = vec_id;
602
603         if (unlikely(!!idescs))
604                 free_ind_table(idescs);
605
606         return 0;
607 }
608
609 static __rte_always_inline int
610 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
611                                 uint16_t avail_idx, uint16_t *desc_count,
612                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
613                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
614 {
615         bool wrap_counter = vq->avail_wrap_counter;
616         struct vring_packed_desc *descs = vq->desc_packed;
617         uint16_t vec_id = *vec_idx;
618
619         if (avail_idx < vq->last_avail_idx)
620                 wrap_counter ^= 1;
621
622         /*
623          * Perform a load-acquire barrier in desc_is_avail to
624          * enforce the ordering between desc flags and desc
625          * content.
626          */
627         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
628                 return -1;
629
630         *desc_count = 0;
631         *len = 0;
632
633         while (1) {
634                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
635                         return -1;
636
637                 if (unlikely(*desc_count >= vq->size))
638                         return -1;
639
640                 *desc_count += 1;
641                 *buf_id = descs[avail_idx].id;
642
643                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
644                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
645                                                         &descs[avail_idx],
646                                                         &vec_id, buf_vec,
647                                                         len, perm) < 0))
648                                 return -1;
649                 } else {
650                         *len += descs[avail_idx].len;
651
652                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
653                                                         descs[avail_idx].addr,
654                                                         descs[avail_idx].len,
655                                                         perm)))
656                                 return -1;
657                 }
658
659                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
660                         break;
661
662                 if (++avail_idx >= vq->size) {
663                         avail_idx -= vq->size;
664                         wrap_counter ^= 1;
665                 }
666         }
667
668         *vec_idx = vec_id;
669
670         return 0;
671 }
672
673 /*
674  * Returns -1 on fail, 0 on success
675  */
676 static inline int
677 reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
678                                 uint32_t size, struct buf_vector *buf_vec,
679                                 uint16_t *nr_vec, uint16_t *num_buffers,
680                                 uint16_t *nr_descs)
681 {
682         uint16_t avail_idx;
683         uint16_t vec_idx = 0;
684         uint16_t max_tries, tries = 0;
685
686         uint16_t buf_id = 0;
687         uint32_t len = 0;
688         uint16_t desc_count;
689
690         *num_buffers = 0;
691         avail_idx = vq->last_avail_idx;
692
693         if (rxvq_is_mergeable(dev))
694                 max_tries = vq->size - 1;
695         else
696                 max_tries = 1;
697
698         while (size > 0) {
699                 /*
700                  * if we tried all available ring items, and still
701                  * can't get enough buf, it means something abnormal
702                  * happened.
703                  */
704                 if (unlikely(++tries > max_tries))
705                         return -1;
706
707                 if (unlikely(fill_vec_buf_packed(dev, vq,
708                                                 avail_idx, &desc_count,
709                                                 buf_vec, &vec_idx,
710                                                 &buf_id, &len,
711                                                 VHOST_ACCESS_RW) < 0))
712                         return -1;
713
714                 len = RTE_MIN(len, size);
715                 update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
716                 size -= len;
717
718                 avail_idx += desc_count;
719                 if (avail_idx >= vq->size)
720                         avail_idx -= vq->size;
721
722                 *nr_descs += desc_count;
723                 *num_buffers += 1;
724         }
725
726         *nr_vec = vec_idx;
727
728         return 0;
729 }
730
731 static __rte_noinline void
732 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
733                 struct buf_vector *buf_vec,
734                 struct virtio_net_hdr_mrg_rxbuf *hdr)
735 {
736         uint64_t len;
737         uint64_t remain = dev->vhost_hlen;
738         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
739         uint64_t iova = buf_vec->buf_iova;
740
741         while (remain) {
742                 len = RTE_MIN(remain,
743                                 buf_vec->buf_len);
744                 dst = buf_vec->buf_addr;
745                 rte_memcpy((void *)(uintptr_t)dst,
746                                 (void *)(uintptr_t)src,
747                                 len);
748
749                 PRINT_PACKET(dev, (uintptr_t)dst,
750                                 (uint32_t)len, 0);
751                 vhost_log_cache_write_iova(dev, vq,
752                                 iova, len);
753
754                 remain -= len;
755                 iova += len;
756                 src += len;
757                 buf_vec++;
758         }
759 }
760
761 static __rte_always_inline int
762 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
763                             struct rte_mbuf *m, struct buf_vector *buf_vec,
764                             uint16_t nr_vec, uint16_t num_buffers)
765 {
766         uint32_t vec_idx = 0;
767         uint32_t mbuf_offset, mbuf_avail;
768         uint32_t buf_offset, buf_avail;
769         uint64_t buf_addr, buf_iova, buf_len;
770         uint32_t cpy_len;
771         uint64_t hdr_addr;
772         struct rte_mbuf *hdr_mbuf;
773         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
774         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
775         int error = 0;
776
777         if (unlikely(m == NULL)) {
778                 error = -1;
779                 goto out;
780         }
781
782         buf_addr = buf_vec[vec_idx].buf_addr;
783         buf_iova = buf_vec[vec_idx].buf_iova;
784         buf_len = buf_vec[vec_idx].buf_len;
785
786         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
787                 error = -1;
788                 goto out;
789         }
790
791         hdr_mbuf = m;
792         hdr_addr = buf_addr;
793         if (unlikely(buf_len < dev->vhost_hlen))
794                 hdr = &tmp_hdr;
795         else
796                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
797
798         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
799                 dev->vid, num_buffers);
800
801         if (unlikely(buf_len < dev->vhost_hlen)) {
802                 buf_offset = dev->vhost_hlen - buf_len;
803                 vec_idx++;
804                 buf_addr = buf_vec[vec_idx].buf_addr;
805                 buf_iova = buf_vec[vec_idx].buf_iova;
806                 buf_len = buf_vec[vec_idx].buf_len;
807                 buf_avail = buf_len - buf_offset;
808         } else {
809                 buf_offset = dev->vhost_hlen;
810                 buf_avail = buf_len - dev->vhost_hlen;
811         }
812
813         mbuf_avail  = rte_pktmbuf_data_len(m);
814         mbuf_offset = 0;
815         while (mbuf_avail != 0 || m->next != NULL) {
816                 /* done with current buf, get the next one */
817                 if (buf_avail == 0) {
818                         vec_idx++;
819                         if (unlikely(vec_idx >= nr_vec)) {
820                                 error = -1;
821                                 goto out;
822                         }
823
824                         buf_addr = buf_vec[vec_idx].buf_addr;
825                         buf_iova = buf_vec[vec_idx].buf_iova;
826                         buf_len = buf_vec[vec_idx].buf_len;
827
828                         buf_offset = 0;
829                         buf_avail  = buf_len;
830                 }
831
832                 /* done with current mbuf, get the next one */
833                 if (mbuf_avail == 0) {
834                         m = m->next;
835
836                         mbuf_offset = 0;
837                         mbuf_avail  = rte_pktmbuf_data_len(m);
838                 }
839
840                 if (hdr_addr) {
841                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
842                         if (rxvq_is_mergeable(dev))
843                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
844                                                 num_buffers);
845
846                         if (unlikely(hdr == &tmp_hdr)) {
847                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
848                         } else {
849                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
850                                                 dev->vhost_hlen, 0);
851                                 vhost_log_cache_write_iova(dev, vq,
852                                                 buf_vec[0].buf_iova,
853                                                 dev->vhost_hlen);
854                         }
855
856                         hdr_addr = 0;
857                 }
858
859                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
860
861                 if (likely(cpy_len > MAX_BATCH_LEN ||
862                                         vq->batch_copy_nb_elems >= vq->size)) {
863                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
864                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
865                                 cpy_len);
866                         vhost_log_cache_write_iova(dev, vq,
867                                                    buf_iova + buf_offset,
868                                                    cpy_len);
869                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
870                                 cpy_len, 0);
871                 } else {
872                         batch_copy[vq->batch_copy_nb_elems].dst =
873                                 (void *)((uintptr_t)(buf_addr + buf_offset));
874                         batch_copy[vq->batch_copy_nb_elems].src =
875                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
876                         batch_copy[vq->batch_copy_nb_elems].log_addr =
877                                 buf_iova + buf_offset;
878                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
879                         vq->batch_copy_nb_elems++;
880                 }
881
882                 mbuf_avail  -= cpy_len;
883                 mbuf_offset += cpy_len;
884                 buf_avail  -= cpy_len;
885                 buf_offset += cpy_len;
886         }
887
888 out:
889
890         return error;
891 }
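
The copy loop above dispatches each segment either to an immediate rte_memcpy() or to the deferred batch described earlier: segments larger than MAX_BATCH_LEN (256 bytes in this file), or arriving when the batch array is already full, are copied inline. A standalone restatement of that policy:

#include <stdbool.h>
#include <stdint.h>

bool
copy_inline(uint32_t cpy_len, uint16_t queued, uint16_t queue_capacity)
{
	const uint32_t max_batch_len = 256;	/* MAX_BATCH_LEN */

	return cpy_len > max_batch_len || queued >= queue_capacity;
}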
892
893 static __rte_always_inline int
894 vhost_enqueue_single_packed(struct virtio_net *dev,
895                             struct vhost_virtqueue *vq,
896                             struct rte_mbuf *pkt,
897                             struct buf_vector *buf_vec,
898                             uint16_t *nr_descs)
899 {
900         uint16_t nr_vec = 0;
901         uint16_t avail_idx = vq->last_avail_idx;
902         uint16_t max_tries, tries = 0;
903         uint16_t buf_id = 0;
904         uint32_t len = 0;
905         uint16_t desc_count;
906         uint32_t size = pkt->pkt_len + dev->vhost_hlen;
907         uint16_t num_buffers = 0;
908         uint32_t buffer_len[vq->size];
909         uint16_t buffer_buf_id[vq->size];
910         uint16_t buffer_desc_count[vq->size];
911
912         if (rxvq_is_mergeable(dev))
913                 max_tries = vq->size - 1;
914         else
915                 max_tries = 1;
916
917         while (size > 0) {
918                 /*
919                  * if we tried all available ring items, and still
920                  * can't get enough buf, it means something abnormal
921                  * happened.
922                  */
923                 if (unlikely(++tries > max_tries))
924                         return -1;
925
926                 if (unlikely(fill_vec_buf_packed(dev, vq,
927                                                 avail_idx, &desc_count,
928                                                 buf_vec, &nr_vec,
929                                                 &buf_id, &len,
930                                                 VHOST_ACCESS_RW) < 0))
931                         return -1;
932
933                 len = RTE_MIN(len, size);
934                 size -= len;
935
936                 buffer_len[num_buffers] = len;
937                 buffer_buf_id[num_buffers] = buf_id;
938                 buffer_desc_count[num_buffers] = desc_count;
939                 num_buffers += 1;
940
941                 *nr_descs += desc_count;
942                 avail_idx += desc_count;
943                 if (avail_idx >= vq->size)
944                         avail_idx -= vq->size;
945         }
946
947         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
948                 return -1;
949
950         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
951                                            buffer_desc_count, num_buffers);
952
953         return 0;
954 }
955
956 static __rte_noinline uint32_t
957 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
958         struct rte_mbuf **pkts, uint32_t count)
959 {
960         uint32_t pkt_idx = 0;
961         uint16_t num_buffers;
962         struct buf_vector buf_vec[BUF_VECTOR_MAX];
963         uint16_t avail_head;
964
965         avail_head = *((volatile uint16_t *)&vq->avail->idx);
966
967         /*
968          * The ordering between avail index and
969          * desc reads needs to be enforced.
970          */
971         rte_smp_rmb();
972
973         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
974
975         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
976                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
977                 uint16_t nr_vec = 0;
978
979                 if (unlikely(reserve_avail_buf_split(dev, vq,
980                                                 pkt_len, buf_vec, &num_buffers,
981                                                 avail_head, &nr_vec) < 0)) {
982                         VHOST_LOG_DEBUG(VHOST_DATA,
983                                 "(%d) failed to get enough desc from vring\n",
984                                 dev->vid);
985                         vq->shadow_used_idx -= num_buffers;
986                         break;
987                 }
988
989                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
990                         dev->vid, vq->last_avail_idx,
991                         vq->last_avail_idx + num_buffers);
992
993                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
994                                                 buf_vec, nr_vec,
995                                                 num_buffers) < 0) {
996                         vq->shadow_used_idx -= num_buffers;
997                         break;
998                 }
999
1000                 vq->last_avail_idx += num_buffers;
1001         }
1002
1003         do_data_copy_enqueue(dev, vq);
1004
1005         if (likely(vq->shadow_used_idx)) {
1006                 flush_shadow_used_ring_split(dev, vq);
1007                 vhost_vring_call_split(dev, vq);
1008         }
1009
1010         return pkt_idx;
1011 }
1012
1013 static __rte_unused int
1014 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1015                            struct vhost_virtqueue *vq,
1016                            struct rte_mbuf **pkts)
1017 {
1018         bool wrap_counter = vq->avail_wrap_counter;
1019         struct vring_packed_desc *descs = vq->desc_packed;
1020         uint16_t avail_idx = vq->last_avail_idx;
1021         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1022         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1023         uint32_t buf_offset = dev->vhost_hlen;
1024         uint64_t lens[PACKED_BATCH_SIZE];
1025         uint16_t ids[PACKED_BATCH_SIZE];
1026         uint16_t i;
1027
1028         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1029                 return -1;
1030
1031         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1032                 return -1;
1033
1034         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1035                 if (unlikely(pkts[i]->next != NULL))
1036                         return -1;
1037                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1038                                             wrap_counter)))
1039                         return -1;
1040         }
1041
1042         rte_smp_rmb();
1043
1044         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1045                 lens[i] = descs[avail_idx + i].len;
1046
1047         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1048                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1049                         return -1;
1050         }
1051
1052         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1053                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1054                                                   descs[avail_idx + i].addr,
1055                                                   &lens[i],
1056                                                   VHOST_ACCESS_RW);
1057
1058         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1059                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1060                         return -1;
1061         }
1062
1063         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1064                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1065                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1066                                         (uintptr_t)desc_addrs[i];
1067                 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1068         }
1069
1070         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1071                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1072
1073         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1074
1075         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1076                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1077                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1078                            pkts[i]->pkt_len);
1079         }
1080
1081         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1082                 ids[i] = descs[avail_idx + i].id;
1083
1084         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1085
1086         return 0;
1087 }
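
The batch receive path only runs when the whole batch can be handled without corner cases: the avail index must be batch-aligned, the batch must not wrap past the ring end, every mbuf must be single-segment, and every descriptor must be available and large enough. A sketch of the alignment and wrap checks; the batch size of 4 is an assumption (one 64-byte cache line of 16-byte packed descriptors):

#include <stdbool.h>
#include <stdint.h>

bool
batch_is_possible(uint16_t avail_idx, uint16_t ring_size)
{
	const uint16_t batch = 4;		/* assumed PACKED_BATCH_SIZE */
	const uint16_t mask = batch - 1;	/* PACKED_BATCH_MASK */

	return (avail_idx & mask) == 0 && (avail_idx + batch) <= ring_size;
}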
1088
1089 static __rte_unused int16_t
1090 virtio_dev_rx_single_packed(struct virtio_net *dev,
1091                             struct vhost_virtqueue *vq,
1092                             struct rte_mbuf *pkt)
1093 {
1094         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1095         uint16_t nr_descs = 0;
1096
1097         rte_smp_rmb();
1098         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1099                                                  &nr_descs) < 0)) {
1100                 VHOST_LOG_DEBUG(VHOST_DATA,
1101                                 "(%d) failed to get enough desc from vring\n",
1102                                 dev->vid);
1103                 return -1;
1104         }
1105
1106         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1107                         dev->vid, vq->last_avail_idx,
1108                         vq->last_avail_idx + nr_descs);
1109
1110         vq_inc_last_avail_packed(vq, nr_descs);
1111
1112         return 0;
1113 }
1114
1115 static __rte_noinline uint32_t
1116 virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1117         struct rte_mbuf **pkts, uint32_t count)
1118 {
1119         uint32_t pkt_idx = 0;
1120         uint16_t num_buffers;
1121         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1122
1123         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1124                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1125                 uint16_t nr_vec = 0;
1126                 uint16_t nr_descs = 0;
1127
1128                 if (unlikely(reserve_avail_buf_packed(dev, vq,
1129                                                 pkt_len, buf_vec, &nr_vec,
1130                                                 &num_buffers, &nr_descs) < 0)) {
1131                         VHOST_LOG_DEBUG(VHOST_DATA,
1132                                 "(%d) failed to get enough desc from vring\n",
1133                                 dev->vid);
1134                         vq->shadow_used_idx -= num_buffers;
1135                         break;
1136                 }
1137
1138                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1139                         dev->vid, vq->last_avail_idx,
1140                         vq->last_avail_idx + num_buffers);
1141
1142                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1143                                                 buf_vec, nr_vec,
1144                                                 num_buffers) < 0) {
1145                         vq->shadow_used_idx -= num_buffers;
1146                         break;
1147                 }
1148
1149                 vq_inc_last_avail_packed(vq, nr_descs);
1150         }
1151
1152         do_data_copy_enqueue(dev, vq);
1153
1154         if (likely(vq->shadow_used_idx)) {
1155                 vhost_flush_enqueue_shadow_packed(dev, vq);
1156                 vhost_vring_call_packed(dev, vq);
1157         }
1158
1159         return pkt_idx;
1160 }
1161
1162 static __rte_always_inline uint32_t
1163 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1164         struct rte_mbuf **pkts, uint32_t count)
1165 {
1166         struct vhost_virtqueue *vq;
1167         uint32_t nb_tx = 0;
1168
1169         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1170         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1171                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1172                         dev->vid, __func__, queue_id);
1173                 return 0;
1174         }
1175
1176         vq = dev->virtqueue[queue_id];
1177
1178         rte_spinlock_lock(&vq->access_lock);
1179
1180         if (unlikely(vq->enabled == 0))
1181                 goto out_access_unlock;
1182
1183         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1184                 vhost_user_iotlb_rd_lock(vq);
1185
1186         if (unlikely(vq->access_ok == 0))
1187                 if (unlikely(vring_translate(dev, vq) < 0))
1188                         goto out;
1189
1190         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1191         if (count == 0)
1192                 goto out;
1193
1194         if (vq_is_packed(dev))
1195                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1196         else
1197                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1198
1199 out:
1200         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1201                 vhost_user_iotlb_rd_unlock(vq);
1202
1203 out_access_unlock:
1204         rte_spinlock_unlock(&vq->access_lock);
1205
1206         return nb_tx;
1207 }
1208
1209 uint16_t
1210 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1211         struct rte_mbuf **pkts, uint16_t count)
1212 {
1213         struct virtio_net *dev = get_device(vid);
1214
1215         if (!dev)
1216                 return 0;
1217
1218         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1219                 RTE_LOG(ERR, VHOST_DATA,
1220                         "(%d) %s: built-in vhost net backend is disabled.\n",
1221                         dev->vid, __func__);
1222                 return 0;
1223         }
1224
1225         return virtio_dev_rx(dev, queue_id, pkts, count);
1226 }
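
A hedged usage sketch from the application side: the builtin net backend copies packet data into the guest, so the caller keeps ownership of the mbufs and frees them afterwards; packets beyond the returned count were dropped for lack of descriptors. The device id and the use of queue 0 (the first guest RX queue) are assumptions for illustration:

#include <rte_mbuf.h>
#include <rte_vhost.h>

void
forward_burst_to_guest(int vid, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t enqueued, i;

	enqueued = rte_vhost_enqueue_burst(vid, 0, pkts, nb_pkts);
	(void)enqueued;	/* could feed drop statistics */

	for (i = 0; i < nb_pkts; i++)
		rte_pktmbuf_free(pkts[i]);
}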
1227
1228 static inline bool
1229 virtio_net_with_host_offload(struct virtio_net *dev)
1230 {
1231         if (dev->features &
1232                         ((1ULL << VIRTIO_NET_F_CSUM) |
1233                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1234                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1235                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1236                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1237                 return true;
1238
1239         return false;
1240 }
1241
1242 static void
1243 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1244 {
1245         struct rte_ipv4_hdr *ipv4_hdr;
1246         struct rte_ipv6_hdr *ipv6_hdr;
1247         void *l3_hdr = NULL;
1248         struct rte_ether_hdr *eth_hdr;
1249         uint16_t ethertype;
1250
1251         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1252
1253         m->l2_len = sizeof(struct rte_ether_hdr);
1254         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1255
1256         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1257                 struct rte_vlan_hdr *vlan_hdr =
1258                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1259
1260                 m->l2_len += sizeof(struct rte_vlan_hdr);
1261                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1262         }
1263
1264         l3_hdr = (char *)eth_hdr + m->l2_len;
1265
1266         switch (ethertype) {
1267         case RTE_ETHER_TYPE_IPV4:
1268                 ipv4_hdr = l3_hdr;
1269                 *l4_proto = ipv4_hdr->next_proto_id;
1270                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1271                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1272                 m->ol_flags |= PKT_TX_IPV4;
1273                 break;
1274         case RTE_ETHER_TYPE_IPV6:
1275                 ipv6_hdr = l3_hdr;
1276                 *l4_proto = ipv6_hdr->proto;
1277                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1278                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1279                 m->ol_flags |= PKT_TX_IPV6;
1280                 break;
1281         default:
1282                 m->l3_len = 0;
1283                 *l4_proto = 0;
1284                 *l4_hdr = NULL;
1285                 break;
1286         }
1287 }
1288
1289 static __rte_always_inline void
1290 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1291 {
1292         uint16_t l4_proto = 0;
1293         void *l4_hdr = NULL;
1294         struct rte_tcp_hdr *tcp_hdr = NULL;
1295
1296         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1297                 return;
1298
1299         parse_ethernet(m, &l4_proto, &l4_hdr);
1300         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1301                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1302                         switch (hdr->csum_offset) {
1303                         case (offsetof(struct rte_tcp_hdr, cksum)):
1304                                 if (l4_proto == IPPROTO_TCP)
1305                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1306                                 break;
1307                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1308                                 if (l4_proto == IPPROTO_UDP)
1309                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1310                                 break;
1311                         case (offsetof(struct rte_sctp_hdr, cksum)):
1312                                 if (l4_proto == IPPROTO_SCTP)
1313                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1314                                 break;
1315                         default:
1316                                 break;
1317                         }
1318                 }
1319         }
1320
1321         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1322                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1323                 case VIRTIO_NET_HDR_GSO_TCPV4:
1324                 case VIRTIO_NET_HDR_GSO_TCPV6:
1325                         tcp_hdr = l4_hdr;
1326                         m->ol_flags |= PKT_TX_TCP_SEG;
1327                         m->tso_segsz = hdr->gso_size;
1328                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1329                         break;
1330                 case VIRTIO_NET_HDR_GSO_UDP:
1331                         m->ol_flags |= PKT_TX_UDP_SEG;
1332                         m->tso_segsz = hdr->gso_size;
1333                         m->l4_len = sizeof(struct rte_udp_hdr);
1334                         break;
1335                 default:
1336                         RTE_LOG(WARNING, VHOST_DATA,
1337                                 "unsupported gso type %u.\n", hdr->gso_type);
1338                         break;
1339                 }
1340         }
1341 }
1342
1343 static __rte_noinline void
1344 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1345                 struct buf_vector *buf_vec)
1346 {
1347         uint64_t len;
1348         uint64_t remain = sizeof(struct virtio_net_hdr);
1349         uint64_t src;
1350         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1351
1352         while (remain) {
1353                 len = RTE_MIN(remain, buf_vec->buf_len);
1354                 src = buf_vec->buf_addr;
1355                 rte_memcpy((void *)(uintptr_t)dst,
1356                                 (void *)(uintptr_t)src, len);
1357
1358                 remain -= len;
1359                 dst += len;
1360                 buf_vec++;
1361         }
1362 }
1363
1364 static __rte_always_inline int
1365 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1366                   struct buf_vector *buf_vec, uint16_t nr_vec,
1367                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1368 {
1369         uint32_t buf_avail, buf_offset;
1370         uint64_t buf_addr, buf_iova, buf_len;
1371         uint32_t mbuf_avail, mbuf_offset;
1372         uint32_t cpy_len;
1373         struct rte_mbuf *cur = m, *prev = m;
1374         struct virtio_net_hdr tmp_hdr;
1375         struct virtio_net_hdr *hdr = NULL;
1376         /* A counter to avoid desc dead loop chain */
1377         uint16_t vec_idx = 0;
1378         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1379         int error = 0;
1380
1381         buf_addr = buf_vec[vec_idx].buf_addr;
1382         buf_iova = buf_vec[vec_idx].buf_iova;
1383         buf_len = buf_vec[vec_idx].buf_len;
1384
1385         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1386                 error = -1;
1387                 goto out;
1388         }
1389
1390         if (virtio_net_with_host_offload(dev)) {
1391                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1392                         /*
1393                          * No luck, the virtio-net header doesn't fit
1394                          * in a contiguous virtual area.
1395                          */
1396                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1397                         hdr = &tmp_hdr;
1398                 } else {
1399                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1400                 }
1401         }
1402
1403         /*
1404          * A virtio driver normally uses at least 2 desc buffers
1405          * for Tx: the first for storing the header, and the others
1406          * for storing the data.
1407          */
1408         if (unlikely(buf_len < dev->vhost_hlen)) {
1409                 buf_offset = dev->vhost_hlen - buf_len;
1410                 vec_idx++;
1411                 buf_addr = buf_vec[vec_idx].buf_addr;
1412                 buf_iova = buf_vec[vec_idx].buf_iova;
1413                 buf_len = buf_vec[vec_idx].buf_len;
1414                 buf_avail  = buf_len - buf_offset;
1415         } else if (buf_len == dev->vhost_hlen) {
1416                 if (unlikely(++vec_idx >= nr_vec))
1417                         goto out;
1418                 buf_addr = buf_vec[vec_idx].buf_addr;
1419                 buf_iova = buf_vec[vec_idx].buf_iova;
1420                 buf_len = buf_vec[vec_idx].buf_len;
1421
1422                 buf_offset = 0;
1423                 buf_avail = buf_len;
1424         } else {
1425                 buf_offset = dev->vhost_hlen;
1426                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1427         }
1428
1429         PRINT_PACKET(dev,
1430                         (uintptr_t)(buf_addr + buf_offset),
1431                         (uint32_t)buf_avail, 0);
1432
1433         mbuf_offset = 0;
1434         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1435         while (1) {
1436                 uint64_t hpa;
1437
1438                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1439
1440                 /*
1441                  * A desc buf might span two host physical pages that are
1442                  * not contiguous. In that case (gpa_to_hpa() returns 0),
1443                  * the data will be copied even though zero copy is enabled.
1444                  */
1445                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1446                                         buf_iova + buf_offset, cpy_len)))) {
1447                         cur->data_len = cpy_len;
1448                         cur->data_off = 0;
1449                         cur->buf_addr =
1450                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1451                         cur->buf_iova = hpa;
1452
1453                         /*
1454                          * In zero-copy mode, one mbuf can only reference
1455                          * the data of a single desc buf, or part of it.
1456                          */
1457                         mbuf_avail = cpy_len;
1458                 } else {
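                             /*
                              * Copy immediately when the segment is large, the
                              * batch array is full, or offloads are enabled and
                              * this is the first mbuf: vhost_dequeue_offload()
                              * may parse the packet data before the batched
                              * copies are flushed by do_data_copy_dequeue().
                              * Smaller segments are deferred to the batch list.
                              */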
1459                         if (likely(cpy_len > MAX_BATCH_LEN ||
1460                                    vq->batch_copy_nb_elems >= vq->size ||
1461                                    (hdr && cur == m))) {
1462                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1463                                                                    mbuf_offset),
1464                                            (void *)((uintptr_t)(buf_addr +
1465                                                            buf_offset)),
1466                                            cpy_len);
1467                         } else {
1468                                 batch_copy[vq->batch_copy_nb_elems].dst =
1469                                         rte_pktmbuf_mtod_offset(cur, void *,
1470                                                                 mbuf_offset);
1471                                 batch_copy[vq->batch_copy_nb_elems].src =
1472                                         (void *)((uintptr_t)(buf_addr +
1473                                                                 buf_offset));
1474                                 batch_copy[vq->batch_copy_nb_elems].len =
1475                                         cpy_len;
1476                                 vq->batch_copy_nb_elems++;
1477                         }
1478                 }
1479
1480                 mbuf_avail  -= cpy_len;
1481                 mbuf_offset += cpy_len;
1482                 buf_avail -= cpy_len;
1483                 buf_offset += cpy_len;
1484
1485                 /* This buf has been fully consumed, get the next one */
1486                 if (buf_avail == 0) {
1487                         if (++vec_idx >= nr_vec)
1488                                 break;
1489
1490                         buf_addr = buf_vec[vec_idx].buf_addr;
1491                         buf_iova = buf_vec[vec_idx].buf_iova;
1492                         buf_len = buf_vec[vec_idx].buf_len;
1493
1494                         buf_offset = 0;
1495                         buf_avail  = buf_len;
1496
1497                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1498                                         (uint32_t)buf_avail, 0);
1499                 }
1500
1501                 /*
1502                  * This mbuf is full, get a new one
1503                  * to hold more data.
1504                  */
1505                 if (mbuf_avail == 0) {
1506                         cur = rte_pktmbuf_alloc(mbuf_pool);
1507                         if (unlikely(cur == NULL)) {
1508                                 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1509                                         "allocate memory for mbuf.\n");
1510                                 error = -1;
1511                                 goto out;
1512                         }
1513                         if (unlikely(dev->dequeue_zero_copy))
1514                                 rte_mbuf_refcnt_update(cur, 1);
1515
1516                         prev->next = cur;
1517                         prev->data_len = mbuf_offset;
1518                         m->nb_segs += 1;
1519                         m->pkt_len += mbuf_offset;
1520                         prev = cur;
1521
1522                         mbuf_offset = 0;
1523                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1524                 }
1525         }
1526
1527         prev->data_len = mbuf_offset;
1528         m->pkt_len    += mbuf_offset;
1529
1530         if (hdr)
1531                 vhost_dequeue_offload(hdr, m);
1532
1533 out:
1534
1535         return error;
1536 }
1537
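     /*
      * Find a free zero-copy mbuf slot, scanning circularly from the
      * position right after the last allocation. Returns NULL when all
      * slots are in use.
      */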
1538 static __rte_always_inline struct zcopy_mbuf *
1539 get_zmbuf(struct vhost_virtqueue *vq)
1540 {
1541         uint16_t i;
1542         uint16_t last;
1543         int tries = 0;
1544
1545         /* search [last_zmbuf_idx, zmbuf_size) */
1546         i = vq->last_zmbuf_idx;
1547         last = vq->zmbuf_size;
1548
1549 again:
1550         for (; i < last; i++) {
1551                 if (vq->zmbufs[i].in_use == 0) {
1552                         vq->last_zmbuf_idx = i + 1;
1553                         vq->zmbufs[i].in_use = 1;
1554                         return &vq->zmbufs[i];
1555                 }
1556         }
1557
1558         tries++;
1559         if (tries == 1) {
1560                 /* search [0, last_zmbuf_idx) */
1561                 i = 0;
1562                 last = vq->last_zmbuf_idx;
1563                 goto again;
1564         }
1565
1566         return NULL;
1567 }
1568
1569 static void
1570 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1571 {
1572         rte_free(opaque);
1573 }
1574
1575 static int
1576 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1577 {
1578         struct rte_mbuf_ext_shared_info *shinfo = NULL;
1579         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1580         uint16_t buf_len;
1581         rte_iova_t iova;
1582         void *buf;
1583
1584         /* Try to use the pkt buffer to store the shinfo, to reduce the amount
1585          * of memory required; otherwise, store the shinfo in the new buffer.
1586          */
1587         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1588                 shinfo = rte_pktmbuf_mtod(pkt,
1589                                           struct rte_mbuf_ext_shared_info *);
1590         else {
1591                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1592                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1593         }
1594
1595         if (unlikely(total_len > UINT16_MAX))
1596                 return -ENOSPC;
1597
1598         buf_len = total_len;
1599         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1600         if (unlikely(buf == NULL))
1601                 return -ENOMEM;
1602
1603         /* Initialize shinfo */
1604         if (shinfo) {
1605                 shinfo->free_cb = virtio_dev_extbuf_free;
1606                 shinfo->fcb_opaque = buf;
1607                 rte_mbuf_ext_refcnt_set(shinfo, 1);
1608         } else {
1609                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1610                                               virtio_dev_extbuf_free, buf);
1611                 if (unlikely(shinfo == NULL)) {
1612                         rte_free(buf);
1613                         RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n");
1614                         return -1;
1615                 }
1616         }
1617
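             /*
              * Attach the external buffer to the mbuf; virtio_dev_extbuf_free()
              * releases it once the last reference is dropped.
              */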
1618         iova = rte_malloc_virt2iova(buf);
1619         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1620         rte_pktmbuf_reset_headroom(pkt);
1621
1622         return 0;
1623 }
1624
1625 /*
1626  * Allocate a pktmbuf that can hold a host buffer of data_len bytes.
1627  */
1628 static __rte_always_inline struct rte_mbuf *
1629 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1630                          uint32_t data_len)
1631 {
1632         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1633
1634         if (unlikely(pkt == NULL))
1635                 return NULL;
1636
1637         if (rte_pktmbuf_tailroom(pkt) >= data_len)
1638                 return pkt;
1639
1640         /* attach an external buffer if supported */
1641         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1642                 return pkt;
1643
1644         /* check if chained buffers are allowed */
1645         if (!dev->linearbuf)
1646                 return pkt;
1647
1648         /* Data doesn't fit into the buffer and the host supports
1649          * only linear buffers
1650          */
1651         rte_pktmbuf_free(pkt);
1652
1653         return NULL;
1654 }
1655
1656 static __rte_noinline uint16_t
1657 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1658         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1659 {
1660         uint16_t i;
1661         uint16_t free_entries;
1662
1663         if (unlikely(dev->dequeue_zero_copy)) {
1664                 struct zcopy_mbuf *zmbuf, *next;
1665
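                     /*
                      * Reclaim zero-copy mbufs that the application has already
                      * consumed: return their descriptors to the shadow used
                      * ring and recycle the zmbuf slots.
                      */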
1666                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1667                      zmbuf != NULL; zmbuf = next) {
1668                         next = TAILQ_NEXT(zmbuf, next);
1669
1670                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1671                                 update_shadow_used_ring_split(vq,
1672                                                 zmbuf->desc_idx, 0);
1673                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1674                                 restore_mbuf(zmbuf->mbuf);
1675                                 rte_pktmbuf_free(zmbuf->mbuf);
1676                                 put_zmbuf(zmbuf);
1677                                 vq->nr_zmbuf -= 1;
1678                         }
1679                 }
1680
1681                 if (likely(vq->shadow_used_idx)) {
1682                         flush_shadow_used_ring_split(dev, vq);
1683                         vhost_vring_call_split(dev, vq);
1684                 }
1685         }
1686
1687         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1688                         vq->last_avail_idx;
1689         if (free_entries == 0)
1690                 return 0;
1691
1692         /*
1693          * The ordering between avail index and
1694          * desc reads needs to be enforced.
1695          */
1696         rte_smp_rmb();
1697
1698         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1699
1700         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1701
1702         count = RTE_MIN(count, MAX_PKT_BURST);
1703         count = RTE_MIN(count, free_entries);
1704         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1705                         dev->vid, count);
1706
1707         for (i = 0; i < count; i++) {
1708                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1709                 uint16_t head_idx;
1710                 uint32_t buf_len;
1711                 uint16_t nr_vec = 0;
1712                 int err;
1713
1714                 if (unlikely(fill_vec_buf_split(dev, vq,
1715                                                 vq->last_avail_idx + i,
1716                                                 &nr_vec, buf_vec,
1717                                                 &head_idx, &buf_len,
1718                                                 VHOST_ACCESS_RO) < 0))
1719                         break;
1720
1721                 if (likely(dev->dequeue_zero_copy == 0))
1722                         update_shadow_used_ring_split(vq, head_idx, 0);
1723
1724                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1725                 if (unlikely(pkts[i] == NULL))
1726                         break;
1727
1728                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1729                                 mbuf_pool);
1730                 if (unlikely(err)) {
1731                         rte_pktmbuf_free(pkts[i]);
1732                         break;
1733                 }
1734
1735                 if (unlikely(dev->dequeue_zero_copy)) {
1736                         struct zcopy_mbuf *zmbuf;
1737
1738                         zmbuf = get_zmbuf(vq);
1739                         if (!zmbuf) {
1740                                 rte_pktmbuf_free(pkts[i]);
1741                                 break;
1742                         }
1743                         zmbuf->mbuf = pkts[i];
1744                         zmbuf->desc_idx = head_idx;
1745
1746                         /*
1747                          * Pin the mbuf by taking an extra reference; we will
1748                          * check later whether it has been freed by the
1749                          * application (i.e. we are the last user). Only then
1750                          * can the used ring be updated safely.
1751                          */
1752                         rte_mbuf_refcnt_update(pkts[i], 1);
1753
1754                         vq->nr_zmbuf += 1;
1755                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1756                 }
1757         }
1758         vq->last_avail_idx += i;
1759
1760         if (likely(dev->dequeue_zero_copy == 0)) {
1761                 do_data_copy_dequeue(vq);
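                     /*
                      * If the loop stopped early, trim the shadow used ring to
                      * the i buffers that were actually dequeued.
                      */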
1762                 if (unlikely(i < count))
1763                         vq->shadow_used_idx = i;
1764                 if (likely(vq->shadow_used_idx)) {
1765                         flush_shadow_used_ring_split(dev, vq);
1766                         vhost_vring_call_split(dev, vq);
1767                 }
1768         }
1769
1770         return i;
1771 }
1772
1773 static __rte_always_inline int
1774 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1775                                  struct vhost_virtqueue *vq,
1776                                  struct rte_mempool *mbuf_pool,
1777                                  struct rte_mbuf **pkts,
1778                                  uint16_t avail_idx,
1779                                  uintptr_t *desc_addrs,
1780                                  uint16_t *ids)
1781 {
1782         bool wrap = vq->avail_wrap_counter;
1783         struct vring_packed_desc *descs = vq->desc_packed;
1784         struct virtio_net_hdr *hdr;
1785         uint64_t lens[PACKED_BATCH_SIZE];
1786         uint64_t buf_lens[PACKED_BATCH_SIZE];
1787         uint32_t buf_offset = dev->vhost_hlen;
1788         uint16_t flags, i;
1789
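             /*
              * The batch path only handles a full, aligned batch that does not
              * wrap around the ring; otherwise bail out so the caller can fall
              * back to per-descriptor processing.
              */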
1790         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1791                 return -1;
1792         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1793                 return -1;
1794
1795         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1796                 flags = descs[avail_idx + i].flags;
1797                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1798                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
1799                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1800                         return -1;
1801         }
1802
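             /*
              * Ensure the descriptor flag checks above complete before the
              * lengths and addresses are read below.
              */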
1803         rte_smp_rmb();
1804
1805         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1806                 lens[i] = descs[avail_idx + i].len;
1807
1808         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1809                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1810                                                   descs[avail_idx + i].addr,
1811                                                   &lens[i], VHOST_ACCESS_RW);
1812         }
1813
1814         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1815                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1816                         return -1;
1817         }
1818
1819         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1820                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1821                 if (!pkts[i])
1822                         goto free_buf;
1823         }
1824
1825         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1826                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1827
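             /*
              * Each pre-allocated mbuf must have room for the whole payload
              * (descriptor length minus the vnet header); otherwise abandon
              * the batch.
              */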
1828         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1829                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1830                         goto free_buf;
1831         }
1832
1833         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1834                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1835                 pkts[i]->data_len = pkts[i]->pkt_len;
1836                 ids[i] = descs[avail_idx + i].id;
1837         }
1838
1839         if (virtio_net_with_host_offload(dev)) {
1840                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1841                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1842                         vhost_dequeue_offload(hdr, pkts[i]);
1843                 }
1844         }
1845
1846         return 0;
1847
1848 free_buf:
1849         for (i = 0; i < PACKED_BATCH_SIZE; i++)
1850                 rte_pktmbuf_free(pkts[i]);
1851
1852         return -1;
1853 }
1854
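     /*
      * Dequeue PACKED_BATCH_SIZE descriptors in one shot: validate and
      * reserve them, then copy each payload (past the vnet header) into its
      * pre-sized mbuf and advance the avail index by the whole batch.
      */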
1855 static __rte_unused int
1856 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1857                            struct vhost_virtqueue *vq,
1858                            struct rte_mempool *mbuf_pool,
1859                            struct rte_mbuf **pkts)
1860 {
1861         uint16_t avail_idx = vq->last_avail_idx;
1862         uint32_t buf_offset = dev->vhost_hlen;
1863         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1864         uint16_t ids[PACKED_BATCH_SIZE];
1865         uint16_t i;
1866
1867         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1868                                              avail_idx, desc_addrs, ids))
1869                 return -1;
1870
1871         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1872                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1873
1874         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1875                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1876                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1877                            pkts[i]->pkt_len);
1878
1879         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1880
1881         return 0;
1882 }
1883
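     /*
      * Dequeue a single descriptor chain into a freshly allocated mbuf.
      * Reports the buffer id and the number of descriptors consumed so the
      * caller can advance the avail index accordingly.
      */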
1884 static __rte_always_inline int
1885 vhost_dequeue_single_packed(struct virtio_net *dev,
1886                             struct vhost_virtqueue *vq,
1887                             struct rte_mempool *mbuf_pool,
1888                             struct rte_mbuf **pkts,
1889                             uint16_t *buf_id,
1890                             uint16_t *desc_count)
1891 {
1892         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1893         uint32_t buf_len;
1894         uint16_t nr_vec = 0;
1895         int err;
1896
1897         if (unlikely(fill_vec_buf_packed(dev, vq,
1898                                          vq->last_avail_idx, desc_count,
1899                                          buf_vec, &nr_vec,
1900                                          buf_id, &buf_len,
1901                                          VHOST_ACCESS_RO) < 0))
1902                 return -1;
1903
1904         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1905         if (unlikely(*pkts == NULL)) {
1906                 RTE_LOG(ERR, VHOST_DATA,
1907                         "Failed to allocate memory for mbuf.\n");
1908                 return -1;
1909         }
1910
1911         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
1912                                 mbuf_pool);
1913         if (unlikely(err)) {
1914                 rte_pktmbuf_free(*pkts);
1915                 return -1;
1916         }
1917
1918         return 0;
1919 }
1920
1921 static __rte_unused int
1922 virtio_dev_tx_single_packed(struct virtio_net *dev,
1923                             struct vhost_virtqueue *vq,
1924                             struct rte_mempool *mbuf_pool,
1925                             struct rte_mbuf **pkts)
1926 {
1927
1928         uint16_t buf_id, desc_count;
1929
1930         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1931                                         &desc_count))
1932                 return -1;
1933
1934         vq_inc_last_avail_packed(vq, desc_count);
1935
1936         return 0;
1937 }
1938
1939 static __rte_noinline uint16_t
1940 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1941         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1942 {
1943         uint16_t i;
1944
1945         if (unlikely(dev->dequeue_zero_copy)) {
1946                 struct zcopy_mbuf *zmbuf, *next;
1947
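                     /*
                      * Same zero-copy reclaim as the split path, except the
                      * whole descriptor chain length is returned to the used
                      * ring.
                      */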
1948                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1949                      zmbuf != NULL; zmbuf = next) {
1950                         next = TAILQ_NEXT(zmbuf, next);
1951
1952                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1953                                 update_shadow_used_ring_packed(vq,
1954                                                 zmbuf->desc_idx,
1955                                                 0,
1956                                                 zmbuf->desc_count);
1957
1958                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1959                                 restore_mbuf(zmbuf->mbuf);
1960                                 rte_pktmbuf_free(zmbuf->mbuf);
1961                                 put_zmbuf(zmbuf);
1962                                 vq->nr_zmbuf -= 1;
1963                         }
1964                 }
1965
1966                 if (likely(vq->shadow_used_idx)) {
1967                         flush_shadow_used_ring_packed(dev, vq);
1968                         vhost_vring_call_packed(dev, vq);
1969                 }
1970         }
1971
1972         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1973
1974         count = RTE_MIN(count, MAX_PKT_BURST);
1975         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1976                         dev->vid, count);
1977
1978         for (i = 0; i < count; i++) {
1979                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1980                 uint16_t buf_id;
1981                 uint32_t buf_len;
1982                 uint16_t desc_count, nr_vec = 0;
1983                 int err;
1984
1985                 if (unlikely(fill_vec_buf_packed(dev, vq,
1986                                                 vq->last_avail_idx, &desc_count,
1987                                                 buf_vec, &nr_vec,
1988                                                 &buf_id, &buf_len,
1989                                                 VHOST_ACCESS_RO) < 0))
1990                         break;
1991
1992                 if (likely(dev->dequeue_zero_copy == 0))
1993                         update_shadow_used_ring_packed(vq, buf_id, 0,
1994                                         desc_count);
1995
1996                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1997                 if (unlikely(pkts[i] == NULL))
1998                         break;
1999
2000                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2001                                 mbuf_pool);
2002                 if (unlikely(err)) {
2003                         rte_pktmbuf_free(pkts[i]);
2004                         break;
2005                 }
2006
2007                 if (unlikely(dev->dequeue_zero_copy)) {
2008                         struct zcopy_mbuf *zmbuf;
2009
2010                         zmbuf = get_zmbuf(vq);
2011                         if (!zmbuf) {
2012                                 rte_pktmbuf_free(pkts[i]);
2013                                 break;
2014                         }
2015                         zmbuf->mbuf = pkts[i];
2016                         zmbuf->desc_idx = buf_id;
2017                         zmbuf->desc_count = desc_count;
2018
2019                         /*
2020                          * Pin the mbuf by taking an extra reference; we will
2021                          * check later whether it has been freed by the
2022                          * application (i.e. we are the last user). Only then
2023                          * can the used ring be updated safely.
2024                          */
2025                         rte_mbuf_refcnt_update(pkts[i], 1);
2026
2027                         vq->nr_zmbuf += 1;
2028                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2029                 }
2030
2031                 vq_inc_last_avail_packed(vq, desc_count);
2032         }
2033
2034         if (likely(dev->dequeue_zero_copy == 0)) {
2035                 do_data_copy_dequeue(vq);
2036                 if (unlikely(i < count))
2037                         vq->shadow_used_idx = i;
2038                 if (likely(vq->shadow_used_idx)) {
2039                         flush_shadow_used_ring_packed(dev, vq);
2040                         vhost_vring_call_packed(dev, vq);
2041                 }
2042         }
2043
2044         return i;
2045 }
2046
2047 uint16_t
2048 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2049         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2050 {
2051         struct virtio_net *dev;
2052         struct rte_mbuf *rarp_mbuf = NULL;
2053         struct vhost_virtqueue *vq;
2054
2055         dev = get_device(vid);
2056         if (!dev)
2057                 return 0;
2058
2059         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2060                 RTE_LOG(ERR, VHOST_DATA,
2061                         "(%d) %s: built-in vhost net backend is disabled.\n",
2062                         dev->vid, __func__);
2063                 return 0;
2064         }
2065
2066         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2067                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
2068                         dev->vid, __func__, queue_id);
2069                 return 0;
2070         }
2071
2072         vq = dev->virtqueue[queue_id];
2073
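             /*
              * Give up immediately instead of blocking if another thread is
              * currently processing this virtqueue.
              */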
2074         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2075                 return 0;
2076
2077         if (unlikely(vq->enabled == 0)) {
2078                 count = 0;
2079                 goto out_access_unlock;
2080         }
2081
2082         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2083                 vhost_user_iotlb_rd_lock(vq);
2084
2085         if (unlikely(vq->access_ok == 0))
2086                 if (unlikely(vring_translate(dev, vq) < 0)) {
2087                         count = 0;
2088                         goto out;
2089                 }
2090
2091         /*
2092          * Construct a RARP broadcast packet, and inject it into the "pkts"
2093          * array, so that it looks like the guest actually sent such a packet.
2094          *
2095          * Check user_send_rarp() for more information.
2096          *
2097          * broadcast_rarp shares a cacheline in the virtio_net structure
2098          * with some fields that are accessed during enqueue and
2099          * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
2100          * result in false sharing between enqueue and dequeue.
2101          *
2102          * Prevent unnecessary false sharing by reading broadcast_rarp first
2103          * and only performing cmpset if the read indicates it is likely to
2104          * be set.
2105          */
2106         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
2107                         rte_atomic16_cmpset((volatile uint16_t *)
2108                                 &dev->broadcast_rarp.cnt, 1, 0))) {
2109
2110                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2111                 if (rarp_mbuf == NULL) {
2112                         RTE_LOG(ERR, VHOST_DATA,
2113                                 "Failed to make RARP packet.\n");
2114                         count = 0;
2115                         goto out;
2116                 }
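                     /*
                      * Keep one slot free in pkts[]; the RARP mbuf is inserted
                      * at the head of the array after the dequeue below.
                      */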
2117                 count -= 1;
2118         }
2119
2120         if (vq_is_packed(dev))
2121                 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
2122         else
2123                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2124
2125 out:
2126         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2127                 vhost_user_iotlb_rd_unlock(vq);
2128
2129 out_access_unlock:
2130         rte_spinlock_unlock(&vq->access_lock);
2131
2132         if (unlikely(rarp_mbuf != NULL)) {
2133                 /*
2134                  * Inject it at the head of the "pkts" array, so that the switch's
2135                  * MAC learning table gets updated first.
2136                  */
2137                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2138                 pkts[0] = rarp_mbuf;
2139                 count += 1;
2140         }
2141
2142         return count;
2143 }