lib/librte_vhost/virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static bool
35 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
36 {
37         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
38 }
39
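/*
 * Copy a chunk of shadow used entries into the split ring's used array
 * and log the dirtied guest memory for live-migration tracking.
 */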
40 static __rte_always_inline void
41 do_flush_shadow_used_ring_split(struct virtio_net *dev,
42                         struct vhost_virtqueue *vq,
43                         uint16_t to, uint16_t from, uint16_t size)
44 {
45         rte_memcpy(&vq->used->ring[to],
46                         &vq->shadow_used_split[from],
47                         size * sizeof(struct vring_used_elem));
48         vhost_log_cache_used_vring(dev, vq,
49                         offsetof(struct vring_used, ring[to]),
50                         size * sizeof(struct vring_used_elem));
51 }
52
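/*
 * Flush all pending shadow used entries to the split used ring, handling
 * wrap-around at the end of the ring, then publish the new used index.
 */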
53 static __rte_always_inline void
54 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
55 {
56         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
57
58         if (used_idx + vq->shadow_used_idx <= vq->size) {
59                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
60                                           vq->shadow_used_idx);
61         } else {
62                 uint16_t size;
63
64                 /* update used ring interval [used_idx, vq->size) */
65                 size = vq->size - used_idx;
66                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
67
68                 /* update the remaining used ring interval [0, shadow_used_idx - size) */
69                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
70                                           vq->shadow_used_idx - size);
71         }
72         vq->last_used_idx += vq->shadow_used_idx;
73
74         rte_smp_wmb();
75
76         vhost_log_cache_sync(dev, vq);
77
78         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
79         vq->shadow_used_idx = 0;
80         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
81                 sizeof(vq->used->idx));
82 }
83
84 static __rte_always_inline void
85 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
86                          uint16_t desc_idx, uint32_t len)
87 {
88         uint16_t i = vq->shadow_used_idx++;
89
90         vq->shadow_used_split[i].id  = desc_idx;
91         vq->shadow_used_split[i].len = len;
92 }
93
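/*
 * Write back the shadow used entries of a packed ring after enqueue.
 * Descriptor ids/lens are written first; the head descriptor's flags are
 * written last so the guest never sees a partially updated chain.
 */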
94 static __rte_always_inline void
95 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
96                                   struct vhost_virtqueue *vq)
97 {
98         int i;
99         uint16_t used_idx = vq->last_used_idx;
100         uint16_t head_idx = vq->last_used_idx;
101         uint16_t head_flags = 0;
102
103         /* Split loop in two to save memory barriers */
104         for (i = 0; i < vq->shadow_used_idx; i++) {
105                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
106                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
107
108                 used_idx += vq->shadow_used_packed[i].count;
109                 if (used_idx >= vq->size)
110                         used_idx -= vq->size;
111         }
112
113         rte_smp_wmb();
114
115         for (i = 0; i < vq->shadow_used_idx; i++) {
116                 uint16_t flags;
117
118                 if (vq->shadow_used_packed[i].len)
119                         flags = VRING_DESC_F_WRITE;
120                 else
121                         flags = 0;
122
123                 if (vq->used_wrap_counter) {
124                         flags |= VRING_DESC_F_USED;
125                         flags |= VRING_DESC_F_AVAIL;
126                 } else {
127                         flags &= ~VRING_DESC_F_USED;
128                         flags &= ~VRING_DESC_F_AVAIL;
129                 }
130
131                 if (i > 0) {
132                         vq->desc_packed[vq->last_used_idx].flags = flags;
133
134                         vhost_log_cache_used_vring(dev, vq,
135                                         vq->last_used_idx *
136                                         sizeof(struct vring_packed_desc),
137                                         sizeof(struct vring_packed_desc));
138                 } else {
139                         head_idx = vq->last_used_idx;
140                         head_flags = flags;
141                 }
142
143                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
144         }
145
146         vq->desc_packed[head_idx].flags = head_flags;
147
148         vhost_log_cache_used_vring(dev, vq,
149                                 head_idx *
150                                 sizeof(struct vring_packed_desc),
151                                 sizeof(struct vring_packed_desc));
152
153         vq->shadow_used_idx = 0;
154         vhost_log_cache_sync(dev, vq);
155 }
156
157 static __rte_always_inline void
158 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
159                                   struct vhost_virtqueue *vq)
160 {
161         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
162
163         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
164         rte_smp_wmb();
165         vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
166
167         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
168                                    sizeof(struct vring_packed_desc),
169                                    sizeof(struct vring_packed_desc));
170         vq->shadow_used_idx = 0;
171         vhost_log_cache_sync(dev, vq);
172 }
173
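/*
 * Mark a full batch of packed descriptors as used after enqueue: ids and
 * lengths first, a write barrier, then the flags for the whole batch.
 */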
174 static __rte_always_inline void
175 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
176                                  struct vhost_virtqueue *vq,
177                                  uint64_t *lens,
178                                  uint16_t *ids)
179 {
180         uint16_t i;
181         uint16_t flags;
182
183         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
184
185         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
186                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
187                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
188         }
189
190         rte_smp_wmb();
191
192         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
193                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
194
195         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
196                                    sizeof(struct vring_packed_desc),
197                                    sizeof(struct vring_packed_desc) *
198                                    PACKED_BATCH_SIZE);
199         vhost_log_cache_sync(dev, vq);
200
201         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
202 }
203
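/*
 * Record a dequeued batch of packed descriptors as used. When the shadow
 * ring is empty, the first entry is parked there so its flags can be
 * flushed later; the remaining entries are written back directly with
 * zero length.
 */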
204 static __rte_always_inline void
205 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
206                                   struct vhost_virtqueue *vq,
207                                   uint16_t *ids)
208 {
209         uint16_t flags;
210         uint16_t i;
211         uint16_t begin;
212
213         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
214
215         if (!vq->shadow_used_idx) {
216                 vq->shadow_last_used_idx = vq->last_used_idx;
217                 vq->shadow_used_packed[0].id  = ids[0];
218                 vq->shadow_used_packed[0].len = 0;
219                 vq->shadow_used_packed[0].count = 1;
220                 vq->shadow_used_packed[0].flags = flags;
221                 vq->shadow_used_idx++;
222                 begin = 1;
223         } else
224                 begin = 0;
225
226         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
227                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
228                 vq->desc_packed[vq->last_used_idx + i].len = 0;
229         }
230
231         rte_smp_wmb();
232         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
233                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
234
235         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
236                                    sizeof(struct vring_packed_desc),
237                                    sizeof(struct vring_packed_desc) *
238                                    PACKED_BATCH_SIZE);
239         vhost_log_cache_sync(dev, vq);
240
241         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
242 }
243
244 static __rte_always_inline void
245 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
246                                    uint16_t buf_id,
247                                    uint16_t count)
248 {
249         uint16_t flags;
250
251         flags = vq->desc_packed[vq->last_used_idx].flags;
252         if (vq->used_wrap_counter) {
253                 flags |= VRING_DESC_F_USED;
254                 flags |= VRING_DESC_F_AVAIL;
255         } else {
256                 flags &= ~VRING_DESC_F_USED;
257                 flags &= ~VRING_DESC_F_AVAIL;
258         }
259
260         if (!vq->shadow_used_idx) {
261                 vq->shadow_last_used_idx = vq->last_used_idx;
262
263                 vq->shadow_used_packed[0].id  = buf_id;
264                 vq->shadow_used_packed[0].len = 0;
265                 vq->shadow_used_packed[0].flags = flags;
266                 vq->shadow_used_idx++;
267         } else {
268                 vq->desc_packed[vq->last_used_idx].id = buf_id;
269                 vq->desc_packed[vq->last_used_idx].len = 0;
270                 vq->desc_packed[vq->last_used_idx].flags = flags;
271         }
272
273         vq_inc_last_used_packed(vq, count);
274 }
275
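/*
 * Perform the copies that were batched during enqueue and log each
 * destination range for dirty-page tracking.
 */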
276 static inline void
277 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
278 {
279         struct batch_copy_elem *elem = vq->batch_copy_elems;
280         uint16_t count = vq->batch_copy_nb_elems;
281         int i;
282
283         for (i = 0; i < count; i++) {
284                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
285                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
286                                            elem[i].len);
287                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
288         }
289
290         vq->batch_copy_nb_elems = 0;
291 }
292
293 static inline void
294 do_data_copy_dequeue(struct vhost_virtqueue *vq)
295 {
296         struct batch_copy_elem *elem = vq->batch_copy_elems;
297         uint16_t count = vq->batch_copy_nb_elems;
298         int i;
299
300         for (i = 0; i < count; i++)
301                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
302
303         vq->batch_copy_nb_elems = 0;
304 }
305
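/*
 * Queue the used-ring updates for a single enqueued packet into the
 * shadow ring, flushing the batched copies and shadow entries once the
 * accumulated descriptors cross a batch boundary.
 */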
306 static __rte_always_inline void
307 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
308                                    struct vhost_virtqueue *vq,
309                                    uint32_t len[],
310                                    uint16_t id[],
311                                    uint16_t count[],
312                                    uint16_t num_buffers)
313 {
314         uint16_t i;
315         for (i = 0; i < num_buffers; i++) {
316                 /* align the shadow flush point with the packed batch size */
317                 if (!vq->shadow_used_idx)
318                         vq->shadow_aligned_idx = vq->last_used_idx &
319                                 PACKED_BATCH_MASK;
320                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
321                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
322                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
323                 vq->shadow_aligned_idx += count[i];
324                 vq->shadow_used_idx++;
325         }
326
327         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
328                 do_data_copy_enqueue(dev, vq);
329                 vhost_flush_enqueue_shadow_packed(dev, vq);
330         }
331 }
332
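/*
 * On the dequeue path, flush the shadow used entry and notify the guest
 * once the number of outstanding used descriptors gets close to the ring
 * size.
 */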
333 static __rte_always_inline void
334 vhost_flush_dequeue_packed(struct virtio_net *dev,
335                            struct vhost_virtqueue *vq)
336 {
337         int shadow_count;
338         if (!vq->shadow_used_idx)
339                 return;
340
341         shadow_count = vq->last_used_idx - vq->shadow_last_used_idx;
342         if (shadow_count <= 0)
343                 shadow_count += vq->size;
344
345         if ((uint32_t)shadow_count >= (vq->size - MAX_PKT_BURST)) {
346                 do_data_copy_dequeue(vq);
347                 vhost_flush_dequeue_shadow_packed(dev, vq);
348                 vhost_vring_call_packed(dev, vq);
349         }
350 }
351
352 /* avoid the write when the value is unchanged, to lessen cache pressure */
353 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
354         if ((var) != (val))                     \
355                 (var) = (val);                  \
356 } while (0)
357
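/*
 * Translate the mbuf offload flags (L4 checksum, TSO/UFO) into the
 * virtio-net header consumed by the guest.
 */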
358 static __rte_always_inline void
359 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
360 {
361         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
362
363         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
364                 csum_l4 |= PKT_TX_TCP_CKSUM;
365
366         if (csum_l4) {
367                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
368                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
369
370                 switch (csum_l4) {
371                 case PKT_TX_TCP_CKSUM:
372                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
373                                                 cksum));
374                         break;
375                 case PKT_TX_UDP_CKSUM:
376                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
377                                                 dgram_cksum));
378                         break;
379                 case PKT_TX_SCTP_CKSUM:
380                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
381                                                 cksum));
382                         break;
383                 }
384         } else {
385                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
386                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
387                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
388         }
389
390         /* IP cksum verification cannot be bypassed, so calculate it here */
391         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
392                 struct rte_ipv4_hdr *ipv4_hdr;
393
394                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
395                                                    m_buf->l2_len);
396                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
397         }
398
399         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
400                 if (m_buf->ol_flags & PKT_TX_IPV4)
401                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
402                 else
403                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
404                 net_hdr->gso_size = m_buf->tso_segsz;
405                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
406                                         + m_buf->l4_len;
407         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
408                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
409                 net_hdr->gso_size = m_buf->tso_segsz;
410                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
411                         m_buf->l4_len;
412         } else {
413                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
414                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
415                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
416         }
417 }
418
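/*
 * Translate one guest descriptor (IOVA plus length) into host virtual
 * address chunks and append them to buf_vec; a single descriptor may map
 * to several chunks when the guest memory is not contiguous in the host
 * address space.
 */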
419 static __rte_always_inline int
420 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
421                 struct buf_vector *buf_vec, uint16_t *vec_idx,
422                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
423 {
424         uint16_t vec_id = *vec_idx;
425
426         while (desc_len) {
427                 uint64_t desc_addr;
428                 uint64_t desc_chunck_len = desc_len;
429
430                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
431                         return -1;
432
433                 desc_addr = vhost_iova_to_vva(dev, vq,
434                                 desc_iova,
435                                 &desc_chunck_len,
436                                 perm);
437                 if (unlikely(!desc_addr))
438                         return -1;
439
440                 rte_prefetch0((void *)(uintptr_t)desc_addr);
441
442                 buf_vec[vec_id].buf_iova = desc_iova;
443                 buf_vec[vec_id].buf_addr = desc_addr;
444                 buf_vec[vec_id].buf_len  = desc_chunck_len;
445
446                 desc_len -= desc_chunck_len;
447                 desc_iova += desc_chunck_len;
448                 vec_id++;
449         }
450         *vec_idx = vec_id;
451
452         return 0;
453 }
454
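/*
 * Walk one split-ring descriptor chain (following an indirect table if
 * present) and collect its buffers into buf_vec, returning the chain
 * head index and the total buffer length.
 */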
455 static __rte_always_inline int
456 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
457                          uint32_t avail_idx, uint16_t *vec_idx,
458                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
459                          uint32_t *desc_chain_len, uint8_t perm)
460 {
461         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
462         uint16_t vec_id = *vec_idx;
463         uint32_t len    = 0;
464         uint64_t dlen;
465         uint32_t nr_descs = vq->size;
466         uint32_t cnt    = 0;
467         struct vring_desc *descs = vq->desc;
468         struct vring_desc *idesc = NULL;
469
470         if (unlikely(idx >= vq->size))
471                 return -1;
472
473         *desc_chain_head = idx;
474
475         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
476                 dlen = vq->desc[idx].len;
477                 nr_descs = dlen / sizeof(struct vring_desc);
478                 if (unlikely(nr_descs > vq->size))
479                         return -1;
480
481                 descs = (struct vring_desc *)(uintptr_t)
482                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
483                                                 &dlen,
484                                                 VHOST_ACCESS_RO);
485                 if (unlikely(!descs))
486                         return -1;
487
488                 if (unlikely(dlen < vq->desc[idx].len)) {
489                         /*
490                          * The indirect desc table is not contiguous
491                          * in process VA space, so we have to copy it.
492                          */
493                         idesc = vhost_alloc_copy_ind_table(dev, vq,
494                                         vq->desc[idx].addr, vq->desc[idx].len);
495                         if (unlikely(!idesc))
496                                 return -1;
497
498                         descs = idesc;
499                 }
500
501                 idx = 0;
502         }
503
504         while (1) {
505                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
506                         free_ind_table(idesc);
507                         return -1;
508                 }
509
510                 len += descs[idx].len;
511
512                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
513                                                 descs[idx].addr, descs[idx].len,
514                                                 perm))) {
515                         free_ind_table(idesc);
516                         return -1;
517                 }
518
519                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
520                         break;
521
522                 idx = descs[idx].next;
523         }
524
525         *desc_chain_len = len;
526         *vec_idx = vec_id;
527
528         if (unlikely(!!idesc))
529                 free_ind_table(idesc);
530
531         return 0;
532 }
533
534 /*
535  * Returns -1 on failure, 0 on success
536  */
537 static inline int
538 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
539                                 uint32_t size, struct buf_vector *buf_vec,
540                                 uint16_t *num_buffers, uint16_t avail_head,
541                                 uint16_t *nr_vec)
542 {
543         uint16_t cur_idx;
544         uint16_t vec_idx = 0;
545         uint16_t max_tries, tries = 0;
546
547         uint16_t head_idx = 0;
548         uint32_t len = 0;
549
550         *num_buffers = 0;
551         cur_idx  = vq->last_avail_idx;
552
553         if (rxvq_is_mergeable(dev))
554                 max_tries = vq->size - 1;
555         else
556                 max_tries = 1;
557
558         while (size > 0) {
559                 if (unlikely(cur_idx == avail_head))
560                         return -1;
561                 /*
562                  * If we have tried all available ring entries and still
563                  * cannot get enough buffers, something abnormal has
564                  * happened.
565                  */
566                 if (unlikely(++tries > max_tries))
567                         return -1;
568
569                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
570                                                 &vec_idx, buf_vec,
571                                                 &head_idx, &len,
572                                                 VHOST_ACCESS_RW) < 0))
573                         return -1;
574                 len = RTE_MIN(len, size);
575                 update_shadow_used_ring_split(vq, head_idx, len);
576                 size -= len;
577
578                 cur_idx++;
579                 *num_buffers += 1;
580         }
581
582         *nr_vec = vec_idx;
583
584         return 0;
585 }
586
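/*
 * Map the buffers referenced by an indirect packed descriptor table into
 * buf_vec, copying the table first if it is not contiguous in the host
 * virtual address space.
 */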
587 static __rte_always_inline int
588 fill_vec_buf_packed_indirect(struct virtio_net *dev,
589                         struct vhost_virtqueue *vq,
590                         struct vring_packed_desc *desc, uint16_t *vec_idx,
591                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
592 {
593         uint16_t i;
594         uint32_t nr_descs;
595         uint16_t vec_id = *vec_idx;
596         uint64_t dlen;
597         struct vring_packed_desc *descs, *idescs = NULL;
598
599         dlen = desc->len;
600         descs = (struct vring_packed_desc *)(uintptr_t)
601                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
602         if (unlikely(!descs))
603                 return -1;
604
605         if (unlikely(dlen < desc->len)) {
606                 /*
607                  * The indirect desc table is not contiguous
608                  * in process VA space, so we have to copy it.
609                  */
610                 idescs = vhost_alloc_copy_ind_table(dev,
611                                 vq, desc->addr, desc->len);
612                 if (unlikely(!idescs))
613                         return -1;
614
615                 descs = idescs;
616         }
617
618         nr_descs = desc->len / sizeof(struct vring_packed_desc);
619         if (unlikely(nr_descs >= vq->size)) {
620                 free_ind_table(idescs);
621                 return -1;
622         }
623
624         for (i = 0; i < nr_descs; i++) {
625                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
626                         free_ind_table(idescs);
627                         return -1;
628                 }
629
630                 *len += descs[i].len;
631                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
632                                                 descs[i].addr, descs[i].len,
633                                                 perm)))
634                         return -1;
635         }
636         *vec_idx = vec_id;
637
638         if (unlikely(!!idescs))
639                 free_ind_table(idescs);
640
641         return 0;
642 }
643
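/*
 * Collect the buffers of one packed-ring descriptor chain starting at
 * avail_idx into buf_vec, handling ring wrap-around and indirect
 * descriptors.
 */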
644 static __rte_always_inline int
645 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
646                                 uint16_t avail_idx, uint16_t *desc_count,
647                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
648                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
649 {
650         bool wrap_counter = vq->avail_wrap_counter;
651         struct vring_packed_desc *descs = vq->desc_packed;
652         uint16_t vec_id = *vec_idx;
653
654         if (avail_idx < vq->last_avail_idx)
655                 wrap_counter ^= 1;
656
657         /*
658          * Perform a load-acquire barrier in desc_is_avail to
659          * enforce the ordering between desc flags and desc
660          * content.
661          */
662         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
663                 return -1;
664
665         *desc_count = 0;
666         *len = 0;
667
668         while (1) {
669                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
670                         return -1;
671
672                 if (unlikely(*desc_count >= vq->size))
673                         return -1;
674
675                 *desc_count += 1;
676                 *buf_id = descs[avail_idx].id;
677
678                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
679                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
680                                                         &descs[avail_idx],
681                                                         &vec_id, buf_vec,
682                                                         len, perm) < 0))
683                                 return -1;
684                 } else {
685                         *len += descs[avail_idx].len;
686
687                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
688                                                         descs[avail_idx].addr,
689                                                         descs[avail_idx].len,
690                                                         perm)))
691                                 return -1;
692                 }
693
694                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
695                         break;
696
697                 if (++avail_idx >= vq->size) {
698                         avail_idx -= vq->size;
699                         wrap_counter ^= 1;
700                 }
701         }
702
703         *vec_idx = vec_id;
704
705         return 0;
706 }
707
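/*
 * Copy the virtio-net header into the guest buffers when it does not fit
 * in a single contiguous descriptor chunk.
 */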
708 static __rte_noinline void
709 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
710                 struct buf_vector *buf_vec,
711                 struct virtio_net_hdr_mrg_rxbuf *hdr)
712 {
713         uint64_t len;
714         uint64_t remain = dev->vhost_hlen;
715         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
716         uint64_t iova = buf_vec->buf_iova;
717
718         while (remain) {
719                 len = RTE_MIN(remain,
720                                 buf_vec->buf_len);
721                 dst = buf_vec->buf_addr;
722                 rte_memcpy((void *)(uintptr_t)dst,
723                                 (void *)(uintptr_t)src,
724                                 len);
725
726                 PRINT_PACKET(dev, (uintptr_t)dst,
727                                 (uint32_t)len, 0);
728                 vhost_log_cache_write_iova(dev, vq,
729                                 iova, len);
730
731                 remain -= len;
732                 iova += len;
733                 src += len;
734                 buf_vec++;
735         }
736 }
737
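/*
 * Copy one mbuf chain into the guest buffers described by buf_vec,
 * filling in the virtio-net header and batching small copies.
 */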
738 static __rte_always_inline int
739 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
740                             struct rte_mbuf *m, struct buf_vector *buf_vec,
741                             uint16_t nr_vec, uint16_t num_buffers)
742 {
743         uint32_t vec_idx = 0;
744         uint32_t mbuf_offset, mbuf_avail;
745         uint32_t buf_offset, buf_avail;
746         uint64_t buf_addr, buf_iova, buf_len;
747         uint32_t cpy_len;
748         uint64_t hdr_addr;
749         struct rte_mbuf *hdr_mbuf;
750         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
751         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
752         int error = 0;
753
754         if (unlikely(m == NULL)) {
755                 error = -1;
756                 goto out;
757         }
758
759         buf_addr = buf_vec[vec_idx].buf_addr;
760         buf_iova = buf_vec[vec_idx].buf_iova;
761         buf_len = buf_vec[vec_idx].buf_len;
762
763         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
764                 error = -1;
765                 goto out;
766         }
767
768         hdr_mbuf = m;
769         hdr_addr = buf_addr;
770         if (unlikely(buf_len < dev->vhost_hlen))
771                 hdr = &tmp_hdr;
772         else
773                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
774
775         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
776                 dev->vid, num_buffers);
777
778         if (unlikely(buf_len < dev->vhost_hlen)) {
779                 buf_offset = dev->vhost_hlen - buf_len;
780                 vec_idx++;
781                 buf_addr = buf_vec[vec_idx].buf_addr;
782                 buf_iova = buf_vec[vec_idx].buf_iova;
783                 buf_len = buf_vec[vec_idx].buf_len;
784                 buf_avail = buf_len - buf_offset;
785         } else {
786                 buf_offset = dev->vhost_hlen;
787                 buf_avail = buf_len - dev->vhost_hlen;
788         }
789
790         mbuf_avail  = rte_pktmbuf_data_len(m);
791         mbuf_offset = 0;
792         while (mbuf_avail != 0 || m->next != NULL) {
793                 /* done with current buf, get the next one */
794                 if (buf_avail == 0) {
795                         vec_idx++;
796                         if (unlikely(vec_idx >= nr_vec)) {
797                                 error = -1;
798                                 goto out;
799                         }
800
801                         buf_addr = buf_vec[vec_idx].buf_addr;
802                         buf_iova = buf_vec[vec_idx].buf_iova;
803                         buf_len = buf_vec[vec_idx].buf_len;
804
805                         buf_offset = 0;
806                         buf_avail  = buf_len;
807                 }
808
809                 /* done with current mbuf, get the next one */
810                 if (mbuf_avail == 0) {
811                         m = m->next;
812
813                         mbuf_offset = 0;
814                         mbuf_avail  = rte_pktmbuf_data_len(m);
815                 }
816
817                 if (hdr_addr) {
818                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
819                         if (rxvq_is_mergeable(dev))
820                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
821                                                 num_buffers);
822
823                         if (unlikely(hdr == &tmp_hdr)) {
824                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
825                         } else {
826                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
827                                                 dev->vhost_hlen, 0);
828                                 vhost_log_cache_write_iova(dev, vq,
829                                                 buf_vec[0].buf_iova,
830                                                 dev->vhost_hlen);
831                         }
832
833                         hdr_addr = 0;
834                 }
835
836                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
837
838                 if (likely(cpy_len > MAX_BATCH_LEN ||
839                                         vq->batch_copy_nb_elems >= vq->size)) {
840                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
841                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
842                                 cpy_len);
843                         vhost_log_cache_write_iova(dev, vq,
844                                                    buf_iova + buf_offset,
845                                                    cpy_len);
846                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
847                                 cpy_len, 0);
848                 } else {
849                         batch_copy[vq->batch_copy_nb_elems].dst =
850                                 (void *)((uintptr_t)(buf_addr + buf_offset));
851                         batch_copy[vq->batch_copy_nb_elems].src =
852                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
853                         batch_copy[vq->batch_copy_nb_elems].log_addr =
854                                 buf_iova + buf_offset;
855                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
856                         vq->batch_copy_nb_elems++;
857                 }
858
859                 mbuf_avail  -= cpy_len;
860                 mbuf_offset += cpy_len;
861                 buf_avail  -= cpy_len;
862                 buf_offset += cpy_len;
863         }
864
865 out:
866
867         return error;
868 }
869
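/*
 * Reserve enough packed-ring descriptors for one packet, copy the packet
 * in, and queue the corresponding shadow used-ring updates.
 */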
870 static __rte_always_inline int
871 vhost_enqueue_single_packed(struct virtio_net *dev,
872                             struct vhost_virtqueue *vq,
873                             struct rte_mbuf *pkt,
874                             struct buf_vector *buf_vec,
875                             uint16_t *nr_descs)
876 {
877         uint16_t nr_vec = 0;
878         uint16_t avail_idx = vq->last_avail_idx;
879         uint16_t max_tries, tries = 0;
880         uint16_t buf_id = 0;
881         uint32_t len = 0;
882         uint16_t desc_count;
883         uint32_t size = pkt->pkt_len + dev->vhost_hlen;
884         uint16_t num_buffers = 0;
885         uint32_t buffer_len[vq->size];
886         uint16_t buffer_buf_id[vq->size];
887         uint16_t buffer_desc_count[vq->size];
888
889         if (rxvq_is_mergeable(dev))
890                 max_tries = vq->size - 1;
891         else
892                 max_tries = 1;
893
894         while (size > 0) {
895                 /*
896                  * If we have tried all available ring entries and still
897                  * cannot get enough buffers, something abnormal has
898                  * happened.
899                  */
900                 if (unlikely(++tries > max_tries))
901                         return -1;
902
903                 if (unlikely(fill_vec_buf_packed(dev, vq,
904                                                 avail_idx, &desc_count,
905                                                 buf_vec, &nr_vec,
906                                                 &buf_id, &len,
907                                                 VHOST_ACCESS_RW) < 0))
908                         return -1;
909
910                 len = RTE_MIN(len, size);
911                 size -= len;
912
913                 buffer_len[num_buffers] = len;
914                 buffer_buf_id[num_buffers] = buf_id;
915                 buffer_desc_count[num_buffers] = desc_count;
916                 num_buffers += 1;
917
918                 *nr_descs += desc_count;
919                 avail_idx += desc_count;
920                 if (avail_idx >= vq->size)
921                         avail_idx -= vq->size;
922         }
923
924         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
925                 return -1;
926
927         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
928                                            buffer_desc_count, num_buffers);
929
930         return 0;
931 }
932
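/*
 * Enqueue a burst of packets into a split virtqueue, then flush the
 * shadow used ring and notify the guest once all copies are done.
 */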
933 static __rte_noinline uint32_t
934 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
935         struct rte_mbuf **pkts, uint32_t count)
936 {
937         uint32_t pkt_idx = 0;
938         uint16_t num_buffers;
939         struct buf_vector buf_vec[BUF_VECTOR_MAX];
940         uint16_t avail_head;
941
942         avail_head = *((volatile uint16_t *)&vq->avail->idx);
943
944         /*
945          * The ordering between avail index and
946          * desc reads needs to be enforced.
947          */
948         rte_smp_rmb();
949
950         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
951
952         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
953                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
954                 uint16_t nr_vec = 0;
955
956                 if (unlikely(reserve_avail_buf_split(dev, vq,
957                                                 pkt_len, buf_vec, &num_buffers,
958                                                 avail_head, &nr_vec) < 0)) {
959                         VHOST_LOG_DEBUG(VHOST_DATA,
960                                 "(%d) failed to get enough desc from vring\n",
961                                 dev->vid);
962                         vq->shadow_used_idx -= num_buffers;
963                         break;
964                 }
965
966                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
967                         dev->vid, vq->last_avail_idx,
968                         vq->last_avail_idx + num_buffers);
969
970                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
971                                                 buf_vec, nr_vec,
972                                                 num_buffers) < 0) {
973                         vq->shadow_used_idx -= num_buffers;
974                         break;
975                 }
976
977                 vq->last_avail_idx += num_buffers;
978         }
979
980         do_data_copy_enqueue(dev, vq);
981
982         if (likely(vq->shadow_used_idx)) {
983                 flush_shadow_used_ring_split(dev, vq);
984                 vhost_vring_call_split(dev, vq);
985         }
986
987         return pkt_idx;
988 }
989
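/*
 * Fast path: enqueue PACKED_BATCH_SIZE single-segment packets at once
 * when a full, aligned batch of descriptors is available; returns -1 so
 * the caller falls back to the single-packet path otherwise.
 */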
990 static __rte_always_inline int
991 virtio_dev_rx_batch_packed(struct virtio_net *dev,
992                            struct vhost_virtqueue *vq,
993                            struct rte_mbuf **pkts)
994 {
995         bool wrap_counter = vq->avail_wrap_counter;
996         struct vring_packed_desc *descs = vq->desc_packed;
997         uint16_t avail_idx = vq->last_avail_idx;
998         uint64_t desc_addrs[PACKED_BATCH_SIZE];
999         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1000         uint32_t buf_offset = dev->vhost_hlen;
1001         uint64_t lens[PACKED_BATCH_SIZE];
1002         uint16_t ids[PACKED_BATCH_SIZE];
1003         uint16_t i;
1004
1005         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1006                 return -1;
1007
1008         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1009                 return -1;
1010
1011         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1012                 if (unlikely(pkts[i]->next != NULL))
1013                         return -1;
1014                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1015                                             wrap_counter)))
1016                         return -1;
1017         }
1018
1019         rte_smp_rmb();
1020
1021         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1022                 lens[i] = descs[avail_idx + i].len;
1023
1024         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1025                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1026                         return -1;
1027         }
1028
1029         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1030                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1031                                                   descs[avail_idx + i].addr,
1032                                                   &lens[i],
1033                                                   VHOST_ACCESS_RW);
1034
1035         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1036                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1037                         return -1;
1038         }
1039
1040         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1041                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1042                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1043                                         (uintptr_t)desc_addrs[i];
1044                 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1045         }
1046
1047         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1048                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1049
1050         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1051
1052         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1053                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1054                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1055                            pkts[i]->pkt_len);
1056         }
1057
1058         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1059                 ids[i] = descs[avail_idx + i].id;
1060
1061         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1062
1063         return 0;
1064 }
1065
1066 static __rte_always_inline int16_t
1067 virtio_dev_rx_single_packed(struct virtio_net *dev,
1068                             struct vhost_virtqueue *vq,
1069                             struct rte_mbuf *pkt)
1070 {
1071         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1072         uint16_t nr_descs = 0;
1073
1074         rte_smp_rmb();
1075         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1076                                                  &nr_descs) < 0)) {
1077                 VHOST_LOG_DEBUG(VHOST_DATA,
1078                                 "(%d) failed to get enough desc from vring\n",
1079                                 dev->vid);
1080                 return -1;
1081         }
1082
1083         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1084                         dev->vid, vq->last_avail_idx,
1085                         vq->last_avail_idx + nr_descs);
1086
1087         vq_inc_last_avail_packed(vq, nr_descs);
1088
1089         return 0;
1090 }
1091
1092 static __rte_noinline uint32_t
1093 virtio_dev_rx_packed(struct virtio_net *dev,
1094                      struct vhost_virtqueue *vq,
1095                      struct rte_mbuf **pkts,
1096                      uint32_t count)
1097 {
1098         uint32_t pkt_idx = 0;
1099         uint32_t remained = count;
1100
1101         do {
1102                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1103
1104                 if (remained >= PACKED_BATCH_SIZE) {
1105                         if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
1106                                 pkt_idx += PACKED_BATCH_SIZE;
1107                                 remained -= PACKED_BATCH_SIZE;
1108                                 continue;
1109                         }
1110                 }
1111
1112                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1113                         break;
1114                 pkt_idx++;
1115                 remained--;
1116
1117         } while (pkt_idx < count);
1118
1119         if (vq->shadow_used_idx) {
1120                 do_data_copy_enqueue(dev, vq);
1121                 vhost_flush_enqueue_shadow_packed(dev, vq);
1122         }
1123
1124         if (pkt_idx)
1125                 vhost_vring_call_packed(dev, vq);
1126
1127         return pkt_idx;
1128 }
1129
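/*
 * Common enqueue entry point: validate the queue index, take the access
 * and IOTLB locks, then dispatch to the packed or split ring handler.
 */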
1130 static __rte_always_inline uint32_t
1131 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1132         struct rte_mbuf **pkts, uint32_t count)
1133 {
1134         struct vhost_virtqueue *vq;
1135         uint32_t nb_tx = 0;
1136
1137         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1138         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1139                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1140                         dev->vid, __func__, queue_id);
1141                 return 0;
1142         }
1143
1144         vq = dev->virtqueue[queue_id];
1145
1146         rte_spinlock_lock(&vq->access_lock);
1147
1148         if (unlikely(vq->enabled == 0))
1149                 goto out_access_unlock;
1150
1151         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1152                 vhost_user_iotlb_rd_lock(vq);
1153
1154         if (unlikely(vq->access_ok == 0))
1155                 if (unlikely(vring_translate(dev, vq) < 0))
1156                         goto out;
1157
1158         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1159         if (count == 0)
1160                 goto out;
1161
1162         if (vq_is_packed(dev))
1163                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1164         else
1165                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1166
1167 out:
1168         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1169                 vhost_user_iotlb_rd_unlock(vq);
1170
1171 out_access_unlock:
1172         rte_spinlock_unlock(&vq->access_lock);
1173
1174         return nb_tx;
1175 }
1176
1177 uint16_t
1178 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1179         struct rte_mbuf **pkts, uint16_t count)
1180 {
1181         struct virtio_net *dev = get_device(vid);
1182
1183         if (!dev)
1184                 return 0;
1185
1186         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1187                 RTE_LOG(ERR, VHOST_DATA,
1188                         "(%d) %s: built-in vhost net backend is disabled.\n",
1189                         dev->vid, __func__);
1190                 return 0;
1191         }
1192
1193         return virtio_dev_rx(dev, queue_id, pkts, count);
1194 }
1195
1196 static inline bool
1197 virtio_net_with_host_offload(struct virtio_net *dev)
1198 {
1199         if (dev->features &
1200                         ((1ULL << VIRTIO_NET_F_CSUM) |
1201                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1202                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1203                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1204                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1205                 return true;
1206
1207         return false;
1208 }
1209
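/*
 * Parse the Ethernet/VLAN and L3 headers of a packet to locate the L4
 * header and set the l2/l3 lengths and IPV4/IPV6 offload flags.
 */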
1210 static void
1211 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1212 {
1213         struct rte_ipv4_hdr *ipv4_hdr;
1214         struct rte_ipv6_hdr *ipv6_hdr;
1215         void *l3_hdr = NULL;
1216         struct rte_ether_hdr *eth_hdr;
1217         uint16_t ethertype;
1218
1219         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1220
1221         m->l2_len = sizeof(struct rte_ether_hdr);
1222         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1223
1224         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1225                 struct rte_vlan_hdr *vlan_hdr =
1226                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1227
1228                 m->l2_len += sizeof(struct rte_vlan_hdr);
1229                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1230         }
1231
1232         l3_hdr = (char *)eth_hdr + m->l2_len;
1233
1234         switch (ethertype) {
1235         case RTE_ETHER_TYPE_IPV4:
1236                 ipv4_hdr = l3_hdr;
1237                 *l4_proto = ipv4_hdr->next_proto_id;
1238                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1239                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1240                 m->ol_flags |= PKT_TX_IPV4;
1241                 break;
1242         case RTE_ETHER_TYPE_IPV6:
1243                 ipv6_hdr = l3_hdr;
1244                 *l4_proto = ipv6_hdr->proto;
1245                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1246                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1247                 m->ol_flags |= PKT_TX_IPV6;
1248                 break;
1249         default:
1250                 m->l3_len = 0;
1251                 *l4_proto = 0;
1252                 *l4_hdr = NULL;
1253                 break;
1254         }
1255 }
1256
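/*
 * Convert the virtio-net header written by the guest into mbuf offload
 * flags (checksum and GSO information) on the dequeue path.
 */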
1257 static __rte_always_inline void
1258 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1259 {
1260         uint16_t l4_proto = 0;
1261         void *l4_hdr = NULL;
1262         struct rte_tcp_hdr *tcp_hdr = NULL;
1263
1264         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1265                 return;
1266
1267         parse_ethernet(m, &l4_proto, &l4_hdr);
1268         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1269                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1270                         switch (hdr->csum_offset) {
1271                         case (offsetof(struct rte_tcp_hdr, cksum)):
1272                                 if (l4_proto == IPPROTO_TCP)
1273                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1274                                 break;
1275                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1276                                 if (l4_proto == IPPROTO_UDP)
1277                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1278                                 break;
1279                         case (offsetof(struct rte_sctp_hdr, cksum)):
1280                                 if (l4_proto == IPPROTO_SCTP)
1281                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1282                                 break;
1283                         default:
1284                                 break;
1285                         }
1286                 }
1287         }
1288
1289         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1290                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1291                 case VIRTIO_NET_HDR_GSO_TCPV4:
1292                 case VIRTIO_NET_HDR_GSO_TCPV6:
1293                         tcp_hdr = l4_hdr;
1294                         m->ol_flags |= PKT_TX_TCP_SEG;
1295                         m->tso_segsz = hdr->gso_size;
1296                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1297                         break;
1298                 case VIRTIO_NET_HDR_GSO_UDP:
1299                         m->ol_flags |= PKT_TX_UDP_SEG;
1300                         m->tso_segsz = hdr->gso_size;
1301                         m->l4_len = sizeof(struct rte_udp_hdr);
1302                         break;
1303                 default:
1304                         RTE_LOG(WARNING, VHOST_DATA,
1305                                 "unsupported gso type %u.\n", hdr->gso_type);
1306                         break;
1307                 }
1308         }
1309 }
1310
1311 static __rte_noinline void
1312 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1313                 struct buf_vector *buf_vec)
1314 {
1315         uint64_t len;
1316         uint64_t remain = sizeof(struct virtio_net_hdr);
1317         uint64_t src;
1318         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1319
1320         while (remain) {
1321                 len = RTE_MIN(remain, buf_vec->buf_len);
1322                 src = buf_vec->buf_addr;
1323                 rte_memcpy((void *)(uintptr_t)dst,
1324                                 (void *)(uintptr_t)src, len);
1325
1326                 remain -= len;
1327                 dst += len;
1328                 buf_vec++;
1329         }
1330 }
1331
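/*
 * Copy the guest buffers described by buf_vec into an mbuf chain,
 * allocating additional segments as needed and applying dequeue offloads.
 */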
1332 static __rte_always_inline int
1333 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1334                   struct buf_vector *buf_vec, uint16_t nr_vec,
1335                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1336 {
1337         uint32_t buf_avail, buf_offset;
1338         uint64_t buf_addr, buf_iova, buf_len;
1339         uint32_t mbuf_avail, mbuf_offset;
1340         uint32_t cpy_len;
1341         struct rte_mbuf *cur = m, *prev = m;
1342         struct virtio_net_hdr tmp_hdr;
1343         struct virtio_net_hdr *hdr = NULL;
1344         /* A counter to avoid a dead loop on a malformed desc chain */
1345         uint16_t vec_idx = 0;
1346         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1347         int error = 0;
1348
1349         buf_addr = buf_vec[vec_idx].buf_addr;
1350         buf_iova = buf_vec[vec_idx].buf_iova;
1351         buf_len = buf_vec[vec_idx].buf_len;
1352
1353         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1354                 error = -1;
1355                 goto out;
1356         }
1357
1358         if (virtio_net_with_host_offload(dev)) {
1359                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1360                         /*
1361                          * No luck, the virtio-net header doesn't fit
1362                          * in a contiguous virtual area.
1363                          */
1364                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1365                         hdr = &tmp_hdr;
1366                 } else {
1367                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1368                 }
1369         }
1370
1371         /*
1372          * A virtio driver normally uses at least 2 desc buffers
1373          * for Tx: the first for storing the header, and the others
1374          * for storing the data.
1375          */
1376         if (unlikely(buf_len < dev->vhost_hlen)) {
1377                 buf_offset = dev->vhost_hlen - buf_len;
1378                 vec_idx++;
1379                 buf_addr = buf_vec[vec_idx].buf_addr;
1380                 buf_iova = buf_vec[vec_idx].buf_iova;
1381                 buf_len = buf_vec[vec_idx].buf_len;
1382                 buf_avail  = buf_len - buf_offset;
1383         } else if (buf_len == dev->vhost_hlen) {
1384                 if (unlikely(++vec_idx >= nr_vec))
1385                         goto out;
1386                 buf_addr = buf_vec[vec_idx].buf_addr;
1387                 buf_iova = buf_vec[vec_idx].buf_iova;
1388                 buf_len = buf_vec[vec_idx].buf_len;
1389
1390                 buf_offset = 0;
1391                 buf_avail = buf_len;
1392         } else {
1393                 buf_offset = dev->vhost_hlen;
1394                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1395         }
1396
1397         PRINT_PACKET(dev,
1398                         (uintptr_t)(buf_addr + buf_offset),
1399                         (uint32_t)buf_avail, 0);
1400
1401         mbuf_offset = 0;
1402         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1403         while (1) {
1404                 uint64_t hpa;
1405
1406                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1407
1408                 /*
1409                  * A desc buf might span two host physical pages that are
1410                  * not contiguous. In that case (gpa_to_hpa returns 0), data
1411                  * will be copied even though zero copy is enabled.
1412                  */
1413                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1414                                         buf_iova + buf_offset, cpy_len)))) {
1415                         cur->data_len = cpy_len;
1416                         cur->data_off = 0;
1417                         cur->buf_addr =
1418                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1419                         cur->buf_iova = hpa;
1420
1421                         /*
1422                          * In zero copy mode, one mbuf can only reference data
1423                          * for one desc buf, or part of one.
1424                          */
1425                         mbuf_avail = cpy_len;
1426                 } else {
1427                         if (likely(cpy_len > MAX_BATCH_LEN ||
1428                                    vq->batch_copy_nb_elems >= vq->size ||
1429                                    (hdr && cur == m))) {
1430                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1431                                                                    mbuf_offset),
1432                                            (void *)((uintptr_t)(buf_addr +
1433                                                            buf_offset)),
1434                                            cpy_len);
1435                         } else {
1436                                 batch_copy[vq->batch_copy_nb_elems].dst =
1437                                         rte_pktmbuf_mtod_offset(cur, void *,
1438                                                                 mbuf_offset);
1439                                 batch_copy[vq->batch_copy_nb_elems].src =
1440                                         (void *)((uintptr_t)(buf_addr +
1441                                                                 buf_offset));
1442                                 batch_copy[vq->batch_copy_nb_elems].len =
1443                                         cpy_len;
1444                                 vq->batch_copy_nb_elems++;
1445                         }
1446                 }
1447
1448                 mbuf_avail  -= cpy_len;
1449                 mbuf_offset += cpy_len;
1450                 buf_avail -= cpy_len;
1451                 buf_offset += cpy_len;
1452
1453                 /* This buf has reached its end, get the next one */
1454                 if (buf_avail == 0) {
1455                         if (++vec_idx >= nr_vec)
1456                                 break;
1457
1458                         buf_addr = buf_vec[vec_idx].buf_addr;
1459                         buf_iova = buf_vec[vec_idx].buf_iova;
1460                         buf_len = buf_vec[vec_idx].buf_len;
1461
1462                         buf_offset = 0;
1463                         buf_avail  = buf_len;
1464
1465                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1466                                         (uint32_t)buf_avail, 0);
1467                 }
1468
1469                 /*
1470                  * This mbuf has reached its end, allocate a new one
1471                  * to hold more data.
1472                  */
1473                 if (mbuf_avail == 0) {
1474                         cur = rte_pktmbuf_alloc(mbuf_pool);
1475                         if (unlikely(cur == NULL)) {
1476                                 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1477                                         "allocate memory for mbuf.\n");
1478                                 error = -1;
1479                                 goto out;
1480                         }
1481                         if (unlikely(dev->dequeue_zero_copy))
1482                                 rte_mbuf_refcnt_update(cur, 1);
1483
1484                         prev->next = cur;
1485                         prev->data_len = mbuf_offset;
1486                         m->nb_segs += 1;
1487                         m->pkt_len += mbuf_offset;
1488                         prev = cur;
1489
1490                         mbuf_offset = 0;
1491                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1492                 }
1493         }
1494
1495         prev->data_len = mbuf_offset;
1496         m->pkt_len    += mbuf_offset;
1497
1498         if (hdr)
1499                 vhost_dequeue_offload(hdr, m);
1500
1501 out:
1502
1503         return error;
1504 }
1505
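/*
 * Grab a free zero-copy mbuf slot. The search starts at last_zmbuf_idx and
 * wraps around once, so allocation proceeds roughly round-robin over the
 * zmbufs array; NULL is returned when every slot is in use.
 */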
1506 static __rte_always_inline struct zcopy_mbuf *
1507 get_zmbuf(struct vhost_virtqueue *vq)
1508 {
1509         uint16_t i;
1510         uint16_t last;
1511         int tries = 0;
1512
1513         /* search [last_zmbuf_idx, zmbuf_size) */
1514         i = vq->last_zmbuf_idx;
1515         last = vq->zmbuf_size;
1516
1517 again:
1518         for (; i < last; i++) {
1519                 if (vq->zmbufs[i].in_use == 0) {
1520                         vq->last_zmbuf_idx = i + 1;
1521                         vq->zmbufs[i].in_use = 1;
1522                         return &vq->zmbufs[i];
1523                 }
1524         }
1525
1526         tries++;
1527         if (tries == 1) {
1528                 /* search [0, last_zmbuf_idx) */
1529                 i = 0;
1530                 last = vq->last_zmbuf_idx;
1531                 goto again;
1532         }
1533
1534         return NULL;
1535 }
1536
1537 static void
1538 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1539 {
1540         rte_free(opaque);
1541 }
1542
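/*
 * Allocate an external data buffer of 'size' bytes (plus headroom) with
 * rte_malloc() and attach it to 'pkt'. The shared info is stored in the
 * pkt mbuf itself when there is room for it, otherwise at the end of the
 * newly allocated buffer.
 */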
1543 static int
1544 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1545 {
1546         struct rte_mbuf_ext_shared_info *shinfo = NULL;
1547         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1548         uint16_t buf_len;
1549         rte_iova_t iova;
1550         void *buf;
1551
1552         /* Try to use the pkt buffer to store shinfo, to reduce the amount of
1553          * memory required; otherwise store shinfo in the new buffer.
1554          */
1555         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1556                 shinfo = rte_pktmbuf_mtod(pkt,
1557                                           struct rte_mbuf_ext_shared_info *);
1558         else {
1559                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1560                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1561         }
1562
1563         if (unlikely(total_len > UINT16_MAX))
1564                 return -ENOSPC;
1565
1566         buf_len = total_len;
1567         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1568         if (unlikely(buf == NULL))
1569                 return -ENOMEM;
1570
1571         /* Initialize shinfo */
1572         if (shinfo) {
1573                 shinfo->free_cb = virtio_dev_extbuf_free;
1574                 shinfo->fcb_opaque = buf;
1575                 rte_mbuf_ext_refcnt_set(shinfo, 1);
1576         } else {
1577                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1578                                               virtio_dev_extbuf_free, buf);
1579                 if (unlikely(shinfo == NULL)) {
1580                         rte_free(buf);
1581                         RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n");
1582                         return -1;
1583                 }
1584         }
1585
1586         iova = rte_malloc_virt2iova(buf);
1587         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1588         rte_pktmbuf_reset_headroom(pkt);
1589
1590         return 0;
1591 }
1592
1593 /*
1594  * Allocate a pktmbuf able to hold data_len bytes, respecting host buffer constraints.
1595  */
1596 static __rte_always_inline struct rte_mbuf *
1597 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1598                          uint32_t data_len)
1599 {
1600         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1601
1602         if (unlikely(pkt == NULL))
1603                 return NULL;
1604
1605         if (rte_pktmbuf_tailroom(pkt) >= data_len)
1606                 return pkt;
1607
1608         /* attach an external buffer if supported */
1609         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1610                 return pkt;
1611
1612         /* check if chained buffers are allowed */
1613         if (!dev->linearbuf)
1614                 return pkt;
1615
1616         /* Data doesn't fit into the buffer and the host supports
1617          * only linear buffers; free the mbuf and fail.
1618          */
1619         rte_pktmbuf_free(pkt);
1620
1621         return NULL;
1622 }
1623
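/*
 * Dequeue up to 'count' packets (capped at MAX_PKT_BURST) from a split
 * virtqueue: reclaim consumed zero-copy mbufs first, then copy (or, in
 * zero-copy mode, pin) each available descriptor chain into a newly
 * allocated mbuf, flushing the shadow used ring and kicking the guest
 * when needed.
 */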
1624 static __rte_noinline uint16_t
1625 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1626         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1627 {
1628         uint16_t i;
1629         uint16_t free_entries;
1630
1631         if (unlikely(dev->dequeue_zero_copy)) {
1632                 struct zcopy_mbuf *zmbuf, *next;
1633
1634                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1635                      zmbuf != NULL; zmbuf = next) {
1636                         next = TAILQ_NEXT(zmbuf, next);
1637
1638                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1639                                 update_shadow_used_ring_split(vq,
1640                                                 zmbuf->desc_idx, 0);
1641                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1642                                 restore_mbuf(zmbuf->mbuf);
1643                                 rte_pktmbuf_free(zmbuf->mbuf);
1644                                 put_zmbuf(zmbuf);
1645                                 vq->nr_zmbuf -= 1;
1646                         }
1647                 }
1648
1649                 if (likely(vq->shadow_used_idx)) {
1650                         flush_shadow_used_ring_split(dev, vq);
1651                         vhost_vring_call_split(dev, vq);
1652                 }
1653         }
1654
1655         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1656                         vq->last_avail_idx;
1657         if (free_entries == 0)
1658                 return 0;
1659
1660         /*
1661          * The ordering between avail index and
1662          * desc reads needs to be enforced.
1663          */
1664         rte_smp_rmb();
1665
1666         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1667
1668         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1669
1670         count = RTE_MIN(count, MAX_PKT_BURST);
1671         count = RTE_MIN(count, free_entries);
1672         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1673                         dev->vid, count);
1674
1675         for (i = 0; i < count; i++) {
1676                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1677                 uint16_t head_idx;
1678                 uint32_t buf_len;
1679                 uint16_t nr_vec = 0;
1680                 int err;
1681
1682                 if (unlikely(fill_vec_buf_split(dev, vq,
1683                                                 vq->last_avail_idx + i,
1684                                                 &nr_vec, buf_vec,
1685                                                 &head_idx, &buf_len,
1686                                                 VHOST_ACCESS_RO) < 0))
1687                         break;
1688
1689                 if (likely(dev->dequeue_zero_copy == 0))
1690                         update_shadow_used_ring_split(vq, head_idx, 0);
1691
1692                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1693                 if (unlikely(pkts[i] == NULL))
1694                         break;
1695
1696                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1697                                 mbuf_pool);
1698                 if (unlikely(err)) {
1699                         rte_pktmbuf_free(pkts[i]);
1700                         break;
1701                 }
1702
1703                 if (unlikely(dev->dequeue_zero_copy)) {
1704                         struct zcopy_mbuf *zmbuf;
1705
1706                         zmbuf = get_zmbuf(vq);
1707                         if (!zmbuf) {
1708                                 rte_pktmbuf_free(pkts[i]);
1709                                 break;
1710                         }
1711                         zmbuf->mbuf = pkts[i];
1712                         zmbuf->desc_idx = head_idx;
1713
1714                         /*
1715                          * Pin the mbuf by bumping its refcount; we check
1716                          * later whether it has been freed (i.e. we were
1717                          * the last user), in which case the used ring
1718                          * can be updated safely.
1719                          */
1720                         rte_mbuf_refcnt_update(pkts[i], 1);
1721
1722                         vq->nr_zmbuf += 1;
1723                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1724                 }
1725         }
1726         vq->last_avail_idx += i;
1727
1728         if (likely(dev->dequeue_zero_copy == 0)) {
1729                 do_data_copy_dequeue(vq);
1730                 if (unlikely(i < count))
1731                         vq->shadow_used_idx = i;
1732                 if (likely(vq->shadow_used_idx)) {
1733                         flush_shadow_used_ring_split(dev, vq);
1734                         vhost_vring_call_split(dev, vq);
1735                 }
1736         }
1737
1738         return i;
1739 }
1740
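/*
 * Check that a full batch of PACKED_BATCH_SIZE descriptors is available,
 * in range and mapped, then allocate one mbuf per descriptor sized to its
 * length. On success the translated descriptor addresses and ids are
 * returned to the caller; when host offloads are negotiated, the
 * virtio-net header of each packet is parsed as well. On failure the
 * allocated mbufs are freed.
 */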
1741 static __rte_always_inline int
1742 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1743                                  struct vhost_virtqueue *vq,
1744                                  struct rte_mempool *mbuf_pool,
1745                                  struct rte_mbuf **pkts,
1746                                  uint16_t avail_idx,
1747                                  uintptr_t *desc_addrs,
1748                                  uint16_t *ids)
1749 {
1750         bool wrap = vq->avail_wrap_counter;
1751         struct vring_packed_desc *descs = vq->desc_packed;
1752         struct virtio_net_hdr *hdr;
1753         uint64_t lens[PACKED_BATCH_SIZE];
1754         uint64_t buf_lens[PACKED_BATCH_SIZE];
1755         uint32_t buf_offset = dev->vhost_hlen;
1756         uint16_t flags, i;
1757
1758         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1759                 return -1;
1760         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1761                 return -1;
1762
1763         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1764                 flags = descs[avail_idx + i].flags;
1765                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1766                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
1767                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1768                         return -1;
1769         }
1770
1771         rte_smp_rmb();
1772
1773         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1774                 lens[i] = descs[avail_idx + i].len;
1775
1776         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1777                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1778                                                   descs[avail_idx + i].addr,
1779                                                   &lens[i], VHOST_ACCESS_RW);
1780         }
1781
1782         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1783                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1784                         return -1;
1785         }
1786
1787         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1788                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1789                 if (!pkts[i])
1790                         goto free_buf;
1791         }
1792
1793         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1794                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1795
1796         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1797                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1798                         goto free_buf;
1799         }
1800
1801         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1802                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1803                 pkts[i]->data_len = pkts[i]->pkt_len;
1804                 ids[i] = descs[avail_idx + i].id;
1805         }
1806
1807         if (virtio_net_with_host_offload(dev)) {
1808                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1809                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1810                         vhost_dequeue_offload(hdr, pkts[i]);
1811                 }
1812         }
1813
1814         return 0;
1815
1816 free_buf:
1817         for (i = 0; i < PACKED_BATCH_SIZE; i++)
1818                 rte_pktmbuf_free(pkts[i]);
1819
1820         return -1;
1821 }
1822
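/*
 * Fast path: dequeue PACKED_BATCH_SIZE descriptors at once. The payload
 * that follows the virtio-net header in each descriptor is copied into its
 * mbuf, the shadow used entries are recorded and last_avail_idx is
 * advanced by a full batch.
 */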
1823 static __rte_always_inline int
1824 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1825                            struct vhost_virtqueue *vq,
1826                            struct rte_mempool *mbuf_pool,
1827                            struct rte_mbuf **pkts)
1828 {
1829         uint16_t avail_idx = vq->last_avail_idx;
1830         uint32_t buf_offset = dev->vhost_hlen;
1831         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1832         uint16_t ids[PACKED_BATCH_SIZE];
1833         uint16_t i;
1834
1835         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1836                                              avail_idx, desc_addrs, ids))
1837                 return -1;
1838
1839         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1840                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1841
1842         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1843                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1844                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1845                            pkts[i]->pkt_len);
1846
1847         vhost_shadow_dequeue_batch_packed(dev, vq, ids);
1848
1849         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1850
1851         return 0;
1852 }
1853
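/*
 * Dequeue a single descriptor chain from a packed virtqueue into a newly
 * allocated mbuf; returns the buffer id and the number of descriptors
 * consumed so the caller can update the used ring.
 */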
1854 static __rte_always_inline int
1855 vhost_dequeue_single_packed(struct virtio_net *dev,
1856                             struct vhost_virtqueue *vq,
1857                             struct rte_mempool *mbuf_pool,
1858                             struct rte_mbuf **pkts,
1859                             uint16_t *buf_id,
1860                             uint16_t *desc_count)
1861 {
1862         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1863         uint32_t buf_len;
1864         uint16_t nr_vec = 0;
1865         int err;
1866
1867         if (unlikely(fill_vec_buf_packed(dev, vq,
1868                                          vq->last_avail_idx, desc_count,
1869                                          buf_vec, &nr_vec,
1870                                          buf_id, &buf_len,
1871                                          VHOST_ACCESS_RO) < 0))
1872                 return -1;
1873
1874         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1875         if (unlikely(*pkts == NULL)) {
1876                 RTE_LOG(ERR, VHOST_DATA,
1877                         "Failed to allocate memory for mbuf.\n");
1878                 return -1;
1879         }
1880
1881         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
1882                                 mbuf_pool);
1883         if (unlikely(err)) {
1884                 rte_pktmbuf_free(*pkts);
1885                 return -1;
1886         }
1887
1888         return 0;
1889 }
1890
1891 static __rte_always_inline int
1892 virtio_dev_tx_single_packed(struct virtio_net *dev,
1893                             struct vhost_virtqueue *vq,
1894                             struct rte_mempool *mbuf_pool,
1895                             struct rte_mbuf **pkts)
1896 {
1897
1898         uint16_t buf_id, desc_count;
1899
1900         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1901                                         &desc_count))
1902                 return -1;
1903
1904         vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
1905
1906         vq_inc_last_avail_packed(vq, desc_count);
1907
1908         return 0;
1909 }
1910
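/*
 * Zero-copy variant of the batched dequeue: the dequeued mbufs are pinned
 * (refcount bumped) and tracked in the zmbuf list, so the packed used ring
 * is only updated later, in free_zmbuf(), once the application has
 * released them.
 */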
1911 static __rte_always_inline int
1912 virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
1913                                  struct vhost_virtqueue *vq,
1914                                  struct rte_mempool *mbuf_pool,
1915                                  struct rte_mbuf **pkts)
1916 {
1917         struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
1918         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1919         uint16_t ids[PACKED_BATCH_SIZE];
1920         uint16_t i;
1921
1922         uint16_t avail_idx = vq->last_avail_idx;
1923
1924         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1925                                              avail_idx, desc_addrs, ids))
1926                 return -1;
1927
1928         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1929                 zmbufs[i] = get_zmbuf(vq);
1930
1931         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1932                 if (!zmbufs[i])
1933                         goto free_pkt;
1934         }
1935
1936         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1937                 zmbufs[i]->mbuf = pkts[i];
1938                 zmbufs[i]->desc_idx = avail_idx + i;
1939                 zmbufs[i]->desc_count = 1;
1940         }
1941
1942         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1943                 rte_mbuf_refcnt_update(pkts[i], 1);
1944
1945         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1946                 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
1947
1948         vq->nr_zmbuf += PACKED_BATCH_SIZE;
1949         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1950
1951         return 0;
1952
1953 free_pkt:
1954         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1955                 rte_pktmbuf_free(pkts[i]);
1956
1957         return -1;
1958 }
1959
1960 static __rte_always_inline int
1961 virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
1962                                   struct vhost_virtqueue *vq,
1963                                   struct rte_mempool *mbuf_pool,
1964                                   struct rte_mbuf **pkts)
1965 {
1966         uint16_t buf_id, desc_count;
1967         struct zcopy_mbuf *zmbuf;
1968
1969         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1970                                         &desc_count))
1971                 return -1;
1972
1973         zmbuf = get_zmbuf(vq);
1974         if (!zmbuf) {
1975                 rte_pktmbuf_free(*pkts);
1976                 return -1;
1977         }
1978         zmbuf->mbuf = *pkts;
1979         zmbuf->desc_idx = vq->last_avail_idx;
1980         zmbuf->desc_count = desc_count;
1981
1982         rte_mbuf_refcnt_update(*pkts, 1);
1983
1984         vq->nr_zmbuf += 1;
1985         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1986
1987         vq_inc_last_avail_packed(vq, desc_count);
1988         return 0;
1989 }
1990
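/*
 * Return descriptors for zero-copy mbufs the application has finished
 * with: for each consumed mbuf, write a used entry into the packed ring
 * (flags last, after a write barrier), recycle the zmbuf slot and free
 * the mbuf.
 */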
1991 static __rte_always_inline void
1992 free_zmbuf(struct vhost_virtqueue *vq)
1993 {
1994         struct zcopy_mbuf *next = NULL;
1995         struct zcopy_mbuf *zmbuf;
1996
1997         for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1998              zmbuf != NULL; zmbuf = next) {
1999                 next = TAILQ_NEXT(zmbuf, next);
2000
2001                 uint16_t last_used_idx = vq->last_used_idx;
2002
2003                 if (mbuf_is_consumed(zmbuf->mbuf)) {
2004                         uint16_t flags;
2005                         flags = vq->desc_packed[last_used_idx].flags;
2006                         if (vq->used_wrap_counter) {
2007                                 flags |= VRING_DESC_F_USED;
2008                                 flags |= VRING_DESC_F_AVAIL;
2009                         } else {
2010                                 flags &= ~VRING_DESC_F_USED;
2011                                 flags &= ~VRING_DESC_F_AVAIL;
2012                         }
2013
2014                         vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
2015                         vq->desc_packed[last_used_idx].len = 0;
2016
2017                         rte_smp_wmb();
2018                         vq->desc_packed[last_used_idx].flags = flags;
2019
2020                         vq_inc_last_used_packed(vq, zmbuf->desc_count);
2021
2022                         TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2023                         restore_mbuf(zmbuf->mbuf);
2024                         rte_pktmbuf_free(zmbuf->mbuf);
2025                         put_zmbuf(zmbuf);
2026                         vq->nr_zmbuf -= 1;
2027                 }
2028         }
2029 }
2030
2031 static __rte_noinline uint16_t
2032 virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
2033                            struct vhost_virtqueue *vq,
2034                            struct rte_mempool *mbuf_pool,
2035                            struct rte_mbuf **pkts,
2036                            uint32_t count)
2037 {
2038         uint32_t pkt_idx = 0;
2039         uint32_t remained = count;
2040
2041         free_zmbuf(vq);
2042
2043         do {
2044                 if (remained >= PACKED_BATCH_SIZE) {
2045                         if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
2046                                 mbuf_pool, &pkts[pkt_idx])) {
2047                                 pkt_idx += PACKED_BATCH_SIZE;
2048                                 remained -= PACKED_BATCH_SIZE;
2049                                 continue;
2050                         }
2051                 }
2052
2053                 if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
2054                                                       &pkts[pkt_idx]))
2055                         break;
2056                 pkt_idx++;
2057                 remained--;
2058
2059         } while (remained);
2060
2061         if (pkt_idx)
2062                 vhost_vring_call_packed(dev, vq);
2063
2064         return pkt_idx;
2065 }
2066
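/*
 * Packed-ring dequeue loop: dequeue PACKED_BATCH_SIZE packets at a time
 * when possible, falling back to single-descriptor dequeue otherwise, and
 * flush the dequeue shadow state as it goes.
 */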
2067 static __rte_noinline uint16_t
2068 virtio_dev_tx_packed(struct virtio_net *dev,
2069                      struct vhost_virtqueue *vq,
2070                      struct rte_mempool *mbuf_pool,
2071                      struct rte_mbuf **pkts,
2072                      uint32_t count)
2073 {
2074         uint32_t pkt_idx = 0;
2075         uint32_t remained = count;
2076
2077         do {
2078                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2079
2080                 if (remained >= PACKED_BATCH_SIZE) {
2081                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2082                                                         &pkts[pkt_idx])) {
2083                                 vhost_flush_dequeue_packed(dev, vq);
2084                                 pkt_idx += PACKED_BATCH_SIZE;
2085                                 remained -= PACKED_BATCH_SIZE;
2086                                 continue;
2087                         }
2088                 }
2089
2090                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2091                                                 &pkts[pkt_idx]))
2092                         break;
2093                 vhost_flush_dequeue_packed(dev, vq);
2094                 pkt_idx++;
2095                 remained--;
2096
2097         } while (remained);
2098
2099         if (vq->shadow_used_idx)
2100                 do_data_copy_dequeue(vq);
2101
2102         return pkt_idx;
2103 }
2104
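/*
 * Dequeue at most 'count' packets from the guest's Tx virtqueue 'queue_id'
 * of device 'vid' into 'pkts', allocating mbufs from 'mbuf_pool'. Returns
 * the number of packets actually dequeued (possibly including an injected
 * RARP packet, see below).
 *
 * Illustrative usage sketch only (handle_packet() is an application-defined
 * placeholder and the mbuf pool is created elsewhere; not part of this file):
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nr, i;
 *
 *	nr = rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool,
 *			pkts, MAX_PKT_BURST);
 *	for (i = 0; i < nr; i++) {
 *		handle_packet(pkts[i]);		// e.g. forward or inspect
 *		rte_pktmbuf_free(pkts[i]);
 *	}
 */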
2105 uint16_t
2106 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2107         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2108 {
2109         struct virtio_net *dev;
2110         struct rte_mbuf *rarp_mbuf = NULL;
2111         struct vhost_virtqueue *vq;
2112
2113         dev = get_device(vid);
2114         if (!dev)
2115                 return 0;
2116
2117         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2118                 RTE_LOG(ERR, VHOST_DATA,
2119                         "(%d) %s: built-in vhost net backend is disabled.\n",
2120                         dev->vid, __func__);
2121                 return 0;
2122         }
2123
2124         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2125                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
2126                         dev->vid, __func__, queue_id);
2127                 return 0;
2128         }
2129
2130         vq = dev->virtqueue[queue_id];
2131
2132         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2133                 return 0;
2134
2135         if (unlikely(vq->enabled == 0)) {
2136                 count = 0;
2137                 goto out_access_unlock;
2138         }
2139
2140         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2141                 vhost_user_iotlb_rd_lock(vq);
2142
2143         if (unlikely(vq->access_ok == 0))
2144                 if (unlikely(vring_translate(dev, vq) < 0)) {
2145                         count = 0;
2146                         goto out;
2147                 }
2148
2149         /*
2150          * Construct a RARP broadcast packet, and inject it into the "pkts"
2151          * array, to make it look like the guest actually sent such a packet.
2152          *
2153          * Check user_send_rarp() for more information.
2154          *
2155          * broadcast_rarp shares a cacheline in the virtio_net structure
2156          * with some fields that are accessed during enqueue and
2157          * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
2158          * result in false sharing between enqueue and dequeue.
2159          *
2160          * Prevent unnecessary false sharing by reading broadcast_rarp first
2161          * and only performing cmpset if the read indicates it is likely to
2162          * be set.
2163          */
2164         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
2165                         rte_atomic16_cmpset((volatile uint16_t *)
2166                                 &dev->broadcast_rarp.cnt, 1, 0))) {
2167
2168                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2169                 if (rarp_mbuf == NULL) {
2170                         RTE_LOG(ERR, VHOST_DATA,
2171                                 "Failed to make RARP packet.\n");
2172                         count = 0;
2173                         goto out;
2174                 }
2175                 count -= 1;
2176         }
2177
2178         if (vq_is_packed(dev)) {
2179                 if (unlikely(dev->dequeue_zero_copy))
2180                         count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
2181                                                            pkts, count);
2182                 else
2183                         count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
2184                                                      count);
2185         } else
2186                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2187
2188 out:
2189         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2190                 vhost_user_iotlb_rd_unlock(vq);
2191
2192 out_access_unlock:
2193         rte_spinlock_unlock(&vq->access_lock);
2194
2195         if (unlikely(rarp_mbuf != NULL)) {
2196                 /*
2197                  * Inject it at the head of the "pkts" array, so that the
2198                  * switch's MAC learning table gets updated first.
2199                  */
2200                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2201                 pkts[0] = rarp_mbuf;
2202                 count += 1;
2203         }
2204
2205         return count;
2206 }