1c63262cefd0ce83ab609f616b943a08f0603f48
[dpdk.git] / lib / librte_vhost / virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static  __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static bool
35 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
36 {
37         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
38 }
39
40 static __rte_always_inline void
41 do_flush_shadow_used_ring_split(struct virtio_net *dev,
42                         struct vhost_virtqueue *vq,
43                         uint16_t to, uint16_t from, uint16_t size)
44 {
45         rte_memcpy(&vq->used->ring[to],
46                         &vq->shadow_used_split[from],
47                         size * sizeof(struct vring_used_elem));
48         vhost_log_cache_used_vring(dev, vq,
49                         offsetof(struct vring_used, ring[to]),
50                         size * sizeof(struct vring_used_elem));
51 }
52
53 static __rte_always_inline void
54 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
55 {
56         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
57
58         if (used_idx + vq->shadow_used_idx <= vq->size) {
59                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
60                                           vq->shadow_used_idx);
61         } else {
62                 uint16_t size;
63
64                 /* update used ring interval [used_idx, vq->size] */
65                 size = vq->size - used_idx;
66                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
67
68                 /* update the left half used ring interval [0, left_size] */
69                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
70                                           vq->shadow_used_idx - size);
71         }
72         vq->last_used_idx += vq->shadow_used_idx;
73
74         rte_smp_wmb();
75
76         vhost_log_cache_sync(dev, vq);
77
78         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
79         vq->shadow_used_idx = 0;
80         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
81                 sizeof(vq->used->idx));
82 }
83
84 static __rte_always_inline void
85 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
86                          uint16_t desc_idx, uint32_t len)
87 {
88         uint16_t i = vq->shadow_used_idx++;
89
90         vq->shadow_used_split[i].id  = desc_idx;
91         vq->shadow_used_split[i].len = len;
92 }
93
94 static __rte_always_inline void
95 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
96                                   struct vhost_virtqueue *vq)
97 {
98         int i;
99         uint16_t used_idx = vq->last_used_idx;
100         uint16_t head_idx = vq->last_used_idx;
101         uint16_t head_flags = 0;
102
103         /* Split loop in two to save memory barriers */
104         for (i = 0; i < vq->shadow_used_idx; i++) {
105                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
106                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
107
108                 used_idx += vq->shadow_used_packed[i].count;
109                 if (used_idx >= vq->size)
110                         used_idx -= vq->size;
111         }
112
113         rte_smp_wmb();
114
115         for (i = 0; i < vq->shadow_used_idx; i++) {
116                 uint16_t flags;
117
118                 if (vq->shadow_used_packed[i].len)
119                         flags = VRING_DESC_F_WRITE;
120                 else
121                         flags = 0;
122
123                 if (vq->used_wrap_counter) {
124                         flags |= VRING_DESC_F_USED;
125                         flags |= VRING_DESC_F_AVAIL;
126                 } else {
127                         flags &= ~VRING_DESC_F_USED;
128                         flags &= ~VRING_DESC_F_AVAIL;
129                 }
130
131                 if (i > 0) {
132                         vq->desc_packed[vq->last_used_idx].flags = flags;
133
134                         vhost_log_cache_used_vring(dev, vq,
135                                         vq->last_used_idx *
136                                         sizeof(struct vring_packed_desc),
137                                         sizeof(struct vring_packed_desc));
138                 } else {
139                         head_idx = vq->last_used_idx;
140                         head_flags = flags;
141                 }
142
143                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
144         }
145
146         vq->desc_packed[head_idx].flags = head_flags;
147
148         vhost_log_cache_used_vring(dev, vq,
149                                 head_idx *
150                                 sizeof(struct vring_packed_desc),
151                                 sizeof(struct vring_packed_desc));
152
153         vq->shadow_used_idx = 0;
154         vhost_log_cache_sync(dev, vq);
155 }
156
157 static __rte_always_inline void
158 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
159                                   struct vhost_virtqueue *vq)
160 {
161         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
162
163         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
164         rte_smp_wmb();
165         vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
166
167         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
168                                    sizeof(struct vring_packed_desc),
169                                    sizeof(struct vring_packed_desc));
170         vq->shadow_used_idx = 0;
171         vhost_log_cache_sync(dev, vq);
172 }
173
174 static __rte_always_inline void
175 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
176                                  struct vhost_virtqueue *vq,
177                                  uint64_t *lens,
178                                  uint16_t *ids)
179 {
180         uint16_t i;
181         uint16_t flags;
182
183         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
184
185         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
186                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
187                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
188         }
189
190         rte_smp_wmb();
191
192         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
193                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
194
195         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
196                                    sizeof(struct vring_packed_desc),
197                                    sizeof(struct vring_packed_desc) *
198                                    PACKED_BATCH_SIZE);
199         vhost_log_cache_sync(dev, vq);
200
201         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
202 }
203
204 static __rte_always_inline void
205 flush_shadow_used_ring_packed(struct virtio_net *dev,
206                         struct vhost_virtqueue *vq)
207 {
208         int i;
209         uint16_t used_idx = vq->last_used_idx;
210         uint16_t head_idx = vq->last_used_idx;
211         uint16_t head_flags = 0;
212
213         /* Split loop in two to save memory barriers */
214         for (i = 0; i < vq->shadow_used_idx; i++) {
215                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
216                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
217
218                 used_idx += vq->shadow_used_packed[i].count;
219                 if (used_idx >= vq->size)
220                         used_idx -= vq->size;
221         }
222
223         for (i = 0; i < vq->shadow_used_idx; i++) {
224                 uint16_t flags;
225
226                 if (vq->shadow_used_packed[i].len)
227                         flags = VRING_DESC_F_WRITE;
228                 else
229                         flags = 0;
230
231                 if (vq->used_wrap_counter) {
232                         flags |= VRING_DESC_F_USED;
233                         flags |= VRING_DESC_F_AVAIL;
234                 } else {
235                         flags &= ~VRING_DESC_F_USED;
236                         flags &= ~VRING_DESC_F_AVAIL;
237                 }
238
239                 if (i > 0) {
240                         vq->desc_packed[vq->last_used_idx].flags = flags;
241
242                         vhost_log_cache_used_vring(dev, vq,
243                                         vq->last_used_idx *
244                                         sizeof(struct vring_packed_desc),
245                                         sizeof(struct vring_packed_desc));
246                 } else {
247                         head_idx = vq->last_used_idx;
248                         head_flags = flags;
249                 }
250
251                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
252         }
253
254         __atomic_store_n(&vq->desc_packed[head_idx].flags, head_flags,
255                          __ATOMIC_RELEASE);
256
257         vhost_log_cache_used_vring(dev, vq,
258                                 head_idx *
259                                 sizeof(struct vring_packed_desc),
260                                 sizeof(struct vring_packed_desc));
261
262         vq->shadow_used_idx = 0;
263         vhost_log_cache_sync(dev, vq);
264 }
265
266 static __rte_always_inline void
267 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
268                                   struct vhost_virtqueue *vq,
269                                   uint16_t *ids)
270 {
271         uint16_t flags;
272         uint16_t i;
273         uint16_t begin;
274
275         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
276
277         if (!vq->shadow_used_idx) {
278                 vq->shadow_last_used_idx = vq->last_used_idx;
279                 vq->shadow_used_packed[0].id  = ids[0];
280                 vq->shadow_used_packed[0].len = 0;
281                 vq->shadow_used_packed[0].count = 1;
282                 vq->shadow_used_packed[0].flags = flags;
283                 vq->shadow_used_idx++;
284                 begin = 1;
285         } else
286                 begin = 0;
287
288         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
289                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
290                 vq->desc_packed[vq->last_used_idx + i].len = 0;
291         }
292
293         rte_smp_wmb();
294         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
295                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
296
297         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
298                                    sizeof(struct vring_packed_desc),
299                                    sizeof(struct vring_packed_desc) *
300                                    PACKED_BATCH_SIZE);
301         vhost_log_cache_sync(dev, vq);
302
303         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
304 }
305
306 static __rte_always_inline void
307 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
308                                    uint16_t buf_id,
309                                    uint16_t count)
310 {
311         uint16_t flags;
312
313         flags = vq->desc_packed[vq->last_used_idx].flags;
314         if (vq->used_wrap_counter) {
315                 flags |= VRING_DESC_F_USED;
316                 flags |= VRING_DESC_F_AVAIL;
317         } else {
318                 flags &= ~VRING_DESC_F_USED;
319                 flags &= ~VRING_DESC_F_AVAIL;
320         }
321
322         if (!vq->shadow_used_idx) {
323                 vq->shadow_last_used_idx = vq->last_used_idx;
324
325                 vq->shadow_used_packed[0].id  = buf_id;
326                 vq->shadow_used_packed[0].len = 0;
327                 vq->shadow_used_packed[0].flags = flags;
328                 vq->shadow_used_idx++;
329         } else {
330                 vq->desc_packed[vq->last_used_idx].id = buf_id;
331                 vq->desc_packed[vq->last_used_idx].len = 0;
332                 vq->desc_packed[vq->last_used_idx].flags = flags;
333         }
334
335         vq_inc_last_used_packed(vq, count);
336 }
337
338 static __rte_always_inline void
339 update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
340                          uint16_t desc_idx, uint32_t len, uint16_t count)
341 {
342         uint16_t i = vq->shadow_used_idx++;
343
344         vq->shadow_used_packed[i].id  = desc_idx;
345         vq->shadow_used_packed[i].len = len;
346         vq->shadow_used_packed[i].count = count;
347 }
348
349 static inline void
350 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
351 {
352         struct batch_copy_elem *elem = vq->batch_copy_elems;
353         uint16_t count = vq->batch_copy_nb_elems;
354         int i;
355
356         for (i = 0; i < count; i++) {
357                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
358                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
359                                            elem[i].len);
360                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
361         }
362
363         vq->batch_copy_nb_elems = 0;
364 }
365
366 static inline void
367 do_data_copy_dequeue(struct vhost_virtqueue *vq)
368 {
369         struct batch_copy_elem *elem = vq->batch_copy_elems;
370         uint16_t count = vq->batch_copy_nb_elems;
371         int i;
372
373         for (i = 0; i < count; i++)
374                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
375
376         vq->batch_copy_nb_elems = 0;
377 }
378
379 static __rte_always_inline void
380 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
381                                    struct vhost_virtqueue *vq,
382                                    uint32_t len[],
383                                    uint16_t id[],
384                                    uint16_t count[],
385                                    uint16_t num_buffers)
386 {
387         uint16_t i;
388         for (i = 0; i < num_buffers; i++) {
389                 /* enqueue shadow flush action aligned with batch num */
390                 if (!vq->shadow_used_idx)
391                         vq->shadow_aligned_idx = vq->last_used_idx &
392                                 PACKED_BATCH_MASK;
393                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
394                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
395                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
396                 vq->shadow_aligned_idx += count[i];
397                 vq->shadow_used_idx++;
398         }
399
400         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
401                 do_data_copy_enqueue(dev, vq);
402                 vhost_flush_enqueue_shadow_packed(dev, vq);
403         }
404 }
405
406 static __rte_unused void
407 vhost_flush_dequeue_packed(struct virtio_net *dev,
408                            struct vhost_virtqueue *vq)
409 {
410         int shadow_count;
411         if (!vq->shadow_used_idx)
412                 return;
413
414         shadow_count = vq->last_used_idx - vq->shadow_last_used_idx;
415         if (shadow_count <= 0)
416                 shadow_count += vq->size;
417
418         if ((uint32_t)shadow_count >= (vq->size - MAX_PKT_BURST)) {
419                 do_data_copy_dequeue(vq);
420                 vhost_flush_dequeue_shadow_packed(dev, vq);
421                 vhost_vring_call_packed(dev, vq);
422         }
423 }
424
425 /* avoid write operation when necessary, to lessen cache issues */
426 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
427         if ((var) != (val))                     \
428                 (var) = (val);                  \
429 } while (0)
430
431 static __rte_always_inline void
432 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
433 {
434         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
435
436         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
437                 csum_l4 |= PKT_TX_TCP_CKSUM;
438
439         if (csum_l4) {
440                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
441                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
442
443                 switch (csum_l4) {
444                 case PKT_TX_TCP_CKSUM:
445                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
446                                                 cksum));
447                         break;
448                 case PKT_TX_UDP_CKSUM:
449                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
450                                                 dgram_cksum));
451                         break;
452                 case PKT_TX_SCTP_CKSUM:
453                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
454                                                 cksum));
455                         break;
456                 }
457         } else {
458                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
459                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
460                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
461         }
462
463         /* IP cksum verification cannot be bypassed, then calculate here */
464         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
465                 struct rte_ipv4_hdr *ipv4_hdr;
466
467                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
468                                                    m_buf->l2_len);
469                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
470         }
471
472         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
473                 if (m_buf->ol_flags & PKT_TX_IPV4)
474                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
475                 else
476                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
477                 net_hdr->gso_size = m_buf->tso_segsz;
478                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
479                                         + m_buf->l4_len;
480         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
481                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
482                 net_hdr->gso_size = m_buf->tso_segsz;
483                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
484                         m_buf->l4_len;
485         } else {
486                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
487                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
488                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
489         }
490 }
491
492 static __rte_always_inline int
493 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
494                 struct buf_vector *buf_vec, uint16_t *vec_idx,
495                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
496 {
497         uint16_t vec_id = *vec_idx;
498
499         while (desc_len) {
500                 uint64_t desc_addr;
501                 uint64_t desc_chunck_len = desc_len;
502
503                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
504                         return -1;
505
506                 desc_addr = vhost_iova_to_vva(dev, vq,
507                                 desc_iova,
508                                 &desc_chunck_len,
509                                 perm);
510                 if (unlikely(!desc_addr))
511                         return -1;
512
513                 rte_prefetch0((void *)(uintptr_t)desc_addr);
514
515                 buf_vec[vec_id].buf_iova = desc_iova;
516                 buf_vec[vec_id].buf_addr = desc_addr;
517                 buf_vec[vec_id].buf_len  = desc_chunck_len;
518
519                 desc_len -= desc_chunck_len;
520                 desc_iova += desc_chunck_len;
521                 vec_id++;
522         }
523         *vec_idx = vec_id;
524
525         return 0;
526 }
527
528 static __rte_always_inline int
529 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
530                          uint32_t avail_idx, uint16_t *vec_idx,
531                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
532                          uint32_t *desc_chain_len, uint8_t perm)
533 {
534         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
535         uint16_t vec_id = *vec_idx;
536         uint32_t len    = 0;
537         uint64_t dlen;
538         uint32_t nr_descs = vq->size;
539         uint32_t cnt    = 0;
540         struct vring_desc *descs = vq->desc;
541         struct vring_desc *idesc = NULL;
542
543         if (unlikely(idx >= vq->size))
544                 return -1;
545
546         *desc_chain_head = idx;
547
548         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
549                 dlen = vq->desc[idx].len;
550                 nr_descs = dlen / sizeof(struct vring_desc);
551                 if (unlikely(nr_descs > vq->size))
552                         return -1;
553
554                 descs = (struct vring_desc *)(uintptr_t)
555                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
556                                                 &dlen,
557                                                 VHOST_ACCESS_RO);
558                 if (unlikely(!descs))
559                         return -1;
560
561                 if (unlikely(dlen < vq->desc[idx].len)) {
562                         /*
563                          * The indirect desc table is not contiguous
564                          * in process VA space, we have to copy it.
565                          */
566                         idesc = vhost_alloc_copy_ind_table(dev, vq,
567                                         vq->desc[idx].addr, vq->desc[idx].len);
568                         if (unlikely(!idesc))
569                                 return -1;
570
571                         descs = idesc;
572                 }
573
574                 idx = 0;
575         }
576
577         while (1) {
578                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
579                         free_ind_table(idesc);
580                         return -1;
581                 }
582
583                 len += descs[idx].len;
584
585                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
586                                                 descs[idx].addr, descs[idx].len,
587                                                 perm))) {
588                         free_ind_table(idesc);
589                         return -1;
590                 }
591
592                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
593                         break;
594
595                 idx = descs[idx].next;
596         }
597
598         *desc_chain_len = len;
599         *vec_idx = vec_id;
600
601         if (unlikely(!!idesc))
602                 free_ind_table(idesc);
603
604         return 0;
605 }
606
607 /*
608  * Returns -1 on fail, 0 on success
609  */
610 static inline int
611 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
612                                 uint32_t size, struct buf_vector *buf_vec,
613                                 uint16_t *num_buffers, uint16_t avail_head,
614                                 uint16_t *nr_vec)
615 {
616         uint16_t cur_idx;
617         uint16_t vec_idx = 0;
618         uint16_t max_tries, tries = 0;
619
620         uint16_t head_idx = 0;
621         uint32_t len = 0;
622
623         *num_buffers = 0;
624         cur_idx  = vq->last_avail_idx;
625
626         if (rxvq_is_mergeable(dev))
627                 max_tries = vq->size - 1;
628         else
629                 max_tries = 1;
630
631         while (size > 0) {
632                 if (unlikely(cur_idx == avail_head))
633                         return -1;
634                 /*
635                  * if we tried all available ring items, and still
636                  * can't get enough buf, it means something abnormal
637                  * happened.
638                  */
639                 if (unlikely(++tries > max_tries))
640                         return -1;
641
642                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
643                                                 &vec_idx, buf_vec,
644                                                 &head_idx, &len,
645                                                 VHOST_ACCESS_RW) < 0))
646                         return -1;
647                 len = RTE_MIN(len, size);
648                 update_shadow_used_ring_split(vq, head_idx, len);
649                 size -= len;
650
651                 cur_idx++;
652                 *num_buffers += 1;
653         }
654
655         *nr_vec = vec_idx;
656
657         return 0;
658 }
659
660 static __rte_always_inline int
661 fill_vec_buf_packed_indirect(struct virtio_net *dev,
662                         struct vhost_virtqueue *vq,
663                         struct vring_packed_desc *desc, uint16_t *vec_idx,
664                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
665 {
666         uint16_t i;
667         uint32_t nr_descs;
668         uint16_t vec_id = *vec_idx;
669         uint64_t dlen;
670         struct vring_packed_desc *descs, *idescs = NULL;
671
672         dlen = desc->len;
673         descs = (struct vring_packed_desc *)(uintptr_t)
674                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
675         if (unlikely(!descs))
676                 return -1;
677
678         if (unlikely(dlen < desc->len)) {
679                 /*
680                  * The indirect desc table is not contiguous
681                  * in process VA space, we have to copy it.
682                  */
683                 idescs = vhost_alloc_copy_ind_table(dev,
684                                 vq, desc->addr, desc->len);
685                 if (unlikely(!idescs))
686                         return -1;
687
688                 descs = idescs;
689         }
690
691         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
692         if (unlikely(nr_descs >= vq->size)) {
693                 free_ind_table(idescs);
694                 return -1;
695         }
696
697         for (i = 0; i < nr_descs; i++) {
698                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
699                         free_ind_table(idescs);
700                         return -1;
701                 }
702
703                 *len += descs[i].len;
704                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
705                                                 descs[i].addr, descs[i].len,
706                                                 perm)))
707                         return -1;
708         }
709         *vec_idx = vec_id;
710
711         if (unlikely(!!idescs))
712                 free_ind_table(idescs);
713
714         return 0;
715 }
716
717 static __rte_always_inline int
718 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
719                                 uint16_t avail_idx, uint16_t *desc_count,
720                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
721                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
722 {
723         bool wrap_counter = vq->avail_wrap_counter;
724         struct vring_packed_desc *descs = vq->desc_packed;
725         uint16_t vec_id = *vec_idx;
726
727         if (avail_idx < vq->last_avail_idx)
728                 wrap_counter ^= 1;
729
730         /*
731          * Perform a load-acquire barrier in desc_is_avail to
732          * enforce the ordering between desc flags and desc
733          * content.
734          */
735         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
736                 return -1;
737
738         *desc_count = 0;
739         *len = 0;
740
741         while (1) {
742                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
743                         return -1;
744
745                 if (unlikely(*desc_count >= vq->size))
746                         return -1;
747
748                 *desc_count += 1;
749                 *buf_id = descs[avail_idx].id;
750
751                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
752                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
753                                                         &descs[avail_idx],
754                                                         &vec_id, buf_vec,
755                                                         len, perm) < 0))
756                                 return -1;
757                 } else {
758                         *len += descs[avail_idx].len;
759
760                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
761                                                         descs[avail_idx].addr,
762                                                         descs[avail_idx].len,
763                                                         perm)))
764                                 return -1;
765                 }
766
767                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
768                         break;
769
770                 if (++avail_idx >= vq->size) {
771                         avail_idx -= vq->size;
772                         wrap_counter ^= 1;
773                 }
774         }
775
776         *vec_idx = vec_id;
777
778         return 0;
779 }
780
781 static __rte_noinline void
782 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
783                 struct buf_vector *buf_vec,
784                 struct virtio_net_hdr_mrg_rxbuf *hdr)
785 {
786         uint64_t len;
787         uint64_t remain = dev->vhost_hlen;
788         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
789         uint64_t iova = buf_vec->buf_iova;
790
791         while (remain) {
792                 len = RTE_MIN(remain,
793                                 buf_vec->buf_len);
794                 dst = buf_vec->buf_addr;
795                 rte_memcpy((void *)(uintptr_t)dst,
796                                 (void *)(uintptr_t)src,
797                                 len);
798
799                 PRINT_PACKET(dev, (uintptr_t)dst,
800                                 (uint32_t)len, 0);
801                 vhost_log_cache_write_iova(dev, vq,
802                                 iova, len);
803
804                 remain -= len;
805                 iova += len;
806                 src += len;
807                 buf_vec++;
808         }
809 }
810
811 static __rte_always_inline int
812 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
813                             struct rte_mbuf *m, struct buf_vector *buf_vec,
814                             uint16_t nr_vec, uint16_t num_buffers)
815 {
816         uint32_t vec_idx = 0;
817         uint32_t mbuf_offset, mbuf_avail;
818         uint32_t buf_offset, buf_avail;
819         uint64_t buf_addr, buf_iova, buf_len;
820         uint32_t cpy_len;
821         uint64_t hdr_addr;
822         struct rte_mbuf *hdr_mbuf;
823         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
824         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
825         int error = 0;
826
827         if (unlikely(m == NULL)) {
828                 error = -1;
829                 goto out;
830         }
831
832         buf_addr = buf_vec[vec_idx].buf_addr;
833         buf_iova = buf_vec[vec_idx].buf_iova;
834         buf_len = buf_vec[vec_idx].buf_len;
835
836         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
837                 error = -1;
838                 goto out;
839         }
840
841         hdr_mbuf = m;
842         hdr_addr = buf_addr;
843         if (unlikely(buf_len < dev->vhost_hlen))
844                 hdr = &tmp_hdr;
845         else
846                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
847
848         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
849                 dev->vid, num_buffers);
850
851         if (unlikely(buf_len < dev->vhost_hlen)) {
852                 buf_offset = dev->vhost_hlen - buf_len;
853                 vec_idx++;
854                 buf_addr = buf_vec[vec_idx].buf_addr;
855                 buf_iova = buf_vec[vec_idx].buf_iova;
856                 buf_len = buf_vec[vec_idx].buf_len;
857                 buf_avail = buf_len - buf_offset;
858         } else {
859                 buf_offset = dev->vhost_hlen;
860                 buf_avail = buf_len - dev->vhost_hlen;
861         }
862
863         mbuf_avail  = rte_pktmbuf_data_len(m);
864         mbuf_offset = 0;
865         while (mbuf_avail != 0 || m->next != NULL) {
866                 /* done with current buf, get the next one */
867                 if (buf_avail == 0) {
868                         vec_idx++;
869                         if (unlikely(vec_idx >= nr_vec)) {
870                                 error = -1;
871                                 goto out;
872                         }
873
874                         buf_addr = buf_vec[vec_idx].buf_addr;
875                         buf_iova = buf_vec[vec_idx].buf_iova;
876                         buf_len = buf_vec[vec_idx].buf_len;
877
878                         buf_offset = 0;
879                         buf_avail  = buf_len;
880                 }
881
882                 /* done with current mbuf, get the next one */
883                 if (mbuf_avail == 0) {
884                         m = m->next;
885
886                         mbuf_offset = 0;
887                         mbuf_avail  = rte_pktmbuf_data_len(m);
888                 }
889
890                 if (hdr_addr) {
891                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
892                         if (rxvq_is_mergeable(dev))
893                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
894                                                 num_buffers);
895
896                         if (unlikely(hdr == &tmp_hdr)) {
897                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
898                         } else {
899                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
900                                                 dev->vhost_hlen, 0);
901                                 vhost_log_cache_write_iova(dev, vq,
902                                                 buf_vec[0].buf_iova,
903                                                 dev->vhost_hlen);
904                         }
905
906                         hdr_addr = 0;
907                 }
908
909                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
910
911                 if (likely(cpy_len > MAX_BATCH_LEN ||
912                                         vq->batch_copy_nb_elems >= vq->size)) {
913                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
914                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
915                                 cpy_len);
916                         vhost_log_cache_write_iova(dev, vq,
917                                                    buf_iova + buf_offset,
918                                                    cpy_len);
919                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
920                                 cpy_len, 0);
921                 } else {
922                         batch_copy[vq->batch_copy_nb_elems].dst =
923                                 (void *)((uintptr_t)(buf_addr + buf_offset));
924                         batch_copy[vq->batch_copy_nb_elems].src =
925                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
926                         batch_copy[vq->batch_copy_nb_elems].log_addr =
927                                 buf_iova + buf_offset;
928                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
929                         vq->batch_copy_nb_elems++;
930                 }
931
932                 mbuf_avail  -= cpy_len;
933                 mbuf_offset += cpy_len;
934                 buf_avail  -= cpy_len;
935                 buf_offset += cpy_len;
936         }
937
938 out:
939
940         return error;
941 }
942
943 static __rte_always_inline int
944 vhost_enqueue_single_packed(struct virtio_net *dev,
945                             struct vhost_virtqueue *vq,
946                             struct rte_mbuf *pkt,
947                             struct buf_vector *buf_vec,
948                             uint16_t *nr_descs)
949 {
950         uint16_t nr_vec = 0;
951         uint16_t avail_idx = vq->last_avail_idx;
952         uint16_t max_tries, tries = 0;
953         uint16_t buf_id = 0;
954         uint32_t len = 0;
955         uint16_t desc_count;
956         uint32_t size = pkt->pkt_len + dev->vhost_hlen;
957         uint16_t num_buffers = 0;
958         uint32_t buffer_len[vq->size];
959         uint16_t buffer_buf_id[vq->size];
960         uint16_t buffer_desc_count[vq->size];
961
962         if (rxvq_is_mergeable(dev))
963                 max_tries = vq->size - 1;
964         else
965                 max_tries = 1;
966
967         while (size > 0) {
968                 /*
969                  * if we tried all available ring items, and still
970                  * can't get enough buf, it means something abnormal
971                  * happened.
972                  */
973                 if (unlikely(++tries > max_tries))
974                         return -1;
975
976                 if (unlikely(fill_vec_buf_packed(dev, vq,
977                                                 avail_idx, &desc_count,
978                                                 buf_vec, &nr_vec,
979                                                 &buf_id, &len,
980                                                 VHOST_ACCESS_RW) < 0))
981                         return -1;
982
983                 len = RTE_MIN(len, size);
984                 size -= len;
985
986                 buffer_len[num_buffers] = len;
987                 buffer_buf_id[num_buffers] = buf_id;
988                 buffer_desc_count[num_buffers] = desc_count;
989                 num_buffers += 1;
990
991                 *nr_descs += desc_count;
992                 avail_idx += desc_count;
993                 if (avail_idx >= vq->size)
994                         avail_idx -= vq->size;
995         }
996
997         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
998                 return -1;
999
1000         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
1001                                            buffer_desc_count, num_buffers);
1002
1003         return 0;
1004 }
1005
1006 static __rte_noinline uint32_t
1007 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1008         struct rte_mbuf **pkts, uint32_t count)
1009 {
1010         uint32_t pkt_idx = 0;
1011         uint16_t num_buffers;
1012         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1013         uint16_t avail_head;
1014
1015         avail_head = *((volatile uint16_t *)&vq->avail->idx);
1016
1017         /*
1018          * The ordering between avail index and
1019          * desc reads needs to be enforced.
1020          */
1021         rte_smp_rmb();
1022
1023         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1024
1025         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1026                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1027                 uint16_t nr_vec = 0;
1028
1029                 if (unlikely(reserve_avail_buf_split(dev, vq,
1030                                                 pkt_len, buf_vec, &num_buffers,
1031                                                 avail_head, &nr_vec) < 0)) {
1032                         VHOST_LOG_DEBUG(VHOST_DATA,
1033                                 "(%d) failed to get enough desc from vring\n",
1034                                 dev->vid);
1035                         vq->shadow_used_idx -= num_buffers;
1036                         break;
1037                 }
1038
1039                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1040                         dev->vid, vq->last_avail_idx,
1041                         vq->last_avail_idx + num_buffers);
1042
1043                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1044                                                 buf_vec, nr_vec,
1045                                                 num_buffers) < 0) {
1046                         vq->shadow_used_idx -= num_buffers;
1047                         break;
1048                 }
1049
1050                 vq->last_avail_idx += num_buffers;
1051         }
1052
1053         do_data_copy_enqueue(dev, vq);
1054
1055         if (likely(vq->shadow_used_idx)) {
1056                 flush_shadow_used_ring_split(dev, vq);
1057                 vhost_vring_call_split(dev, vq);
1058         }
1059
1060         return pkt_idx;
1061 }
1062
1063 static __rte_always_inline int
1064 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1065                            struct vhost_virtqueue *vq,
1066                            struct rte_mbuf **pkts)
1067 {
1068         bool wrap_counter = vq->avail_wrap_counter;
1069         struct vring_packed_desc *descs = vq->desc_packed;
1070         uint16_t avail_idx = vq->last_avail_idx;
1071         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1072         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1073         uint32_t buf_offset = dev->vhost_hlen;
1074         uint64_t lens[PACKED_BATCH_SIZE];
1075         uint16_t ids[PACKED_BATCH_SIZE];
1076         uint16_t i;
1077
1078         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1079                 return -1;
1080
1081         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1082                 return -1;
1083
1084         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1085                 if (unlikely(pkts[i]->next != NULL))
1086                         return -1;
1087                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1088                                             wrap_counter)))
1089                         return -1;
1090         }
1091
1092         rte_smp_rmb();
1093
1094         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1095                 lens[i] = descs[avail_idx + i].len;
1096
1097         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1098                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1099                         return -1;
1100         }
1101
1102         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1103                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1104                                                   descs[avail_idx + i].addr,
1105                                                   &lens[i],
1106                                                   VHOST_ACCESS_RW);
1107
1108         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1109                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1110                         return -1;
1111         }
1112
1113         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1114                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1115                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1116                                         (uintptr_t)desc_addrs[i];
1117                 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1118         }
1119
1120         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1121                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1122
1123         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1124
1125         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1126                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1127                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1128                            pkts[i]->pkt_len);
1129         }
1130
1131         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1132                 ids[i] = descs[avail_idx + i].id;
1133
1134         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1135
1136         return 0;
1137 }
1138
1139 static __rte_always_inline int16_t
1140 virtio_dev_rx_single_packed(struct virtio_net *dev,
1141                             struct vhost_virtqueue *vq,
1142                             struct rte_mbuf *pkt)
1143 {
1144         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1145         uint16_t nr_descs = 0;
1146
1147         rte_smp_rmb();
1148         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1149                                                  &nr_descs) < 0)) {
1150                 VHOST_LOG_DEBUG(VHOST_DATA,
1151                                 "(%d) failed to get enough desc from vring\n",
1152                                 dev->vid);
1153                 return -1;
1154         }
1155
1156         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1157                         dev->vid, vq->last_avail_idx,
1158                         vq->last_avail_idx + nr_descs);
1159
1160         vq_inc_last_avail_packed(vq, nr_descs);
1161
1162         return 0;
1163 }
1164
1165 static __rte_noinline uint32_t
1166 virtio_dev_rx_packed(struct virtio_net *dev,
1167                      struct vhost_virtqueue *vq,
1168                      struct rte_mbuf **pkts,
1169                      uint32_t count)
1170 {
1171         uint32_t pkt_idx = 0;
1172         uint32_t remained = count;
1173
1174         do {
1175                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1176
1177                 if (remained >= PACKED_BATCH_SIZE) {
1178                         if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
1179                                 pkt_idx += PACKED_BATCH_SIZE;
1180                                 remained -= PACKED_BATCH_SIZE;
1181                                 continue;
1182                         }
1183                 }
1184
1185                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1186                         break;
1187                 pkt_idx++;
1188                 remained--;
1189
1190         } while (pkt_idx < count);
1191
1192         if (vq->shadow_used_idx) {
1193                 do_data_copy_enqueue(dev, vq);
1194                 vhost_flush_enqueue_shadow_packed(dev, vq);
1195         }
1196
1197         if (pkt_idx)
1198                 vhost_vring_call_packed(dev, vq);
1199
1200         return pkt_idx;
1201 }
1202
1203 static __rte_always_inline uint32_t
1204 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1205         struct rte_mbuf **pkts, uint32_t count)
1206 {
1207         struct vhost_virtqueue *vq;
1208         uint32_t nb_tx = 0;
1209
1210         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1211         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1212                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1213                         dev->vid, __func__, queue_id);
1214                 return 0;
1215         }
1216
1217         vq = dev->virtqueue[queue_id];
1218
1219         rte_spinlock_lock(&vq->access_lock);
1220
1221         if (unlikely(vq->enabled == 0))
1222                 goto out_access_unlock;
1223
1224         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1225                 vhost_user_iotlb_rd_lock(vq);
1226
1227         if (unlikely(vq->access_ok == 0))
1228                 if (unlikely(vring_translate(dev, vq) < 0))
1229                         goto out;
1230
1231         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1232         if (count == 0)
1233                 goto out;
1234
1235         if (vq_is_packed(dev))
1236                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1237         else
1238                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1239
1240 out:
1241         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1242                 vhost_user_iotlb_rd_unlock(vq);
1243
1244 out_access_unlock:
1245         rte_spinlock_unlock(&vq->access_lock);
1246
1247         return nb_tx;
1248 }
1249
1250 uint16_t
1251 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1252         struct rte_mbuf **pkts, uint16_t count)
1253 {
1254         struct virtio_net *dev = get_device(vid);
1255
1256         if (!dev)
1257                 return 0;
1258
1259         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1260                 RTE_LOG(ERR, VHOST_DATA,
1261                         "(%d) %s: built-in vhost net backend is disabled.\n",
1262                         dev->vid, __func__);
1263                 return 0;
1264         }
1265
1266         return virtio_dev_rx(dev, queue_id, pkts, count);
1267 }
1268
1269 static inline bool
1270 virtio_net_with_host_offload(struct virtio_net *dev)
1271 {
1272         if (dev->features &
1273                         ((1ULL << VIRTIO_NET_F_CSUM) |
1274                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1275                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1276                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1277                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1278                 return true;
1279
1280         return false;
1281 }
1282
1283 static void
1284 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1285 {
1286         struct rte_ipv4_hdr *ipv4_hdr;
1287         struct rte_ipv6_hdr *ipv6_hdr;
1288         void *l3_hdr = NULL;
1289         struct rte_ether_hdr *eth_hdr;
1290         uint16_t ethertype;
1291
1292         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1293
1294         m->l2_len = sizeof(struct rte_ether_hdr);
1295         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1296
1297         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1298                 struct rte_vlan_hdr *vlan_hdr =
1299                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1300
1301                 m->l2_len += sizeof(struct rte_vlan_hdr);
1302                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1303         }
1304
1305         l3_hdr = (char *)eth_hdr + m->l2_len;
1306
1307         switch (ethertype) {
1308         case RTE_ETHER_TYPE_IPV4:
1309                 ipv4_hdr = l3_hdr;
1310                 *l4_proto = ipv4_hdr->next_proto_id;
1311                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1312                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1313                 m->ol_flags |= PKT_TX_IPV4;
1314                 break;
1315         case RTE_ETHER_TYPE_IPV6:
1316                 ipv6_hdr = l3_hdr;
1317                 *l4_proto = ipv6_hdr->proto;
1318                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1319                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1320                 m->ol_flags |= PKT_TX_IPV6;
1321                 break;
1322         default:
1323                 m->l3_len = 0;
1324                 *l4_proto = 0;
1325                 *l4_hdr = NULL;
1326                 break;
1327         }
1328 }
1329
1330 static __rte_always_inline void
1331 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1332 {
1333         uint16_t l4_proto = 0;
1334         void *l4_hdr = NULL;
1335         struct rte_tcp_hdr *tcp_hdr = NULL;
1336
1337         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1338                 return;
1339
1340         parse_ethernet(m, &l4_proto, &l4_hdr);
1341         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1342                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1343                         switch (hdr->csum_offset) {
1344                         case (offsetof(struct rte_tcp_hdr, cksum)):
1345                                 if (l4_proto == IPPROTO_TCP)
1346                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1347                                 break;
1348                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1349                                 if (l4_proto == IPPROTO_UDP)
1350                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1351                                 break;
1352                         case (offsetof(struct rte_sctp_hdr, cksum)):
1353                                 if (l4_proto == IPPROTO_SCTP)
1354                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1355                                 break;
1356                         default:
1357                                 break;
1358                         }
1359                 }
1360         }
1361
1362         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1363                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1364                 case VIRTIO_NET_HDR_GSO_TCPV4:
1365                 case VIRTIO_NET_HDR_GSO_TCPV6:
1366                         tcp_hdr = l4_hdr;
1367                         m->ol_flags |= PKT_TX_TCP_SEG;
1368                         m->tso_segsz = hdr->gso_size;
1369                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1370                         break;
1371                 case VIRTIO_NET_HDR_GSO_UDP:
1372                         m->ol_flags |= PKT_TX_UDP_SEG;
1373                         m->tso_segsz = hdr->gso_size;
1374                         m->l4_len = sizeof(struct rte_udp_hdr);
1375                         break;
1376                 default:
1377                         RTE_LOG(WARNING, VHOST_DATA,
1378                                 "unsupported gso type %u.\n", hdr->gso_type);
1379                         break;
1380                 }
1381         }
1382 }
1383
1384 static __rte_noinline void
1385 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1386                 struct buf_vector *buf_vec)
1387 {
1388         uint64_t len;
1389         uint64_t remain = sizeof(struct virtio_net_hdr);
1390         uint64_t src;
1391         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1392
1393         while (remain) {
1394                 len = RTE_MIN(remain, buf_vec->buf_len);
1395                 src = buf_vec->buf_addr;
1396                 rte_memcpy((void *)(uintptr_t)dst,
1397                                 (void *)(uintptr_t)src, len);
1398
1399                 remain -= len;
1400                 dst += len;
1401                 buf_vec++;
1402         }
1403 }
1404
1405 static __rte_always_inline int
1406 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1407                   struct buf_vector *buf_vec, uint16_t nr_vec,
1408                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1409 {
1410         uint32_t buf_avail, buf_offset;
1411         uint64_t buf_addr, buf_iova, buf_len;
1412         uint32_t mbuf_avail, mbuf_offset;
1413         uint32_t cpy_len;
1414         struct rte_mbuf *cur = m, *prev = m;
1415         struct virtio_net_hdr tmp_hdr;
1416         struct virtio_net_hdr *hdr = NULL;
1417         /* A counter to avoid desc dead loop chain */
1418         uint16_t vec_idx = 0;
1419         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1420         int error = 0;
1421
1422         buf_addr = buf_vec[vec_idx].buf_addr;
1423         buf_iova = buf_vec[vec_idx].buf_iova;
1424         buf_len = buf_vec[vec_idx].buf_len;
1425
1426         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1427                 error = -1;
1428                 goto out;
1429         }
1430
1431         if (virtio_net_with_host_offload(dev)) {
1432                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1433                         /*
1434                          * No luck, the virtio-net header doesn't fit
1435                          * in a contiguous virtual area.
1436                          */
1437                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1438                         hdr = &tmp_hdr;
1439                 } else {
1440                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1441                 }
1442         }
1443
1444         /*
1445          * A virtio driver normally uses at least 2 desc buffers
1446          * for Tx: the first for storing the header, and others
1447          * for storing the data.
1448          */
1449         if (unlikely(buf_len < dev->vhost_hlen)) {
1450                 buf_offset = dev->vhost_hlen - buf_len;
1451                 vec_idx++;
1452                 buf_addr = buf_vec[vec_idx].buf_addr;
1453                 buf_iova = buf_vec[vec_idx].buf_iova;
1454                 buf_len = buf_vec[vec_idx].buf_len;
1455                 buf_avail  = buf_len - buf_offset;
1456         } else if (buf_len == dev->vhost_hlen) {
1457                 if (unlikely(++vec_idx >= nr_vec))
1458                         goto out;
1459                 buf_addr = buf_vec[vec_idx].buf_addr;
1460                 buf_iova = buf_vec[vec_idx].buf_iova;
1461                 buf_len = buf_vec[vec_idx].buf_len;
1462
1463                 buf_offset = 0;
1464                 buf_avail = buf_len;
1465         } else {
1466                 buf_offset = dev->vhost_hlen;
1467                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1468         }
1469
1470         PRINT_PACKET(dev,
1471                         (uintptr_t)(buf_addr + buf_offset),
1472                         (uint32_t)buf_avail, 0);
1473
1474         mbuf_offset = 0;
1475         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1476         while (1) {
1477                 uint64_t hpa;
1478
1479                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1480
1481                 /*
1482                  * A desc buf might across two host physical pages that are
1483                  * not continuous. In such case (gpa_to_hpa returns 0), data
1484                  * will be copied even though zero copy is enabled.
1485                  */
1486                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1487                                         buf_iova + buf_offset, cpy_len)))) {
1488                         cur->data_len = cpy_len;
1489                         cur->data_off = 0;
1490                         cur->buf_addr =
1491                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1492                         cur->buf_iova = hpa;
1493
1494                         /*
1495                          * In zero copy mode, one mbuf can only reference data
1496                          * for one or partial of one desc buff.
1497                          */
1498                         mbuf_avail = cpy_len;
1499                 } else {
1500                         if (likely(cpy_len > MAX_BATCH_LEN ||
1501                                    vq->batch_copy_nb_elems >= vq->size ||
1502                                    (hdr && cur == m))) {
1503                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1504                                                                    mbuf_offset),
1505                                            (void *)((uintptr_t)(buf_addr +
1506                                                            buf_offset)),
1507                                            cpy_len);
1508                         } else {
1509                                 batch_copy[vq->batch_copy_nb_elems].dst =
1510                                         rte_pktmbuf_mtod_offset(cur, void *,
1511                                                                 mbuf_offset);
1512                                 batch_copy[vq->batch_copy_nb_elems].src =
1513                                         (void *)((uintptr_t)(buf_addr +
1514                                                                 buf_offset));
1515                                 batch_copy[vq->batch_copy_nb_elems].len =
1516                                         cpy_len;
1517                                 vq->batch_copy_nb_elems++;
1518                         }
1519                 }
1520
1521                 mbuf_avail  -= cpy_len;
1522                 mbuf_offset += cpy_len;
1523                 buf_avail -= cpy_len;
1524                 buf_offset += cpy_len;
1525
1526                 /* This buf reaches to its end, get the next one */
1527                 if (buf_avail == 0) {
1528                         if (++vec_idx >= nr_vec)
1529                                 break;
1530
1531                         buf_addr = buf_vec[vec_idx].buf_addr;
1532                         buf_iova = buf_vec[vec_idx].buf_iova;
1533                         buf_len = buf_vec[vec_idx].buf_len;
1534
1535                         buf_offset = 0;
1536                         buf_avail  = buf_len;
1537
1538                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1539                                         (uint32_t)buf_avail, 0);
1540                 }
1541
1542                 /*
1543                  * This mbuf reaches to its end, get a new one
1544                  * to hold more data.
1545                  */
1546                 if (mbuf_avail == 0) {
1547                         cur = rte_pktmbuf_alloc(mbuf_pool);
1548                         if (unlikely(cur == NULL)) {
1549                                 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1550                                         "allocate memory for mbuf.\n");
1551                                 error = -1;
1552                                 goto out;
1553                         }
1554                         if (unlikely(dev->dequeue_zero_copy))
1555                                 rte_mbuf_refcnt_update(cur, 1);
1556
1557                         prev->next = cur;
1558                         prev->data_len = mbuf_offset;
1559                         m->nb_segs += 1;
1560                         m->pkt_len += mbuf_offset;
1561                         prev = cur;
1562
1563                         mbuf_offset = 0;
1564                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1565                 }
1566         }
1567
1568         prev->data_len = mbuf_offset;
1569         m->pkt_len    += mbuf_offset;
1570
1571         if (hdr)
1572                 vhost_dequeue_offload(hdr, m);
1573
1574 out:
1575
1576         return error;
1577 }
1578
1579 static __rte_always_inline struct zcopy_mbuf *
1580 get_zmbuf(struct vhost_virtqueue *vq)
1581 {
1582         uint16_t i;
1583         uint16_t last;
1584         int tries = 0;
1585
1586         /* search [last_zmbuf_idx, zmbuf_size) */
1587         i = vq->last_zmbuf_idx;
1588         last = vq->zmbuf_size;
1589
1590 again:
1591         for (; i < last; i++) {
1592                 if (vq->zmbufs[i].in_use == 0) {
1593                         vq->last_zmbuf_idx = i + 1;
1594                         vq->zmbufs[i].in_use = 1;
1595                         return &vq->zmbufs[i];
1596                 }
1597         }
1598
1599         tries++;
1600         if (tries == 1) {
1601                 /* search [0, last_zmbuf_idx) */
1602                 i = 0;
1603                 last = vq->last_zmbuf_idx;
1604                 goto again;
1605         }
1606
1607         return NULL;
1608 }
1609
1610 static void
1611 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1612 {
1613         rte_free(opaque);
1614 }
1615
1616 static int
1617 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1618 {
1619         struct rte_mbuf_ext_shared_info *shinfo = NULL;
1620         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1621         uint16_t buf_len;
1622         rte_iova_t iova;
1623         void *buf;
1624
1625         /* Try to use pkt buffer to store shinfo to reduce the amount of memory
1626          * required, otherwise store shinfo in the new buffer.
1627          */
1628         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1629                 shinfo = rte_pktmbuf_mtod(pkt,
1630                                           struct rte_mbuf_ext_shared_info *);
1631         else {
1632                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1633                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1634         }
1635
1636         if (unlikely(total_len > UINT16_MAX))
1637                 return -ENOSPC;
1638
1639         buf_len = total_len;
1640         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1641         if (unlikely(buf == NULL))
1642                 return -ENOMEM;
1643
1644         /* Initialize shinfo */
1645         if (shinfo) {
1646                 shinfo->free_cb = virtio_dev_extbuf_free;
1647                 shinfo->fcb_opaque = buf;
1648                 rte_mbuf_ext_refcnt_set(shinfo, 1);
1649         } else {
1650                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1651                                               virtio_dev_extbuf_free, buf);
1652                 if (unlikely(shinfo == NULL)) {
1653                         rte_free(buf);
1654                         RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n");
1655                         return -1;
1656                 }
1657         }
1658
1659         iova = rte_malloc_virt2iova(buf);
1660         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1661         rte_pktmbuf_reset_headroom(pkt);
1662
1663         return 0;
1664 }
1665
1666 /*
1667  * Allocate a host supported pktmbuf.
1668  */
1669 static __rte_always_inline struct rte_mbuf *
1670 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1671                          uint32_t data_len)
1672 {
1673         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1674
1675         if (unlikely(pkt == NULL))
1676                 return NULL;
1677
1678         if (rte_pktmbuf_tailroom(pkt) >= data_len)
1679                 return pkt;
1680
1681         /* attach an external buffer if supported */
1682         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1683                 return pkt;
1684
1685         /* check if chained buffers are allowed */
1686         if (!dev->linearbuf)
1687                 return pkt;
1688
1689         /* Data doesn't fit into the buffer and the host supports
1690          * only linear buffers
1691          */
1692         rte_pktmbuf_free(pkt);
1693
1694         return NULL;
1695 }
1696
1697 static __rte_noinline uint16_t
1698 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1699         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1700 {
1701         uint16_t i;
1702         uint16_t free_entries;
1703
1704         if (unlikely(dev->dequeue_zero_copy)) {
1705                 struct zcopy_mbuf *zmbuf, *next;
1706
1707                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1708                      zmbuf != NULL; zmbuf = next) {
1709                         next = TAILQ_NEXT(zmbuf, next);
1710
1711                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1712                                 update_shadow_used_ring_split(vq,
1713                                                 zmbuf->desc_idx, 0);
1714                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1715                                 restore_mbuf(zmbuf->mbuf);
1716                                 rte_pktmbuf_free(zmbuf->mbuf);
1717                                 put_zmbuf(zmbuf);
1718                                 vq->nr_zmbuf -= 1;
1719                         }
1720                 }
1721
1722                 if (likely(vq->shadow_used_idx)) {
1723                         flush_shadow_used_ring_split(dev, vq);
1724                         vhost_vring_call_split(dev, vq);
1725                 }
1726         }
1727
1728         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1729                         vq->last_avail_idx;
1730         if (free_entries == 0)
1731                 return 0;
1732
1733         /*
1734          * The ordering between avail index and
1735          * desc reads needs to be enforced.
1736          */
1737         rte_smp_rmb();
1738
1739         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1740
1741         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1742
1743         count = RTE_MIN(count, MAX_PKT_BURST);
1744         count = RTE_MIN(count, free_entries);
1745         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1746                         dev->vid, count);
1747
1748         for (i = 0; i < count; i++) {
1749                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1750                 uint16_t head_idx;
1751                 uint32_t buf_len;
1752                 uint16_t nr_vec = 0;
1753                 int err;
1754
1755                 if (unlikely(fill_vec_buf_split(dev, vq,
1756                                                 vq->last_avail_idx + i,
1757                                                 &nr_vec, buf_vec,
1758                                                 &head_idx, &buf_len,
1759                                                 VHOST_ACCESS_RO) < 0))
1760                         break;
1761
1762                 if (likely(dev->dequeue_zero_copy == 0))
1763                         update_shadow_used_ring_split(vq, head_idx, 0);
1764
1765                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1766                 if (unlikely(pkts[i] == NULL))
1767                         break;
1768
1769                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1770                                 mbuf_pool);
1771                 if (unlikely(err)) {
1772                         rte_pktmbuf_free(pkts[i]);
1773                         break;
1774                 }
1775
1776                 if (unlikely(dev->dequeue_zero_copy)) {
1777                         struct zcopy_mbuf *zmbuf;
1778
1779                         zmbuf = get_zmbuf(vq);
1780                         if (!zmbuf) {
1781                                 rte_pktmbuf_free(pkts[i]);
1782                                 break;
1783                         }
1784                         zmbuf->mbuf = pkts[i];
1785                         zmbuf->desc_idx = head_idx;
1786
1787                         /*
1788                          * Pin lock the mbuf; we will check later to see
1789                          * whether the mbuf is freed (when we are the last
1790                          * user) or not. If that's the case, we then could
1791                          * update the used ring safely.
1792                          */
1793                         rte_mbuf_refcnt_update(pkts[i], 1);
1794
1795                         vq->nr_zmbuf += 1;
1796                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1797                 }
1798         }
1799         vq->last_avail_idx += i;
1800
1801         if (likely(dev->dequeue_zero_copy == 0)) {
1802                 do_data_copy_dequeue(vq);
1803                 if (unlikely(i < count))
1804                         vq->shadow_used_idx = i;
1805                 if (likely(vq->shadow_used_idx)) {
1806                         flush_shadow_used_ring_split(dev, vq);
1807                         vhost_vring_call_split(dev, vq);
1808                 }
1809         }
1810
1811         return i;
1812 }
1813
1814 static __rte_always_inline int
1815 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1816                                  struct vhost_virtqueue *vq,
1817                                  struct rte_mempool *mbuf_pool,
1818                                  struct rte_mbuf **pkts,
1819                                  uint16_t avail_idx,
1820                                  uintptr_t *desc_addrs,
1821                                  uint16_t *ids)
1822 {
1823         bool wrap = vq->avail_wrap_counter;
1824         struct vring_packed_desc *descs = vq->desc_packed;
1825         struct virtio_net_hdr *hdr;
1826         uint64_t lens[PACKED_BATCH_SIZE];
1827         uint64_t buf_lens[PACKED_BATCH_SIZE];
1828         uint32_t buf_offset = dev->vhost_hlen;
1829         uint16_t flags, i;
1830
1831         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1832                 return -1;
1833         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1834                 return -1;
1835
1836         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1837                 flags = descs[avail_idx + i].flags;
1838                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1839                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
1840                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1841                         return -1;
1842         }
1843
1844         rte_smp_rmb();
1845
1846         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1847                 lens[i] = descs[avail_idx + i].len;
1848
1849         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1850                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1851                                                   descs[avail_idx + i].addr,
1852                                                   &lens[i], VHOST_ACCESS_RW);
1853         }
1854
1855         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1856                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1857                         return -1;
1858         }
1859
1860         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1861                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1862                 if (!pkts[i])
1863                         goto free_buf;
1864         }
1865
1866         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1867                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1868
1869         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1870                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1871                         goto free_buf;
1872         }
1873
1874         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1875                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1876                 pkts[i]->data_len = pkts[i]->pkt_len;
1877                 ids[i] = descs[avail_idx + i].id;
1878         }
1879
1880         if (virtio_net_with_host_offload(dev)) {
1881                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1882                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1883                         vhost_dequeue_offload(hdr, pkts[i]);
1884                 }
1885         }
1886
1887         return 0;
1888
1889 free_buf:
1890         for (i = 0; i < PACKED_BATCH_SIZE; i++)
1891                 rte_pktmbuf_free(pkts[i]);
1892
1893         return -1;
1894 }
1895
1896 static __rte_unused int
1897 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1898                            struct vhost_virtqueue *vq,
1899                            struct rte_mempool *mbuf_pool,
1900                            struct rte_mbuf **pkts)
1901 {
1902         uint16_t avail_idx = vq->last_avail_idx;
1903         uint32_t buf_offset = dev->vhost_hlen;
1904         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1905         uint16_t ids[PACKED_BATCH_SIZE];
1906         uint16_t i;
1907
1908         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1909                                              avail_idx, desc_addrs, ids))
1910                 return -1;
1911
1912         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1913                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1914
1915         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1916                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1917                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1918                            pkts[i]->pkt_len);
1919
1920         vhost_shadow_dequeue_batch_packed(dev, vq, ids);
1921
1922         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1923
1924         return 0;
1925 }
1926
1927 static __rte_always_inline int
1928 vhost_dequeue_single_packed(struct virtio_net *dev,
1929                             struct vhost_virtqueue *vq,
1930                             struct rte_mempool *mbuf_pool,
1931                             struct rte_mbuf **pkts,
1932                             uint16_t *buf_id,
1933                             uint16_t *desc_count)
1934 {
1935         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1936         uint32_t buf_len;
1937         uint16_t nr_vec = 0;
1938         int err;
1939
1940         if (unlikely(fill_vec_buf_packed(dev, vq,
1941                                          vq->last_avail_idx, desc_count,
1942                                          buf_vec, &nr_vec,
1943                                          buf_id, &buf_len,
1944                                          VHOST_ACCESS_RO) < 0))
1945                 return -1;
1946
1947         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1948         if (unlikely(*pkts == NULL)) {
1949                 RTE_LOG(ERR, VHOST_DATA,
1950                         "Failed to allocate memory for mbuf.\n");
1951                 return -1;
1952         }
1953
1954         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
1955                                 mbuf_pool);
1956         if (unlikely(err)) {
1957                 rte_pktmbuf_free(*pkts);
1958                 return -1;
1959         }
1960
1961         return 0;
1962 }
1963
1964 static __rte_unused int
1965 virtio_dev_tx_single_packed(struct virtio_net *dev,
1966                             struct vhost_virtqueue *vq,
1967                             struct rte_mempool *mbuf_pool,
1968                             struct rte_mbuf **pkts)
1969 {
1970
1971         uint16_t buf_id, desc_count;
1972
1973         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1974                                         &desc_count))
1975                 return -1;
1976
1977         vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
1978
1979         vq_inc_last_avail_packed(vq, desc_count);
1980
1981         return 0;
1982 }
1983
1984 static __rte_noinline uint16_t
1985 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1986         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1987 {
1988         uint16_t i;
1989
1990         if (unlikely(dev->dequeue_zero_copy)) {
1991                 struct zcopy_mbuf *zmbuf, *next;
1992
1993                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1994                      zmbuf != NULL; zmbuf = next) {
1995                         next = TAILQ_NEXT(zmbuf, next);
1996
1997                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1998                                 update_shadow_used_ring_packed(vq,
1999                                                 zmbuf->desc_idx,
2000                                                 0,
2001                                                 zmbuf->desc_count);
2002
2003                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2004                                 restore_mbuf(zmbuf->mbuf);
2005                                 rte_pktmbuf_free(zmbuf->mbuf);
2006                                 put_zmbuf(zmbuf);
2007                                 vq->nr_zmbuf -= 1;
2008                         }
2009                 }
2010
2011                 if (likely(vq->shadow_used_idx)) {
2012                         flush_shadow_used_ring_packed(dev, vq);
2013                         vhost_vring_call_packed(dev, vq);
2014                 }
2015         }
2016
2017         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
2018
2019         count = RTE_MIN(count, MAX_PKT_BURST);
2020         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
2021                         dev->vid, count);
2022
2023         for (i = 0; i < count; i++) {
2024                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2025                 uint16_t buf_id;
2026                 uint32_t buf_len;
2027                 uint16_t desc_count, nr_vec = 0;
2028                 int err;
2029
2030                 if (unlikely(fill_vec_buf_packed(dev, vq,
2031                                                 vq->last_avail_idx, &desc_count,
2032                                                 buf_vec, &nr_vec,
2033                                                 &buf_id, &buf_len,
2034                                                 VHOST_ACCESS_RO) < 0))
2035                         break;
2036
2037                 if (likely(dev->dequeue_zero_copy == 0))
2038                         update_shadow_used_ring_packed(vq, buf_id, 0,
2039                                         desc_count);
2040
2041                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2042                 if (unlikely(pkts[i] == NULL))
2043                         break;
2044
2045                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2046                                 mbuf_pool);
2047                 if (unlikely(err)) {
2048                         rte_pktmbuf_free(pkts[i]);
2049                         break;
2050                 }
2051
2052                 if (unlikely(dev->dequeue_zero_copy)) {
2053                         struct zcopy_mbuf *zmbuf;
2054
2055                         zmbuf = get_zmbuf(vq);
2056                         if (!zmbuf) {
2057                                 rte_pktmbuf_free(pkts[i]);
2058                                 break;
2059                         }
2060                         zmbuf->mbuf = pkts[i];
2061                         zmbuf->desc_idx = buf_id;
2062                         zmbuf->desc_count = desc_count;
2063
2064                         /*
2065                          * Pin lock the mbuf; we will check later to see
2066                          * whether the mbuf is freed (when we are the last
2067                          * user) or not. If that's the case, we then could
2068                          * update the used ring safely.
2069                          */
2070                         rte_mbuf_refcnt_update(pkts[i], 1);
2071
2072                         vq->nr_zmbuf += 1;
2073                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2074                 }
2075
2076                 vq_inc_last_avail_packed(vq, desc_count);
2077         }
2078
2079         if (likely(dev->dequeue_zero_copy == 0)) {
2080                 do_data_copy_dequeue(vq);
2081                 if (unlikely(i < count))
2082                         vq->shadow_used_idx = i;
2083                 if (likely(vq->shadow_used_idx)) {
2084                         flush_shadow_used_ring_packed(dev, vq);
2085                         vhost_vring_call_packed(dev, vq);
2086                 }
2087         }
2088
2089         return i;
2090 }
2091
2092 uint16_t
2093 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2094         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2095 {
2096         struct virtio_net *dev;
2097         struct rte_mbuf *rarp_mbuf = NULL;
2098         struct vhost_virtqueue *vq;
2099
2100         dev = get_device(vid);
2101         if (!dev)
2102                 return 0;
2103
2104         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2105                 RTE_LOG(ERR, VHOST_DATA,
2106                         "(%d) %s: built-in vhost net backend is disabled.\n",
2107                         dev->vid, __func__);
2108                 return 0;
2109         }
2110
2111         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2112                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
2113                         dev->vid, __func__, queue_id);
2114                 return 0;
2115         }
2116
2117         vq = dev->virtqueue[queue_id];
2118
2119         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2120                 return 0;
2121
2122         if (unlikely(vq->enabled == 0)) {
2123                 count = 0;
2124                 goto out_access_unlock;
2125         }
2126
2127         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2128                 vhost_user_iotlb_rd_lock(vq);
2129
2130         if (unlikely(vq->access_ok == 0))
2131                 if (unlikely(vring_translate(dev, vq) < 0)) {
2132                         count = 0;
2133                         goto out;
2134                 }
2135
2136         /*
2137          * Construct a RARP broadcast packet, and inject it to the "pkts"
2138          * array, to looks like that guest actually send such packet.
2139          *
2140          * Check user_send_rarp() for more information.
2141          *
2142          * broadcast_rarp shares a cacheline in the virtio_net structure
2143          * with some fields that are accessed during enqueue and
2144          * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
2145          * result in false sharing between enqueue and dequeue.
2146          *
2147          * Prevent unnecessary false sharing by reading broadcast_rarp first
2148          * and only performing cmpset if the read indicates it is likely to
2149          * be set.
2150          */
2151         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
2152                         rte_atomic16_cmpset((volatile uint16_t *)
2153                                 &dev->broadcast_rarp.cnt, 1, 0))) {
2154
2155                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2156                 if (rarp_mbuf == NULL) {
2157                         RTE_LOG(ERR, VHOST_DATA,
2158                                 "Failed to make RARP packet.\n");
2159                         count = 0;
2160                         goto out;
2161                 }
2162                 count -= 1;
2163         }
2164
2165         if (vq_is_packed(dev))
2166                 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
2167         else
2168                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2169
2170 out:
2171         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2172                 vhost_user_iotlb_rd_unlock(vq);
2173
2174 out_access_unlock:
2175         rte_spinlock_unlock(&vq->access_lock);
2176
2177         if (unlikely(rarp_mbuf != NULL)) {
2178                 /*
2179                  * Inject it to the head of "pkts" array, so that switch's mac
2180                  * learning table will get updated first.
2181                  */
2182                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2183                 pkts[0] = rarp_mbuf;
2184                 count += 1;
2185         }
2186
2187         return count;
2188 }