vhost: improve prefetching in enqueue path
[dpdk.git] / lib / librte_vhost / virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static  __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static bool
35 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
36 {
37         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
38 }
39
40 static __rte_always_inline struct vring_desc *
41 alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
42                                          struct vring_desc *desc)
43 {
44         struct vring_desc *idesc;
45         uint64_t src, dst;
46         uint64_t len, remain = desc->len;
47         uint64_t desc_addr = desc->addr;
48
49         idesc = rte_malloc(__func__, desc->len, 0);
50         if (unlikely(!idesc))
51                 return 0;
52
53         dst = (uint64_t)(uintptr_t)idesc;
54
55         while (remain) {
56                 len = remain;
57                 src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
58                                 VHOST_ACCESS_RO);
59                 if (unlikely(!src || !len)) {
60                         rte_free(idesc);
61                         return 0;
62                 }
63
64                 rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
65
66                 remain -= len;
67                 dst += len;
68                 desc_addr += len;
69         }
70
71         return idesc;
72 }
73
74 static __rte_always_inline void
75 free_ind_table(struct vring_desc *idesc)
76 {
77         rte_free(idesc);
78 }
79
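/*
 * Shadow used ring helpers: used ring updates are first accumulated in
 * vq->shadow_used_ring and then copied to the guest-visible used ring in
 * at most two contiguous chunks (to handle the wrap-around), so guest
 * memory and the dirty-page log are touched once per burst rather than
 * once per descriptor chain.
 */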
80 static __rte_always_inline void
81 do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
82                           uint16_t to, uint16_t from, uint16_t size)
83 {
84         rte_memcpy(&vq->used->ring[to],
85                         &vq->shadow_used_ring[from],
86                         size * sizeof(struct vring_used_elem));
87         vhost_log_cache_used_vring(dev, vq,
88                         offsetof(struct vring_used, ring[to]),
89                         size * sizeof(struct vring_used_elem));
90 }
91
92 static __rte_always_inline void
93 flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
94 {
95         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
96
97         if (used_idx + vq->shadow_used_idx <= vq->size) {
98                 do_flush_shadow_used_ring(dev, vq, used_idx, 0,
99                                           vq->shadow_used_idx);
100         } else {
101                 uint16_t size;
102
103                 /* update the used ring interval [used_idx, vq->size) */
104                 size = vq->size - used_idx;
105                 do_flush_shadow_used_ring(dev, vq, used_idx, 0, size);
106
107                 /* update the remaining interval [0, shadow_used_idx - size) */
108                 do_flush_shadow_used_ring(dev, vq, 0, size,
109                                           vq->shadow_used_idx - size);
110         }
111         vq->last_used_idx += vq->shadow_used_idx;
112
113         rte_smp_wmb();
114
115         vhost_log_cache_sync(dev, vq);
116
117         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
118         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
119                 sizeof(vq->used->idx));
120 }
121
122 static __rte_always_inline void
123 update_shadow_used_ring(struct vhost_virtqueue *vq,
124                          uint16_t desc_idx, uint16_t len)
125 {
126         uint16_t i = vq->shadow_used_idx++;
127
128         vq->shadow_used_ring[i].id  = desc_idx;
129         vq->shadow_used_ring[i].len = len;
130 }
131
132 static inline void
133 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
134 {
135         struct batch_copy_elem *elem = vq->batch_copy_elems;
136         uint16_t count = vq->batch_copy_nb_elems;
137         int i;
138
139         for (i = 0; i < count; i++) {
140                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
141                 vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
142                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
143         }
144 }
145
146 static inline void
147 do_data_copy_dequeue(struct vhost_virtqueue *vq)
148 {
149         struct batch_copy_elem *elem = vq->batch_copy_elems;
150         uint16_t count = vq->batch_copy_nb_elems;
151         int i;
152
153         for (i = 0; i < count; i++)
154                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
155 }
156
157 /* avoid the write operation when it is not needed, to lessen cache issues */
158 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
159         if ((var) != (val))                     \
160                 (var) = (val);                  \
161 } while (0)
162
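/*
 * Translate the offload requests carried in the mbuf (L4 checksum,
 * TSO/UFO) into the virtio-net header that will be seen by the guest.
 */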
163 static __rte_always_inline void
164 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
165 {
166         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
167
168         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
169                 csum_l4 |= PKT_TX_TCP_CKSUM;
170
171         if (csum_l4) {
172                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
173                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
174
175                 switch (csum_l4) {
176                 case PKT_TX_TCP_CKSUM:
177                         net_hdr->csum_offset = (offsetof(struct tcp_hdr,
178                                                 cksum));
179                         break;
180                 case PKT_TX_UDP_CKSUM:
181                         net_hdr->csum_offset = (offsetof(struct udp_hdr,
182                                                 dgram_cksum));
183                         break;
184                 case PKT_TX_SCTP_CKSUM:
185                         net_hdr->csum_offset = (offsetof(struct sctp_hdr,
186                                                 cksum));
187                         break;
188                 }
189         } else {
190                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
191                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
192                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
193         }
194
195         /* IP cksum verification cannot be bypassed, so calculate it here */
196         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
197                 struct ipv4_hdr *ipv4_hdr;
198
199                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
200                                                    m_buf->l2_len);
201                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
202         }
203
204         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
205                 if (m_buf->ol_flags & PKT_TX_IPV4)
206                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
207                 else
208                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
209                 net_hdr->gso_size = m_buf->tso_segsz;
210                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
211                                         + m_buf->l4_len;
212         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
213                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
214                 net_hdr->gso_size = m_buf->tso_segsz;
215                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
216                         m_buf->l4_len;
217         } else {
218                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
219                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
220                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
221         }
222 }
223
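/*
 * Resolve one descriptor chain, starting from the avail ring entry at
 * avail_idx, into host virtual address chunks stored in buf_vec[].
 * Indirect descriptor tables are followed (and copied if they are not
 * contiguous in process VA space), and a single descriptor may produce
 * several buf_vec entries when its guest physical range maps to
 * non-contiguous host virtual memory.
 */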
224 static __rte_always_inline int
225 fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
226                          uint32_t avail_idx, uint32_t *vec_idx,
227                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
228                          uint16_t *desc_chain_len, uint8_t perm)
229 {
230         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
231         uint32_t vec_id = *vec_idx;
232         uint32_t len    = 0;
233         uint64_t dlen, desc_avail, desc_iova;
234         struct vring_desc *descs = vq->desc;
235         struct vring_desc *idesc = NULL;
236
237         *desc_chain_head = idx;
238
239         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
240                 dlen = vq->desc[idx].len;
241                 descs = (struct vring_desc *)(uintptr_t)
242                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
243                                                 &dlen,
244                                                 VHOST_ACCESS_RO);
245                 if (unlikely(!descs))
246                         return -1;
247
248                 if (unlikely(dlen < vq->desc[idx].len)) {
249                         /*
250                          * The indirect desc table is not contiguous
251                          * in process VA space, so we have to copy it.
252                          */
253                         idesc = alloc_copy_ind_table(dev, vq, &vq->desc[idx]);
254                         if (unlikely(!idesc))
255                                 return -1;
256
257                         descs = idesc;
258                 }
259
260                 idx = 0;
261         }
262
263         while (1) {
264                 if (unlikely(idx >= vq->size)) {
265                         free_ind_table(idesc);
266                         return -1;
267                 }
268
269
270                 len += descs[idx].len;
271                 desc_avail = descs[idx].len;
272                 desc_iova = descs[idx].addr;
273
274                 while (desc_avail) {
275                         uint64_t desc_addr;
276                         uint64_t desc_chunck_len = desc_avail;
277
278                         if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
279                                 free_ind_table(idesc);
280                                 return -1;
281                         }
282
283                         desc_addr = vhost_iova_to_vva(dev, vq,
284                                         desc_iova,
285                                         &desc_chunck_len,
286                                         perm);
287                         if (unlikely(!desc_addr)) {
288                                 free_ind_table(idesc);
289                                 return -1;
290                         }
291
292                         buf_vec[vec_id].buf_iova = desc_iova;
293                         buf_vec[vec_id].buf_addr = desc_addr;
294                         buf_vec[vec_id].buf_len  = desc_chunck_len;
295                         buf_vec[vec_id].desc_idx = idx;
296
297                         desc_avail -= desc_chunck_len;
298                         desc_iova += desc_chunck_len;
299                         vec_id++;
300                 }
301
302                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
303                         break;
304
305                 idx = descs[idx].next;
306         }
307
308         *desc_chain_len = len;
309         *vec_idx = vec_id;
310
311         if (unlikely(!!idesc))
312                 free_ind_table(idesc);
313
314         return 0;
315 }
316
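/*
 * Reserve enough free descriptor chains from the avail ring to hold
 * "size" bytes. Each reserved chain is recorded in the shadow used
 * ring; with mergeable Rx buffers up to vq->size chains may be used,
 * otherwise a single chain must be large enough.
 */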
317 /*
318  * Returns -1 on fail, 0 on success
319  */
320 static inline int
321 reserve_avail_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
322                                 uint32_t size, struct buf_vector *buf_vec,
323                                 uint16_t *num_buffers, uint16_t avail_head,
324                                 uint16_t *nr_vec)
325 {
326         uint16_t cur_idx;
327         uint32_t vec_idx = 0;
328         uint16_t max_tries, tries = 0;
329
330         uint16_t head_idx = 0;
331         uint16_t len = 0;
332
333         *num_buffers = 0;
334         cur_idx  = vq->last_avail_idx;
335
336         if (rxvq_is_mergeable(dev))
337                 max_tries = vq->size;
338         else
339                 max_tries = 1;
340
341         while (size > 0) {
342                 if (unlikely(cur_idx == avail_head))
343                         return -1;
344
345                 if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
346                                                 &head_idx, &len,
347                                                 VHOST_ACCESS_RW) < 0))
348                         return -1;
349                 len = RTE_MIN(len, size);
350                 update_shadow_used_ring(vq, head_idx, len);
351                 size -= len;
352
353                 cur_idx++;
354                 tries++;
355                 *num_buffers += 1;
356
357                 /*
358                  * If we have tried all available ring entries and still
359                  * cannot get enough buffers, something abnormal has
360                  * happened.
361                  */
362                 if (unlikely(tries > max_tries))
363                         return -1;
364         }
365
366         *nr_vec = vec_idx;
367
368         return 0;
369 }
370
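/*
 * Copy one mbuf chain into the guest buffers described by buf_vec[].
 * The virtio-net header is written first (possibly split across
 * buffers), then packet data is copied; small copies are deferred to
 * the batch_copy array and performed later by do_data_copy_enqueue().
 */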
371 static __rte_always_inline int
372 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
373                             struct rte_mbuf *m, struct buf_vector *buf_vec,
374                             uint16_t nr_vec, uint16_t num_buffers)
375 {
376         uint32_t vec_idx = 0;
377         uint32_t mbuf_offset, mbuf_avail;
378         uint32_t buf_offset, buf_avail;
379         uint64_t buf_addr, buf_iova, buf_len;
380         uint32_t cpy_len;
381         uint64_t hdr_addr;
382         struct rte_mbuf *hdr_mbuf;
383         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
384         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
385         int error = 0;
386
387         if (unlikely(m == NULL)) {
388                 error = -1;
389                 goto out;
390         }
391
392         buf_addr = buf_vec[vec_idx].buf_addr;
393         buf_iova = buf_vec[vec_idx].buf_iova;
394         buf_len = buf_vec[vec_idx].buf_len;
395
396         if (nr_vec > 1)
397                 rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
398
399         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
400                 error = -1;
401                 goto out;
402         }
403
404         hdr_mbuf = m;
405         hdr_addr = buf_addr;
406         if (unlikely(buf_len < dev->vhost_hlen))
407                 hdr = &tmp_hdr;
408         else
409                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
410
411         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
412                 dev->vid, num_buffers);
413
414         if (unlikely(buf_len < dev->vhost_hlen)) {
415                 buf_offset = dev->vhost_hlen - buf_len;
416                 vec_idx++;
417                 buf_addr = buf_vec[vec_idx].buf_addr;
418                 buf_iova = buf_vec[vec_idx].buf_iova;
419                 buf_len = buf_vec[vec_idx].buf_len;
420                 buf_avail = buf_len - buf_offset;
421         } else {
422                 buf_offset = dev->vhost_hlen;
423                 buf_avail = buf_len - dev->vhost_hlen;
424         }
425
426         mbuf_avail  = rte_pktmbuf_data_len(m);
427         mbuf_offset = 0;
428         while (mbuf_avail != 0 || m->next != NULL) {
429                 /* done with current buf, get the next one */
430                 if (buf_avail == 0) {
431                         vec_idx++;
432                         if (unlikely(vec_idx >= nr_vec)) {
433                                 error = -1;
434                                 goto out;
435                         }
436
437                         buf_addr = buf_vec[vec_idx].buf_addr;
438                         buf_iova = buf_vec[vec_idx].buf_iova;
439                         buf_len = buf_vec[vec_idx].buf_len;
440
441                         /* Prefetch next buffer address. */
442                         if (vec_idx + 1 < nr_vec)
443                                 rte_prefetch0((void *)(uintptr_t)
444                                                 buf_vec[vec_idx + 1].buf_addr);
445                         buf_offset = 0;
446                         buf_avail  = buf_len;
447                 }
448
449                 /* done with current mbuf, get the next one */
450                 if (mbuf_avail == 0) {
451                         m = m->next;
452
453                         mbuf_offset = 0;
454                         mbuf_avail  = rte_pktmbuf_data_len(m);
455                 }
456
457                 if (hdr_addr) {
458                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
459                         if (rxvq_is_mergeable(dev))
460                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
461                                                 num_buffers);
462
463                         if (unlikely(hdr == &tmp_hdr)) {
464                                 uint64_t len;
465                                 uint64_t remain = dev->vhost_hlen;
466                                 uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
467                                 uint64_t iova = buf_vec[0].buf_iova;
468                                 uint16_t hdr_vec_idx = 0;
469
470                                 while (remain) {
471                                         len = remain;
472                                         dst = buf_vec[hdr_vec_idx].buf_addr;
473                                         rte_memcpy((void *)(uintptr_t)dst,
474                                                         (void *)(uintptr_t)src,
475                                                         len);
476
477                                         PRINT_PACKET(dev, (uintptr_t)dst,
478                                                         (uint32_t)len, 0);
479                                         vhost_log_cache_write(dev, vq,
480                                                         iova, len);
481
482                                         remain -= len;
483                                         iova += len;
484                                         src += len;
485                                         hdr_vec_idx++;
486                                 }
487                         } else {
488                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
489                                                 dev->vhost_hlen, 0);
490                                 vhost_log_cache_write(dev, vq,
491                                                 buf_vec[0].buf_iova,
492                                                 dev->vhost_hlen);
493                         }
494
495                         hdr_addr = 0;
496                 }
497
498                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
499
500                 if (likely(cpy_len > MAX_BATCH_LEN ||
501                                         vq->batch_copy_nb_elems >= vq->size)) {
502                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
503                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
504                                 cpy_len);
505                         vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
506                                         cpy_len);
507                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
508                                 cpy_len, 0);
509                 } else {
510                         batch_copy[vq->batch_copy_nb_elems].dst =
511                                 (void *)((uintptr_t)(buf_addr + buf_offset));
512                         batch_copy[vq->batch_copy_nb_elems].src =
513                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
514                         batch_copy[vq->batch_copy_nb_elems].log_addr =
515                                 buf_iova + buf_offset;
516                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
517                         vq->batch_copy_nb_elems++;
518                 }
519
520                 mbuf_avail  -= cpy_len;
521                 mbuf_offset += cpy_len;
522                 buf_avail  -= cpy_len;
523                 buf_offset += cpy_len;
524         }
525
526 out:
527
528         return error;
529 }
530
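/*
 * Enqueue path: for each packet of the burst, reserve enough guest
 * buffers, copy the packet into them, then perform the batched copies,
 * flush the shadow used ring once and notify the guest.
 */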
531 static __rte_always_inline uint32_t
532 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
533         struct rte_mbuf **pkts, uint32_t count)
534 {
535         struct vhost_virtqueue *vq;
536         uint32_t pkt_idx = 0;
537         uint16_t num_buffers;
538         struct buf_vector buf_vec[BUF_VECTOR_MAX];
539         uint16_t avail_head;
540
541         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
542         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
543                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
544                         dev->vid, __func__, queue_id);
545                 return 0;
546         }
547
548         vq = dev->virtqueue[queue_id];
549
550         rte_spinlock_lock(&vq->access_lock);
551
552         if (unlikely(vq->enabled == 0))
553                 goto out_access_unlock;
554
555         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
556                 vhost_user_iotlb_rd_lock(vq);
557
558         if (unlikely(vq->access_ok == 0))
559                 if (unlikely(vring_translate(dev, vq) < 0))
560                         goto out;
561
562         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
563         if (count == 0)
564                 goto out;
565
566         vq->batch_copy_nb_elems = 0;
567
568         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
569
570         vq->shadow_used_idx = 0;
571         avail_head = *((volatile uint16_t *)&vq->avail->idx);
572         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
573                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
574                 uint16_t nr_vec = 0;
575
576                 if (unlikely(reserve_avail_buf(dev, vq,
577                                                 pkt_len, buf_vec, &num_buffers,
578                                                 avail_head, &nr_vec) < 0)) {
579                         VHOST_LOG_DEBUG(VHOST_DATA,
580                                 "(%d) failed to get enough desc from vring\n",
581                                 dev->vid);
582                         vq->shadow_used_idx -= num_buffers;
583                         break;
584                 }
585
586                 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
587
588                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
589                         dev->vid, vq->last_avail_idx,
590                         vq->last_avail_idx + num_buffers);
591
592                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
593                                                 buf_vec, nr_vec,
594                                                 num_buffers) < 0) {
595                         vq->shadow_used_idx -= num_buffers;
596                         break;
597                 }
598
599                 vq->last_avail_idx += num_buffers;
600         }
601
602         do_data_copy_enqueue(dev, vq);
603
604         if (likely(vq->shadow_used_idx)) {
605                 flush_shadow_used_ring(dev, vq);
606                 vhost_vring_call(dev, vq);
607         }
608
609 out:
610         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
611                 vhost_user_iotlb_rd_unlock(vq);
612
613 out_access_unlock:
614         rte_spinlock_unlock(&vq->access_lock);
615
616         return pkt_idx;
617 }
618
619 uint16_t
620 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
621         struct rte_mbuf **pkts, uint16_t count)
622 {
623         struct virtio_net *dev = get_device(vid);
624
625         if (!dev)
626                 return 0;
627
628         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
629                 RTE_LOG(ERR, VHOST_DATA,
630                         "(%d) %s: built-in vhost net backend is disabled.\n",
631                         dev->vid, __func__);
632                 return 0;
633         }
634
635         return virtio_dev_rx(dev, queue_id, pkts, count);
636 }
637
638 static inline bool
639 virtio_net_with_host_offload(struct virtio_net *dev)
640 {
641         if (dev->features &
642                         ((1ULL << VIRTIO_NET_F_CSUM) |
643                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
644                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
645                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
646                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
647                 return true;
648
649         return false;
650 }
651
652 static void
653 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
654 {
655         struct ipv4_hdr *ipv4_hdr;
656         struct ipv6_hdr *ipv6_hdr;
657         void *l3_hdr = NULL;
658         struct ether_hdr *eth_hdr;
659         uint16_t ethertype;
660
661         eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
662
663         m->l2_len = sizeof(struct ether_hdr);
664         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
665
666         if (ethertype == ETHER_TYPE_VLAN) {
667                 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
668
669                 m->l2_len += sizeof(struct vlan_hdr);
670                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
671         }
672
673         l3_hdr = (char *)eth_hdr + m->l2_len;
674
675         switch (ethertype) {
676         case ETHER_TYPE_IPv4:
677                 ipv4_hdr = l3_hdr;
678                 *l4_proto = ipv4_hdr->next_proto_id;
679                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
680                 *l4_hdr = (char *)l3_hdr + m->l3_len;
681                 m->ol_flags |= PKT_TX_IPV4;
682                 break;
683         case ETHER_TYPE_IPv6:
684                 ipv6_hdr = l3_hdr;
685                 *l4_proto = ipv6_hdr->proto;
686                 m->l3_len = sizeof(struct ipv6_hdr);
687                 *l4_hdr = (char *)l3_hdr + m->l3_len;
688                 m->ol_flags |= PKT_TX_IPV6;
689                 break;
690         default:
691                 m->l3_len = 0;
692                 *l4_proto = 0;
693                 *l4_hdr = NULL;
694                 break;
695         }
696 }
697
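/*
 * Apply the offload information found in the virtio-net header to the
 * dequeued mbuf: parse the packet headers and set the corresponding
 * ol_flags, l4_len and tso_segsz fields.
 */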
698 static __rte_always_inline void
699 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
700 {
701         uint16_t l4_proto = 0;
702         void *l4_hdr = NULL;
703         struct tcp_hdr *tcp_hdr = NULL;
704
705         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
706                 return;
707
708         parse_ethernet(m, &l4_proto, &l4_hdr);
709         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
710                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
711                         switch (hdr->csum_offset) {
712                         case (offsetof(struct tcp_hdr, cksum)):
713                                 if (l4_proto == IPPROTO_TCP)
714                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
715                                 break;
716                         case (offsetof(struct udp_hdr, dgram_cksum)):
717                                 if (l4_proto == IPPROTO_UDP)
718                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
719                                 break;
720                         case (offsetof(struct sctp_hdr, cksum)):
721                                 if (l4_proto == IPPROTO_SCTP)
722                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
723                                 break;
724                         default:
725                                 break;
726                         }
727                 }
728         }
729
730         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
731                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
732                 case VIRTIO_NET_HDR_GSO_TCPV4:
733                 case VIRTIO_NET_HDR_GSO_TCPV6:
734                         tcp_hdr = l4_hdr;
735                         m->ol_flags |= PKT_TX_TCP_SEG;
736                         m->tso_segsz = hdr->gso_size;
737                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
738                         break;
739                 case VIRTIO_NET_HDR_GSO_UDP:
740                         m->ol_flags |= PKT_TX_UDP_SEG;
741                         m->tso_segsz = hdr->gso_size;
742                         m->l4_len = sizeof(struct udp_hdr);
743                         break;
744                 default:
745                         RTE_LOG(WARNING, VHOST_DATA,
746                                 "unsupported gso type %u.\n", hdr->gso_type);
747                         break;
748                 }
749         }
750 }
751
752 static __rte_always_inline void
753 put_zmbuf(struct zcopy_mbuf *zmbuf)
754 {
755         zmbuf->in_use = 0;
756 }
757
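/*
 * Copy one descriptor chain into an mbuf chain, allocating extra mbuf
 * segments from mbuf_pool as needed. In dequeue zero-copy mode the
 * mbuf is made to point directly at the guest buffer (when gpa_to_hpa()
 * succeeds) instead of copying the data.
 */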
758 static __rte_always_inline int
759 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
760                   struct buf_vector *buf_vec, uint16_t nr_vec,
761                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
762 {
763         uint32_t buf_avail, buf_offset;
764         uint64_t buf_addr, buf_iova, buf_len;
765         uint32_t mbuf_avail, mbuf_offset;
766         uint32_t cpy_len;
767         struct rte_mbuf *cur = m, *prev = m;
768         struct virtio_net_hdr tmp_hdr;
769         struct virtio_net_hdr *hdr = NULL;
770         /* A counter to avoid an endless loop in the desc chain */
771         uint16_t vec_idx = 0;
772         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
773         int error = 0;
774
775         buf_addr = buf_vec[vec_idx].buf_addr;
776         buf_iova = buf_vec[vec_idx].buf_iova;
777         buf_len = buf_vec[vec_idx].buf_len;
778
779         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
780                 error = -1;
781                 goto out;
782         }
783
784         if (likely(nr_vec > 1))
785                 rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
786
787         if (virtio_net_with_host_offload(dev)) {
788                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
789                         uint64_t len;
790                         uint64_t remain = sizeof(struct virtio_net_hdr);
791                         uint64_t src;
792                         uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr;
793                         uint16_t hdr_vec_idx = 0;
794
795                         /*
796                          * No luck, the virtio-net header doesn't fit
797                          * in a contiguous virtual area.
798                          */
799                         while (remain) {
800                                 len = remain;
801                                 src = buf_vec[hdr_vec_idx].buf_addr;
802                                 rte_memcpy((void *)(uintptr_t)dst,
803                                                    (void *)(uintptr_t)src, len);
804
805                                 remain -= len;
806                                 dst += len;
807                                 hdr_vec_idx++;
808                         }
809
810                         hdr = &tmp_hdr;
811                 } else {
812                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
813                         rte_prefetch0(hdr);
814                 }
815         }
816
817         /*
818          * A virtio driver normally uses at least 2 desc buffers
819          * for Tx: the first for storing the header, and others
820          * for storing the data.
821          */
822         if (unlikely(buf_len < dev->vhost_hlen)) {
823                 buf_offset = dev->vhost_hlen - buf_len;
824                 vec_idx++;
825                 buf_addr = buf_vec[vec_idx].buf_addr;
826                 buf_iova = buf_vec[vec_idx].buf_iova;
827                 buf_len = buf_vec[vec_idx].buf_len;
828                 buf_avail  = buf_len - buf_offset;
829         } else if (buf_len == dev->vhost_hlen) {
830                 if (unlikely(++vec_idx >= nr_vec))
831                         goto out;
832                 buf_addr = buf_vec[vec_idx].buf_addr;
833                 buf_iova = buf_vec[vec_idx].buf_iova;
834                 buf_len = buf_vec[vec_idx].buf_len;
835
836                 buf_offset = 0;
837                 buf_avail = buf_len;
838         } else {
839                 buf_offset = dev->vhost_hlen;
840                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
841         }
842
843         rte_prefetch0((void *)(uintptr_t)
844                         (buf_addr + buf_offset));
845
846         PRINT_PACKET(dev,
847                         (uintptr_t)(buf_addr + buf_offset),
848                         (uint32_t)buf_avail, 0);
849
850         mbuf_offset = 0;
851         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
852         while (1) {
853                 uint64_t hpa;
854
855                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
856
857                 /*
858                  * A desc buf might span two host physical pages that are
859                  * not contiguous. In such a case (gpa_to_hpa returns 0), data
860                  * will be copied even though zero copy is enabled.
861                  */
862                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
863                                         buf_iova + buf_offset, cpy_len)))) {
864                         cur->data_len = cpy_len;
865                         cur->data_off = 0;
866                         cur->buf_addr =
867                                 (void *)(uintptr_t)(buf_addr + buf_offset);
868                         cur->buf_iova = hpa;
869
870                         /*
871                          * In zero copy mode, one mbuf can only reference data
872                          * for one desc buffer, or part of one.
873                          */
874                         mbuf_avail = cpy_len;
875                 } else {
876                         if (likely(cpy_len > MAX_BATCH_LEN ||
877                                    vq->batch_copy_nb_elems >= vq->size ||
878                                    (hdr && cur == m))) {
879                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
880                                                                    mbuf_offset),
881                                            (void *)((uintptr_t)(buf_addr +
882                                                            buf_offset)),
883                                            cpy_len);
884                         } else {
885                                 batch_copy[vq->batch_copy_nb_elems].dst =
886                                         rte_pktmbuf_mtod_offset(cur, void *,
887                                                                 mbuf_offset);
888                                 batch_copy[vq->batch_copy_nb_elems].src =
889                                         (void *)((uintptr_t)(buf_addr +
890                                                                 buf_offset));
891                                 batch_copy[vq->batch_copy_nb_elems].len =
892                                         cpy_len;
893                                 vq->batch_copy_nb_elems++;
894                         }
895                 }
896
897                 mbuf_avail  -= cpy_len;
898                 mbuf_offset += cpy_len;
899                 buf_avail -= cpy_len;
900                 buf_offset += cpy_len;
901
902                 /* This buf has reached its end, get the next one */
903                 if (buf_avail == 0) {
904                         if (++vec_idx >= nr_vec)
905                                 break;
906
907                         buf_addr = buf_vec[vec_idx].buf_addr;
908                         buf_iova = buf_vec[vec_idx].buf_iova;
909                         buf_len = buf_vec[vec_idx].buf_len;
910
911                         /*
912                          * Prefetch the desc n + 1 buffer while
913                          * the desc n buffer is being processed.
914                          */
915                         if (vec_idx + 1 < nr_vec)
916                                 rte_prefetch0((void *)(uintptr_t)
917                                                 buf_vec[vec_idx + 1].buf_addr);
918
919                         buf_offset = 0;
920                         buf_avail  = buf_len;
921
922                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
923                                         (uint32_t)buf_avail, 0);
924                 }
925
926                 /*
927                  * This mbuf has reached its end, get a new one
928                  * to hold more data.
929                  */
930                 if (mbuf_avail == 0) {
931                         cur = rte_pktmbuf_alloc(mbuf_pool);
932                         if (unlikely(cur == NULL)) {
933                                 RTE_LOG(ERR, VHOST_DATA, "Failed to "
934                                         "allocate memory for mbuf.\n");
935                                 error = -1;
936                                 goto out;
937                         }
938                         if (unlikely(dev->dequeue_zero_copy))
939                                 rte_mbuf_refcnt_update(cur, 1);
940
941                         prev->next = cur;
942                         prev->data_len = mbuf_offset;
943                         m->nb_segs += 1;
944                         m->pkt_len += mbuf_offset;
945                         prev = cur;
946
947                         mbuf_offset = 0;
948                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
949                 }
950         }
951
952         prev->data_len = mbuf_offset;
953         m->pkt_len    += mbuf_offset;
954
955         if (hdr)
956                 vhost_dequeue_offload(hdr, m);
957
958 out:
959
960         return error;
961 }
962
963 static __rte_always_inline struct zcopy_mbuf *
964 get_zmbuf(struct vhost_virtqueue *vq)
965 {
966         uint16_t i;
967         uint16_t last;
968         int tries = 0;
969
970         /* search [last_zmbuf_idx, zmbuf_size) */
971         i = vq->last_zmbuf_idx;
972         last = vq->zmbuf_size;
973
974 again:
975         for (; i < last; i++) {
976                 if (vq->zmbufs[i].in_use == 0) {
977                         vq->last_zmbuf_idx = i + 1;
978                         vq->zmbufs[i].in_use = 1;
979                         return &vq->zmbufs[i];
980                 }
981         }
982
983         tries++;
984         if (tries == 1) {
985                 /* search [0, last_zmbuf_idx) */
986                 i = 0;
987                 last = vq->last_zmbuf_idx;
988                 goto again;
989         }
990
991         return NULL;
992 }
993
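/*
 * A zero-copy mbuf is consumed once vhost holds the only remaining
 * reference on every segment (the refcount was bumped at dequeue time).
 */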
994 static __rte_always_inline bool
995 mbuf_is_consumed(struct rte_mbuf *m)
996 {
997         while (m) {
998                 if (rte_mbuf_refcnt_read(m) > 1)
999                         return false;
1000                 m = m->next;
1001         }
1002
1003         return true;
1004 }
1005
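/*
 * Undo the zero-copy attachment: point buf_addr/buf_iova back at the
 * mbuf's own data room before the mbuf is returned to its mempool.
 */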
1006 static __rte_always_inline void
1007 restore_mbuf(struct rte_mbuf *m)
1008 {
1009         uint32_t mbuf_size, priv_size;
1010
1011         while (m) {
1012                 priv_size = rte_pktmbuf_priv_size(m->pool);
1013                 mbuf_size = sizeof(struct rte_mbuf) + priv_size;
1014                 /* start of buffer is after mbuf structure and priv data */
1015
1016                 m->buf_addr = (char *)m + mbuf_size;
1017                 m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1018                 m = m->next;
1019         }
1020 }
1021
1022 uint16_t
1023 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
1024         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1025 {
1026         struct virtio_net *dev;
1027         struct rte_mbuf *rarp_mbuf = NULL;
1028         struct vhost_virtqueue *vq;
1029         uint32_t i = 0;
1030         uint16_t free_entries;
1031
1032         dev = get_device(vid);
1033         if (!dev)
1034                 return 0;
1035
1036         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1037                 RTE_LOG(ERR, VHOST_DATA,
1038                         "(%d) %s: built-in vhost net backend is disabled.\n",
1039                         dev->vid, __func__);
1040                 return 0;
1041         }
1042
1043         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
1044                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1045                         dev->vid, __func__, queue_id);
1046                 return 0;
1047         }
1048
1049         vq = dev->virtqueue[queue_id];
1050
1051         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
1052                 return 0;
1053
1054         if (unlikely(vq->enabled == 0))
1055                 goto out_access_unlock;
1056
1057         vq->batch_copy_nb_elems = 0;
1058         vq->shadow_used_idx = 0;
1059
1060         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1061                 vhost_user_iotlb_rd_lock(vq);
1062
1063         if (unlikely(vq->access_ok == 0))
1064                 if (unlikely(vring_translate(dev, vq) < 0))
1065                         goto out;
1066
1067         if (unlikely(dev->dequeue_zero_copy)) {
1068                 struct zcopy_mbuf *zmbuf, *next;
1069                 int nr_updated = 0;
1070
1071                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1072                      zmbuf != NULL; zmbuf = next) {
1073                         next = TAILQ_NEXT(zmbuf, next);
1074
1075                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1076                                 update_shadow_used_ring(vq, zmbuf->desc_idx, 0);
1077                                 nr_updated += 1;
1078
1079                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1080                                 restore_mbuf(zmbuf->mbuf);
1081                                 rte_pktmbuf_free(zmbuf->mbuf);
1082                                 put_zmbuf(zmbuf);
1083                                 vq->nr_zmbuf -= 1;
1084                         }
1085                 }
1086
1087                 flush_shadow_used_ring(dev, vq);
1088                 vhost_vring_call(dev, vq);
1089                 vq->shadow_used_idx = 0;
1090         }
1091
1092         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1093
1094         /*
1095          * Construct a RARP broadcast packet, and inject it to the "pkts"
1096          * array, so it looks like the guest actually sent such a packet.
1097          *
1098          * Check user_send_rarp() for more information.
1099          *
1100          * broadcast_rarp shares a cacheline in the virtio_net structure
1101          * with some fields that are accessed during enqueue and
1102          * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
1103          * result in false sharing between enqueue and dequeue.
1104          *
1105          * Prevent unnecessary false sharing by reading broadcast_rarp first
1106          * and only performing cmpset if the read indicates it is likely to
1107          * be set.
1108          */
1109
1110         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
1111                         rte_atomic16_cmpset((volatile uint16_t *)
1112                                 &dev->broadcast_rarp.cnt, 1, 0))) {
1113
1114                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
1115                 if (rarp_mbuf == NULL) {
1116                         RTE_LOG(ERR, VHOST_DATA,
1117                                 "Failed to make RARP packet.\n");
1118                         goto out;
1119                 }
1120                 count -= 1;
1121         }
1122
1123         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1124                         vq->last_avail_idx;
1125         if (free_entries == 0)
1126                 goto out;
1127
1128         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1129
1130         count = RTE_MIN(count, MAX_PKT_BURST);
1131         count = RTE_MIN(count, free_entries);
1132         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1133                         dev->vid, count);
1134
1135         for (i = 0; i < count; i++) {
1136                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1137                 uint16_t head_idx, dummy_len;
1138                 uint32_t nr_vec = 0;
1139                 int err;
1140
1141                 if (unlikely(fill_vec_buf(dev, vq,
1142                                                 vq->last_avail_idx + i,
1143                                                 &nr_vec, buf_vec,
1144                                                 &head_idx, &dummy_len,
1145                                                 VHOST_ACCESS_RO) < 0))
1146                         break;
1147
1148                 if (likely(dev->dequeue_zero_copy == 0))
1149                         update_shadow_used_ring(vq, head_idx, 0);
1150
1151                 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
1152
1153                 pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
1154                 if (unlikely(pkts[i] == NULL)) {
1155                         RTE_LOG(ERR, VHOST_DATA,
1156                                 "Failed to allocate memory for mbuf.\n");
1157                         break;
1158                 }
1159
1160                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1161                                 mbuf_pool);
1162                 if (unlikely(err)) {
1163                         rte_pktmbuf_free(pkts[i]);
1164                         break;
1165                 }
1166
1167                 if (unlikely(dev->dequeue_zero_copy)) {
1168                         struct zcopy_mbuf *zmbuf;
1169
1170                         zmbuf = get_zmbuf(vq);
1171                         if (!zmbuf) {
1172                                 rte_pktmbuf_free(pkts[i]);
1173                                 break;
1174                         }
1175                         zmbuf->mbuf = pkts[i];
1176                         zmbuf->desc_idx = head_idx;
1177
1178                         /*
1179                          * Pin lock the mbuf; we will check later to see
1180                          * whether the mbuf is freed (when we are the last
1181                          * user) or not. If so, we can then update the
1182                          * used ring safely.
1183                          */
1184                         rte_mbuf_refcnt_update(pkts[i], 1);
1185
1186                         vq->nr_zmbuf += 1;
1187                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1188                 }
1189         }
1190         vq->last_avail_idx += i;
1191
1192         if (likely(dev->dequeue_zero_copy == 0)) {
1193                 do_data_copy_dequeue(vq);
1194                 if (unlikely(i < count))
1195                         vq->shadow_used_idx = i;
1196                 flush_shadow_used_ring(dev, vq);
1197                 vhost_vring_call(dev, vq);
1198         }
1199
1200 out:
1201         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1202                 vhost_user_iotlb_rd_unlock(vq);
1203
1204 out_access_unlock:
1205         rte_spinlock_unlock(&vq->access_lock);
1206
1207         if (unlikely(rarp_mbuf != NULL)) {
1208                 /*
1209                  * Inject it to the head of "pkts" array, so that switch's mac
1210                  * Inject it at the head of the "pkts" array, so that the
1211                  * switch's MAC learning table gets updated first.
1212                 memmove(&pkts[1], pkts, i * sizeof(struct rte_mbuf *));
1213                 pkts[0] = rarp_mbuf;
1214                 i += 1;
1215         }
1216
1217         return i;
1218 }