b5fc4ecbe19286cd3731cbf04d4ee34cd3074168
[dpdk.git] / drivers / net / virtio / virtio_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41
42 int
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 {
45         struct virtnet_rx *rxvq = rxq;
46         struct virtqueue *vq = rxvq->vq;
47
48         return VIRTQUEUE_NUSED(vq) >= offset;
49 }
50
51 void
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 {
54         vq->vq_free_cnt += num;
55         vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
56 }
57
58 void
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 {
61         struct vring_desc *dp, *dp_tail;
62         struct vq_desc_extra *dxp;
63         uint16_t desc_idx_last = desc_idx;
64
65         dp  = &vq->vq_split.ring.desc[desc_idx];
66         dxp = &vq->vq_descx[desc_idx];
67         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68         if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69                 while (dp->flags & VRING_DESC_F_NEXT) {
70                         desc_idx_last = dp->next;
71                         dp = &vq->vq_split.ring.desc[dp->next];
72                 }
73         }
74         dxp->ndescs = 0;
75
76         /*
77          * We must append the existing free chain, if any, to the end of
78          * newly freed chain. If the virtqueue was completely used, then
79          * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
80          */
81         if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82                 vq->vq_desc_head_idx = desc_idx;
83         } else {
84                 dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85                 dp_tail->next = desc_idx;
86         }
87
88         vq->vq_desc_tail_idx = desc_idx_last;
89         dp->next = VQ_RING_DESC_CHAIN_END;
90 }
91
92 static void
93 vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
94 {
95         struct vq_desc_extra *dxp;
96
97         dxp = &vq->vq_descx[id];
98         vq->vq_free_cnt += dxp->ndescs;
99
100         if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
101                 vq->vq_desc_head_idx = id;
102         else
103                 vq->vq_descx[vq->vq_desc_tail_idx].next = id;
104
105         vq->vq_desc_tail_idx = id;
106         dxp->next = VQ_RING_DESC_CHAIN_END;
107 }
108
109 void
110 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
111 {
112         uint32_t s = mbuf->pkt_len;
113         struct rte_ether_addr *ea;
114
115         stats->bytes += s;
116
117         if (s == 64) {
118                 stats->size_bins[1]++;
119         } else if (s > 64 && s < 1024) {
120                 uint32_t bin;
121
122                 /* count zeros, and offset into correct bin */
123                 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
124                 stats->size_bins[bin]++;
125         } else {
126                 if (s < 64)
127                         stats->size_bins[0]++;
128                 else if (s < 1519)
129                         stats->size_bins[6]++;
130                 else
131                         stats->size_bins[7]++;
132         }
133
134         ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
135         if (rte_is_multicast_ether_addr(ea)) {
136                 if (rte_is_broadcast_ether_addr(ea))
137                         stats->broadcast++;
138                 else
139                         stats->multicast++;
140         }
141 }
142
143 static inline void
144 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
145 {
146         VIRTIO_DUMP_PACKET(m, m->data_len);
147
148         virtio_update_packet_stats(&rxvq->stats, m);
149 }
150
151 static uint16_t
152 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
153                                   struct rte_mbuf **rx_pkts,
154                                   uint32_t *len,
155                                   uint16_t num)
156 {
157         struct rte_mbuf *cookie;
158         uint16_t used_idx;
159         uint16_t id;
160         struct vring_packed_desc *desc;
161         uint16_t i;
162
163         desc = vq->vq_packed.ring.desc;
164
165         for (i = 0; i < num; i++) {
166                 used_idx = vq->vq_used_cons_idx;
167                 /* desc_is_used has a load-acquire or rte_cio_rmb inside
168                  * and wait for used desc in virtqueue.
169                  */
170                 if (!desc_is_used(&desc[used_idx], vq))
171                         return i;
172                 len[i] = desc[used_idx].len;
173                 id = desc[used_idx].id;
174                 cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
175                 if (unlikely(cookie == NULL)) {
176                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
177                                 vq->vq_used_cons_idx);
178                         break;
179                 }
180                 rte_prefetch0(cookie);
181                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
182                 rx_pkts[i] = cookie;
183
184                 vq->vq_free_cnt++;
185                 vq->vq_used_cons_idx++;
186                 if (vq->vq_used_cons_idx >= vq->vq_nentries) {
187                         vq->vq_used_cons_idx -= vq->vq_nentries;
188                         vq->vq_packed.used_wrap_counter ^= 1;
189                 }
190         }
191
192         return i;
193 }
194
195 static uint16_t
196 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
197                            uint32_t *len, uint16_t num)
198 {
199         struct vring_used_elem *uep;
200         struct rte_mbuf *cookie;
201         uint16_t used_idx, desc_idx;
202         uint16_t i;
203
204         /*  Caller does the check */
205         for (i = 0; i < num ; i++) {
206                 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
207                 uep = &vq->vq_split.ring.used->ring[used_idx];
208                 desc_idx = (uint16_t) uep->id;
209                 len[i] = uep->len;
210                 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
211
212                 if (unlikely(cookie == NULL)) {
213                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
214                                 vq->vq_used_cons_idx);
215                         break;
216                 }
217
218                 rte_prefetch0(cookie);
219                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
220                 rx_pkts[i]  = cookie;
221                 vq->vq_used_cons_idx++;
222                 vq_ring_free_chain(vq, desc_idx);
223                 vq->vq_descx[desc_idx].cookie = NULL;
224         }
225
226         return i;
227 }
228
229 static uint16_t
230 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
231                         struct rte_mbuf **rx_pkts,
232                         uint32_t *len,
233                         uint16_t num)
234 {
235         struct vring_used_elem *uep;
236         struct rte_mbuf *cookie;
237         uint16_t used_idx = 0;
238         uint16_t i;
239
240         if (unlikely(num == 0))
241                 return 0;
242
243         for (i = 0; i < num; i++) {
244                 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
245                 /* Desc idx same as used idx */
246                 uep = &vq->vq_split.ring.used->ring[used_idx];
247                 len[i] = uep->len;
248                 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
249
250                 if (unlikely(cookie == NULL)) {
251                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
252                                 vq->vq_used_cons_idx);
253                         break;
254                 }
255
256                 rte_prefetch0(cookie);
257                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
258                 rx_pkts[i]  = cookie;
259                 vq->vq_used_cons_idx++;
260                 vq->vq_descx[used_idx].cookie = NULL;
261         }
262
263         vq_ring_free_inorder(vq, used_idx, i);
264         return i;
265 }
266
267 #ifndef DEFAULT_TX_FREE_THRESH
268 #define DEFAULT_TX_FREE_THRESH 32
269 #endif
270
271 static void
272 virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
273 {
274         uint16_t used_idx, id, curr_id, free_cnt = 0;
275         uint16_t size = vq->vq_nentries;
276         struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
277         struct vq_desc_extra *dxp;
278
279         used_idx = vq->vq_used_cons_idx;
280         /* desc_is_used has a load-acquire or rte_cio_rmb inside
281          * and wait for used desc in virtqueue.
282          */
283         while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
284                 id = desc[used_idx].id;
285                 do {
286                         curr_id = used_idx;
287                         dxp = &vq->vq_descx[used_idx];
288                         used_idx += dxp->ndescs;
289                         free_cnt += dxp->ndescs;
290                         num -= dxp->ndescs;
291                         if (used_idx >= size) {
292                                 used_idx -= size;
293                                 vq->vq_packed.used_wrap_counter ^= 1;
294                         }
295                         if (dxp->cookie != NULL) {
296                                 rte_pktmbuf_free(dxp->cookie);
297                                 dxp->cookie = NULL;
298                         }
299                 } while (curr_id != id);
300         }
301         vq->vq_used_cons_idx = used_idx;
302         vq->vq_free_cnt += free_cnt;
303 }
304
305 static void
306 virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
307 {
308         uint16_t used_idx, id;
309         uint16_t size = vq->vq_nentries;
310         struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
311         struct vq_desc_extra *dxp;
312
313         used_idx = vq->vq_used_cons_idx;
314         /* desc_is_used has a load-acquire or rte_cio_rmb inside
315          * and wait for used desc in virtqueue.
316          */
317         while (num-- && desc_is_used(&desc[used_idx], vq)) {
318                 id = desc[used_idx].id;
319                 dxp = &vq->vq_descx[id];
320                 vq->vq_used_cons_idx += dxp->ndescs;
321                 if (vq->vq_used_cons_idx >= size) {
322                         vq->vq_used_cons_idx -= size;
323                         vq->vq_packed.used_wrap_counter ^= 1;
324                 }
325                 vq_ring_free_id_packed(vq, id);
326                 if (dxp->cookie != NULL) {
327                         rte_pktmbuf_free(dxp->cookie);
328                         dxp->cookie = NULL;
329                 }
330                 used_idx = vq->vq_used_cons_idx;
331         }
332 }
333
334 /* Cleanup from completed transmits. */
335 static inline void
336 virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
337 {
338         if (in_order)
339                 virtio_xmit_cleanup_inorder_packed(vq, num);
340         else
341                 virtio_xmit_cleanup_normal_packed(vq, num);
342 }
343
344 static void
345 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
346 {
347         uint16_t i, used_idx, desc_idx;
348         for (i = 0; i < num; i++) {
349                 struct vring_used_elem *uep;
350                 struct vq_desc_extra *dxp;
351
352                 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
353                 uep = &vq->vq_split.ring.used->ring[used_idx];
354
355                 desc_idx = (uint16_t) uep->id;
356                 dxp = &vq->vq_descx[desc_idx];
357                 vq->vq_used_cons_idx++;
358                 vq_ring_free_chain(vq, desc_idx);
359
360                 if (dxp->cookie != NULL) {
361                         rte_pktmbuf_free(dxp->cookie);
362                         dxp->cookie = NULL;
363                 }
364         }
365 }
366
367 /* Cleanup from completed inorder transmits. */
368 static __rte_always_inline void
369 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
370 {
371         uint16_t i, idx = vq->vq_used_cons_idx;
372         int16_t free_cnt = 0;
373         struct vq_desc_extra *dxp = NULL;
374
375         if (unlikely(num == 0))
376                 return;
377
378         for (i = 0; i < num; i++) {
379                 dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
380                 free_cnt += dxp->ndescs;
381                 if (dxp->cookie != NULL) {
382                         rte_pktmbuf_free(dxp->cookie);
383                         dxp->cookie = NULL;
384                 }
385         }
386
387         vq->vq_free_cnt += free_cnt;
388         vq->vq_used_cons_idx = idx;
389 }
390
391 static inline int
392 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
393                         struct rte_mbuf **cookies,
394                         uint16_t num)
395 {
396         struct vq_desc_extra *dxp;
397         struct virtio_hw *hw = vq->hw;
398         struct vring_desc *start_dp;
399         uint16_t head_idx, idx, i = 0;
400
401         if (unlikely(vq->vq_free_cnt == 0))
402                 return -ENOSPC;
403         if (unlikely(vq->vq_free_cnt < num))
404                 return -EMSGSIZE;
405
406         head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
407         start_dp = vq->vq_split.ring.desc;
408
409         while (i < num) {
410                 idx = head_idx & (vq->vq_nentries - 1);
411                 dxp = &vq->vq_descx[idx];
412                 dxp->cookie = (void *)cookies[i];
413                 dxp->ndescs = 1;
414
415                 start_dp[idx].addr =
416                                 VIRTIO_MBUF_ADDR(cookies[i], vq) +
417                                 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
418                 start_dp[idx].len =
419                                 cookies[i]->buf_len -
420                                 RTE_PKTMBUF_HEADROOM +
421                                 hw->vtnet_hdr_size;
422                 start_dp[idx].flags =  VRING_DESC_F_WRITE;
423
424                 vq_update_avail_ring(vq, idx);
425                 head_idx++;
426                 i++;
427         }
428
429         vq->vq_desc_head_idx += num;
430         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
431         return 0;
432 }
433
434 static inline int
435 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
436                                 uint16_t num)
437 {
438         struct vq_desc_extra *dxp;
439         struct virtio_hw *hw = vq->hw;
440         struct vring_desc *start_dp = vq->vq_split.ring.desc;
441         uint16_t idx, i;
442
443         if (unlikely(vq->vq_free_cnt == 0))
444                 return -ENOSPC;
445         if (unlikely(vq->vq_free_cnt < num))
446                 return -EMSGSIZE;
447
448         if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
449                 return -EFAULT;
450
451         for (i = 0; i < num; i++) {
452                 idx = vq->vq_desc_head_idx;
453                 dxp = &vq->vq_descx[idx];
454                 dxp->cookie = (void *)cookie[i];
455                 dxp->ndescs = 1;
456
457                 start_dp[idx].addr =
458                         VIRTIO_MBUF_ADDR(cookie[i], vq) +
459                         RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
460                 start_dp[idx].len =
461                         cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
462                         hw->vtnet_hdr_size;
463                 start_dp[idx].flags = VRING_DESC_F_WRITE;
464                 vq->vq_desc_head_idx = start_dp[idx].next;
465                 vq_update_avail_ring(vq, idx);
466                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
467                         vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
468                         break;
469                 }
470         }
471
472         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
473
474         return 0;
475 }
476
477 static inline int
478 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
479                                      struct rte_mbuf **cookie, uint16_t num)
480 {
481         struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
482         uint16_t flags = vq->vq_packed.cached_flags;
483         struct virtio_hw *hw = vq->hw;
484         struct vq_desc_extra *dxp;
485         uint16_t idx;
486         int i;
487
488         if (unlikely(vq->vq_free_cnt == 0))
489                 return -ENOSPC;
490         if (unlikely(vq->vq_free_cnt < num))
491                 return -EMSGSIZE;
492
493         for (i = 0; i < num; i++) {
494                 idx = vq->vq_avail_idx;
495                 dxp = &vq->vq_descx[idx];
496                 dxp->cookie = (void *)cookie[i];
497                 dxp->ndescs = 1;
498
499                 start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
500                                 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
501                 start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
502                                         + hw->vtnet_hdr_size;
503
504                 vq->vq_desc_head_idx = dxp->next;
505                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
506                         vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
507
508                 virtqueue_store_flags_packed(&start_dp[idx], flags,
509                                              hw->weak_barriers);
510
511                 if (++vq->vq_avail_idx >= vq->vq_nentries) {
512                         vq->vq_avail_idx -= vq->vq_nentries;
513                         vq->vq_packed.cached_flags ^=
514                                 VRING_PACKED_DESC_F_AVAIL_USED;
515                         flags = vq->vq_packed.cached_flags;
516                 }
517         }
518         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
519         return 0;
520 }
521
522 /* When doing TSO, the IP length is not included in the pseudo header
523  * checksum of the packet given to the PMD, but for virtio it is
524  * expected.
525  */
526 static void
527 virtio_tso_fix_cksum(struct rte_mbuf *m)
528 {
529         /* common case: header is not fragmented */
530         if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
531                         m->l4_len)) {
532                 struct rte_ipv4_hdr *iph;
533                 struct rte_ipv6_hdr *ip6h;
534                 struct rte_tcp_hdr *th;
535                 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
536                 uint32_t tmp;
537
538                 iph = rte_pktmbuf_mtod_offset(m,
539                                         struct rte_ipv4_hdr *, m->l2_len);
540                 th = RTE_PTR_ADD(iph, m->l3_len);
541                 if ((iph->version_ihl >> 4) == 4) {
542                         iph->hdr_checksum = 0;
543                         iph->hdr_checksum = rte_ipv4_cksum(iph);
544                         ip_len = iph->total_length;
545                         ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
546                                 m->l3_len);
547                 } else {
548                         ip6h = (struct rte_ipv6_hdr *)iph;
549                         ip_paylen = ip6h->payload_len;
550                 }
551
552                 /* calculate the new phdr checksum not including ip_paylen */
553                 prev_cksum = th->cksum;
554                 tmp = prev_cksum;
555                 tmp += ip_paylen;
556                 tmp = (tmp & 0xffff) + (tmp >> 16);
557                 new_cksum = tmp;
558
559                 /* replace it in the packet */
560                 th->cksum = new_cksum;
561         }
562 }
563
564
565 /* avoid write operation when necessary, to lessen cache issues */
566 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
567         if ((var) != (val))                     \
568                 (var) = (val);                  \
569 } while (0)
570
571 #define virtqueue_clear_net_hdr(_hdr) do {              \
572         ASSIGN_UNLESS_EQUAL((_hdr)->csum_start, 0);     \
573         ASSIGN_UNLESS_EQUAL((_hdr)->csum_offset, 0);    \
574         ASSIGN_UNLESS_EQUAL((_hdr)->flags, 0);          \
575         ASSIGN_UNLESS_EQUAL((_hdr)->gso_type, 0);       \
576         ASSIGN_UNLESS_EQUAL((_hdr)->gso_size, 0);       \
577         ASSIGN_UNLESS_EQUAL((_hdr)->hdr_len, 0);        \
578 } while (0)
579
580 static inline void
581 virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
582                         struct rte_mbuf *cookie,
583                         bool offload)
584 {
585         if (offload) {
586                 if (cookie->ol_flags & PKT_TX_TCP_SEG)
587                         cookie->ol_flags |= PKT_TX_TCP_CKSUM;
588
589                 switch (cookie->ol_flags & PKT_TX_L4_MASK) {
590                 case PKT_TX_UDP_CKSUM:
591                         hdr->csum_start = cookie->l2_len + cookie->l3_len;
592                         hdr->csum_offset = offsetof(struct rte_udp_hdr,
593                                 dgram_cksum);
594                         hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
595                         break;
596
597                 case PKT_TX_TCP_CKSUM:
598                         hdr->csum_start = cookie->l2_len + cookie->l3_len;
599                         hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
600                         hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
601                         break;
602
603                 default:
604                         ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
605                         ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
606                         ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
607                         break;
608                 }
609
610                 /* TCP Segmentation Offload */
611                 if (cookie->ol_flags & PKT_TX_TCP_SEG) {
612                         hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
613                                 VIRTIO_NET_HDR_GSO_TCPV6 :
614                                 VIRTIO_NET_HDR_GSO_TCPV4;
615                         hdr->gso_size = cookie->tso_segsz;
616                         hdr->hdr_len =
617                                 cookie->l2_len +
618                                 cookie->l3_len +
619                                 cookie->l4_len;
620                 } else {
621                         ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
622                         ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
623                         ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
624                 }
625         }
626 }
627
628 static inline void
629 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
630                         struct rte_mbuf **cookies,
631                         uint16_t num)
632 {
633         struct vq_desc_extra *dxp;
634         struct virtqueue *vq = txvq->vq;
635         struct vring_desc *start_dp;
636         struct virtio_net_hdr *hdr;
637         uint16_t idx;
638         uint16_t head_size = vq->hw->vtnet_hdr_size;
639         uint16_t i = 0;
640
641         idx = vq->vq_desc_head_idx;
642         start_dp = vq->vq_split.ring.desc;
643
644         while (i < num) {
645                 idx = idx & (vq->vq_nentries - 1);
646                 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
647                 dxp->cookie = (void *)cookies[i];
648                 dxp->ndescs = 1;
649                 virtio_update_packet_stats(&txvq->stats, cookies[i]);
650
651                 hdr = (struct virtio_net_hdr *)(char *)cookies[i]->buf_addr +
652                         cookies[i]->data_off - head_size;
653
654                 /* if offload disabled, hdr is not zeroed yet, do it now */
655                 if (!vq->hw->has_tx_offload)
656                         virtqueue_clear_net_hdr(hdr);
657                 else
658                         virtqueue_xmit_offload(hdr, cookies[i], true);
659
660                 start_dp[idx].addr  =
661                         VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
662                 start_dp[idx].len   = cookies[i]->data_len + head_size;
663                 start_dp[idx].flags = 0;
664
665
666                 vq_update_avail_ring(vq, idx);
667
668                 idx++;
669                 i++;
670         };
671
672         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
673         vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
674 }
675
676 static inline void
677 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
678                                    struct rte_mbuf *cookie,
679                                    int in_order)
680 {
681         struct virtqueue *vq = txvq->vq;
682         struct vring_packed_desc *dp;
683         struct vq_desc_extra *dxp;
684         uint16_t idx, id, flags;
685         uint16_t head_size = vq->hw->vtnet_hdr_size;
686         struct virtio_net_hdr *hdr;
687
688         id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
689         idx = vq->vq_avail_idx;
690         dp = &vq->vq_packed.ring.desc[idx];
691
692         dxp = &vq->vq_descx[id];
693         dxp->ndescs = 1;
694         dxp->cookie = cookie;
695
696         flags = vq->vq_packed.cached_flags;
697
698         /* prepend cannot fail, checked by caller */
699         hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
700                 cookie->data_off - head_size;
701
702         /* if offload disabled, hdr is not zeroed yet, do it now */
703         if (!vq->hw->has_tx_offload)
704                 virtqueue_clear_net_hdr(hdr);
705         else
706                 virtqueue_xmit_offload(hdr, cookie, true);
707
708         dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
709         dp->len  = cookie->data_len + head_size;
710         dp->id   = id;
711
712         if (++vq->vq_avail_idx >= vq->vq_nentries) {
713                 vq->vq_avail_idx -= vq->vq_nentries;
714                 vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
715         }
716
717         vq->vq_free_cnt--;
718
719         if (!in_order) {
720                 vq->vq_desc_head_idx = dxp->next;
721                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
722                         vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
723         }
724
725         virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
726 }
727
728 static inline void
729 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
730                               uint16_t needed, int can_push, int in_order)
731 {
732         struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
733         struct vq_desc_extra *dxp;
734         struct virtqueue *vq = txvq->vq;
735         struct vring_packed_desc *start_dp, *head_dp;
736         uint16_t idx, id, head_idx, head_flags;
737         uint16_t head_size = vq->hw->vtnet_hdr_size;
738         struct virtio_net_hdr *hdr;
739         uint16_t prev;
740         bool prepend_header = false;
741
742         id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
743
744         dxp = &vq->vq_descx[id];
745         dxp->ndescs = needed;
746         dxp->cookie = cookie;
747
748         head_idx = vq->vq_avail_idx;
749         idx = head_idx;
750         prev = head_idx;
751         start_dp = vq->vq_packed.ring.desc;
752
753         head_dp = &vq->vq_packed.ring.desc[idx];
754         head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
755         head_flags |= vq->vq_packed.cached_flags;
756
757         if (can_push) {
758                 /* prepend cannot fail, checked by caller */
759                 hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
760                         cookie->data_off - head_size;
761                 prepend_header = true;
762
763                 /* if offload disabled, it is not zeroed below, do it now */
764                 if (!vq->hw->has_tx_offload)
765                         virtqueue_clear_net_hdr(hdr);
766         } else {
767                 /* setup first tx ring slot to point to header
768                  * stored in reserved region.
769                  */
770                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
771                         RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
772                 start_dp[idx].len   = vq->hw->vtnet_hdr_size;
773                 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
774                 idx++;
775                 if (idx >= vq->vq_nentries) {
776                         idx -= vq->vq_nentries;
777                         vq->vq_packed.cached_flags ^=
778                                 VRING_PACKED_DESC_F_AVAIL_USED;
779                 }
780         }
781
782         virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
783
784         do {
785                 uint16_t flags;
786
787                 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
788                 start_dp[idx].len  = cookie->data_len;
789                 if (prepend_header) {
790                         start_dp[idx].addr -= head_size;
791                         start_dp[idx].len += head_size;
792                         prepend_header = false;
793                 }
794
795                 if (likely(idx != head_idx)) {
796                         flags = cookie->next ? VRING_DESC_F_NEXT : 0;
797                         flags |= vq->vq_packed.cached_flags;
798                         start_dp[idx].flags = flags;
799                 }
800                 prev = idx;
801                 idx++;
802                 if (idx >= vq->vq_nentries) {
803                         idx -= vq->vq_nentries;
804                         vq->vq_packed.cached_flags ^=
805                                 VRING_PACKED_DESC_F_AVAIL_USED;
806                 }
807         } while ((cookie = cookie->next) != NULL);
808
809         start_dp[prev].id = id;
810
811         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
812         vq->vq_avail_idx = idx;
813
814         if (!in_order) {
815                 vq->vq_desc_head_idx = dxp->next;
816                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
817                         vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
818         }
819
820         virtqueue_store_flags_packed(head_dp, head_flags,
821                                      vq->hw->weak_barriers);
822 }
823
824 static inline void
825 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
826                         uint16_t needed, int use_indirect, int can_push,
827                         int in_order)
828 {
829         struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
830         struct vq_desc_extra *dxp;
831         struct virtqueue *vq = txvq->vq;
832         struct vring_desc *start_dp;
833         uint16_t seg_num = cookie->nb_segs;
834         uint16_t head_idx, idx;
835         uint16_t head_size = vq->hw->vtnet_hdr_size;
836         bool prepend_header = false;
837         struct virtio_net_hdr *hdr;
838
839         head_idx = vq->vq_desc_head_idx;
840         idx = head_idx;
841         if (in_order)
842                 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
843         else
844                 dxp = &vq->vq_descx[idx];
845         dxp->cookie = (void *)cookie;
846         dxp->ndescs = needed;
847
848         start_dp = vq->vq_split.ring.desc;
849
850         if (can_push) {
851                 /* prepend cannot fail, checked by caller */
852                 hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
853                         cookie->data_off - head_size;
854                 prepend_header = true;
855
856                 /* if offload disabled, it is not zeroed below, do it now */
857                 if (!vq->hw->has_tx_offload)
858                         virtqueue_clear_net_hdr(hdr);
859         } else if (use_indirect) {
860                 /* setup tx ring slot to point to indirect
861                  * descriptor list stored in reserved region.
862                  *
863                  * the first slot in indirect ring is already preset
864                  * to point to the header in reserved region
865                  */
866                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
867                         RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
868                 start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
869                 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
870                 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
871
872                 /* loop below will fill in rest of the indirect elements */
873                 start_dp = txr[idx].tx_indir;
874                 idx = 1;
875         } else {
876                 /* setup first tx ring slot to point to header
877                  * stored in reserved region.
878                  */
879                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
880                         RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
881                 start_dp[idx].len   = vq->hw->vtnet_hdr_size;
882                 start_dp[idx].flags = VRING_DESC_F_NEXT;
883                 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
884
885                 idx = start_dp[idx].next;
886         }
887
888         virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
889
890         do {
891                 start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
892                 start_dp[idx].len   = cookie->data_len;
893                 if (prepend_header) {
894                         start_dp[idx].addr -= head_size;
895                         start_dp[idx].len += head_size;
896                         prepend_header = false;
897                 }
898                 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
899                 idx = start_dp[idx].next;
900         } while ((cookie = cookie->next) != NULL);
901
902         if (use_indirect)
903                 idx = vq->vq_split.ring.desc[head_idx].next;
904
905         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
906
907         vq->vq_desc_head_idx = idx;
908         vq_update_avail_ring(vq, head_idx);
909
910         if (!in_order) {
911                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
912                         vq->vq_desc_tail_idx = idx;
913         }
914 }
915
916 void
917 virtio_dev_cq_start(struct rte_eth_dev *dev)
918 {
919         struct virtio_hw *hw = dev->data->dev_private;
920
921         if (hw->cvq && hw->cvq->vq) {
922                 rte_spinlock_init(&hw->cvq->lock);
923                 VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
924         }
925 }
926
927 int
928 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
929                         uint16_t queue_idx,
930                         uint16_t nb_desc,
931                         unsigned int socket_id __rte_unused,
932                         const struct rte_eth_rxconf *rx_conf __rte_unused,
933                         struct rte_mempool *mp)
934 {
935         uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
936         struct virtio_hw *hw = dev->data->dev_private;
937         struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
938         struct virtnet_rx *rxvq;
939
940         PMD_INIT_FUNC_TRACE();
941
942         if (nb_desc == 0 || nb_desc > vq->vq_nentries)
943                 nb_desc = vq->vq_nentries;
944         vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
945
946         rxvq = &vq->rxq;
947         rxvq->queue_id = queue_idx;
948         rxvq->mpool = mp;
949         dev->data->rx_queues[queue_idx] = rxvq;
950
951         return 0;
952 }
953
954 int
955 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
956 {
957         uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
958         struct virtio_hw *hw = dev->data->dev_private;
959         struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
960         struct virtnet_rx *rxvq = &vq->rxq;
961         struct rte_mbuf *m;
962         uint16_t desc_idx;
963         int error, nbufs, i;
964
965         PMD_INIT_FUNC_TRACE();
966
967         /* Allocate blank mbufs for the each rx descriptor */
968         nbufs = 0;
969
970         if (hw->use_simple_rx) {
971                 for (desc_idx = 0; desc_idx < vq->vq_nentries;
972                      desc_idx++) {
973                         vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
974                         vq->vq_split.ring.desc[desc_idx].flags =
975                                 VRING_DESC_F_WRITE;
976                 }
977
978                 virtio_rxq_vec_setup(rxvq);
979         }
980
981         memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
982         for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
983              desc_idx++) {
984                 vq->sw_ring[vq->vq_nentries + desc_idx] =
985                         &rxvq->fake_mbuf;
986         }
987
988         if (hw->use_simple_rx) {
989                 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
990                         virtio_rxq_rearm_vec(rxvq);
991                         nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
992                 }
993         } else if (hw->use_inorder_rx) {
994                 if ((!virtqueue_full(vq))) {
995                         uint16_t free_cnt = vq->vq_free_cnt;
996                         struct rte_mbuf *pkts[free_cnt];
997
998                         if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
999                                 free_cnt)) {
1000                                 error = virtqueue_enqueue_refill_inorder(vq,
1001                                                 pkts,
1002                                                 free_cnt);
1003                                 if (unlikely(error)) {
1004                                         for (i = 0; i < free_cnt; i++)
1005                                                 rte_pktmbuf_free(pkts[i]);
1006                                 }
1007                         }
1008
1009                         nbufs += free_cnt;
1010                         vq_update_avail_idx(vq);
1011                 }
1012         } else {
1013                 while (!virtqueue_full(vq)) {
1014                         m = rte_mbuf_raw_alloc(rxvq->mpool);
1015                         if (m == NULL)
1016                                 break;
1017
1018                         /* Enqueue allocated buffers */
1019                         if (vtpci_packed_queue(vq->hw))
1020                                 error = virtqueue_enqueue_recv_refill_packed(vq,
1021                                                 &m, 1);
1022                         else
1023                                 error = virtqueue_enqueue_recv_refill(vq,
1024                                                 &m, 1);
1025                         if (error) {
1026                                 rte_pktmbuf_free(m);
1027                                 break;
1028                         }
1029                         nbufs++;
1030                 }
1031
1032                 if (!vtpci_packed_queue(vq->hw))
1033                         vq_update_avail_idx(vq);
1034         }
1035
1036         PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
1037
1038         VIRTQUEUE_DUMP(vq);
1039
1040         return 0;
1041 }
1042
1043 /*
1044  * struct rte_eth_dev *dev: Used to update dev
1045  * uint16_t nb_desc: Defaults to values read from config space
1046  * unsigned int socket_id: Used to allocate memzone
1047  * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
1048  * uint16_t queue_idx: Just used as an index in dev txq list
1049  */
1050 int
1051 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
1052                         uint16_t queue_idx,
1053                         uint16_t nb_desc,
1054                         unsigned int socket_id __rte_unused,
1055                         const struct rte_eth_txconf *tx_conf)
1056 {
1057         uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
1058         struct virtio_hw *hw = dev->data->dev_private;
1059         struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
1060         struct virtnet_tx *txvq;
1061         uint16_t tx_free_thresh;
1062
1063         PMD_INIT_FUNC_TRACE();
1064
1065         if (nb_desc == 0 || nb_desc > vq->vq_nentries)
1066                 nb_desc = vq->vq_nentries;
1067         vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
1068
1069         txvq = &vq->txq;
1070         txvq->queue_id = queue_idx;
1071
1072         tx_free_thresh = tx_conf->tx_free_thresh;
1073         if (tx_free_thresh == 0)
1074                 tx_free_thresh =
1075                         RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
1076
1077         if (tx_free_thresh >= (vq->vq_nentries - 3)) {
1078                 RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
1079                         "number of TX entries minus 3 (%u)."
1080                         " (tx_free_thresh=%u port=%u queue=%u)\n",
1081                         vq->vq_nentries - 3,
1082                         tx_free_thresh, dev->data->port_id, queue_idx);
1083                 return -EINVAL;
1084         }
1085
1086         vq->vq_free_thresh = tx_free_thresh;
1087
1088         dev->data->tx_queues[queue_idx] = txvq;
1089         return 0;
1090 }
1091
1092 int
1093 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
1094                                 uint16_t queue_idx)
1095 {
1096         uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
1097         struct virtio_hw *hw = dev->data->dev_private;
1098         struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
1099
1100         PMD_INIT_FUNC_TRACE();
1101
1102         if (!vtpci_packed_queue(hw)) {
1103                 if (hw->use_inorder_tx)
1104                         vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
1105         }
1106
1107         VIRTQUEUE_DUMP(vq);
1108
1109         return 0;
1110 }
1111
1112 static inline void
1113 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
1114 {
1115         int error;
1116         /*
1117          * Requeue the discarded mbuf. This should always be
1118          * successful since it was just dequeued.
1119          */
1120         if (vtpci_packed_queue(vq->hw))
1121                 error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
1122         else
1123                 error = virtqueue_enqueue_recv_refill(vq, &m, 1);
1124
1125         if (unlikely(error)) {
1126                 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
1127                 rte_pktmbuf_free(m);
1128         }
1129 }
1130
1131 static inline void
1132 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
1133 {
1134         int error;
1135
1136         error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
1137         if (unlikely(error)) {
1138                 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
1139                 rte_pktmbuf_free(m);
1140         }
1141 }
1142
1143 /* Optionally fill offload information in structure */
1144 static inline int
1145 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
1146 {
1147         struct rte_net_hdr_lens hdr_lens;
1148         uint32_t hdrlen, ptype;
1149         int l4_supported = 0;
1150
1151         /* nothing to do */
1152         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1153                 return 0;
1154
1155         m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
1156
1157         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1158         m->packet_type = ptype;
1159         if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
1160             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
1161             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
1162                 l4_supported = 1;
1163
1164         if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1165                 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
1166                 if (hdr->csum_start <= hdrlen && l4_supported) {
1167                         m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
1168                 } else {
1169                         /* Unknown proto or tunnel, do sw cksum. We can assume
1170                          * the cksum field is in the first segment since the
1171                          * buffers we provided to the host are large enough.
1172                          * In case of SCTP, this will be wrong since it's a CRC
1173                          * but there's nothing we can do.
1174                          */
1175                         uint16_t csum = 0, off;
1176
1177                         rte_raw_cksum_mbuf(m, hdr->csum_start,
1178                                 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
1179                                 &csum);
1180                         if (likely(csum != 0xffff))
1181                                 csum = ~csum;
1182                         off = hdr->csum_offset + hdr->csum_start;
1183                         if (rte_pktmbuf_data_len(m) >= off + 1)
1184                                 *rte_pktmbuf_mtod_offset(m, uint16_t *,
1185                                         off) = csum;
1186                 }
1187         } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
1188                 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1189         }
1190
1191         /* GSO request, save required information in mbuf */
1192         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1193                 /* Check unsupported modes */
1194                 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
1195                     (hdr->gso_size == 0)) {
1196                         return -EINVAL;
1197                 }
1198
1199                 /* Update mss lengthes in mbuf */
1200                 m->tso_segsz = hdr->gso_size;
1201                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1202                         case VIRTIO_NET_HDR_GSO_TCPV4:
1203                         case VIRTIO_NET_HDR_GSO_TCPV6:
1204                                 m->ol_flags |= PKT_RX_LRO | \
1205                                         PKT_RX_L4_CKSUM_NONE;
1206                                 break;
1207                         default:
1208                                 return -EINVAL;
1209                 }
1210         }
1211
1212         return 0;
1213 }
1214
1215 #define VIRTIO_MBUF_BURST_SZ 64
1216 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
1217 uint16_t
1218 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1219 {
1220         struct virtnet_rx *rxvq = rx_queue;
1221         struct virtqueue *vq = rxvq->vq;
1222         struct virtio_hw *hw = vq->hw;
1223         struct rte_mbuf *rxm;
1224         uint16_t nb_used, num, nb_rx;
1225         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1226         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1227         int error;
1228         uint32_t i, nb_enqueued;
1229         uint32_t hdr_size;
1230         struct virtio_net_hdr *hdr;
1231
1232         nb_rx = 0;
1233         if (unlikely(hw->started == 0))
1234                 return nb_rx;
1235
1236         nb_used = VIRTQUEUE_NUSED(vq);
1237
1238         virtio_rmb(hw->weak_barriers);
1239
1240         num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1241         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1242                 num = VIRTIO_MBUF_BURST_SZ;
1243         if (likely(num > DESC_PER_CACHELINE))
1244                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1245
1246         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1247         PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1248
1249         nb_enqueued = 0;
1250         hdr_size = hw->vtnet_hdr_size;
1251
1252         for (i = 0; i < num ; i++) {
1253                 rxm = rcv_pkts[i];
1254
1255                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1256
1257                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1258                         PMD_RX_LOG(ERR, "Packet drop");
1259                         nb_enqueued++;
1260                         virtio_discard_rxbuf(vq, rxm);
1261                         rxvq->stats.errors++;
1262                         continue;
1263                 }
1264
1265                 rxm->port = rxvq->port_id;
1266                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1267                 rxm->ol_flags = 0;
1268                 rxm->vlan_tci = 0;
1269
1270                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1271                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1272
1273                 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1274                         RTE_PKTMBUF_HEADROOM - hdr_size);
1275
1276                 if (hw->vlan_strip)
1277                         rte_vlan_strip(rxm);
1278
1279                 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1280                         virtio_discard_rxbuf(vq, rxm);
1281                         rxvq->stats.errors++;
1282                         continue;
1283                 }
1284
1285                 virtio_rx_stats_updated(rxvq, rxm);
1286
1287                 rx_pkts[nb_rx++] = rxm;
1288         }
1289
1290         rxvq->stats.packets += nb_rx;
1291
1292         /* Allocate new mbuf for the used descriptor */
1293         if (likely(!virtqueue_full(vq))) {
1294                 uint16_t free_cnt = vq->vq_free_cnt;
1295                 struct rte_mbuf *new_pkts[free_cnt];
1296
1297                 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1298                                                 free_cnt) == 0)) {
1299                         error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1300                                         free_cnt);
1301                         if (unlikely(error)) {
1302                                 for (i = 0; i < free_cnt; i++)
1303                                         rte_pktmbuf_free(new_pkts[i]);
1304                         }
1305                         nb_enqueued += free_cnt;
1306                 } else {
1307                         struct rte_eth_dev *dev =
1308                                 &rte_eth_devices[rxvq->port_id];
1309                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1310                 }
1311         }
1312
1313         if (likely(nb_enqueued)) {
1314                 vq_update_avail_idx(vq);
1315
1316                 if (unlikely(virtqueue_kick_prepare(vq))) {
1317                         virtqueue_notify(vq);
1318                         PMD_RX_LOG(DEBUG, "Notified");
1319                 }
1320         }
1321
1322         return nb_rx;
1323 }
1324
1325 uint16_t
1326 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1327                         uint16_t nb_pkts)
1328 {
1329         struct virtnet_rx *rxvq = rx_queue;
1330         struct virtqueue *vq = rxvq->vq;
1331         struct virtio_hw *hw = vq->hw;
1332         struct rte_mbuf *rxm;
1333         uint16_t num, nb_rx;
1334         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1335         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1336         int error;
1337         uint32_t i, nb_enqueued;
1338         uint32_t hdr_size;
1339         struct virtio_net_hdr *hdr;
1340
1341         nb_rx = 0;
1342         if (unlikely(hw->started == 0))
1343                 return nb_rx;
1344
1345         num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1346         if (likely(num > DESC_PER_CACHELINE))
1347                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1348
1349         num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1350         PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1351
1352         nb_enqueued = 0;
1353         hdr_size = hw->vtnet_hdr_size;
1354
1355         for (i = 0; i < num; i++) {
1356                 rxm = rcv_pkts[i];
1357
1358                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1359
1360                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1361                         PMD_RX_LOG(ERR, "Packet drop");
1362                         nb_enqueued++;
1363                         virtio_discard_rxbuf(vq, rxm);
1364                         rxvq->stats.errors++;
1365                         continue;
1366                 }
1367
1368                 rxm->port = rxvq->port_id;
1369                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1370                 rxm->ol_flags = 0;
1371                 rxm->vlan_tci = 0;
1372
1373                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1374                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1375
1376                 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1377                         RTE_PKTMBUF_HEADROOM - hdr_size);
1378
1379                 if (hw->vlan_strip)
1380                         rte_vlan_strip(rxm);
1381
1382                 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1383                         virtio_discard_rxbuf(vq, rxm);
1384                         rxvq->stats.errors++;
1385                         continue;
1386                 }
1387
1388                 virtio_rx_stats_updated(rxvq, rxm);
1389
1390                 rx_pkts[nb_rx++] = rxm;
1391         }
1392
1393         rxvq->stats.packets += nb_rx;
1394
1395         /* Allocate new mbuf for the used descriptor */
1396         if (likely(!virtqueue_full(vq))) {
1397                 uint16_t free_cnt = vq->vq_free_cnt;
1398                 struct rte_mbuf *new_pkts[free_cnt];
1399
1400                 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1401                                                 free_cnt) == 0)) {
1402                         error = virtqueue_enqueue_recv_refill_packed(vq,
1403                                         new_pkts, free_cnt);
1404                         if (unlikely(error)) {
1405                                 for (i = 0; i < free_cnt; i++)
1406                                         rte_pktmbuf_free(new_pkts[i]);
1407                         }
1408                         nb_enqueued += free_cnt;
1409                 } else {
1410                         struct rte_eth_dev *dev =
1411                                 &rte_eth_devices[rxvq->port_id];
1412                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1413                 }
1414         }
1415
1416         if (likely(nb_enqueued)) {
1417                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1418                         virtqueue_notify(vq);
1419                         PMD_RX_LOG(DEBUG, "Notified");
1420                 }
1421         }
1422
1423         return nb_rx;
1424 }
1425
1426
1427 uint16_t
1428 virtio_recv_pkts_inorder(void *rx_queue,
1429                         struct rte_mbuf **rx_pkts,
1430                         uint16_t nb_pkts)
1431 {
1432         struct virtnet_rx *rxvq = rx_queue;
1433         struct virtqueue *vq = rxvq->vq;
1434         struct virtio_hw *hw = vq->hw;
1435         struct rte_mbuf *rxm;
1436         struct rte_mbuf *prev = NULL;
1437         uint16_t nb_used, num, nb_rx;
1438         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1439         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1440         int error;
1441         uint32_t nb_enqueued;
1442         uint32_t seg_num;
1443         uint32_t seg_res;
1444         uint32_t hdr_size;
1445         int32_t i;
1446
1447         nb_rx = 0;
1448         if (unlikely(hw->started == 0))
1449                 return nb_rx;
1450
1451         nb_used = VIRTQUEUE_NUSED(vq);
1452         nb_used = RTE_MIN(nb_used, nb_pkts);
1453         nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1454
1455         virtio_rmb(hw->weak_barriers);
1456
1457         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1458
1459         nb_enqueued = 0;
1460         seg_num = 1;
1461         seg_res = 0;
1462         hdr_size = hw->vtnet_hdr_size;
1463
1464         num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1465
1466         for (i = 0; i < num; i++) {
1467                 struct virtio_net_hdr_mrg_rxbuf *header;
1468
1469                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1470                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1471
1472                 rxm = rcv_pkts[i];
1473
1474                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1475                         PMD_RX_LOG(ERR, "Packet drop");
1476                         nb_enqueued++;
1477                         virtio_discard_rxbuf_inorder(vq, rxm);
1478                         rxvq->stats.errors++;
1479                         continue;
1480                 }
1481
1482                 header = (struct virtio_net_hdr_mrg_rxbuf *)
1483                          ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1484                          - hdr_size);
1485
1486                 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1487                         seg_num = header->num_buffers;
1488                         if (seg_num == 0)
1489                                 seg_num = 1;
1490                 } else {
1491                         seg_num = 1;
1492                 }
1493
1494                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1495                 rxm->nb_segs = seg_num;
1496                 rxm->ol_flags = 0;
1497                 rxm->vlan_tci = 0;
1498                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1499                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1500
1501                 rxm->port = rxvq->port_id;
1502
1503                 rx_pkts[nb_rx] = rxm;
1504                 prev = rxm;
1505
1506                 if (vq->hw->has_rx_offload &&
1507                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1508                         virtio_discard_rxbuf_inorder(vq, rxm);
1509                         rxvq->stats.errors++;
1510                         continue;
1511                 }
1512
1513                 if (hw->vlan_strip)
1514                         rte_vlan_strip(rx_pkts[nb_rx]);
1515
1516                 seg_res = seg_num - 1;
1517
1518                 /* Merge remaining segments */
1519                 while (seg_res != 0 && i < (num - 1)) {
1520                         i++;
1521
1522                         rxm = rcv_pkts[i];
1523                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1524                         rxm->pkt_len = (uint32_t)(len[i]);
1525                         rxm->data_len = (uint16_t)(len[i]);
1526
1527                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1528
1529                         prev->next = rxm;
1530                         prev = rxm;
1531                         seg_res -= 1;
1532                 }
1533
1534                 if (!seg_res) {
1535                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1536                         nb_rx++;
1537                 }
1538         }
1539
1540         /* The last packet may still need more segments merged */
1541         while (seg_res != 0) {
1542                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1543                                         VIRTIO_MBUF_BURST_SZ);
1544
1545                 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1546                         virtio_rmb(hw->weak_barriers);
1547                         num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1548                                                            rcv_cnt);
1549                         uint16_t extra_idx = 0;
1550
1551                         rcv_cnt = num;
1552                         while (extra_idx < rcv_cnt) {
1553                                 rxm = rcv_pkts[extra_idx];
1554                                 rxm->data_off =
1555                                         RTE_PKTMBUF_HEADROOM - hdr_size;
1556                                 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1557                                 rxm->data_len = (uint16_t)(len[extra_idx]);
1558                                 prev->next = rxm;
1559                                 prev = rxm;
1560                                 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1561                                 extra_idx += 1;
1562                         }
1563                         seg_res -= rcv_cnt;
1564
1565                         if (!seg_res) {
1566                                 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1567                                 nb_rx++;
1568                         }
1569                 } else {
1570                         PMD_RX_LOG(ERR, "Not enough segments for packet.");
1572                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1573                         rxvq->stats.errors++;
1574                         break;
1575                 }
1576         }
1577
1578         rxvq->stats.packets += nb_rx;
1579
1580         /* Allocate new mbufs to refill the used descriptors */
1582         if (likely(!virtqueue_full(vq))) {
1583                 /* free_cnt may include mrg descs */
1584                 uint16_t free_cnt = vq->vq_free_cnt;
1585                 struct rte_mbuf *new_pkts[free_cnt];
1586
1587                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1588                         error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1589                                         free_cnt);
1590                         if (unlikely(error)) {
1591                                 for (i = 0; i < free_cnt; i++)
1592                                         rte_pktmbuf_free(new_pkts[i]);
1593                         }
1594                         nb_enqueued += free_cnt;
1595                 } else {
1596                         struct rte_eth_dev *dev =
1597                                 &rte_eth_devices[rxvq->port_id];
1598                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1599                 }
1600         }
1601
1602         if (likely(nb_enqueued)) {
1603                 vq_update_avail_idx(vq);
1604
1605                 if (unlikely(virtqueue_kick_prepare(vq))) {
1606                         virtqueue_notify(vq);
1607                         PMD_RX_LOG(DEBUG, "Notified");
1608                 }
1609         }
1610
1611         return nb_rx;
1612 }
1613
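     /*
      * Receive burst for split rings with VIRTIO_NET_F_MRG_RXBUF: a packet
      * may span several receive buffers, with num_buffers in the first
      * buffer's virtio-net header giving the segment count.
      */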
1614 uint16_t
1615 virtio_recv_mergeable_pkts(void *rx_queue,
1616                         struct rte_mbuf **rx_pkts,
1617                         uint16_t nb_pkts)
1618 {
1619         struct virtnet_rx *rxvq = rx_queue;
1620         struct virtqueue *vq = rxvq->vq;
1621         struct virtio_hw *hw = vq->hw;
1622         struct rte_mbuf *rxm;
1623         struct rte_mbuf *prev = NULL;
1624         uint16_t nb_used, num, nb_rx = 0;
1625         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1626         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1627         int error;
1628         uint32_t nb_enqueued = 0;
1629         uint32_t seg_num = 0;
1630         uint32_t seg_res = 0;
1631         uint32_t hdr_size = hw->vtnet_hdr_size;
1632         int32_t i;
1633
1634         if (unlikely(hw->started == 0))
1635                 return nb_rx;
1636
1637         nb_used = VIRTQUEUE_NUSED(vq);
1638
1639         virtio_rmb(hw->weak_barriers);
1640
1641         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1642
1643         num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1644         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1645                 num = VIRTIO_MBUF_BURST_SZ;
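             /* Trim the burst so it ends on a cache-line boundary of the
              * used ring; the next call then starts on a fresh cache line.
              */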
1646         if (likely(num > DESC_PER_CACHELINE))
1647                 num = num - ((vq->vq_used_cons_idx + num) %
1648                                 DESC_PER_CACHELINE);
1649
1651         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1652
1653         for (i = 0; i < num; i++) {
1654                 struct virtio_net_hdr_mrg_rxbuf *header;
1655
1656                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1657                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1658
1659                 rxm = rcv_pkts[i];
1660
1661                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1662                         PMD_RX_LOG(ERR, "Packet drop");
1663                         nb_enqueued++;
1664                         virtio_discard_rxbuf(vq, rxm);
1665                         rxvq->stats.errors++;
1666                         continue;
1667                 }
1668
1669                 header = (struct virtio_net_hdr_mrg_rxbuf *)
1670                          ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1671                          - hdr_size);
1672                 seg_num = header->num_buffers;
1673                 if (seg_num == 0)
1674                         seg_num = 1;
1675
1676                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1677                 rxm->nb_segs = seg_num;
1678                 rxm->ol_flags = 0;
1679                 rxm->vlan_tci = 0;
1680                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1681                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1682
1683                 rxm->port = rxvq->port_id;
1684
1685                 rx_pkts[nb_rx] = rxm;
1686                 prev = rxm;
1687
1688                 if (hw->has_rx_offload &&
1689                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1690                         virtio_discard_rxbuf(vq, rxm);
1691                         rxvq->stats.errors++;
1692                         continue;
1693                 }
1694
1695                 if (hw->vlan_strip)
1696                         rte_vlan_strip(rx_pkts[nb_rx]);
1697
1698                 seg_res = seg_num - 1;
1699
1700                 /* Merge remaining segments */
1701                 while (seg_res != 0 && i < (num - 1)) {
1702                         i++;
1703
1704                         rxm = rcv_pkts[i];
1705                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1706                         rxm->pkt_len = (uint32_t)(len[i]);
1707                         rxm->data_len = (uint16_t)(len[i]);
1708
1709                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1710
1711                         prev->next = rxm;
1712                         prev = rxm;
1713                         seg_res -= 1;
1714                 }
1715
1716                 if (!seg_res) {
1717                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1718                         nb_rx++;
1719                 }
1720         }
1721
1722         /* The last packet may still need more segments merged */
1723         while (seg_res != 0) {
1724                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1725                                         VIRTIO_MBUF_BURST_SZ);
1726
1727                 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1728                         virtio_rmb(hw->weak_barriers);
1729                         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1730                                                            rcv_cnt);
1731                         uint16_t extra_idx = 0;
1732
1733                         rcv_cnt = num;
1734                         while (extra_idx < rcv_cnt) {
1735                                 rxm = rcv_pkts[extra_idx];
1736                                 rxm->data_off =
1737                                         RTE_PKTMBUF_HEADROOM - hdr_size;
1738                                 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1739                                 rxm->data_len = (uint16_t)(len[extra_idx]);
1740                                 prev->next = rxm;
1741                                 prev = rxm;
1742                                 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1743                                 extra_idx += 1;
1744                         }
1745                         seg_res -= rcv_cnt;
1746
1747                         if (!seg_res) {
1748                                 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1749                                 nb_rx++;
1750                         }
1751                 } else {
1752                         PMD_RX_LOG(ERR, "Not enough segments for packet.");
1754                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1755                         rxvq->stats.errors++;
1756                         break;
1757                 }
1758         }
1759
1760         rxvq->stats.packets += nb_rx;
1761
1762         /* Allocate new mbufs to refill the used descriptors */
1763         if (likely(!virtqueue_full(vq))) {
1764                 /* free_cnt may include mrg descs */
1765                 uint16_t free_cnt = vq->vq_free_cnt;
1766                 struct rte_mbuf *new_pkts[free_cnt];
1767
1768                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1769                         error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1770                                         free_cnt);
1771                         if (unlikely(error)) {
1772                                 for (i = 0; i < free_cnt; i++)
1773                                         rte_pktmbuf_free(new_pkts[i]);
1774                         }
1775                         nb_enqueued += free_cnt;
1776                 } else {
1777                         struct rte_eth_dev *dev =
1778                                 &rte_eth_devices[rxvq->port_id];
1779                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1780                 }
1781         }
1782
1783         if (likely(nb_enqueued)) {
1784                 vq_update_avail_idx(vq);
1785
1786                 if (unlikely(virtqueue_kick_prepare(vq))) {
1787                         virtqueue_notify(vq);
1788                         PMD_RX_LOG(DEBUG, "Notified");
1789                 }
1790         }
1791
1792         return nb_rx;
1793 }
1794
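     /*
      * Receive burst for packed rings with mergeable receive buffers; the
      * segment-merging logic mirrors the split-ring variant, with used
      * descriptors detected through the packed-ring descriptor flags.
      */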
1795 uint16_t
1796 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1797                         struct rte_mbuf **rx_pkts,
1798                         uint16_t nb_pkts)
1799 {
1800         struct virtnet_rx *rxvq = rx_queue;
1801         struct virtqueue *vq = rxvq->vq;
1802         struct virtio_hw *hw = vq->hw;
1803         struct rte_mbuf *rxm;
1804         struct rte_mbuf *prev = NULL;
1805         uint16_t num, nb_rx = 0;
1806         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1807         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1808         uint32_t nb_enqueued = 0;
1809         uint32_t seg_num = 0;
1810         uint32_t seg_res = 0;
1811         uint32_t hdr_size = hw->vtnet_hdr_size;
1812         int32_t i;
1813         int error;
1814
1815         if (unlikely(hw->started == 0))
1816                 return nb_rx;
1817
1819         num = nb_pkts;
1820         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1821                 num = VIRTIO_MBUF_BURST_SZ;
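             /* Trim the burst so it ends on a cache-line boundary of the
              * descriptor ring.
              */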
1822         if (likely(num > DESC_PER_CACHELINE))
1823                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1824
1825         num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1826
1827         for (i = 0; i < num; i++) {
1828                 struct virtio_net_hdr_mrg_rxbuf *header;
1829
1830                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1831                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1832
1833                 rxm = rcv_pkts[i];
1834
1835                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1836                         PMD_RX_LOG(ERR, "Packet drop");
1837                         nb_enqueued++;
1838                         virtio_discard_rxbuf(vq, rxm);
1839                         rxvq->stats.errors++;
1840                         continue;
1841                 }
1842
1843                 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1844                           rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1845                 seg_num = header->num_buffers;
1846
1847                 if (seg_num == 0)
1848                         seg_num = 1;
1849
1850                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1851                 rxm->nb_segs = seg_num;
1852                 rxm->ol_flags = 0;
1853                 rxm->vlan_tci = 0;
1854                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1855                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1856
1857                 rxm->port = rxvq->port_id;
1858                 rx_pkts[nb_rx] = rxm;
1859                 prev = rxm;
1860
1861                 if (hw->has_rx_offload &&
1862                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1863                         virtio_discard_rxbuf(vq, rxm);
1864                         rxvq->stats.errors++;
1865                         continue;
1866                 }
1867
1868                 if (hw->vlan_strip)
1869                         rte_vlan_strip(rx_pkts[nb_rx]);
1870
1871                 seg_res = seg_num - 1;
1872
1873                 /* Merge remaining segments */
1874                 while (seg_res != 0 && i < (num - 1)) {
1875                         i++;
1876
1877                         rxm = rcv_pkts[i];
1878                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1879                         rxm->pkt_len = (uint32_t)(len[i]);
1880                         rxm->data_len = (uint16_t)(len[i]);
1881
1882                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1883
1884                         prev->next = rxm;
1885                         prev = rxm;
1886                         seg_res -= 1;
1887                 }
1888
1889                 if (!seg_res) {
1890                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1891                         nb_rx++;
1892                 }
1893         }
1894
1895         /* The last packet may still need more segments merged */
1896         while (seg_res != 0) {
1897                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1898                                         VIRTIO_MBUF_BURST_SZ);
1899                 uint16_t extra_idx = 0;
1900
1901                 rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1902                                 len, rcv_cnt);
1903                 if (unlikely(rcv_cnt == 0)) {
1904                         PMD_RX_LOG(ERR, "Not enough segments for packet.");
1905                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1906                         rxvq->stats.errors++;
1907                         break;
1908                 }
1909
1910                 while (extra_idx < rcv_cnt) {
1911                         rxm = rcv_pkts[extra_idx];
1912
1913                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1914                         rxm->pkt_len = (uint32_t)(len[extra_idx]);
1915                         rxm->data_len = (uint16_t)(len[extra_idx]);
1916
1917                         prev->next = rxm;
1918                         prev = rxm;
1919                         rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1920                         extra_idx += 1;
1921                 }
1922                 seg_res -= rcv_cnt;
1923                 if (!seg_res) {
1924                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1925                         nb_rx++;
1926                 }
1927         }
1928
1929         rxvq->stats.packets += nb_rx;
1930
1931         /* Allocate new mbufs to refill the used descriptors */
1932         if (likely(!virtqueue_full(vq))) {
1933                 /* free_cnt may include mrg descs */
1934                 uint16_t free_cnt = vq->vq_free_cnt;
1935                 struct rte_mbuf *new_pkts[free_cnt];
1936
1937                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1938                         error = virtqueue_enqueue_recv_refill_packed(vq,
1939                                         new_pkts, free_cnt);
1940                         if (unlikely(error)) {
1941                                 for (i = 0; i < free_cnt; i++)
1942                                         rte_pktmbuf_free(new_pkts[i]);
1943                         }
1944                         nb_enqueued += free_cnt;
1945                 } else {
1946                         struct rte_eth_dev *dev =
1947                                 &rte_eth_devices[rxvq->port_id];
1948                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1949                 }
1950         }
1951
1952         if (likely(nb_enqueued)) {
1953                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1954                         virtqueue_notify(vq);
1955                         PMD_RX_LOG(DEBUG, "Notified");
1956                 }
1957         }
1958
1959         return nb_rx;
1960 }
1961
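     /*
      * Tx prepare callback: validates offload flags (debug builds),
      * inserts the VLAN tag in software when requested, computes the
      * pseudo-header checksums, and patches the TCP checksum of TSO
      * packets into the form virtio expects.
      */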
1962 uint16_t
1963 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1964                         uint16_t nb_pkts)
1965 {
1966         uint16_t nb_tx;
1967         int error;
1968
1969         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1970                 struct rte_mbuf *m = tx_pkts[nb_tx];
1971
1972 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1973                 error = rte_validate_tx_offload(m);
1974                 if (unlikely(error)) {
1975                         rte_errno = -error;
1976                         break;
1977                 }
1978 #endif
1979
1980                 /* Do VLAN tag insertion */
1981                 if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1982                         error = rte_vlan_insert(&m);
1983                         /* rte_vlan_insert() may change the mbuf
1984                          * pointer even on failure
1985                          */
1986                         tx_pkts[nb_tx] = m;
1987
1988                         if (unlikely(error)) {
1989                                 rte_errno = -error;
1990                                 break;
1991                         }
1992                 }
1993
1994                 error = rte_net_intel_cksum_prepare(m);
1995                 if (unlikely(error)) {
1996                         rte_errno = -error;
1997                         break;
1998                 }
1999
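                     /* For TSO, virtio expects the TCP pseudo-header
                      * checksum to include the IP payload length, unlike
                      * the usual PMD convention, so patch it up here.
                      */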
2000                 if (m->ol_flags & PKT_TX_TCP_SEG)
2001                         virtio_tso_fix_cksum(m);
2002         }
2003
2004         return nb_tx;
2005 }
2006
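     /*
      * Transmit burst for packed virtqueues. Single-segment mbufs with
      * enough headroom take a fast path that pushes the virtio-net header
      * in front of the packet data; other mbufs use a descriptor chain.
      */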
2007 uint16_t
2008 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
2009                         uint16_t nb_pkts)
2010 {
2011         struct virtnet_tx *txvq = tx_queue;
2012         struct virtqueue *vq = txvq->vq;
2013         struct virtio_hw *hw = vq->hw;
2014         uint16_t hdr_size = hw->vtnet_hdr_size;
2015         uint16_t nb_tx = 0;
2016         bool in_order = hw->use_inorder_tx;
2017
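             /* PMD-injected packets (hw->inject_pkts) are allowed out
              * even before the device is started.
              */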
2018         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2019                 return nb_tx;
2020
2021         if (unlikely(nb_pkts < 1))
2022                 return nb_pkts;
2023
2024         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2025
2026         if (nb_pkts > vq->vq_free_cnt)
2027                 virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
2028                                            in_order);
2029
2030         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2031                 struct rte_mbuf *txm = tx_pkts[nb_tx];
2032                 int can_push = 0, slots, need;
2033
2034                 /* optimize ring usage */
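                     /* The header can be pushed into the headroom only when
                      * the device accepts any buffer layout and this mbuf is
                      * a single, exclusively owned, suitably aligned segment.
                      */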
2035                 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2036                       vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2037                     rte_mbuf_refcnt_read(txm) == 1 &&
2038                     RTE_MBUF_DIRECT(txm) &&
2039                     txm->nb_segs == 1 &&
2040                     rte_pktmbuf_headroom(txm) >= hdr_size &&
2041                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2042                            __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2043                         can_push = 1;
2044
2045                 /* How many main ring entries are needed for this Tx?
2046                  * any_layout => number of segments
2047                  * default    => number of segments + 1
2048                  */
2049                 slots = txm->nb_segs + !can_push;
2050                 need = slots - vq->vq_free_cnt;
2051
2052                 /* A positive value means we must reclaim used vring descriptors */
2053                 if (unlikely(need > 0)) {
2054                         virtio_xmit_cleanup_packed(vq, need, in_order);
2055                         need = slots - vq->vq_free_cnt;
2056                         if (unlikely(need > 0)) {
2057                                 PMD_TX_LOG(ERR,
2058                                            "No free tx descriptors to transmit");
2059                                 break;
2060                         }
2061                 }
2062
2063                 /* Enqueue Packet buffers */
2064                 if (can_push)
2065                         virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
2066                 else
2067                         virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
2068                                                       in_order);
2069
2070                 virtio_update_packet_stats(&txvq->stats, txm);
2071         }
2072
2073         txvq->stats.packets += nb_tx;
2074
2075         if (likely(nb_tx)) {
2076                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
2077                         virtqueue_notify(vq);
2078                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2079                 }
2080         }
2081
2082         return nb_tx;
2083 }
2084
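     /*
      * Transmit burst for split virtqueues, choosing per packet between
      * pushing the header into the mbuf headroom, an indirect descriptor
      * table, or a plain descriptor chain.
      */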
2085 uint16_t
2086 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2087 {
2088         struct virtnet_tx *txvq = tx_queue;
2089         struct virtqueue *vq = txvq->vq;
2090         struct virtio_hw *hw = vq->hw;
2091         uint16_t hdr_size = hw->vtnet_hdr_size;
2092         uint16_t nb_used, nb_tx = 0;
2093
2094         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2095                 return nb_tx;
2096
2097         if (unlikely(nb_pkts < 1))
2098                 return nb_pkts;
2099
2100         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2101         nb_used = VIRTQUEUE_NUSED(vq);
2102
2103         virtio_rmb(hw->weak_barriers);
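             /* Reclaim completed descriptors once the backlog of used
              * entries crosses the free threshold.
              */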
2104         if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2105                 virtio_xmit_cleanup(vq, nb_used);
2106
2107         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2108                 struct rte_mbuf *txm = tx_pkts[nb_tx];
2109                 int can_push = 0, use_indirect = 0, slots, need;
2110
2111                 /* optimize ring usage */
2112                 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2113                       vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2114                     rte_mbuf_refcnt_read(txm) == 1 &&
2115                     RTE_MBUF_DIRECT(txm) &&
2116                     txm->nb_segs == 1 &&
2117                     rte_pktmbuf_headroom(txm) >= hdr_size &&
2118                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2119                                    __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2120                         can_push = 1;
2121                 else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
2122                          txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
2123                         use_indirect = 1;
2124
2125                 /* How many main ring entries are needed for this Tx?
2126                  * any_layout => number of segments
2127                  * indirect   => 1
2128                  * default    => number of segments + 1
2129                  */
2130                 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
2131                 need = slots - vq->vq_free_cnt;
2132
2133                 /* A positive value means we must reclaim used vring descriptors */
2134                 if (unlikely(need > 0)) {
2135                         nb_used = VIRTQUEUE_NUSED(vq);
2136                         virtio_rmb(hw->weak_barriers);
2137                         need = RTE_MIN(need, (int)nb_used);
2138
2139                         virtio_xmit_cleanup(vq, need);
2140                         need = slots - vq->vq_free_cnt;
2141                         if (unlikely(need > 0)) {
2142                                 PMD_TX_LOG(ERR,
2143                                            "No free tx descriptors to transmit");
2144                                 break;
2145                         }
2146                 }
2147
2148                 /* Enqueue Packet buffers */
2149                 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
2150                         can_push, 0);
2151
2152                 virtio_update_packet_stats(&txvq->stats, txm);
2153         }
2154
2155         txvq->stats.packets += nb_tx;
2156
2157         if (likely(nb_tx)) {
2158                 vq_update_avail_idx(vq);
2159
2160                 if (unlikely(virtqueue_kick_prepare(vq))) {
2161                         virtqueue_notify(vq);
2162                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2163                 }
2164         }
2165
2166         return nb_tx;
2167 }
2168
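     /*
      * Reclaim used descriptors in order until at least 'need' slots are
      * free; returns how many of the requested slots are still missing,
      * so a positive value means the shortage persists.
      */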
2169 static __rte_always_inline int
2170 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
2171 {
2172         uint16_t nb_used, nb_clean, nb_descs;
2173         struct virtio_hw *hw = vq->hw;
2174
2175         nb_descs = vq->vq_free_cnt + need;
2176         nb_used = VIRTQUEUE_NUSED(vq);
2177         virtio_rmb(hw->weak_barriers);
2178         nb_clean = RTE_MIN(need, (int)nb_used);
2179
2180         virtio_xmit_cleanup_inorder(vq, nb_clean);
2181
2182         return nb_descs - vq->vq_free_cnt;
2183 }
2184
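     /*
      * Transmit burst for VIRTIO_F_IN_ORDER: packets whose header fits in
      * the mbuf headroom are batched and enqueued back to back; any packet
      * that cannot take that path first flushes the pending batch so the
      * ring keeps packet order.
      */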
2185 uint16_t
2186 virtio_xmit_pkts_inorder(void *tx_queue,
2187                         struct rte_mbuf **tx_pkts,
2188                         uint16_t nb_pkts)
2189 {
2190         struct virtnet_tx *txvq = tx_queue;
2191         struct virtqueue *vq = txvq->vq;
2192         struct virtio_hw *hw = vq->hw;
2193         uint16_t hdr_size = hw->vtnet_hdr_size;
2194         uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
2195         struct rte_mbuf *inorder_pkts[nb_pkts];
2196         int need;
2197
2198         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2199                 return nb_tx;
2200
2201         if (unlikely(nb_pkts < 1))
2202                 return nb_pkts;
2203
2204         VIRTQUEUE_DUMP(vq);
2205         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2206         nb_used = VIRTQUEUE_NUSED(vq);
2207
2208         virtio_rmb(hw->weak_barriers);
2209         if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2210                 virtio_xmit_cleanup_inorder(vq, nb_used);
2211
2212         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2213                 struct rte_mbuf *txm = tx_pkts[nb_tx];
2214                 int slots;
2215
2216                 /* optimize ring usage */
2217                 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2218                      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2219                      rte_mbuf_refcnt_read(txm) == 1 &&
2220                      RTE_MBUF_DIRECT(txm) &&
2221                      txm->nb_segs == 1 &&
2222                      rte_pktmbuf_headroom(txm) >= hdr_size &&
2223                      rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2224                                 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
2225                         inorder_pkts[nb_inorder_pkts] = txm;
2226                         nb_inorder_pkts++;
2227
2228                         continue;
2229                 }
2230
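                     /* This packet cannot take the in-order fast path;
                      * flush the batch collected so far to keep ring order
                      * aligned with packet order.
                      */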
2231                 if (nb_inorder_pkts) {
2232                         need = nb_inorder_pkts - vq->vq_free_cnt;
2233                         if (unlikely(need > 0)) {
2234                                 need = virtio_xmit_try_cleanup_inorder(vq,
2235                                                                        need);
2236                                 if (unlikely(need > 0)) {
2237                                         PMD_TX_LOG(ERR,
2238                                                 "No free tx descriptors to "
2239                                                 "transmit");
2240                                         break;
2241                                 }
2242                         }
2243                         virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2244                                                         nb_inorder_pkts);
2245                         nb_inorder_pkts = 0;
2246                 }
2247
2248                 slots = txm->nb_segs + 1;
2249                 need = slots - vq->vq_free_cnt;
2250                 if (unlikely(need > 0)) {
2251                         need = virtio_xmit_try_cleanup_inorder(vq, slots);
2252
2253                         if (unlikely(need > 0)) {
2254                                 PMD_TX_LOG(ERR,
2255                                         "No free tx descriptors to transmit");
2256                                 break;
2257                         }
2258                 }
2259                 /* Enqueue Packet buffers */
2260                 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2261
2262                 virtio_update_packet_stats(&txvq->stats, txm);
2263         }
2264
2265         /* Transmit all inorder packets */
2266         if (nb_inorder_pkts) {
2267                 need = nb_inorder_pkts - vq->vq_free_cnt;
2268                 if (unlikely(need > 0)) {
2269                         need = virtio_xmit_try_cleanup_inorder(vq, need);
2271                         if (unlikely(need > 0)) {
2272                                 PMD_TX_LOG(ERR,
2273                                         "No free tx descriptors to transmit");
2274                                 nb_inorder_pkts = vq->vq_free_cnt;
2275                                 nb_tx -= need;
2276                         }
2277                 }
2278
2279                 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2280                                                 nb_inorder_pkts);
2281         }
2282
2283         txvq->stats.packets += nb_tx;
2284
2285         if (likely(nb_tx)) {
2286                 vq_update_avail_idx(vq);
2287
2288                 if (unlikely(virtqueue_kick_prepare(vq))) {
2289                         virtqueue_notify(vq);
2290                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2291                 }
2292         }
2293
2294         VIRTQUEUE_DUMP(vq);
2295
2296         return nb_tx;
2297 }