1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41
42 int
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 {
45         struct virtnet_rx *rxvq = rxq;
46         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
47
48         return virtqueue_nused(vq) >= offset;
49 }
50
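/*
 * In-order free path: with VIRTIO_F_IN_ORDER descriptors are consumed
 * contiguously, so releasing them only means bumping the free counter
 * and recording the last freed slot as the new tail.
 */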
51 void
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 {
54         vq->vq_free_cnt += num;
55         vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
56 }
57
58 void
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 {
61         struct vring_desc *dp, *dp_tail;
62         struct vq_desc_extra *dxp;
63         uint16_t desc_idx_last = desc_idx;
64
65         dp  = &vq->vq_split.ring.desc[desc_idx];
66         dxp = &vq->vq_descx[desc_idx];
67         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68         if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69                 while (dp->flags & VRING_DESC_F_NEXT) {
70                         desc_idx_last = dp->next;
71                         dp = &vq->vq_split.ring.desc[dp->next];
72                 }
73         }
74         dxp->ndescs = 0;
75
76         /*
77          * We must append the existing free chain, if any, to the end of
78          * the newly freed chain. If the virtqueue was completely used,
79          * then the head would be VQ_RING_DESC_CHAIN_END.
80          */
81         if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82                 vq->vq_desc_head_idx = desc_idx;
83         } else {
84                 dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85                 dp_tail->next = desc_idx;
86         }
87
88         vq->vq_desc_tail_idx = desc_idx_last;
89         dp->next = VQ_RING_DESC_CHAIN_END;
90 }
91
92 void
93 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
94 {
95         uint32_t s = mbuf->pkt_len;
96         struct rte_ether_addr *ea;
97
98         stats->bytes += s;
99
100         if (s == 64) {
101                 stats->size_bins[1]++;
102         } else if (s > 64 && s < 1024) {
103                 uint32_t bin;
104
105                 /* count leading zeros to find the correct size bin */
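                /* e.g. s = 128..255: clz = 24, bin = 32 - 24 - 5 = 3 (size_bins[3]) */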
106                 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
107                 stats->size_bins[bin]++;
108         } else {
109                 if (s < 64)
110                         stats->size_bins[0]++;
111                 else if (s < 1519)
112                         stats->size_bins[6]++;
113                 else
114                         stats->size_bins[7]++;
115         }
116
117         ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
118         if (rte_is_multicast_ether_addr(ea)) {
119                 if (rte_is_broadcast_ether_addr(ea))
120                         stats->broadcast++;
121                 else
122                         stats->multicast++;
123         }
124 }
125
126 static inline void
127 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
128 {
129         VIRTIO_DUMP_PACKET(m, m->data_len);
130
131         virtio_update_packet_stats(&rxvq->stats, m);
132 }
133
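/*
 * Dequeue from a packed ring: poll desc_is_used() (it compares the
 * descriptor's avail/used bits against the current wrap counter), look the
 * mbuf up via the buffer id stored in the descriptor, and toggle the wrap
 * counter whenever the consumer index wraps past the ring size.
 */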
134 static uint16_t
135 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
136                                   struct rte_mbuf **rx_pkts,
137                                   uint32_t *len,
138                                   uint16_t num)
139 {
140         struct rte_mbuf *cookie;
141         uint16_t used_idx;
142         uint16_t id;
143         struct vring_packed_desc *desc;
144         uint16_t i;
145
146         desc = vq->vq_packed.ring.desc;
147
148         for (i = 0; i < num; i++) {
149                 used_idx = vq->vq_used_cons_idx;
150                 /* desc_is_used has a load-acquire or rte_io_rmb inside
151                  * and waits for a used descriptor in the virtqueue.
152                  */
153                 if (!desc_is_used(&desc[used_idx], vq))
154                         return i;
155                 len[i] = desc[used_idx].len;
156                 id = desc[used_idx].id;
157                 cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
158                 if (unlikely(cookie == NULL)) {
159                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
160                                 vq->vq_used_cons_idx);
161                         break;
162                 }
163                 rte_prefetch0(cookie);
164                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
165                 rx_pkts[i] = cookie;
166
167                 vq->vq_free_cnt++;
168                 vq->vq_used_cons_idx++;
169                 if (vq->vq_used_cons_idx >= vq->vq_nentries) {
170                         vq->vq_used_cons_idx -= vq->vq_nentries;
171                         vq->vq_packed.used_wrap_counter ^= 1;
172                 }
173         }
174
175         return i;
176 }
177
178 static uint16_t
179 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
180                            uint32_t *len, uint16_t num)
181 {
182         struct vring_used_elem *uep;
183         struct rte_mbuf *cookie;
184         uint16_t used_idx, desc_idx;
185         uint16_t i;
186
187         /*  Caller does the check */
188         for (i = 0; i < num ; i++) {
189                 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
190                 uep = &vq->vq_split.ring.used->ring[used_idx];
191                 desc_idx = (uint16_t) uep->id;
192                 len[i] = uep->len;
193                 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
194
195                 if (unlikely(cookie == NULL)) {
196                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
197                                 vq->vq_used_cons_idx);
198                         break;
199                 }
200
201                 rte_prefetch0(cookie);
202                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
203                 rx_pkts[i]  = cookie;
204                 vq->vq_used_cons_idx++;
205                 vq_ring_free_chain(vq, desc_idx);
206                 vq->vq_descx[desc_idx].cookie = NULL;
207         }
208
209         return i;
210 }
211
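/*
 * In-order dequeue for split rings: the used index equals the descriptor
 * index, so the mbuf cookie is looked up directly and all consumed
 * descriptors are released in one vq_ring_free_inorder() call at the end.
 */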
212 static uint16_t
213 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
214                         struct rte_mbuf **rx_pkts,
215                         uint32_t *len,
216                         uint16_t num)
217 {
218         struct vring_used_elem *uep;
219         struct rte_mbuf *cookie;
220         uint16_t used_idx = 0;
221         uint16_t i;
222
223         if (unlikely(num == 0))
224                 return 0;
225
226         for (i = 0; i < num; i++) {
227                 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
228                 /* Desc idx same as used idx */
229                 uep = &vq->vq_split.ring.used->ring[used_idx];
230                 len[i] = uep->len;
231                 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
232
233                 if (unlikely(cookie == NULL)) {
234                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
235                                 vq->vq_used_cons_idx);
236                         break;
237                 }
238
239                 rte_prefetch0(cookie);
240                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
241                 rx_pkts[i]  = cookie;
242                 vq->vq_used_cons_idx++;
243                 vq->vq_descx[used_idx].cookie = NULL;
244         }
245
246         vq_ring_free_inorder(vq, used_idx, i);
247         return i;
248 }
249
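/*
 * Rx refill helpers below place each mbuf so that the device writes the
 * virtio-net header into the mbuf headroom: the buffer address starts
 * vtnet_hdr_size bytes before the data offset and the length covers the
 * header plus the remaining buffer space.
 */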
250 static inline int
251 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
252                         struct rte_mbuf **cookies,
253                         uint16_t num)
254 {
255         struct vq_desc_extra *dxp;
256         struct virtio_hw *hw = vq->hw;
257         struct vring_desc *start_dp;
258         uint16_t head_idx, idx, i = 0;
259
260         if (unlikely(vq->vq_free_cnt == 0))
261                 return -ENOSPC;
262         if (unlikely(vq->vq_free_cnt < num))
263                 return -EMSGSIZE;
264
265         head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
266         start_dp = vq->vq_split.ring.desc;
267
268         while (i < num) {
269                 idx = head_idx & (vq->vq_nentries - 1);
270                 dxp = &vq->vq_descx[idx];
271                 dxp->cookie = (void *)cookies[i];
272                 dxp->ndescs = 1;
273
274                 start_dp[idx].addr = cookies[i]->buf_iova +
275                         RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
276                 start_dp[idx].len = cookies[i]->buf_len -
277                         RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
278                 start_dp[idx].flags =  VRING_DESC_F_WRITE;
279
280                 vq_update_avail_ring(vq, idx);
281                 head_idx++;
282                 i++;
283         }
284
285         vq->vq_desc_head_idx += num;
286         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
287         return 0;
288 }
289
290 static inline int
291 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
292                                 uint16_t num)
293 {
294         struct vq_desc_extra *dxp;
295         struct virtio_hw *hw = vq->hw;
296         struct vring_desc *start_dp = vq->vq_split.ring.desc;
297         uint16_t idx, i;
298
299         if (unlikely(vq->vq_free_cnt == 0))
300                 return -ENOSPC;
301         if (unlikely(vq->vq_free_cnt < num))
302                 return -EMSGSIZE;
303
304         if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
305                 return -EFAULT;
306
307         for (i = 0; i < num; i++) {
308                 idx = vq->vq_desc_head_idx;
309                 dxp = &vq->vq_descx[idx];
310                 dxp->cookie = (void *)cookie[i];
311                 dxp->ndescs = 1;
312
313                 start_dp[idx].addr = cookie[i]->buf_iova +
314                         RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
315                 start_dp[idx].len = cookie[i]->buf_len -
316                         RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
317                 start_dp[idx].flags = VRING_DESC_F_WRITE;
318                 vq->vq_desc_head_idx = start_dp[idx].next;
319                 vq_update_avail_ring(vq, idx);
320                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
321                         vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
322                         break;
323                 }
324         }
325
326         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
327
328         return 0;
329 }
330
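/*
 * Packed-ring refill: the address and length are written first and the
 * descriptor is then published with virtqueue_store_flags_packed(), so the
 * flags (carrying the avail/used wrap bits) become visible to the device
 * last. The cached flags are flipped whenever vq_avail_idx wraps.
 */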
331 static inline void
332 virtqueue_refill_single_packed(struct virtqueue *vq,
333                                struct vring_packed_desc *dp,
334                                struct rte_mbuf *cookie)
335 {
336         uint16_t flags = vq->vq_packed.cached_flags;
337         struct virtio_hw *hw = vq->hw;
338
339         dp->addr = cookie->buf_iova +
340                         RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
341         dp->len = cookie->buf_len -
342                 RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
343
344         virtqueue_store_flags_packed(dp, flags,
345                                      hw->weak_barriers);
346
347         if (++vq->vq_avail_idx >= vq->vq_nentries) {
348                 vq->vq_avail_idx -= vq->vq_nentries;
349                 vq->vq_packed.cached_flags ^=
350                         VRING_PACKED_DESC_F_AVAIL_USED;
351                 flags = vq->vq_packed.cached_flags;
352         }
353 }
354
355 static inline int
356 virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
357                                      struct rte_mbuf **cookie, uint16_t num)
358 {
359         struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
360         struct vq_desc_extra *dxp;
361         uint16_t idx;
362         int i;
363
364         if (unlikely(vq->vq_free_cnt == 0))
365                 return -ENOSPC;
366         if (unlikely(vq->vq_free_cnt < num))
367                 return -EMSGSIZE;
368
369         for (i = 0; i < num; i++) {
370                 idx = vq->vq_avail_idx;
371                 dxp = &vq->vq_descx[idx];
372                 dxp->cookie = (void *)cookie[i];
373                 dxp->ndescs = 1;
374
375                 virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
376         }
377         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
378         return 0;
379 }
380
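/*
 * Same as the _init variant above, except that once the ring has been in
 * use the device may have returned buffers out of slot order, so the
 * descriptor extra data is indexed by the id read back from the ring slot
 * rather than by the avail index itself.
 */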
381 static inline int
382 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
383                                      struct rte_mbuf **cookie, uint16_t num)
384 {
385         struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
386         struct vq_desc_extra *dxp;
387         uint16_t idx, did;
388         int i;
389
390         if (unlikely(vq->vq_free_cnt == 0))
391                 return -ENOSPC;
392         if (unlikely(vq->vq_free_cnt < num))
393                 return -EMSGSIZE;
394
395         for (i = 0; i < num; i++) {
396                 idx = vq->vq_avail_idx;
397                 did = start_dp[idx].id;
398                 dxp = &vq->vq_descx[did];
399                 dxp->cookie = (void *)cookie[i];
400                 dxp->ndescs = 1;
401
402                 virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
403         }
404         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
405         return 0;
406 }
407
408 /* When doing TSO, the TCP segment length is not included in the pseudo
409  * header checksum of the packet given to the PMD, but virtio expects a
410  * full pseudo header checksum, so it is added back in below.
411  */
412 static void
413 virtio_tso_fix_cksum(struct rte_mbuf *m)
414 {
415         /* common case: header is not fragmented */
416         if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
417                         m->l4_len)) {
418                 struct rte_ipv4_hdr *iph;
419                 struct rte_ipv6_hdr *ip6h;
420                 struct rte_tcp_hdr *th;
421                 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
422                 uint32_t tmp;
423
424                 iph = rte_pktmbuf_mtod_offset(m,
425                                         struct rte_ipv4_hdr *, m->l2_len);
426                 th = RTE_PTR_ADD(iph, m->l3_len);
427                 if ((iph->version_ihl >> 4) == 4) {
428                         iph->hdr_checksum = 0;
429                         iph->hdr_checksum = rte_ipv4_cksum(iph);
430                         ip_len = iph->total_length;
431                         ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
432                                 m->l3_len);
433                 } else {
434                         ip6h = (struct rte_ipv6_hdr *)iph;
435                         ip_paylen = ip6h->payload_len;
436                 }
437
438                 /* calculate the new phdr checksum not including ip_paylen */
439                 prev_cksum = th->cksum;
440                 tmp = prev_cksum;
441                 tmp += ip_paylen;
442                 tmp = (tmp & 0xffff) + (tmp >> 16);
443                 new_cksum = tmp;
444
445                 /* replace it in the packet */
446                 th->cksum = new_cksum;
447         }
448 }
449
450
451
452
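/*
 * In-order Tx enqueue: each packet occupies exactly one descriptor and the
 * virtio-net header is prepended into the mbuf headroom right in front of
 * the packet data (the caller has already checked there is enough room).
 */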
453 static inline void
454 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
455                         struct rte_mbuf **cookies,
456                         uint16_t num)
457 {
458         struct vq_desc_extra *dxp;
459         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
460         struct vring_desc *start_dp;
461         struct virtio_net_hdr *hdr;
462         uint16_t idx;
463         int16_t head_size = vq->hw->vtnet_hdr_size;
464         uint16_t i = 0;
465
466         idx = vq->vq_desc_head_idx;
467         start_dp = vq->vq_split.ring.desc;
468
469         while (i < num) {
470                 idx = idx & (vq->vq_nentries - 1);
471                 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
472                 dxp->cookie = (void *)cookies[i];
473                 dxp->ndescs = 1;
474                 virtio_update_packet_stats(&txvq->stats, cookies[i]);
475
476                 hdr = rte_pktmbuf_mtod_offset(cookies[i],
477                                 struct virtio_net_hdr *, -head_size);
478
479                 /* if offload disabled, hdr is not zeroed yet, do it now */
480                 if (!vq->hw->has_tx_offload)
481                         virtqueue_clear_net_hdr(hdr);
482                 else
483                         virtqueue_xmit_offload(hdr, cookies[i]);
484
485                 start_dp[idx].addr  = rte_mbuf_data_iova(cookies[i]) - head_size;
486                 start_dp[idx].len   = cookies[i]->data_len + head_size;
487                 start_dp[idx].flags = 0;
488
489
490                 vq_update_avail_ring(vq, idx);
491
492                 idx++;
493                 i++;
494         }
495
496         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
497         vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
498 }
499
500 static inline void
501 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
502                                    struct rte_mbuf *cookie,
503                                    int in_order)
504 {
505         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
506         struct vring_packed_desc *dp;
507         struct vq_desc_extra *dxp;
508         uint16_t idx, id, flags;
509         int16_t head_size = vq->hw->vtnet_hdr_size;
510         struct virtio_net_hdr *hdr;
511
512         id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
513         idx = vq->vq_avail_idx;
514         dp = &vq->vq_packed.ring.desc[idx];
515
516         dxp = &vq->vq_descx[id];
517         dxp->ndescs = 1;
518         dxp->cookie = cookie;
519
520         flags = vq->vq_packed.cached_flags;
521
522         /* prepend cannot fail, checked by caller */
523         hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
524                                       -head_size);
525
526         /* if offload disabled, hdr is not zeroed yet, do it now */
527         if (!vq->hw->has_tx_offload)
528                 virtqueue_clear_net_hdr(hdr);
529         else
530                 virtqueue_xmit_offload(hdr, cookie);
531
532         dp->addr = rte_mbuf_data_iova(cookie) - head_size;
533         dp->len  = cookie->data_len + head_size;
534         dp->id   = id;
535
536         if (++vq->vq_avail_idx >= vq->vq_nentries) {
537                 vq->vq_avail_idx -= vq->vq_nentries;
538                 vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
539         }
540
541         vq->vq_free_cnt--;
542
543         if (!in_order) {
544                 vq->vq_desc_head_idx = dxp->next;
545                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
546                         vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
547         }
548
549         virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
550 }
551
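/*
 * Generic split-ring Tx enqueue. Three descriptor layouts are possible:
 * - can_push: the header is prepended into the mbuf headroom and the mbuf
 *   chain is described directly in the ring;
 * - use_indirect: a single ring slot points at the per-slot indirect table
 *   in the reserved tx region, whose first element is preset to the header;
 * - otherwise: the first ring slot points at the header copy kept in the
 *   reserved region and the data segments are chained behind it.
 */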
552 static inline void
553 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
554                         uint16_t needed, int use_indirect, int can_push,
555                         int in_order)
556 {
557         struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
558         struct vq_desc_extra *dxp;
559         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
560         struct vring_desc *start_dp;
561         uint16_t seg_num = cookie->nb_segs;
562         uint16_t head_idx, idx;
563         int16_t head_size = vq->hw->vtnet_hdr_size;
564         bool prepend_header = false;
565         struct virtio_net_hdr *hdr;
566
567         head_idx = vq->vq_desc_head_idx;
568         idx = head_idx;
569         if (in_order)
570                 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
571         else
572                 dxp = &vq->vq_descx[idx];
573         dxp->cookie = (void *)cookie;
574         dxp->ndescs = needed;
575
576         start_dp = vq->vq_split.ring.desc;
577
578         if (can_push) {
579                 /* prepend cannot fail, checked by caller */
580                 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
581                                               -head_size);
582                 prepend_header = true;
583
584                 /* if offload disabled, it is not zeroed below, do it now */
585                 if (!vq->hw->has_tx_offload)
586                         virtqueue_clear_net_hdr(hdr);
587         } else if (use_indirect) {
588                 /* setup tx ring slot to point to indirect
589                  * descriptor list stored in reserved region.
590                  *
591                  * the first slot in indirect ring is already preset
592                  * to point to the header in reserved region
593                  */
594                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
595                         RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
596                 start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
597                 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
598                 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
599
600                 /* loop below will fill in rest of the indirect elements */
601                 start_dp = txr[idx].tx_indir;
602                 idx = 1;
603         } else {
604                 /* setup first tx ring slot to point to header
605                  * stored in reserved region.
606                  */
607                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
608                         RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
609                 start_dp[idx].len   = vq->hw->vtnet_hdr_size;
610                 start_dp[idx].flags = VRING_DESC_F_NEXT;
611                 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
612
613                 idx = start_dp[idx].next;
614         }
615
616         if (vq->hw->has_tx_offload)
617                 virtqueue_xmit_offload(hdr, cookie);
618
619         do {
620                 start_dp[idx].addr  = rte_mbuf_data_iova(cookie);
621                 start_dp[idx].len   = cookie->data_len;
622                 if (prepend_header) {
623                         start_dp[idx].addr -= head_size;
624                         start_dp[idx].len += head_size;
625                         prepend_header = false;
626                 }
627                 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
628                 idx = start_dp[idx].next;
629         } while ((cookie = cookie->next) != NULL);
630
631         if (use_indirect)
632                 idx = vq->vq_split.ring.desc[head_idx].next;
633
634         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
635
636         vq->vq_desc_head_idx = idx;
637         vq_update_avail_ring(vq, head_idx);
638
639         if (!in_order) {
640                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
641                         vq->vq_desc_tail_idx = idx;
642         }
643 }
644
645 void
646 virtio_dev_cq_start(struct rte_eth_dev *dev)
647 {
648         struct virtio_hw *hw = dev->data->dev_private;
649
650         if (hw->cvq) {
651                 rte_spinlock_init(&hw->cvq->lock);
652                 VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
653         }
654 }
655
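/*
 * Virtio-net interleaves receive and transmit queues (RX0, TX0, RX1, TX1,
 * ...), so ethdev queue N maps to virtqueue 2 * N + VTNET_SQ_RQ_QUEUE_IDX
 * for Rx and 2 * N + VTNET_SQ_TQ_QUEUE_IDX for Tx.
 */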
656 int
657 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
658                         uint16_t queue_idx,
659                         uint16_t nb_desc,
660                         unsigned int socket_id __rte_unused,
661                         const struct rte_eth_rxconf *rx_conf,
662                         struct rte_mempool *mp)
663 {
664         uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
665         struct virtio_hw *hw = dev->data->dev_private;
666         struct virtqueue *vq = hw->vqs[vq_idx];
667         struct virtnet_rx *rxvq;
668         uint16_t rx_free_thresh;
669         uint16_t buf_size;
670         const char *error;
671
672         PMD_INIT_FUNC_TRACE();
673
674         if (rx_conf->rx_deferred_start) {
675                 PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
676                 return -EINVAL;
677         }
678
679         buf_size = virtio_rx_mem_pool_buf_size(mp);
680         if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
681                                      hw->rx_ol_scatter, &error)) {
682                 PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
683                              queue_idx, error);
684                 return -EINVAL;
685         }
686
687         rx_free_thresh = rx_conf->rx_free_thresh;
688         if (rx_free_thresh == 0)
689                 rx_free_thresh =
690                         RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
691
692         if (rx_free_thresh & 0x3) {
693                 RTE_LOG(ERR, PMD, "rx_free_thresh must be a multiple of four."
694                         " (rx_free_thresh=%u port=%u queue=%u)\n",
695                         rx_free_thresh, dev->data->port_id, queue_idx);
696                 return -EINVAL;
697         }
698
699         if (rx_free_thresh >= vq->vq_nentries) {
700                 RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
701                         "number of RX entries (%u)."
702                         " (rx_free_thresh=%u port=%u queue=%u)\n",
703                         vq->vq_nentries,
704                         rx_free_thresh, dev->data->port_id, queue_idx);
705                 return -EINVAL;
706         }
707         vq->vq_free_thresh = rx_free_thresh;
708
709         if (nb_desc == 0 || nb_desc > vq->vq_nentries)
710                 nb_desc = vq->vq_nentries;
711         vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
712
713         rxvq = &vq->rxq;
714         rxvq->queue_id = queue_idx;
715         rxvq->mpool = mp;
716         dev->data->rx_queues[queue_idx] = rxvq;
717
718         return 0;
719 }
720
721 int
722 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
723 {
724         uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
725         struct virtio_hw *hw = dev->data->dev_private;
726         struct virtqueue *vq = hw->vqs[vq_idx];
727         struct virtnet_rx *rxvq = &vq->rxq;
728         struct rte_mbuf *m;
729         uint16_t desc_idx;
730         int error, nbufs, i;
731         bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
732
733         PMD_INIT_FUNC_TRACE();
734
735         /* Allocate blank mbufs for each rx descriptor */
736         nbufs = 0;
737
738         if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
739                 for (desc_idx = 0; desc_idx < vq->vq_nentries;
740                      desc_idx++) {
741                         vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
742                         vq->vq_split.ring.desc[desc_idx].flags =
743                                 VRING_DESC_F_WRITE;
744                 }
745
746                 virtio_rxq_vec_setup(rxvq);
747         }
748
749         memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
750         for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
751                 vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
752
753         if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
754                 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
755                         virtio_rxq_rearm_vec(rxvq);
756                         nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
757                 }
758         } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
759                 if ((!virtqueue_full(vq))) {
760                         uint16_t free_cnt = vq->vq_free_cnt;
761                         struct rte_mbuf *pkts[free_cnt];
762
763                         if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
764                                 free_cnt)) {
765                                 error = virtqueue_enqueue_refill_inorder(vq,
766                                                 pkts,
767                                                 free_cnt);
768                                 if (unlikely(error)) {
769                                         for (i = 0; i < free_cnt; i++)
770                                                 rte_pktmbuf_free(pkts[i]);
771                                 }
772                         }
773
774                         nbufs += free_cnt;
775                         vq_update_avail_idx(vq);
776                 }
777         } else {
778                 while (!virtqueue_full(vq)) {
779                         m = rte_mbuf_raw_alloc(rxvq->mpool);
780                         if (m == NULL)
781                                 break;
782
783                         /* Enqueue allocated buffers */
784                         if (virtio_with_packed_queue(vq->hw))
785                                 error = virtqueue_enqueue_recv_refill_packed_init(vq,
786                                                 &m, 1);
787                         else
788                                 error = virtqueue_enqueue_recv_refill(vq,
789                                                 &m, 1);
790                         if (error) {
791                                 rte_pktmbuf_free(m);
792                                 break;
793                         }
794                         nbufs++;
795                 }
796
797                 if (!virtio_with_packed_queue(vq->hw))
798                         vq_update_avail_idx(vq);
799         }
800
801         PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
802
803         VIRTQUEUE_DUMP(vq);
804
805         return 0;
806 }
807
808 /*
809  * struct rte_eth_dev *dev: Used to update dev
810  * uint16_t nb_desc: Defaults to values read from config space
811  * unsigned int socket_id: Used to allocate memzone
812  * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
813  * uint16_t queue_idx: Just used as an index in dev txq list
814  */
815 int
816 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
817                         uint16_t queue_idx,
818                         uint16_t nb_desc,
819                         unsigned int socket_id __rte_unused,
820                         const struct rte_eth_txconf *tx_conf)
821 {
822         uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
823         struct virtio_hw *hw = dev->data->dev_private;
824         struct virtqueue *vq = hw->vqs[vq_idx];
825         struct virtnet_tx *txvq;
826         uint16_t tx_free_thresh;
827
828         PMD_INIT_FUNC_TRACE();
829
830         if (tx_conf->tx_deferred_start) {
831                 PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
832                 return -EINVAL;
833         }
834
835         if (nb_desc == 0 || nb_desc > vq->vq_nentries)
836                 nb_desc = vq->vq_nentries;
837         vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
838
839         txvq = &vq->txq;
840         txvq->queue_id = queue_idx;
841
842         tx_free_thresh = tx_conf->tx_free_thresh;
843         if (tx_free_thresh == 0)
844                 tx_free_thresh =
845                         RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
846
847         if (tx_free_thresh >= (vq->vq_nentries - 3)) {
848                 PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
849                         "number of TX entries minus 3 (%u)."
850                         " (tx_free_thresh=%u port=%u queue=%u)\n",
851                         vq->vq_nentries - 3,
852                         tx_free_thresh, dev->data->port_id, queue_idx);
853                 return -EINVAL;
854         }
855
856         vq->vq_free_thresh = tx_free_thresh;
857
858         dev->data->tx_queues[queue_idx] = txvq;
859         return 0;
860 }
861
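/*
 * For in-order split rings the last descriptor's next pointer is linked
 * back to slot 0 below, so the free descriptor chain forms a ring.
 */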
862 int
863 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
864                                 uint16_t queue_idx)
865 {
866         uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
867         struct virtio_hw *hw = dev->data->dev_private;
868         struct virtqueue *vq = hw->vqs[vq_idx];
869
870         PMD_INIT_FUNC_TRACE();
871
872         if (!virtio_with_packed_queue(hw)) {
873                 if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
874                         vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
875         }
876
877         VIRTQUEUE_DUMP(vq);
878
879         return 0;
880 }
881
882 static inline void
883 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
884 {
885         int error;
886         /*
887          * Requeue the discarded mbuf. This should always be
888          * successful since it was just dequeued.
889          */
890         if (virtio_with_packed_queue(vq->hw))
891                 error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
892         else
893                 error = virtqueue_enqueue_recv_refill(vq, &m, 1);
894
895         if (unlikely(error)) {
896                 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
897                 rte_pktmbuf_free(m);
898         }
899 }
900
901 static inline void
902 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
903 {
904         int error;
905
906         error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
907         if (unlikely(error)) {
908                 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
909                 rte_pktmbuf_free(m);
910         }
911 }
912
913 /* Optionally fill Rx offload information (checksum, LRO) into the mbuf */
914 static inline int
915 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
916 {
917         struct rte_net_hdr_lens hdr_lens;
918         uint32_t hdrlen, ptype;
919         int l4_supported = 0;
920
921         /* nothing to do */
922         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
923                 return 0;
924
925         m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
926
927         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
928         m->packet_type = ptype;
929         if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
930             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
931             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
932                 l4_supported = 1;
933
934         if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
935                 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
936                 if (hdr->csum_start <= hdrlen && l4_supported) {
937                         m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
938                 } else {
939                         /* Unknown proto or tunnel, do sw cksum. We can assume
940                          * the cksum field is in the first segment since the
941                          * buffers we provided to the host are large enough.
942                          * In case of SCTP, this will be wrong since it's a CRC
943                          * but there's nothing we can do.
944                          */
945                         uint16_t csum = 0, off;
946
947                         if (rte_raw_cksum_mbuf(m, hdr->csum_start,
948                                 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
949                                 &csum) < 0)
950                                 return -EINVAL;
951                         if (likely(csum != 0xffff))
952                                 csum = ~csum;
953                         off = hdr->csum_offset + hdr->csum_start;
954                         if (rte_pktmbuf_data_len(m) >= off + 1)
955                                 *rte_pktmbuf_mtod_offset(m, uint16_t *,
956                                         off) = csum;
957                 }
958         } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
959                 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
960         }
961
962         /* GSO request, save required information in mbuf */
963         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
964                 /* Check unsupported modes */
965                 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
966                     (hdr->gso_size == 0)) {
967                         return -EINVAL;
968                 }
969
970                 /* Update MSS in the mbuf */
971                 m->tso_segsz = hdr->gso_size;
972                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
973                 case VIRTIO_NET_HDR_GSO_TCPV4:
974                 case VIRTIO_NET_HDR_GSO_TCPV6:
975                         m->ol_flags |= PKT_RX_LRO |
976                                 PKT_RX_L4_CKSUM_NONE;
977                         break;
978                 default:
979                         return -EINVAL;
980                 }
981         }
982
983         return 0;
984 }
985
986 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
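/*
 * Rx bursts are trimmed below so that vq_used_cons_idx + num ends on a
 * DESC_PER_CACHELINE boundary, which keeps consecutive bursts starting on
 * a descriptor cache-line boundary.
 */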
987 uint16_t
988 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
989 {
990         struct virtnet_rx *rxvq = rx_queue;
991         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
992         struct virtio_hw *hw = vq->hw;
993         struct rte_mbuf *rxm;
994         uint16_t nb_used, num, nb_rx;
995         uint32_t len[VIRTIO_MBUF_BURST_SZ];
996         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
997         int error;
998         uint32_t i, nb_enqueued;
999         uint32_t hdr_size;
1000         struct virtio_net_hdr *hdr;
1001
1002         nb_rx = 0;
1003         if (unlikely(hw->started == 0))
1004                 return nb_rx;
1005
1006         nb_used = virtqueue_nused(vq);
1007
1008         num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1009         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1010                 num = VIRTIO_MBUF_BURST_SZ;
1011         if (likely(num > DESC_PER_CACHELINE))
1012                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1013
1014         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1015         PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1016
1017         nb_enqueued = 0;
1018         hdr_size = hw->vtnet_hdr_size;
1019
1020         for (i = 0; i < num ; i++) {
1021                 rxm = rcv_pkts[i];
1022
1023                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1024
1025                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1026                         PMD_RX_LOG(ERR, "Packet drop");
1027                         nb_enqueued++;
1028                         virtio_discard_rxbuf(vq, rxm);
1029                         rxvq->stats.errors++;
1030                         continue;
1031                 }
1032
1033                 rxm->port = rxvq->port_id;
1034                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1035                 rxm->ol_flags = 0;
1036                 rxm->vlan_tci = 0;
1037
1038                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1039                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1040
1041                 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1042                         RTE_PKTMBUF_HEADROOM - hdr_size);
1043
1044                 if (hw->vlan_strip)
1045                         rte_vlan_strip(rxm);
1046
1047                 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1048                         virtio_discard_rxbuf(vq, rxm);
1049                         rxvq->stats.errors++;
1050                         continue;
1051                 }
1052
1053                 virtio_rx_stats_updated(rxvq, rxm);
1054
1055                 rx_pkts[nb_rx++] = rxm;
1056         }
1057
1058         rxvq->stats.packets += nb_rx;
1059
1060         /* Allocate new mbuf for the used descriptor */
1061         if (likely(!virtqueue_full(vq))) {
1062                 uint16_t free_cnt = vq->vq_free_cnt;
1063                 struct rte_mbuf *new_pkts[free_cnt];
1064
1065                 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1066                                                 free_cnt) == 0)) {
1067                         error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1068                                         free_cnt);
1069                         if (unlikely(error)) {
1070                                 for (i = 0; i < free_cnt; i++)
1071                                         rte_pktmbuf_free(new_pkts[i]);
1072                         }
1073                         nb_enqueued += free_cnt;
1074                 } else {
1075                         struct rte_eth_dev *dev =
1076                                 &rte_eth_devices[rxvq->port_id];
1077                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1078                 }
1079         }
1080
1081         if (likely(nb_enqueued)) {
1082                 vq_update_avail_idx(vq);
1083
1084                 if (unlikely(virtqueue_kick_prepare(vq))) {
1085                         virtqueue_notify(vq);
1086                         PMD_RX_LOG(DEBUG, "Notified");
1087                 }
1088         }
1089
1090         return nb_rx;
1091 }
1092
1093 uint16_t
1094 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1095                         uint16_t nb_pkts)
1096 {
1097         struct virtnet_rx *rxvq = rx_queue;
1098         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1099         struct virtio_hw *hw = vq->hw;
1100         struct rte_mbuf *rxm;
1101         uint16_t num, nb_rx;
1102         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1103         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1104         int error;
1105         uint32_t i, nb_enqueued;
1106         uint32_t hdr_size;
1107         struct virtio_net_hdr *hdr;
1108
1109         nb_rx = 0;
1110         if (unlikely(hw->started == 0))
1111                 return nb_rx;
1112
1113         num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1114         if (likely(num > DESC_PER_CACHELINE))
1115                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1116
1117         num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1118         PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1119
1120         nb_enqueued = 0;
1121         hdr_size = hw->vtnet_hdr_size;
1122
1123         for (i = 0; i < num; i++) {
1124                 rxm = rcv_pkts[i];
1125
1126                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1127
1128                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1129                         PMD_RX_LOG(ERR, "Packet drop");
1130                         nb_enqueued++;
1131                         virtio_discard_rxbuf(vq, rxm);
1132                         rxvq->stats.errors++;
1133                         continue;
1134                 }
1135
1136                 rxm->port = rxvq->port_id;
1137                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1138                 rxm->ol_flags = 0;
1139                 rxm->vlan_tci = 0;
1140
1141                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1142                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1143
1144                 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1145                         RTE_PKTMBUF_HEADROOM - hdr_size);
1146
1147                 if (hw->vlan_strip)
1148                         rte_vlan_strip(rxm);
1149
1150                 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1151                         virtio_discard_rxbuf(vq, rxm);
1152                         rxvq->stats.errors++;
1153                         continue;
1154                 }
1155
1156                 virtio_rx_stats_updated(rxvq, rxm);
1157
1158                 rx_pkts[nb_rx++] = rxm;
1159         }
1160
1161         rxvq->stats.packets += nb_rx;
1162
1163         /* Allocate new mbuf for the used descriptor */
1164         if (likely(!virtqueue_full(vq))) {
1165                 uint16_t free_cnt = vq->vq_free_cnt;
1166                 struct rte_mbuf *new_pkts[free_cnt];
1167
1168                 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1169                                                 free_cnt) == 0)) {
1170                         error = virtqueue_enqueue_recv_refill_packed(vq,
1171                                         new_pkts, free_cnt);
1172                         if (unlikely(error)) {
1173                                 for (i = 0; i < free_cnt; i++)
1174                                         rte_pktmbuf_free(new_pkts[i]);
1175                         }
1176                         nb_enqueued += free_cnt;
1177                 } else {
1178                         struct rte_eth_dev *dev =
1179                                 &rte_eth_devices[rxvq->port_id];
1180                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1181                 }
1182         }
1183
1184         if (likely(nb_enqueued)) {
1185                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1186                         virtqueue_notify(vq);
1187                         PMD_RX_LOG(DEBUG, "Notified");
1188                 }
1189         }
1190
1191         return nb_rx;
1192 }
1193
1194
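/*
 * In-order Rx burst: used entries arrive in descriptor order, so dequeue
 * and refill go through virtqueue_dequeue_rx_inorder() and
 * virtqueue_enqueue_refill_inorder(); mergeable buffers are still handled
 * by chaining extra segments onto the head mbuf.
 */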
1195 uint16_t
1196 virtio_recv_pkts_inorder(void *rx_queue,
1197                         struct rte_mbuf **rx_pkts,
1198                         uint16_t nb_pkts)
1199 {
1200         struct virtnet_rx *rxvq = rx_queue;
1201         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1202         struct virtio_hw *hw = vq->hw;
1203         struct rte_mbuf *rxm;
1204         struct rte_mbuf *prev = NULL;
1205         uint16_t nb_used, num, nb_rx;
1206         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1207         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1208         int error;
1209         uint32_t nb_enqueued;
1210         uint32_t seg_num;
1211         uint32_t seg_res;
1212         uint32_t hdr_size;
1213         int32_t i;
1214
1215         nb_rx = 0;
1216         if (unlikely(hw->started == 0))
1217                 return nb_rx;
1218
1219         nb_used = virtqueue_nused(vq);
1220         nb_used = RTE_MIN(nb_used, nb_pkts);
1221         nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1222
1223         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1224
1225         nb_enqueued = 0;
1226         seg_num = 1;
1227         seg_res = 0;
1228         hdr_size = hw->vtnet_hdr_size;
1229
1230         num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1231
1232         for (i = 0; i < num; i++) {
1233                 struct virtio_net_hdr_mrg_rxbuf *header;
1234
1235                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1236                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1237
1238                 rxm = rcv_pkts[i];
1239
1240                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1241                         PMD_RX_LOG(ERR, "Packet drop");
1242                         nb_enqueued++;
1243                         virtio_discard_rxbuf_inorder(vq, rxm);
1244                         rxvq->stats.errors++;
1245                         continue;
1246                 }
1247
1248                 header = (struct virtio_net_hdr_mrg_rxbuf *)
1249                          ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1250                          - hdr_size);
1251
1252                 if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1253                         seg_num = header->num_buffers;
1254                         if (seg_num == 0)
1255                                 seg_num = 1;
1256                 } else {
1257                         seg_num = 1;
1258                 }
1259
1260                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1261                 rxm->nb_segs = seg_num;
1262                 rxm->ol_flags = 0;
1263                 rxm->vlan_tci = 0;
1264                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1265                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1266
1267                 rxm->port = rxvq->port_id;
1268
1269                 rx_pkts[nb_rx] = rxm;
1270                 prev = rxm;
1271
1272                 if (vq->hw->has_rx_offload &&
1273                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1274                         virtio_discard_rxbuf_inorder(vq, rxm);
1275                         rxvq->stats.errors++;
1276                         continue;
1277                 }
1278
1279                 if (hw->vlan_strip)
1280                         rte_vlan_strip(rx_pkts[nb_rx]);
1281
1282                 seg_res = seg_num - 1;
1283
1284                 /* Merge remaining segments */
1285                 while (seg_res != 0 && i < (num - 1)) {
1286                         i++;
1287
1288                         rxm = rcv_pkts[i];
1289                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1290                         rxm->pkt_len = (uint32_t)(len[i]);
1291                         rxm->data_len = (uint16_t)(len[i]);
1292
1293                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1294
1295                         prev->next = rxm;
1296                         prev = rxm;
1297                         seg_res -= 1;
1298                 }
1299
1300                 if (!seg_res) {
1301                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1302                         nb_rx++;
1303                 }
1304         }
1305
1306         /* Last packet may still need segments merged */
1307         while (seg_res != 0) {
1308                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1309                                         VIRTIO_MBUF_BURST_SZ);
1310
1311                 if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1312                         num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1313                                                            rcv_cnt);
1314                         uint16_t extra_idx = 0;
1315
1316                         rcv_cnt = num;
1317                         while (extra_idx < rcv_cnt) {
1318                                 rxm = rcv_pkts[extra_idx];
1319                                 rxm->data_off =
1320                                         RTE_PKTMBUF_HEADROOM - hdr_size;
1321                                 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1322                                 rxm->data_len = (uint16_t)(len[extra_idx]);
1323                                 prev->next = rxm;
1324                                 prev = rxm;
1325                                 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1326                                 extra_idx += 1;
1327                         }
1328                         seg_res -= rcv_cnt;
1329
1330                         if (!seg_res) {
1331                                 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1332                                 nb_rx++;
1333                         }
1334                 } else {
1335                         PMD_RX_LOG(ERR,
1336                                         "Not enough segments for packet.");
1337                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1338                         rxvq->stats.errors++;
1339                         break;
1340                 }
1341         }
1342
1343         rxvq->stats.packets += nb_rx;
1344
1345         /* Allocate new mbuf for the used descriptor */
1346
1347         if (likely(!virtqueue_full(vq))) {
1348                 /* free_cnt may include mrg descs */
1349                 uint16_t free_cnt = vq->vq_free_cnt;
1350                 struct rte_mbuf *new_pkts[free_cnt];
1351
1352                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1353                         error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1354                                         free_cnt);
1355                         if (unlikely(error)) {
1356                                 for (i = 0; i < free_cnt; i++)
1357                                         rte_pktmbuf_free(new_pkts[i]);
1358                         }
1359                         nb_enqueued += free_cnt;
1360                 } else {
1361                         struct rte_eth_dev *dev =
1362                                 &rte_eth_devices[rxvq->port_id];
1363                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1364                 }
1365         }
1366
1367         if (likely(nb_enqueued)) {
1368                 vq_update_avail_idx(vq);
1369
1370                 if (unlikely(virtqueue_kick_prepare(vq))) {
1371                         virtqueue_notify(vq);
1372                         PMD_RX_LOG(DEBUG, "Notified");
1373                 }
1374         }
1375
1376         return nb_rx;
1377 }
1378
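/*
 * Mergeable Rx burst: the device may spread one packet across several
 * buffers and reports the count in the header's num_buffers field. Extra
 * segments are chained onto the head mbuf; any segments still missing when
 * the dequeued burst runs out are fetched by the trailing merge loop.
 */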
1379 uint16_t
1380 virtio_recv_mergeable_pkts(void *rx_queue,
1381                         struct rte_mbuf **rx_pkts,
1382                         uint16_t nb_pkts)
1383 {
1384         struct virtnet_rx *rxvq = rx_queue;
1385         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1386         struct virtio_hw *hw = vq->hw;
1387         struct rte_mbuf *rxm;
1388         struct rte_mbuf *prev = NULL;
1389         uint16_t nb_used, num, nb_rx = 0;
1390         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1391         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1392         int error;
1393         uint32_t nb_enqueued = 0;
1394         uint32_t seg_num = 0;
1395         uint32_t seg_res = 0;
1396         uint32_t hdr_size = hw->vtnet_hdr_size;
1397         int32_t i;
1398
1399         if (unlikely(hw->started == 0))
1400                 return nb_rx;
1401
1402         nb_used = virtqueue_nused(vq);
1403
1404         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1405
1406         num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1407         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1408                 num = VIRTIO_MBUF_BURST_SZ;
1409         if (likely(num > DESC_PER_CACHELINE))
1410                 num = num - ((vq->vq_used_cons_idx + num) %
1411                                 DESC_PER_CACHELINE);
1412
1413
1414         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1415
1416         for (i = 0; i < num; i++) {
1417                 struct virtio_net_hdr_mrg_rxbuf *header;
1418
1419                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1420                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1421
1422                 rxm = rcv_pkts[i];
1423
1424                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1425                         PMD_RX_LOG(ERR, "Packet drop");
1426                         nb_enqueued++;
1427                         virtio_discard_rxbuf(vq, rxm);
1428                         rxvq->stats.errors++;
1429                         continue;
1430                 }
1431
1432                 header = (struct virtio_net_hdr_mrg_rxbuf *)
1433                          ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1434                          - hdr_size);
1435                 seg_num = header->num_buffers;
1436                 if (seg_num == 0)
1437                         seg_num = 1;
1438
1439                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1440                 rxm->nb_segs = seg_num;
1441                 rxm->ol_flags = 0;
1442                 rxm->vlan_tci = 0;
1443                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1444                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1445
1446                 rxm->port = rxvq->port_id;
1447
1448                 rx_pkts[nb_rx] = rxm;
1449                 prev = rxm;
1450
1451                 if (hw->has_rx_offload &&
1452                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1453                         virtio_discard_rxbuf(vq, rxm);
1454                         rxvq->stats.errors++;
1455                         continue;
1456                 }
1457
1458                 if (hw->vlan_strip)
1459                         rte_vlan_strip(rx_pkts[nb_rx]);
1460
1461                 seg_res = seg_num - 1;
1462
1463                 /* Merge remaining segments */
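                /* Follow-on buffers carry payload only; the whole used length
                 * is data, starting where the posted buffer began (headroom
                 * minus the header size).
                 */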
1464                 while (seg_res != 0 && i < (num - 1)) {
1465                         i++;
1466
1467                         rxm = rcv_pkts[i];
1468                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1469                         rxm->pkt_len = (uint32_t)(len[i]);
1470                         rxm->data_len = (uint16_t)(len[i]);
1471
1472                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1473
1474                         prev->next = rxm;
1475                         prev = rxm;
1476                         seg_res -= 1;
1477                 }
1478
1479                 if (!seg_res) {
1480                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1481                         nb_rx++;
1482                 }
1483         }
1484
1485         /* The last packet may still have segments left to merge */
1486         while (seg_res != 0) {
1487                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1488                                         VIRTIO_MBUF_BURST_SZ);
1489
1490                 if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1491                         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1492                                                            rcv_cnt);
1493                         uint16_t extra_idx = 0;
1494
1495                         rcv_cnt = num;
1496                         while (extra_idx < rcv_cnt) {
1497                                 rxm = rcv_pkts[extra_idx];
1498                                 rxm->data_off =
1499                                         RTE_PKTMBUF_HEADROOM - hdr_size;
1500                                 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1501                                 rxm->data_len = (uint16_t)(len[extra_idx]);
1502                                 prev->next = rxm;
1503                                 prev = rxm;
1504                                 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1505                                 extra_idx += 1;
1506                         }
1507                         seg_res -= rcv_cnt;
1508
1509                         if (!seg_res) {
1510                                 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1511                                 nb_rx++;
1512                         }
1513                 } else {
1514                         PMD_RX_LOG(ERR,
1515                                         "Not enough segments for packet.");
1516                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1517                         rxvq->stats.errors++;
1518                         break;
1519                 }
1520         }
1521
1522         rxvq->stats.packets += nb_rx;
1523
1524         /* Allocate new mbufs to refill the used descriptors */
1525         if (likely(!virtqueue_full(vq))) {
1526                 /* free_cnt may include mrg descs */
1527                 uint16_t free_cnt = vq->vq_free_cnt;
1528                 struct rte_mbuf *new_pkts[free_cnt];
1529
1530                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1531                         error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1532                                         free_cnt);
1533                         if (unlikely(error)) {
1534                                 for (i = 0; i < free_cnt; i++)
1535                                         rte_pktmbuf_free(new_pkts[i]);
1536                         }
1537                         nb_enqueued += free_cnt;
1538                 } else {
1539                         struct rte_eth_dev *dev =
1540                                 &rte_eth_devices[rxvq->port_id];
1541                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1542                 }
1543         }
1544
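        /* Publish the refilled buffers and kick the device only if it has not
         * suppressed notifications.
         */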
1545         if (likely(nb_enqueued)) {
1546                 vq_update_avail_idx(vq);
1547
1548                 if (unlikely(virtqueue_kick_prepare(vq))) {
1549                         virtqueue_notify(vq);
1550                         PMD_RX_LOG(DEBUG, "Notified");
1551                 }
1552         }
1553
1554         return nb_rx;
1555 }
1556
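/*
 * Same mergeable receive logic as virtio_recv_mergeable_pkts() above, but
 * operating on a packed virtqueue.
 */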
1557 uint16_t
1558 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1559                         struct rte_mbuf **rx_pkts,
1560                         uint16_t nb_pkts)
1561 {
1562         struct virtnet_rx *rxvq = rx_queue;
1563         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1564         struct virtio_hw *hw = vq->hw;
1565         struct rte_mbuf *rxm;
1566         struct rte_mbuf *prev = NULL;
1567         uint16_t num, nb_rx = 0;
1568         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1569         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1570         uint32_t nb_enqueued = 0;
1571         uint32_t seg_num = 0;
1572         uint32_t seg_res = 0;
1573         uint32_t hdr_size = hw->vtnet_hdr_size;
1574         int32_t i;
1575         int error;
1576
1577         if (unlikely(hw->started == 0))
1578                 return nb_rx;
1579
1580
1581         num = nb_pkts;
1582         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1583                 num = VIRTIO_MBUF_BURST_SZ;
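        /* As in the split path, stop the burst at a DESC_PER_CACHELINE
         * boundary so the next burst starts on a fresh used-ring cacheline.
         */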
1584         if (likely(num > DESC_PER_CACHELINE))
1585                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1586
1587         num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1588
1589         for (i = 0; i < num; i++) {
1590                 struct virtio_net_hdr_mrg_rxbuf *header;
1591
1592                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1593                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1594
1595                 rxm = rcv_pkts[i];
1596
1597                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1598                         PMD_RX_LOG(ERR, "Packet drop");
1599                         nb_enqueued++;
1600                         virtio_discard_rxbuf(vq, rxm);
1601                         rxvq->stats.errors++;
1602                         continue;
1603                 }
1604
1605                 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1606                           rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1607                 seg_num = header->num_buffers;
1608
1609                 if (seg_num == 0)
1610                         seg_num = 1;
1611
1612                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1613                 rxm->nb_segs = seg_num;
1614                 rxm->ol_flags = 0;
1615                 rxm->vlan_tci = 0;
1616                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1617                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1618
1619                 rxm->port = rxvq->port_id;
1620                 rx_pkts[nb_rx] = rxm;
1621                 prev = rxm;
1622
1623                 if (hw->has_rx_offload &&
1624                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1625                         virtio_discard_rxbuf(vq, rxm);
1626                         rxvq->stats.errors++;
1627                         continue;
1628                 }
1629
1630                 if (hw->vlan_strip)
1631                         rte_vlan_strip(rx_pkts[nb_rx]);
1632
1633                 seg_res = seg_num - 1;
1634
1635                 /* Merge remaining segments */
1636                 while (seg_res != 0 && i < (num - 1)) {
1637                         i++;
1638
1639                         rxm = rcv_pkts[i];
1640                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1641                         rxm->pkt_len = (uint32_t)(len[i]);
1642                         rxm->data_len = (uint16_t)(len[i]);
1643
1644                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1645
1646                         prev->next = rxm;
1647                         prev = rxm;
1648                         seg_res -= 1;
1649                 }
1650
1651                 if (!seg_res) {
1652                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1653                         nb_rx++;
1654                 }
1655         }
1656
1657         /* The last packet may still have segments left to merge */
1658         while (seg_res != 0) {
1659                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1660                                         VIRTIO_MBUF_BURST_SZ);
1661                 uint16_t extra_idx = 0;
1662
1663                 rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1664                                 len, rcv_cnt);
1665                 if (unlikely(rcv_cnt == 0)) {
1666                         PMD_RX_LOG(ERR, "Not enough segments for packet.");
1667                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1668                         rxvq->stats.errors++;
1669                         break;
1670                 }
1671
1672                 while (extra_idx < rcv_cnt) {
1673                         rxm = rcv_pkts[extra_idx];
1674
1675                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1676                         rxm->pkt_len = (uint32_t)(len[extra_idx]);
1677                         rxm->data_len = (uint16_t)(len[extra_idx]);
1678
1679                         prev->next = rxm;
1680                         prev = rxm;
1681                         rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1682                         extra_idx += 1;
1683                 }
1684                 seg_res -= rcv_cnt;
1685                 if (!seg_res) {
1686                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1687                         nb_rx++;
1688                 }
1689         }
1690
1691         rxvq->stats.packets += nb_rx;
1692
1693         /* Allocate new mbufs to refill the used descriptors */
1694         if (likely(!virtqueue_full(vq))) {
1695                 /* free_cnt may include mrg descs */
1696                 uint16_t free_cnt = vq->vq_free_cnt;
1697                 struct rte_mbuf *new_pkts[free_cnt];
1698
1699                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1700                         error = virtqueue_enqueue_recv_refill_packed(vq,
1701                                         new_pkts, free_cnt);
1702                         if (unlikely(error)) {
1703                                 for (i = 0; i < free_cnt; i++)
1704                                         rte_pktmbuf_free(new_pkts[i]);
1705                         }
1706                         nb_enqueued += free_cnt;
1707                 } else {
1708                         struct rte_eth_dev *dev =
1709                                 &rte_eth_devices[rxvq->port_id];
1710                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1711                 }
1712         }
1713
1714         if (likely(nb_enqueued)) {
1715                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1716                         virtqueue_notify(vq);
1717                         PMD_RX_LOG(DEBUG, "Notified");
1718                 }
1719         }
1720
1721         return nb_rx;
1722 }
1723
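/*
 * Tx prepare callback: software fixups that must happen before descriptors
 * are built - optional offload validation, VLAN tag insertion, checksum
 * preparation and the pseudo-header checksum adjustment required for TSO.
 */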
1724 uint16_t
1725 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1726                         uint16_t nb_pkts)
1727 {
1728         uint16_t nb_tx;
1729         int error;
1730
1731         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1732                 struct rte_mbuf *m = tx_pkts[nb_tx];
1733
1734 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1735                 error = rte_validate_tx_offload(m);
1736                 if (unlikely(error)) {
1737                         rte_errno = -error;
1738                         break;
1739                 }
1740 #endif
1741
1742                 /* Do VLAN tag insertion */
1743                 if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1744                         error = rte_vlan_insert(&m);
1745                         /* rte_vlan_insert() may change pointer
1746                          * even in the case of failure
1747                          */
1748                         tx_pkts[nb_tx] = m;
1749
1750                         if (unlikely(error)) {
1751                                 rte_errno = -error;
1752                                 break;
1753                         }
1754                 }
1755
1756                 error = rte_net_intel_cksum_prepare(m);
1757                 if (unlikely(error)) {
1758                         rte_errno = -error;
1759                         break;
1760                 }
1761
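                /* For TSO, adjust the TCP checksum field to the pseudo-header
                 * form the virtio-net header expects the device to resume
                 * from.
                 */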
1762                 if (m->ol_flags & PKT_TX_TCP_SEG)
1763                         virtio_tso_fix_cksum(m);
1764         }
1765
1766         return nb_tx;
1767 }
1768
1769 uint16_t
1770 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1771                         uint16_t nb_pkts)
1772 {
1773         struct virtnet_tx *txvq = tx_queue;
1774         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1775         struct virtio_hw *hw = vq->hw;
1776         uint16_t hdr_size = hw->vtnet_hdr_size;
1777         uint16_t nb_tx = 0;
1778         bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
1779
1780         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1781                 return nb_tx;
1782
1783         if (unlikely(nb_pkts < 1))
1784                 return nb_pkts;
1785
1786         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1787
1788         if (nb_pkts > vq->vq_free_cnt)
1789                 virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1790                                            in_order);
1791
1792         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1793                 struct rte_mbuf *txm = tx_pkts[nb_tx];
1794                 int can_push = 0, use_indirect = 0, slots, need;
1795
1796                 /* optimize ring usage */
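                /* can_push: with ANY_LAYOUT or VIRTIO 1.0 the virtio-net
                 * header can be prepended in the mbuf headroom, so a direct,
                 * single-segment mbuf needs only one slot.  Failing that, an
                 * indirect descriptor table packs the whole chain into one
                 * ring entry when the feature is negotiated.
                 */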
1797                 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1798                       virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1799                     rte_mbuf_refcnt_read(txm) == 1 &&
1800                     RTE_MBUF_DIRECT(txm) &&
1801                     txm->nb_segs == 1 &&
1802                     rte_pktmbuf_headroom(txm) >= hdr_size &&
1803                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1804                            __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1805                         can_push = 1;
1806                 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1807                          txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1808                         use_indirect = 1;
1809                 /* How many main ring entries are needed for this Tx?
1810                  * indirect   => 1
1811                  * any_layout => number of segments
1812                  * default    => number of segments + 1
1813                  */
1814                 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1815                 need = slots - vq->vq_free_cnt;
1816
1817                 /* A positive value means more free vring descriptors are needed */
1818                 if (unlikely(need > 0)) {
1819                         virtio_xmit_cleanup_packed(vq, need, in_order);
1820                         need = slots - vq->vq_free_cnt;
1821                         if (unlikely(need > 0)) {
1822                                 PMD_TX_LOG(ERR,
1823                                            "No free tx descriptors to transmit");
1824                                 break;
1825                         }
1826                 }
1827
1828                 /* Enqueue Packet buffers */
1829                 if (can_push)
1830                         virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1831                 else
1832                         virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1833                                                       use_indirect, 0,
1834                                                       in_order);
1835
1836                 virtio_update_packet_stats(&txvq->stats, txm);
1837         }
1838
1839         txvq->stats.packets += nb_tx;
1840
1841         if (likely(nb_tx)) {
1842                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1843                         virtqueue_notify(vq);
1844                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1845                 }
1846         }
1847
1848         return nb_tx;
1849 }
1850
1851 uint16_t
1852 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1853 {
1854         struct virtnet_tx *txvq = tx_queue;
1855         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1856         struct virtio_hw *hw = vq->hw;
1857         uint16_t hdr_size = hw->vtnet_hdr_size;
1858         uint16_t nb_used, nb_tx = 0;
1859
1860         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1861                 return nb_tx;
1862
1863         if (unlikely(nb_pkts < 1))
1864                 return nb_pkts;
1865
1866         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1867
1868         nb_used = virtqueue_nused(vq);
1869
1870         if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1871                 virtio_xmit_cleanup(vq, nb_used);
1872
1873         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1874                 struct rte_mbuf *txm = tx_pkts[nb_tx];
1875                 int can_push = 0, use_indirect = 0, slots, need;
1876
1877                 /* optimize ring usage */
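                /* Same descriptor-saving strategy as the packed path: push
                 * the header into the mbuf headroom when possible, otherwise
                 * fall back to an indirect descriptor.
                 */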
1878                 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1879                       virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1880                     rte_mbuf_refcnt_read(txm) == 1 &&
1881                     RTE_MBUF_DIRECT(txm) &&
1882                     txm->nb_segs == 1 &&
1883                     rte_pktmbuf_headroom(txm) >= hdr_size &&
1884                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1885                                    __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1886                         can_push = 1;
1887                 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1888                          txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1889                         use_indirect = 1;
1890
1891                 /* How many main ring entries are needed for this Tx?
1892                  * any_layout => number of segments
1893                  * indirect   => 1
1894                  * default    => number of segments + 1
1895                  */
1896                 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1897                 need = slots - vq->vq_free_cnt;
1898
1899                 /* A positive value means more free vring descriptors are needed */
1900                 if (unlikely(need > 0)) {
1901                         nb_used = virtqueue_nused(vq);
1902
1903                         need = RTE_MIN(need, (int)nb_used);
1904
1905                         virtio_xmit_cleanup(vq, need);
1906                         need = slots - vq->vq_free_cnt;
1907                         if (unlikely(need > 0)) {
1908                                 PMD_TX_LOG(ERR,
1909                                            "No free tx descriptors to transmit");
1910                                 break;
1911                         }
1912                 }
1913
1914                 /* Enqueue Packet buffers */
1915                 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1916                         can_push, 0);
1917
1918                 virtio_update_packet_stats(&txvq->stats, txm);
1919         }
1920
1921         txvq->stats.packets += nb_tx;
1922
1923         if (likely(nb_tx)) {
1924                 vq_update_avail_idx(vq);
1925
1926                 if (unlikely(virtqueue_kick_prepare(vq))) {
1927                         virtqueue_notify(vq);
1928                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1929                 }
1930         }
1931
1932         return nb_tx;
1933 }
1934
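/*
 * Reclaim used descriptors on an in-order queue until up to 'need' extra
 * slots are free.  Returns how many of the requested slots are still
 * missing; a value of zero or less means the request can now be met.
 */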
1935 static __rte_always_inline int
1936 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1937 {
1938         uint16_t nb_used, nb_clean, nb_descs;
1939
1940         nb_descs = vq->vq_free_cnt + need;
1941         nb_used = virtqueue_nused(vq);
1942         nb_clean = RTE_MIN(need, (int)nb_used);
1943
1944         virtio_xmit_cleanup_inorder(vq, nb_clean);
1945
1946         return nb_descs - vq->vq_free_cnt;
1947 }
1948
1949 uint16_t
1950 virtio_xmit_pkts_inorder(void *tx_queue,
1951                         struct rte_mbuf **tx_pkts,
1952                         uint16_t nb_pkts)
1953 {
1954         struct virtnet_tx *txvq = tx_queue;
1955         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1956         struct virtio_hw *hw = vq->hw;
1957         uint16_t hdr_size = hw->vtnet_hdr_size;
1958         uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1959         struct rte_mbuf *inorder_pkts[nb_pkts];
1960         int need;
1961
1962         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1963                 return nb_tx;
1964
1965         if (unlikely(nb_pkts < 1))
1966                 return nb_pkts;
1967
1968         VIRTQUEUE_DUMP(vq);
1969         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1970         nb_used = virtqueue_nused(vq);
1971
1972         if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1973                 virtio_xmit_cleanup_inorder(vq, nb_used);
1974
1975         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1976                 struct rte_mbuf *txm = tx_pkts[nb_tx];
1977                 int slots;
1978
1979                 /* optimize ring usage */
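                /* Packets whose virtio-net header fits in the mbuf headroom
                 * are collected in inorder_pkts and flushed as one in-order
                 * batch; everything else goes through the generic enqueue
                 * path further down.
                 */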
1980                 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1981                      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1982                      rte_mbuf_refcnt_read(txm) == 1 &&
1983                      RTE_MBUF_DIRECT(txm) &&
1984                      txm->nb_segs == 1 &&
1985                      rte_pktmbuf_headroom(txm) >= hdr_size &&
1986                      rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1987                                 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1988                         inorder_pkts[nb_inorder_pkts] = txm;
1989                         nb_inorder_pkts++;
1990
1991                         continue;
1992                 }
1993
1994                 if (nb_inorder_pkts) {
1995                         need = nb_inorder_pkts - vq->vq_free_cnt;
1996                         if (unlikely(need > 0)) {
1997                                 need = virtio_xmit_try_cleanup_inorder(vq,
1998                                                                        need);
1999                                 if (unlikely(need > 0)) {
2000                                         PMD_TX_LOG(ERR,
2001                                                 "No free tx descriptors to "
2002                                                 "transmit");
2003                                         break;
2004                                 }
2005                         }
2006                         virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2007                                                         nb_inorder_pkts);
2008                         nb_inorder_pkts = 0;
2009                 }
2010
2011                 slots = txm->nb_segs + 1;
2012                 need = slots - vq->vq_free_cnt;
2013                 if (unlikely(need > 0)) {
2014                         need = virtio_xmit_try_cleanup_inorder(vq, slots);
2015
2016                         if (unlikely(need > 0)) {
2017                                 PMD_TX_LOG(ERR,
2018                                         "No free tx descriptors to transmit");
2019                                 break;
2020                         }
2021                 }
2022                 /* Enqueue Packet buffers */
2023                 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2024
2025                 virtio_update_packet_stats(&txvq->stats, txm);
2026         }
2027
2028         /* Transmit all inorder packets */
2029         if (nb_inorder_pkts) {
2030                 need = nb_inorder_pkts - vq->vq_free_cnt;
2031                 if (unlikely(need > 0)) {
2032                         need = virtio_xmit_try_cleanup_inorder(vq,
2033                                                                   need);
2034                         if (unlikely(need > 0)) {
2035                                 PMD_TX_LOG(ERR,
2036                                         "No free tx descriptors to transmit");
2037                                 nb_inorder_pkts = vq->vq_free_cnt;
2038                                 nb_tx -= need;
2039                         }
2040                 }
2041
2042                 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2043                                                 nb_inorder_pkts);
2044         }
2045
2046         txvq->stats.packets += nb_tx;
2047
2048         if (likely(nb_tx)) {
2049                 vq_update_avail_idx(vq);
2050
2051                 if (unlikely(virtqueue_kick_prepare(vq))) {
2052                         virtqueue_notify(vq);
2053                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2054                 }
2055         }
2056
2057         VIRTQUEUE_DUMP(vq);
2058
2059         return nb_tx;
2060 }
2061
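/*
 * Weak fallbacks for the vectorized packed-ring datapaths: they are
 * overridden at link time when a SIMD implementation is built for the
 * target and otherwise report that no packets were handled.
 */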
2062 __rte_weak uint16_t
2063 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2064                             struct rte_mbuf **rx_pkts __rte_unused,
2065                             uint16_t nb_pkts __rte_unused)
2066 {
2067         return 0;
2068 }
2069
2070 __rte_weak uint16_t
2071 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2072                             struct rte_mbuf **tx_pkts __rte_unused,
2073                             uint16_t nb_pkts __rte_unused)
2074 {
2075         return 0;
2076 }