net/virtio: remove reference to virtqueue in vrings
dpdk.git: drivers/net/virtio/virtio_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41
42 int
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 {
45         struct virtnet_rx *rxvq = rxq;
46         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
47
48         return virtqueue_nused(vq) >= offset;
49 }
50
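/*
 * Free 'num' descriptors ending at 'desc_idx' on an in-order queue: since
 * descriptors are consumed and recycled strictly in ring order, bumping the
 * free counter and moving the tail index is enough, no chain walking needed.
 */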
51 void
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 {
54         vq->vq_free_cnt += num;
55         vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
56 }
57
58 void
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 {
61         struct vring_desc *dp, *dp_tail;
62         struct vq_desc_extra *dxp;
63         uint16_t desc_idx_last = desc_idx;
64
65         dp  = &vq->vq_split.ring.desc[desc_idx];
66         dxp = &vq->vq_descx[desc_idx];
67         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68         if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69                 while (dp->flags & VRING_DESC_F_NEXT) {
70                         desc_idx_last = dp->next;
71                         dp = &vq->vq_split.ring.desc[dp->next];
72                 }
73         }
74         dxp->ndescs = 0;
75
76         /*
77          * We must append the existing free chain, if any, to the end of the
78          * newly freed chain. If the virtqueue was completely used, then the
79          * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
80          */
81         if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82                 vq->vq_desc_head_idx = desc_idx;
83         } else {
84                 dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85                 dp_tail->next = desc_idx;
86         }
87
88         vq->vq_desc_tail_idx = desc_idx_last;
89         dp->next = VQ_RING_DESC_CHAIN_END;
90 }
91
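/*
 * Update per-queue byte and size-bin statistics. size_bins[] holds: bin 0
 * for frames shorter than 64B, bin 1 for exactly 64B, bins 2-5 for the
 * power-of-two ranges 65-127, 128-255, 256-511 and 512-1023 (computed from
 * the leading-zero count, e.g. s = 128 gives 32 - clz(128) - 5 = 3), bin 6
 * for 1024-1518 and bin 7 for anything larger.
 */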
92 void
93 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
94 {
95         uint32_t s = mbuf->pkt_len;
96         struct rte_ether_addr *ea;
97
98         stats->bytes += s;
99
100         if (s == 64) {
101                 stats->size_bins[1]++;
102         } else if (s > 64 && s < 1024) {
103                 uint32_t bin;
104
105                 /* count leading zeros, and offset into the correct bin */
106                 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
107                 stats->size_bins[bin]++;
108         } else {
109                 if (s < 64)
110                         stats->size_bins[0]++;
111                 else if (s < 1519)
112                         stats->size_bins[6]++;
113                 else
114                         stats->size_bins[7]++;
115         }
116
117         ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
118         if (rte_is_multicast_ether_addr(ea)) {
119                 if (rte_is_broadcast_ether_addr(ea))
120                         stats->broadcast++;
121                 else
122                         stats->multicast++;
123         }
124 }
125
126 static inline void
127 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
128 {
129         VIRTIO_DUMP_PACKET(m, m->data_len);
130
131         virtio_update_packet_stats(&rxvq->stats, m);
132 }
133
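/*
 * Dequeue up to 'num' received buffers from a packed virtqueue: poll
 * desc_is_used() (acquire semantics), recover the mbuf cookie for each used
 * descriptor, and toggle the used wrap counter whenever the consumer index
 * wraps around the ring.
 */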
134 static uint16_t
135 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
136                                   struct rte_mbuf **rx_pkts,
137                                   uint32_t *len,
138                                   uint16_t num)
139 {
140         struct rte_mbuf *cookie;
141         uint16_t used_idx;
142         uint16_t id;
143         struct vring_packed_desc *desc;
144         uint16_t i;
145
146         desc = vq->vq_packed.ring.desc;
147
148         for (i = 0; i < num; i++) {
149                 used_idx = vq->vq_used_cons_idx;
150                 /* desc_is_used has a load-acquire or rte_io_rmb inside,
151                  * and waits for a used descriptor in the virtqueue.
152                  */
153                 if (!desc_is_used(&desc[used_idx], vq))
154                         return i;
155                 len[i] = desc[used_idx].len;
156                 id = desc[used_idx].id;
157                 cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
158                 if (unlikely(cookie == NULL)) {
159                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
160                                 vq->vq_used_cons_idx);
161                         break;
162                 }
163                 rte_prefetch0(cookie);
164                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
165                 rx_pkts[i] = cookie;
166
167                 vq->vq_free_cnt++;
168                 vq->vq_used_cons_idx++;
169                 if (vq->vq_used_cons_idx >= vq->vq_nentries) {
170                         vq->vq_used_cons_idx -= vq->vq_nentries;
171                         vq->vq_packed.used_wrap_counter ^= 1;
172                 }
173         }
174
175         return i;
176 }
177
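/*
 * Dequeue up to 'num' received buffers from a split virtqueue: read the
 * used ring entries, recover the mbuf cookies and return the descriptor
 * chains to the free list with vq_ring_free_chain().
 */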
178 static uint16_t
179 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
180                            uint32_t *len, uint16_t num)
181 {
182         struct vring_used_elem *uep;
183         struct rte_mbuf *cookie;
184         uint16_t used_idx, desc_idx;
185         uint16_t i;
186
187         /*  Caller does the check */
188         for (i = 0; i < num ; i++) {
189                 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
190                 uep = &vq->vq_split.ring.used->ring[used_idx];
191                 desc_idx = (uint16_t) uep->id;
192                 len[i] = uep->len;
193                 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
194
195                 if (unlikely(cookie == NULL)) {
196                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
197                                 vq->vq_used_cons_idx);
198                         break;
199                 }
200
201                 rte_prefetch0(cookie);
202                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
203                 rx_pkts[i]  = cookie;
204                 vq->vq_used_cons_idx++;
205                 vq_ring_free_chain(vq, desc_idx);
206                 vq->vq_descx[desc_idx].cookie = NULL;
207         }
208
209         return i;
210 }
211
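/*
 * In-order variant of the split-ring dequeue: the descriptor index always
 * equals the used-ring index, so the consumed descriptors are released in
 * one batch with vq_ring_free_inorder() after the loop.
 */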
212 static uint16_t
213 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
214                         struct rte_mbuf **rx_pkts,
215                         uint32_t *len,
216                         uint16_t num)
217 {
218         struct vring_used_elem *uep;
219         struct rte_mbuf *cookie;
220         uint16_t used_idx = 0;
221         uint16_t i;
222
223         if (unlikely(num == 0))
224                 return 0;
225
226         for (i = 0; i < num; i++) {
227                 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
228                 /* Desc idx same as used idx */
229                 uep = &vq->vq_split.ring.used->ring[used_idx];
230                 len[i] = uep->len;
231                 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
232
233                 if (unlikely(cookie == NULL)) {
234                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
235                                 vq->vq_used_cons_idx);
236                         break;
237                 }
238
239                 rte_prefetch0(cookie);
240                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
241                 rx_pkts[i]  = cookie;
242                 vq->vq_used_cons_idx++;
243                 vq->vq_descx[used_idx].cookie = NULL;
244         }
245
246         vq_ring_free_inorder(vq, used_idx, i);
247         return i;
248 }
249
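/*
 * Refill an in-order RX queue with 'num' mbufs, one descriptor per buffer.
 * The buffer address is rewound by the virtio-net header size so the device
 * writes the header into the mbuf headroom, directly in front of the data.
 */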
250 static inline int
251 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
252                         struct rte_mbuf **cookies,
253                         uint16_t num)
254 {
255         struct vq_desc_extra *dxp;
256         struct virtio_hw *hw = vq->hw;
257         struct vring_desc *start_dp;
258         uint16_t head_idx, idx, i = 0;
259
260         if (unlikely(vq->vq_free_cnt == 0))
261                 return -ENOSPC;
262         if (unlikely(vq->vq_free_cnt < num))
263                 return -EMSGSIZE;
264
265         head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
266         start_dp = vq->vq_split.ring.desc;
267
268         while (i < num) {
269                 idx = head_idx & (vq->vq_nentries - 1);
270                 dxp = &vq->vq_descx[idx];
271                 dxp->cookie = (void *)cookies[i];
272                 dxp->ndescs = 1;
273
274                 start_dp[idx].addr = cookies[i]->buf_iova +
275                         RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
276                 start_dp[idx].len = cookies[i]->buf_len -
277                         RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
278                 start_dp[idx].flags =  VRING_DESC_F_WRITE;
279
280                 vq_update_avail_ring(vq, idx);
281                 head_idx++;
282                 i++;
283         }
284
285         vq->vq_desc_head_idx += num;
286         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
287         return 0;
288 }
289
290 static inline int
291 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
292                                 uint16_t num)
293 {
294         struct vq_desc_extra *dxp;
295         struct virtio_hw *hw = vq->hw;
296         struct vring_desc *start_dp = vq->vq_split.ring.desc;
297         uint16_t idx, i;
298
299         if (unlikely(vq->vq_free_cnt == 0))
300                 return -ENOSPC;
301         if (unlikely(vq->vq_free_cnt < num))
302                 return -EMSGSIZE;
303
304         if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
305                 return -EFAULT;
306
307         for (i = 0; i < num; i++) {
308                 idx = vq->vq_desc_head_idx;
309                 dxp = &vq->vq_descx[idx];
310                 dxp->cookie = (void *)cookie[i];
311                 dxp->ndescs = 1;
312
313                 start_dp[idx].addr = cookie[i]->buf_iova +
314                         RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
315                 start_dp[idx].len = cookie[i]->buf_len -
316                         RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
317                 start_dp[idx].flags = VRING_DESC_F_WRITE;
318                 vq->vq_desc_head_idx = start_dp[idx].next;
319                 vq_update_avail_ring(vq, idx);
320                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
321                         vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
322                         break;
323                 }
324         }
325
326         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
327
328         return 0;
329 }
330
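/*
 * Refill a packed RX queue with 'num' mbufs. The descriptor flags are
 * written last, through virtqueue_store_flags_packed(), so the device only
 * sees fully initialised descriptors; the cached AVAIL/USED flags are
 * toggled whenever the avail index wraps around the ring.
 */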
331 static inline int
332 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
333                                      struct rte_mbuf **cookie, uint16_t num)
334 {
335         struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
336         uint16_t flags = vq->vq_packed.cached_flags;
337         struct virtio_hw *hw = vq->hw;
338         struct vq_desc_extra *dxp;
339         uint16_t idx;
340         int i;
341
342         if (unlikely(vq->vq_free_cnt == 0))
343                 return -ENOSPC;
344         if (unlikely(vq->vq_free_cnt < num))
345                 return -EMSGSIZE;
346
347         for (i = 0; i < num; i++) {
348                 idx = vq->vq_avail_idx;
349                 dxp = &vq->vq_descx[idx];
350                 dxp->cookie = (void *)cookie[i];
351                 dxp->ndescs = 1;
352
353                 start_dp[idx].addr = cookie[i]->buf_iova +
354                         RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
355                 start_dp[idx].len = cookie[i]->buf_len -
356                         RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
357
358                 vq->vq_desc_head_idx = dxp->next;
359                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
360                         vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
361
362                 virtqueue_store_flags_packed(&start_dp[idx], flags,
363                                              hw->weak_barriers);
364
365                 if (++vq->vq_avail_idx >= vq->vq_nentries) {
366                         vq->vq_avail_idx -= vq->vq_nentries;
367                         vq->vq_packed.cached_flags ^=
368                                 VRING_PACKED_DESC_F_AVAIL_USED;
369                         flags = vq->vq_packed.cached_flags;
370                 }
371         }
372         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
373         return 0;
374 }
375
376 /* When doing TSO, the IP length is not included in the pseudo header
377  * checksum of the packet given to the PMD, but virtio expects it to
378  * be included.
379  */
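/*
 * In other words: the stack hands over a pseudo header checksum computed
 * without the L4 length, so adding ip_paylen (total_length - l3_len for
 * IPv4, payload_len for IPv6) with one's-complement folding yields the
 * checksum virtio expects.
 */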
380 static void
381 virtio_tso_fix_cksum(struct rte_mbuf *m)
382 {
383         /* common case: header is not fragmented */
384         if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
385                         m->l4_len)) {
386                 struct rte_ipv4_hdr *iph;
387                 struct rte_ipv6_hdr *ip6h;
388                 struct rte_tcp_hdr *th;
389                 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
390                 uint32_t tmp;
391
392                 iph = rte_pktmbuf_mtod_offset(m,
393                                         struct rte_ipv4_hdr *, m->l2_len);
394                 th = RTE_PTR_ADD(iph, m->l3_len);
395                 if ((iph->version_ihl >> 4) == 4) {
396                         iph->hdr_checksum = 0;
397                         iph->hdr_checksum = rte_ipv4_cksum(iph);
398                         ip_len = iph->total_length;
399                         ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
400                                 m->l3_len);
401                 } else {
402                         ip6h = (struct rte_ipv6_hdr *)iph;
403                         ip_paylen = ip6h->payload_len;
404                 }
405
406                 /* calculate the new phdr checksum, adding ip_paylen to it */
407                 prev_cksum = th->cksum;
408                 tmp = prev_cksum;
409                 tmp += ip_paylen;
410                 tmp = (tmp & 0xffff) + (tmp >> 16);
411                 new_cksum = tmp;
412
413                 /* replace it in the packet */
414                 th->cksum = new_cksum;
415         }
416 }
417
418
419
420
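/*
 * Enqueue 'num' single-segment packets on an in-order TX queue. The
 * virtio-net header is prepended in the mbuf headroom, so each packet
 * consumes exactly one descriptor.
 */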
421 static inline void
422 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
423                         struct rte_mbuf **cookies,
424                         uint16_t num)
425 {
426         struct vq_desc_extra *dxp;
427         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
428         struct vring_desc *start_dp;
429         struct virtio_net_hdr *hdr;
430         uint16_t idx;
431         int16_t head_size = vq->hw->vtnet_hdr_size;
432         uint16_t i = 0;
433
434         idx = vq->vq_desc_head_idx;
435         start_dp = vq->vq_split.ring.desc;
436
437         while (i < num) {
438                 idx = idx & (vq->vq_nentries - 1);
439                 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
440                 dxp->cookie = (void *)cookies[i];
441                 dxp->ndescs = 1;
442                 virtio_update_packet_stats(&txvq->stats, cookies[i]);
443
444                 hdr = rte_pktmbuf_mtod_offset(cookies[i],
445                                 struct virtio_net_hdr *, -head_size);
446
447                 /* if offload disabled, hdr is not zeroed yet, do it now */
448                 if (!vq->hw->has_tx_offload)
449                         virtqueue_clear_net_hdr(hdr);
450                 else
451                         virtqueue_xmit_offload(hdr, cookies[i], true);
452
453                 start_dp[idx].addr  = rte_mbuf_data_iova(cookies[i]) - head_size;
454                 start_dp[idx].len   = cookies[i]->data_len + head_size;
455                 start_dp[idx].flags = 0;
456
457
458                 vq_update_avail_ring(vq, idx);
459
460                 idx++;
461                 i++;
462         }
463
464         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
465         vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
466 }
467
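/*
 * Fast TX path for a single-segment packet on a packed queue, with the
 * virtio-net header prepended in the mbuf headroom (one descriptor total).
 * The descriptor flags are stored last so the device never sees a
 * half-written descriptor.
 */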
468 static inline void
469 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
470                                    struct rte_mbuf *cookie,
471                                    int in_order)
472 {
473         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
474         struct vring_packed_desc *dp;
475         struct vq_desc_extra *dxp;
476         uint16_t idx, id, flags;
477         int16_t head_size = vq->hw->vtnet_hdr_size;
478         struct virtio_net_hdr *hdr;
479
480         id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
481         idx = vq->vq_avail_idx;
482         dp = &vq->vq_packed.ring.desc[idx];
483
484         dxp = &vq->vq_descx[id];
485         dxp->ndescs = 1;
486         dxp->cookie = cookie;
487
488         flags = vq->vq_packed.cached_flags;
489
490         /* prepend cannot fail, checked by caller */
491         hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
492                                       -head_size);
493
494         /* if offload disabled, hdr is not zeroed yet, do it now */
495         if (!vq->hw->has_tx_offload)
496                 virtqueue_clear_net_hdr(hdr);
497         else
498                 virtqueue_xmit_offload(hdr, cookie, true);
499
500         dp->addr = rte_mbuf_data_iova(cookie) - head_size;
501         dp->len  = cookie->data_len + head_size;
502         dp->id   = id;
503
504         if (++vq->vq_avail_idx >= vq->vq_nentries) {
505                 vq->vq_avail_idx -= vq->vq_nentries;
506                 vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
507         }
508
509         vq->vq_free_cnt--;
510
511         if (!in_order) {
512                 vq->vq_desc_head_idx = dxp->next;
513                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
514                         vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
515         }
516
517         virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
518 }
519
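/*
 * General split-ring TX enqueue. The virtio-net header is placed in one of
 * three ways: pushed into the mbuf headroom (can_push), stored in the
 * reserved tx region together with an indirect descriptor table
 * (use_indirect), or described by a separate first descriptor chained to
 * the data buffers.
 */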
520 static inline void
521 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
522                         uint16_t needed, int use_indirect, int can_push,
523                         int in_order)
524 {
525         struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
526         struct vq_desc_extra *dxp;
527         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
528         struct vring_desc *start_dp;
529         uint16_t seg_num = cookie->nb_segs;
530         uint16_t head_idx, idx;
531         int16_t head_size = vq->hw->vtnet_hdr_size;
532         bool prepend_header = false;
533         struct virtio_net_hdr *hdr;
534
535         head_idx = vq->vq_desc_head_idx;
536         idx = head_idx;
537         if (in_order)
538                 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
539         else
540                 dxp = &vq->vq_descx[idx];
541         dxp->cookie = (void *)cookie;
542         dxp->ndescs = needed;
543
544         start_dp = vq->vq_split.ring.desc;
545
546         if (can_push) {
547                 /* prepend cannot fail, checked by caller */
548                 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
549                                               -head_size);
550                 prepend_header = true;
551
552                 /* if offload disabled, it is not zeroed below, do it now */
553                 if (!vq->hw->has_tx_offload)
554                         virtqueue_clear_net_hdr(hdr);
555         } else if (use_indirect) {
556                 /* setup tx ring slot to point to indirect
557                  * descriptor list stored in reserved region.
558                  *
559                  * the first slot in indirect ring is already preset
560                  * to point to the header in reserved region
561                  */
562                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
563                         RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
564                 start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
565                 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
566                 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
567
568                 /* loop below will fill in rest of the indirect elements */
569                 start_dp = txr[idx].tx_indir;
570                 idx = 1;
571         } else {
572                 /* setup first tx ring slot to point to header
573                  * stored in reserved region.
574                  */
575                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
576                         RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
577                 start_dp[idx].len   = vq->hw->vtnet_hdr_size;
578                 start_dp[idx].flags = VRING_DESC_F_NEXT;
579                 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
580
581                 idx = start_dp[idx].next;
582         }
583
584         virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
585
586         do {
587                 start_dp[idx].addr  = rte_mbuf_data_iova(cookie);
588                 start_dp[idx].len   = cookie->data_len;
589                 if (prepend_header) {
590                         start_dp[idx].addr -= head_size;
591                         start_dp[idx].len += head_size;
592                         prepend_header = false;
593                 }
594                 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
595                 idx = start_dp[idx].next;
596         } while ((cookie = cookie->next) != NULL);
597
598         if (use_indirect)
599                 idx = vq->vq_split.ring.desc[head_idx].next;
600
601         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
602
603         vq->vq_desc_head_idx = idx;
604         vq_update_avail_ring(vq, head_idx);
605
606         if (!in_order) {
607                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
608                         vq->vq_desc_tail_idx = idx;
609         }
610 }
611
612 void
613 virtio_dev_cq_start(struct rte_eth_dev *dev)
614 {
615         struct virtio_hw *hw = dev->data->dev_private;
616
617         if (hw->cvq) {
618                 rte_spinlock_init(&hw->cvq->lock);
619                 VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
620         }
621 }
622
623 int
624 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
625                         uint16_t queue_idx,
626                         uint16_t nb_desc,
627                         unsigned int socket_id __rte_unused,
628                         const struct rte_eth_rxconf *rx_conf,
629                         struct rte_mempool *mp)
630 {
631         uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
632         struct virtio_hw *hw = dev->data->dev_private;
633         struct virtqueue *vq = hw->vqs[vq_idx];
634         struct virtnet_rx *rxvq;
635         uint16_t rx_free_thresh;
636
637         PMD_INIT_FUNC_TRACE();
638
639         if (rx_conf->rx_deferred_start) {
640                 PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
641                 return -EINVAL;
642         }
643
644         rx_free_thresh = rx_conf->rx_free_thresh;
645         if (rx_free_thresh == 0)
646                 rx_free_thresh =
647                         RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
648
649         if (rx_free_thresh & 0x3) {
650                 RTE_LOG(ERR, PMD, "rx_free_thresh must be a multiple of four."
651                         " (rx_free_thresh=%u port=%u queue=%u)\n",
652                         rx_free_thresh, dev->data->port_id, queue_idx);
653                 return -EINVAL;
654         }
655
656         if (rx_free_thresh >= vq->vq_nentries) {
657                 RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
658                         "number of RX entries (%u)."
659                         " (rx_free_thresh=%u port=%u queue=%u)\n",
660                         vq->vq_nentries,
661                         rx_free_thresh, dev->data->port_id, queue_idx);
662                 return -EINVAL;
663         }
664         vq->vq_free_thresh = rx_free_thresh;
665
666         if (nb_desc == 0 || nb_desc > vq->vq_nentries)
667                 nb_desc = vq->vq_nentries;
668         vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
669
670         rxvq = &vq->rxq;
671         rxvq->queue_id = queue_idx;
672         rxvq->mpool = mp;
673         dev->data->rx_queues[queue_idx] = rxvq;
674
675         return 0;
676 }
677
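/*
 * Second stage of RX queue setup, run at device start: populate the ring
 * with receive mbufs through the vectorized, in-order or default refill
 * path, depending on the negotiated features and selected datapath.
 */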
678 int
679 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
680 {
681         uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
682         struct virtio_hw *hw = dev->data->dev_private;
683         struct virtqueue *vq = hw->vqs[vq_idx];
684         struct virtnet_rx *rxvq = &vq->rxq;
685         struct rte_mbuf *m;
686         uint16_t desc_idx;
687         int error, nbufs, i;
688         bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
689
690         PMD_INIT_FUNC_TRACE();
691
692         /* Allocate blank mbufs for each rx descriptor */
693         nbufs = 0;
694
695         if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
696                 for (desc_idx = 0; desc_idx < vq->vq_nentries;
697                      desc_idx++) {
698                         vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
699                         vq->vq_split.ring.desc[desc_idx].flags =
700                                 VRING_DESC_F_WRITE;
701                 }
702
703                 virtio_rxq_vec_setup(rxvq);
704         }
705
706         memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
707         for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
708              desc_idx++) {
709                 vq->sw_ring[vq->vq_nentries + desc_idx] =
710                         &rxvq->fake_mbuf;
711         }
712
713         if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
714                 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
715                         virtio_rxq_rearm_vec(rxvq);
716                         nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
717                 }
718         } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
719                 if (!virtqueue_full(vq)) {
720                         uint16_t free_cnt = vq->vq_free_cnt;
721                         struct rte_mbuf *pkts[free_cnt];
722
723                         if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
724                                 free_cnt)) {
725                                 error = virtqueue_enqueue_refill_inorder(vq,
726                                                 pkts,
727                                                 free_cnt);
728                                 if (unlikely(error)) {
729                                         for (i = 0; i < free_cnt; i++)
730                                                 rte_pktmbuf_free(pkts[i]);
731                                 }
732                         }
733
734                         nbufs += free_cnt;
735                         vq_update_avail_idx(vq);
736                 }
737         } else {
738                 while (!virtqueue_full(vq)) {
739                         m = rte_mbuf_raw_alloc(rxvq->mpool);
740                         if (m == NULL)
741                                 break;
742
743                         /* Enqueue allocated buffers */
744                         if (virtio_with_packed_queue(vq->hw))
745                                 error = virtqueue_enqueue_recv_refill_packed(vq,
746                                                 &m, 1);
747                         else
748                                 error = virtqueue_enqueue_recv_refill(vq,
749                                                 &m, 1);
750                         if (error) {
751                                 rte_pktmbuf_free(m);
752                                 break;
753                         }
754                         nbufs++;
755                 }
756
757                 if (!virtio_with_packed_queue(vq->hw))
758                         vq_update_avail_idx(vq);
759         }
760
761         PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
762
763         VIRTQUEUE_DUMP(vq);
764
765         return 0;
766 }
767
768 /*
769  * struct rte_eth_dev *dev: Used to update dev
770  * uint16_t nb_desc: Defaults to values read from config space
771  * unsigned int socket_id: Used to allocate memzone
772  * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
773  * uint16_t queue_idx: Just used as an index in dev txq list
774  */
775 int
776 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
777                         uint16_t queue_idx,
778                         uint16_t nb_desc,
779                         unsigned int socket_id __rte_unused,
780                         const struct rte_eth_txconf *tx_conf)
781 {
782         uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
783         struct virtio_hw *hw = dev->data->dev_private;
784         struct virtqueue *vq = hw->vqs[vq_idx];
785         struct virtnet_tx *txvq;
786         uint16_t tx_free_thresh;
787
788         PMD_INIT_FUNC_TRACE();
789
790         if (tx_conf->tx_deferred_start) {
791                 PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
792                 return -EINVAL;
793         }
794
795         if (nb_desc == 0 || nb_desc > vq->vq_nentries)
796                 nb_desc = vq->vq_nentries;
797         vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
798
799         txvq = &vq->txq;
800         txvq->queue_id = queue_idx;
801
802         tx_free_thresh = tx_conf->tx_free_thresh;
803         if (tx_free_thresh == 0)
804                 tx_free_thresh =
805                         RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
806
807         if (tx_free_thresh >= (vq->vq_nentries - 3)) {
808                 PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
809                         "number of TX entries minus 3 (%u)."
810                         " (tx_free_thresh=%u port=%u queue=%u)\n",
811                         vq->vq_nentries - 3,
812                         tx_free_thresh, dev->data->port_id, queue_idx);
813                 return -EINVAL;
814         }
815
816         vq->vq_free_thresh = tx_free_thresh;
817
818         dev->data->tx_queues[queue_idx] = txvq;
819         return 0;
820 }
821
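/*
 * Second stage of TX queue setup, run at device start: for in-order split
 * rings, the last descriptor is linked back to index 0 so the descriptor
 * list forms a closed chain.
 */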
822 int
823 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
824                                 uint16_t queue_idx)
825 {
826         uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
827         struct virtio_hw *hw = dev->data->dev_private;
828         struct virtqueue *vq = hw->vqs[vq_idx];
829
830         PMD_INIT_FUNC_TRACE();
831
832         if (!virtio_with_packed_queue(hw)) {
833                 if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
834                         vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
835         }
836
837         VIRTQUEUE_DUMP(vq);
838
839         return 0;
840 }
841
842 static inline void
843 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
844 {
845         int error;
846         /*
847          * Requeue the discarded mbuf. This should always be
848          * successful since it was just dequeued.
849          */
850         if (virtio_with_packed_queue(vq->hw))
851                 error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
852         else
853                 error = virtqueue_enqueue_recv_refill(vq, &m, 1);
854
855         if (unlikely(error)) {
856                 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
857                 rte_pktmbuf_free(m);
858         }
859 }
860
861 static inline void
862 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
863 {
864         int error;
865
866         error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
867         if (unlikely(error)) {
868                 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
869                 rte_pktmbuf_free(m);
870         }
871 }
872
873 /* Optionally fill offload information into the mbuf from the virtio net header */
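/*
 * VIRTIO_NET_HDR_F_NEEDS_CSUM means the packet carries only a partial
 * checksum: csum_start/csum_offset locate the 16-bit checksum field. For
 * recognised L4 protocols this is reported as PKT_RX_L4_CKSUM_NONE,
 * otherwise the checksum is completed in software below.
 */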
874 static inline int
875 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
876 {
877         struct rte_net_hdr_lens hdr_lens;
878         uint32_t hdrlen, ptype;
879         int l4_supported = 0;
880
881         /* nothing to do */
882         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
883                 return 0;
884
885         m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
886
887         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
888         m->packet_type = ptype;
889         if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
890             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
891             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
892                 l4_supported = 1;
893
894         if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
895                 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
896                 if (hdr->csum_start <= hdrlen && l4_supported) {
897                         m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
898                 } else {
899                         /* Unknown proto or tunnel, do sw cksum. We can assume
900                          * the cksum field is in the first segment since the
901                          * buffers we provided to the host are large enough.
902                          * In case of SCTP, this will be wrong since it's a CRC
903                          * but there's nothing we can do.
904                          */
905                         uint16_t csum = 0, off;
906
907                         if (rte_raw_cksum_mbuf(m, hdr->csum_start,
908                                 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
909                                 &csum) < 0)
910                                 return -EINVAL;
911                         if (likely(csum != 0xffff))
912                                 csum = ~csum;
913                         off = hdr->csum_offset + hdr->csum_start;
914                         if (rte_pktmbuf_data_len(m) >= off + 1)
915                                 *rte_pktmbuf_mtod_offset(m, uint16_t *,
916                                         off) = csum;
917                 }
918         } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
919                 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
920         }
921
922         /* GSO request, save required information in mbuf */
923         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
924                 /* Check unsupported modes */
925                 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
926                     (hdr->gso_size == 0)) {
927                         return -EINVAL;
928                 }
929
930                 /* Update mss lengths in mbuf */
931                 m->tso_segsz = hdr->gso_size;
932                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
933                         case VIRTIO_NET_HDR_GSO_TCPV4:
934                         case VIRTIO_NET_HDR_GSO_TCPV6:
935                                 m->ol_flags |= PKT_RX_LRO | \
936                                         PKT_RX_L4_CKSUM_NONE;
937                                 break;
938                         default:
939                                 return -EINVAL;
940                 }
941         }
942
943         return 0;
944 }
945
946 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
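/*
 * The receive bursts below are trimmed so that a dequeue stops on a
 * descriptor cache-line boundary, presumably to avoid splitting a cache
 * line of descriptors across two bursts.
 */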
947 uint16_t
948 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
949 {
950         struct virtnet_rx *rxvq = rx_queue;
951         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
952         struct virtio_hw *hw = vq->hw;
953         struct rte_mbuf *rxm;
954         uint16_t nb_used, num, nb_rx;
955         uint32_t len[VIRTIO_MBUF_BURST_SZ];
956         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
957         int error;
958         uint32_t i, nb_enqueued;
959         uint32_t hdr_size;
960         struct virtio_net_hdr *hdr;
961
962         nb_rx = 0;
963         if (unlikely(hw->started == 0))
964                 return nb_rx;
965
966         nb_used = virtqueue_nused(vq);
967
968         num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
969         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
970                 num = VIRTIO_MBUF_BURST_SZ;
971         if (likely(num > DESC_PER_CACHELINE))
972                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
973
974         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
975         PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
976
977         nb_enqueued = 0;
978         hdr_size = hw->vtnet_hdr_size;
979
980         for (i = 0; i < num ; i++) {
981                 rxm = rcv_pkts[i];
982
983                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
984
985                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
986                         PMD_RX_LOG(ERR, "Packet drop");
987                         nb_enqueued++;
988                         virtio_discard_rxbuf(vq, rxm);
989                         rxvq->stats.errors++;
990                         continue;
991                 }
992
993                 rxm->port = rxvq->port_id;
994                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
995                 rxm->ol_flags = 0;
996                 rxm->vlan_tci = 0;
997
998                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
999                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1000
1001                 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1002                         RTE_PKTMBUF_HEADROOM - hdr_size);
1003
1004                 if (hw->vlan_strip)
1005                         rte_vlan_strip(rxm);
1006
1007                 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1008                         virtio_discard_rxbuf(vq, rxm);
1009                         rxvq->stats.errors++;
1010                         continue;
1011                 }
1012
1013                 virtio_rx_stats_updated(rxvq, rxm);
1014
1015                 rx_pkts[nb_rx++] = rxm;
1016         }
1017
1018         rxvq->stats.packets += nb_rx;
1019
1020         /* Allocate new mbufs for the used descriptors */
1021         if (likely(!virtqueue_full(vq))) {
1022                 uint16_t free_cnt = vq->vq_free_cnt;
1023                 struct rte_mbuf *new_pkts[free_cnt];
1024
1025                 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1026                                                 free_cnt) == 0)) {
1027                         error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1028                                         free_cnt);
1029                         if (unlikely(error)) {
1030                                 for (i = 0; i < free_cnt; i++)
1031                                         rte_pktmbuf_free(new_pkts[i]);
1032                         }
1033                         nb_enqueued += free_cnt;
1034                 } else {
1035                         struct rte_eth_dev *dev =
1036                                 &rte_eth_devices[rxvq->port_id];
1037                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1038                 }
1039         }
1040
1041         if (likely(nb_enqueued)) {
1042                 vq_update_avail_idx(vq);
1043
1044                 if (unlikely(virtqueue_kick_prepare(vq))) {
1045                         virtqueue_notify(vq);
1046                         PMD_RX_LOG(DEBUG, "Notified");
1047                 }
1048         }
1049
1050         return nb_rx;
1051 }
1052
1053 uint16_t
1054 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1055                         uint16_t nb_pkts)
1056 {
1057         struct virtnet_rx *rxvq = rx_queue;
1058         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1059         struct virtio_hw *hw = vq->hw;
1060         struct rte_mbuf *rxm;
1061         uint16_t num, nb_rx;
1062         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1063         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1064         int error;
1065         uint32_t i, nb_enqueued;
1066         uint32_t hdr_size;
1067         struct virtio_net_hdr *hdr;
1068
1069         nb_rx = 0;
1070         if (unlikely(hw->started == 0))
1071                 return nb_rx;
1072
1073         num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1074         if (likely(num > DESC_PER_CACHELINE))
1075                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1076
1077         num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1078         PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1079
1080         nb_enqueued = 0;
1081         hdr_size = hw->vtnet_hdr_size;
1082
1083         for (i = 0; i < num; i++) {
1084                 rxm = rcv_pkts[i];
1085
1086                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1087
1088                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1089                         PMD_RX_LOG(ERR, "Packet drop");
1090                         nb_enqueued++;
1091                         virtio_discard_rxbuf(vq, rxm);
1092                         rxvq->stats.errors++;
1093                         continue;
1094                 }
1095
1096                 rxm->port = rxvq->port_id;
1097                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1098                 rxm->ol_flags = 0;
1099                 rxm->vlan_tci = 0;
1100
1101                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1102                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1103
1104                 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1105                         RTE_PKTMBUF_HEADROOM - hdr_size);
1106
1107                 if (hw->vlan_strip)
1108                         rte_vlan_strip(rxm);
1109
1110                 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1111                         virtio_discard_rxbuf(vq, rxm);
1112                         rxvq->stats.errors++;
1113                         continue;
1114                 }
1115
1116                 virtio_rx_stats_updated(rxvq, rxm);
1117
1118                 rx_pkts[nb_rx++] = rxm;
1119         }
1120
1121         rxvq->stats.packets += nb_rx;
1122
1123         /* Allocate new mbufs for the used descriptors */
1124         if (likely(!virtqueue_full(vq))) {
1125                 uint16_t free_cnt = vq->vq_free_cnt;
1126                 struct rte_mbuf *new_pkts[free_cnt];
1127
1128                 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1129                                                 free_cnt) == 0)) {
1130                         error = virtqueue_enqueue_recv_refill_packed(vq,
1131                                         new_pkts, free_cnt);
1132                         if (unlikely(error)) {
1133                                 for (i = 0; i < free_cnt; i++)
1134                                         rte_pktmbuf_free(new_pkts[i]);
1135                         }
1136                         nb_enqueued += free_cnt;
1137                 } else {
1138                         struct rte_eth_dev *dev =
1139                                 &rte_eth_devices[rxvq->port_id];
1140                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1141                 }
1142         }
1143
1144         if (likely(nb_enqueued)) {
1145                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1146                         virtqueue_notify(vq);
1147                         PMD_RX_LOG(DEBUG, "Notified");
1148                 }
1149         }
1150
1151         return nb_rx;
1152 }
1153
1154
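/*
 * In-order RX burst: mergeable buffers are reassembled by chaining the
 * extra segments onto the head mbuf, and the ring is refilled in bulk with
 * virtqueue_enqueue_refill_inorder().
 */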
1155 uint16_t
1156 virtio_recv_pkts_inorder(void *rx_queue,
1157                         struct rte_mbuf **rx_pkts,
1158                         uint16_t nb_pkts)
1159 {
1160         struct virtnet_rx *rxvq = rx_queue;
1161         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1162         struct virtio_hw *hw = vq->hw;
1163         struct rte_mbuf *rxm;
1164         struct rte_mbuf *prev = NULL;
1165         uint16_t nb_used, num, nb_rx;
1166         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1167         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1168         int error;
1169         uint32_t nb_enqueued;
1170         uint32_t seg_num;
1171         uint32_t seg_res;
1172         uint32_t hdr_size;
1173         int32_t i;
1174
1175         nb_rx = 0;
1176         if (unlikely(hw->started == 0))
1177                 return nb_rx;
1178
1179         nb_used = virtqueue_nused(vq);
1180         nb_used = RTE_MIN(nb_used, nb_pkts);
1181         nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1182
1183         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1184
1185         nb_enqueued = 0;
1186         seg_num = 1;
1187         seg_res = 0;
1188         hdr_size = hw->vtnet_hdr_size;
1189
1190         num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1191
1192         for (i = 0; i < num; i++) {
1193                 struct virtio_net_hdr_mrg_rxbuf *header;
1194
1195                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1196                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1197
1198                 rxm = rcv_pkts[i];
1199
1200                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1201                         PMD_RX_LOG(ERR, "Packet drop");
1202                         nb_enqueued++;
1203                         virtio_discard_rxbuf_inorder(vq, rxm);
1204                         rxvq->stats.errors++;
1205                         continue;
1206                 }
1207
1208                 header = (struct virtio_net_hdr_mrg_rxbuf *)
1209                          ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1210                          - hdr_size);
1211
1212                 if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1213                         seg_num = header->num_buffers;
1214                         if (seg_num == 0)
1215                                 seg_num = 1;
1216                 } else {
1217                         seg_num = 1;
1218                 }
1219
1220                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1221                 rxm->nb_segs = seg_num;
1222                 rxm->ol_flags = 0;
1223                 rxm->vlan_tci = 0;
1224                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1225                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1226
1227                 rxm->port = rxvq->port_id;
1228
1229                 rx_pkts[nb_rx] = rxm;
1230                 prev = rxm;
1231
1232                 if (vq->hw->has_rx_offload &&
1233                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1234                         virtio_discard_rxbuf_inorder(vq, rxm);
1235                         rxvq->stats.errors++;
1236                         continue;
1237                 }
1238
1239                 if (hw->vlan_strip)
1240                         rte_vlan_strip(rx_pkts[nb_rx]);
1241
1242                 seg_res = seg_num - 1;
1243
1244                 /* Merge remaining segments */
1245                 while (seg_res != 0 && i < (num - 1)) {
1246                         i++;
1247
1248                         rxm = rcv_pkts[i];
1249                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1250                         rxm->pkt_len = (uint32_t)(len[i]);
1251                         rxm->data_len = (uint16_t)(len[i]);
1252
1253                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1254
1255                         prev->next = rxm;
1256                         prev = rxm;
1257                         seg_res -= 1;
1258                 }
1259
1260                 if (!seg_res) {
1261                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1262                         nb_rx++;
1263                 }
1264         }
1265
1266         /* The last packet may still need its segments merged */
1267         while (seg_res != 0) {
1268                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1269                                         VIRTIO_MBUF_BURST_SZ);
1270
1271                 if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1272                         num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1273                                                            rcv_cnt);
1274                         uint16_t extra_idx = 0;
1275
1276                         rcv_cnt = num;
1277                         while (extra_idx < rcv_cnt) {
1278                                 rxm = rcv_pkts[extra_idx];
1279                                 rxm->data_off =
1280                                         RTE_PKTMBUF_HEADROOM - hdr_size;
1281                                 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1282                                 rxm->data_len = (uint16_t)(len[extra_idx]);
1283                                 prev->next = rxm;
1284                                 prev = rxm;
1285                                 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1286                                 extra_idx += 1;
1287                         }
1288                         seg_res -= rcv_cnt;
1289
1290                         if (!seg_res) {
1291                                 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1292                                 nb_rx++;
1293                         }
1294                 } else {
1295                         PMD_RX_LOG(ERR,
1296                                         "Not enough segments for packet.");
1297                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1298                         rxvq->stats.errors++;
1299                         break;
1300                 }
1301         }
1302
1303         rxvq->stats.packets += nb_rx;
1304
1305         /* Allocate new mbufs for the used descriptors */
1306
1307         if (likely(!virtqueue_full(vq))) {
1308                 /* free_cnt may include mrg descs */
1309                 uint16_t free_cnt = vq->vq_free_cnt;
1310                 struct rte_mbuf *new_pkts[free_cnt];
1311
1312                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1313                         error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1314                                         free_cnt);
1315                         if (unlikely(error)) {
1316                                 for (i = 0; i < free_cnt; i++)
1317                                         rte_pktmbuf_free(new_pkts[i]);
1318                         }
1319                         nb_enqueued += free_cnt;
1320                 } else {
1321                         struct rte_eth_dev *dev =
1322                                 &rte_eth_devices[rxvq->port_id];
1323                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1324                 }
1325         }
1326
1327         if (likely(nb_enqueued)) {
1328                 vq_update_avail_idx(vq);
1329
1330                 if (unlikely(virtqueue_kick_prepare(vq))) {
1331                         virtqueue_notify(vq);
1332                         PMD_RX_LOG(DEBUG, "Notified");
1333                 }
1334         }
1335
1336         return nb_rx;
1337 }
1338
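/*
 * Mergeable RX burst for split rings: the num_buffers field of the first
 * buffer's header gives the number of descriptors used for the packet, and
 * the remaining segments are chained onto the head mbuf.
 */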
1339 uint16_t
1340 virtio_recv_mergeable_pkts(void *rx_queue,
1341                         struct rte_mbuf **rx_pkts,
1342                         uint16_t nb_pkts)
1343 {
1344         struct virtnet_rx *rxvq = rx_queue;
1345         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1346         struct virtio_hw *hw = vq->hw;
1347         struct rte_mbuf *rxm;
1348         struct rte_mbuf *prev = NULL;
1349         uint16_t nb_used, num, nb_rx = 0;
1350         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1351         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1352         int error;
1353         uint32_t nb_enqueued = 0;
1354         uint32_t seg_num = 0;
1355         uint32_t seg_res = 0;
1356         uint32_t hdr_size = hw->vtnet_hdr_size;
1357         int32_t i;
1358
1359         if (unlikely(hw->started == 0))
1360                 return nb_rx;
1361
1362         nb_used = virtqueue_nused(vq);
1363
1364         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1365
1366         num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1367         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1368                 num = VIRTIO_MBUF_BURST_SZ;
1369         if (likely(num > DESC_PER_CACHELINE))
1370                 num = num - ((vq->vq_used_cons_idx + num) %
1371                                 DESC_PER_CACHELINE);
1372
1373
1374         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1375
1376         for (i = 0; i < num; i++) {
1377                 struct virtio_net_hdr_mrg_rxbuf *header;
1378
1379                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1380                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1381
1382                 rxm = rcv_pkts[i];
1383
1384                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1385                         PMD_RX_LOG(ERR, "Packet drop");
1386                         nb_enqueued++;
1387                         virtio_discard_rxbuf(vq, rxm);
1388                         rxvq->stats.errors++;
1389                         continue;
1390                 }
1391
1392                 header = (struct virtio_net_hdr_mrg_rxbuf *)
1393                          ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1394                          - hdr_size);
1395                 seg_num = header->num_buffers;
1396                 if (seg_num == 0)
1397                         seg_num = 1;
1398
1399                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1400                 rxm->nb_segs = seg_num;
1401                 rxm->ol_flags = 0;
1402                 rxm->vlan_tci = 0;
1403                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1404                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1405
1406                 rxm->port = rxvq->port_id;
1407
1408                 rx_pkts[nb_rx] = rxm;
1409                 prev = rxm;
1410
1411                 if (hw->has_rx_offload &&
1412                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1413                         virtio_discard_rxbuf(vq, rxm);
1414                         rxvq->stats.errors++;
1415                         continue;
1416                 }
1417
1418                 if (hw->vlan_strip)
1419                         rte_vlan_strip(rx_pkts[nb_rx]);
1420
1421                 seg_res = seg_num - 1;
1422
1423                 /* Merge remaining segments */
1424                 while (seg_res != 0 && i < (num - 1)) {
1425                         i++;
1426
1427                         rxm = rcv_pkts[i];
1428                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1429                         rxm->pkt_len = (uint32_t)(len[i]);
1430                         rxm->data_len = (uint16_t)(len[i]);
1431
1432                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1433
1434                         prev->next = rxm;
1435                         prev = rxm;
1436                         seg_res -= 1;
1437                 }
1438
1439                 if (!seg_res) {
1440                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1441                         nb_rx++;
1442                 }
1443         }
1444
1445         /* The last packet may still need its remaining segments merged */
1446         while (seg_res != 0) {
1447                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1448                                         VIRTIO_MBUF_BURST_SZ);
1449
1450                 if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1451                         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1452                                                            rcv_cnt);
1453                         uint16_t extra_idx = 0;
1454
1455                         rcv_cnt = num;
1456                         while (extra_idx < rcv_cnt) {
1457                                 rxm = rcv_pkts[extra_idx];
1458                                 rxm->data_off =
1459                                         RTE_PKTMBUF_HEADROOM - hdr_size;
1460                                 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1461                                 rxm->data_len = (uint16_t)(len[extra_idx]);
1462                                 prev->next = rxm;
1463                                 prev = rxm;
1464                                 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1465                                 extra_idx += 1;
1466                         }
1467                         seg_res -= rcv_cnt;
1468
1469                         if (!seg_res) {
1470                                 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1471                                 nb_rx++;
1472                         }
1473                 } else {
1474                         PMD_RX_LOG(ERR,
1475                                         "Not enough segments for packet.");
1476                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1477                         rxvq->stats.errors++;
1478                         break;
1479                 }
1480         }
1481
1482         rxvq->stats.packets += nb_rx;
1483
1484         /* Allocate new mbufs for the used descriptors */
1485         if (likely(!virtqueue_full(vq))) {
1486                 /* free_cnt may include mrg descs */
1487                 uint16_t free_cnt = vq->vq_free_cnt;
1488                 struct rte_mbuf *new_pkts[free_cnt];
1489
1490                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1491                         error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1492                                         free_cnt);
1493                         if (unlikely(error)) {
1494                                 for (i = 0; i < free_cnt; i++)
1495                                         rte_pktmbuf_free(new_pkts[i]);
1496                         }
1497                         nb_enqueued += free_cnt;
1498                 } else {
1499                         struct rte_eth_dev *dev =
1500                                 &rte_eth_devices[rxvq->port_id];
1501                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1502                 }
1503         }
1504
1505         if (likely(nb_enqueued)) {
1506                 vq_update_avail_idx(vq);
1507
1508                 if (unlikely(virtqueue_kick_prepare(vq))) {
1509                         virtqueue_notify(vq);
1510                         PMD_RX_LOG(DEBUG, "Notified");
1511                 }
1512         }
1513
1514         return nb_rx;
1515 }
1516
1517 uint16_t
1518 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1519                         struct rte_mbuf **rx_pkts,
1520                         uint16_t nb_pkts)
1521 {
1522         struct virtnet_rx *rxvq = rx_queue;
1523         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1524         struct virtio_hw *hw = vq->hw;
1525         struct rte_mbuf *rxm;
1526         struct rte_mbuf *prev = NULL;
1527         uint16_t num, nb_rx = 0;
1528         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1529         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1530         uint32_t nb_enqueued = 0;
1531         uint32_t seg_num = 0;
1532         uint32_t seg_res = 0;
1533         uint32_t hdr_size = hw->vtnet_hdr_size;
1534         int32_t i;
1535         int error;
1536
1537         if (unlikely(hw->started == 0))
1538                 return nb_rx;
1539
1540
1541         num = nb_pkts;
1542         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1543                 num = VIRTIO_MBUF_BURST_SZ;
1544         if (likely(num > DESC_PER_CACHELINE))
1545                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1546
1547         num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1548
1549         for (i = 0; i < num; i++) {
1550                 struct virtio_net_hdr_mrg_rxbuf *header;
1551
1552                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1553                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1554
1555                 rxm = rcv_pkts[i];
1556
1557                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1558                         PMD_RX_LOG(ERR, "Packet drop");
1559                         nb_enqueued++;
1560                         virtio_discard_rxbuf(vq, rxm);
1561                         rxvq->stats.errors++;
1562                         continue;
1563                 }
1564
1565                 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1566                           rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1567                 seg_num = header->num_buffers;
1568
1569                 if (seg_num == 0)
1570                         seg_num = 1;
1571
1572                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1573                 rxm->nb_segs = seg_num;
1574                 rxm->ol_flags = 0;
1575                 rxm->vlan_tci = 0;
1576                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1577                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1578
1579                 rxm->port = rxvq->port_id;
1580                 rx_pkts[nb_rx] = rxm;
1581                 prev = rxm;
1582
1583                 if (hw->has_rx_offload &&
1584                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1585                         virtio_discard_rxbuf(vq, rxm);
1586                         rxvq->stats.errors++;
1587                         continue;
1588                 }
1589
1590                 if (hw->vlan_strip)
1591                         rte_vlan_strip(rx_pkts[nb_rx]);
1592
1593                 seg_res = seg_num - 1;
1594
1595                 /* Merge remaining segments */
1596                 while (seg_res != 0 && i < (num - 1)) {
1597                         i++;
1598
1599                         rxm = rcv_pkts[i];
1600                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1601                         rxm->pkt_len = (uint32_t)(len[i]);
1602                         rxm->data_len = (uint16_t)(len[i]);
1603
1604                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1605
1606                         prev->next = rxm;
1607                         prev = rxm;
1608                         seg_res -= 1;
1609                 }
1610
1611                 if (!seg_res) {
1612                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1613                         nb_rx++;
1614                 }
1615         }
1616
1617         /* The last packet may still need its remaining segments merged */
1618         while (seg_res != 0) {
1619                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1620                                         VIRTIO_MBUF_BURST_SZ);
1621                 uint16_t extra_idx = 0;
1622
1623                 rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1624                                 len, rcv_cnt);
1625                 if (unlikely(rcv_cnt == 0)) {
1626                         PMD_RX_LOG(ERR, "Not enough segments for packet.");
1627                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1628                         rxvq->stats.errors++;
1629                         break;
1630                 }
1631
1632                 while (extra_idx < rcv_cnt) {
1633                         rxm = rcv_pkts[extra_idx];
1634
1635                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1636                         rxm->pkt_len = (uint32_t)(len[extra_idx]);
1637                         rxm->data_len = (uint16_t)(len[extra_idx]);
1638
1639                         prev->next = rxm;
1640                         prev = rxm;
1641                         rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1642                         extra_idx += 1;
1643                 }
1644                 seg_res -= rcv_cnt;
1645                 if (!seg_res) {
1646                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1647                         nb_rx++;
1648                 }
1649         }
1650
1651         rxvq->stats.packets += nb_rx;
1652
1653         /* Allocate new mbufs for the used descriptors */
1654         if (likely(!virtqueue_full(vq))) {
1655                 /* free_cnt may include mrg descs */
1656                 uint16_t free_cnt = vq->vq_free_cnt;
1657                 struct rte_mbuf *new_pkts[free_cnt];
1658
1659                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1660                         error = virtqueue_enqueue_recv_refill_packed(vq,
1661                                         new_pkts, free_cnt);
1662                         if (unlikely(error)) {
1663                                 for (i = 0; i < free_cnt; i++)
1664                                         rte_pktmbuf_free(new_pkts[i]);
1665                         }
1666                         nb_enqueued += free_cnt;
1667                 } else {
1668                         struct rte_eth_dev *dev =
1669                                 &rte_eth_devices[rxvq->port_id];
1670                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1671                 }
1672         }
1673
1674         if (likely(nb_enqueued)) {
1675                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1676                         virtqueue_notify(vq);
1677                         PMD_RX_LOG(DEBUG, "Notified");
1678                 }
1679         }
1680
1681         return nb_rx;
1682 }
1683
1684 uint16_t
1685 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1686                         uint16_t nb_pkts)
1687 {
1688         uint16_t nb_tx;
1689         int error;
1690
1691         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1692                 struct rte_mbuf *m = tx_pkts[nb_tx];
1693
1694 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1695                 error = rte_validate_tx_offload(m);
1696                 if (unlikely(error)) {
1697                         rte_errno = -error;
1698                         break;
1699                 }
1700 #endif
1701
1702                 /* Do VLAN tag insertion */
1703                 if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1704                         error = rte_vlan_insert(&m);
1705                         /* rte_vlan_insert() may change pointer
1706                          * even in the case of failure
1707                          */
1708                         tx_pkts[nb_tx] = m;
1709
1710                         if (unlikely(error)) {
1711                                 rte_errno = -error;
1712                                 break;
1713                         }
1714                 }
1715
1716                 error = rte_net_intel_cksum_prepare(m);
1717                 if (unlikely(error)) {
1718                         rte_errno = -error;
1719                         break;
1720                 }
1721
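                /* For TSO, adjust the TCP checksum to the form virtio expects */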
1722                 if (m->ol_flags & PKT_TX_TCP_SEG)
1723                         virtio_tso_fix_cksum(m);
1724         }
1725
1726         return nb_tx;
1727 }
1728
1729 uint16_t
1730 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1731                         uint16_t nb_pkts)
1732 {
1733         struct virtnet_tx *txvq = tx_queue;
1734         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1735         struct virtio_hw *hw = vq->hw;
1736         uint16_t hdr_size = hw->vtnet_hdr_size;
1737         uint16_t nb_tx = 0;
1738         bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
1739
1740         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1741                 return nb_tx;
1742
1743         if (unlikely(nb_pkts < 1))
1744                 return nb_pkts;
1745
1746         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1747
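        /* Try to reclaim just the shortfall of descriptors for this burst up front */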
1748         if (nb_pkts > vq->vq_free_cnt)
1749                 virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1750                                            in_order);
1751
1752         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1753                 struct rte_mbuf *txm = tx_pkts[nb_tx];
1754                 int can_push = 0, use_indirect = 0, slots, need;
1755
1756                 /* optimize ring usage */
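                /*
                 * The header can only be pushed into the mbuf headroom (so
                 * header and data share one descriptor) for an exclusively
                 * owned, direct, single-segment mbuf with enough aligned
                 * headroom; otherwise prefer an indirect descriptor table
                 * when the device offers it.
                 */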
1757                 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1758                       virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1759                     rte_mbuf_refcnt_read(txm) == 1 &&
1760                     RTE_MBUF_DIRECT(txm) &&
1761                     txm->nb_segs == 1 &&
1762                     rte_pktmbuf_headroom(txm) >= hdr_size &&
1763                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1764                            __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1765                         can_push = 1;
1766                 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1767                          txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1768                         use_indirect = 1;
1769                 /* How many main ring entries are needed for this Tx?
1770                  * indirect   => 1
1771                  * any_layout => number of segments
1772                  * default    => number of segments + 1
1773                  */
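                /*
                 * e.g. a single-segment mbuf takes 1 slot when the header is
                 * pushed or an indirect descriptor is used, 2 otherwise; a
                 * 3-segment mbuf takes 1 slot with indirect descriptors and
                 * 4 otherwise.
                 */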
1774                 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1775                 need = slots - vq->vq_free_cnt;
1776
1777                 /* A positive value means more free vring descriptors are needed */
1778                 if (unlikely(need > 0)) {
1779                         virtio_xmit_cleanup_packed(vq, need, in_order);
1780                         need = slots - vq->vq_free_cnt;
1781                         if (unlikely(need > 0)) {
1782                                 PMD_TX_LOG(ERR,
1783                                            "No free tx descriptors to transmit");
1784                                 break;
1785                         }
1786                 }
1787
1788                 /* Enqueue Packet buffers */
1789                 if (can_push)
1790                         virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1791                 else
1792                         virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1793                                                       use_indirect, 0,
1794                                                       in_order);
1795
1796                 virtio_update_packet_stats(&txvq->stats, txm);
1797         }
1798
1799         txvq->stats.packets += nb_tx;
1800
1801         if (likely(nb_tx)) {
1802                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1803                         virtqueue_notify(vq);
1804                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1805                 }
1806         }
1807
1808         return nb_tx;
1809 }
1810
1811 uint16_t
1812 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1813 {
1814         struct virtnet_tx *txvq = tx_queue;
1815         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1816         struct virtio_hw *hw = vq->hw;
1817         uint16_t hdr_size = hw->vtnet_hdr_size;
1818         uint16_t nb_used, nb_tx = 0;
1819
1820         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1821                 return nb_tx;
1822
1823         if (unlikely(nb_pkts < 1))
1824                 return nb_pkts;
1825
1826         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1827
1828         nb_used = virtqueue_nused(vq);
1829
1830         if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1831                 virtio_xmit_cleanup(vq, nb_used);
1832
1833         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1834                 struct rte_mbuf *txm = tx_pkts[nb_tx];
1835                 int can_push = 0, use_indirect = 0, slots, need;
1836
1837                 /* optimize ring usage */
1838                 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1839                       virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1840                     rte_mbuf_refcnt_read(txm) == 1 &&
1841                     RTE_MBUF_DIRECT(txm) &&
1842                     txm->nb_segs == 1 &&
1843                     rte_pktmbuf_headroom(txm) >= hdr_size &&
1844                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1845                                    __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1846                         can_push = 1;
1847                 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1848                          txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1849                         use_indirect = 1;
1850
1851                 /* How many main ring entries are needed for this Tx?
1852                  * any_layout => number of segments
1853                  * indirect   => 1
1854                  * default    => number of segments + 1
1855                  */
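                /* e.g. a 2-segment mbuf: 1 slot with indirect descriptors, 3 otherwise */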
1856                 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1857                 need = slots - vq->vq_free_cnt;
1858
1859                 /* A positive value means more free vring descriptors are needed */
1860                 if (unlikely(need > 0)) {
1861                         nb_used = virtqueue_nused(vq);
1862
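                        /* Only as many entries as the device has marked used can be reclaimed */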
1863                         need = RTE_MIN(need, (int)nb_used);
1864
1865                         virtio_xmit_cleanup(vq, need);
1866                         need = slots - vq->vq_free_cnt;
1867                         if (unlikely(need > 0)) {
1868                                 PMD_TX_LOG(ERR,
1869                                            "No free tx descriptors to transmit");
1870                                 break;
1871                         }
1872                 }
1873
1874                 /* Enqueue Packet buffers */
1875                 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1876                         can_push, 0);
1877
1878                 virtio_update_packet_stats(&txvq->stats, txm);
1879         }
1880
1881         txvq->stats.packets += nb_tx;
1882
1883         if (likely(nb_tx)) {
1884                 vq_update_avail_idx(vq);
1885
1886                 if (unlikely(virtqueue_kick_prepare(vq))) {
1887                         virtqueue_notify(vq);
1888                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1889                 }
1890         }
1891
1892         return nb_tx;
1893 }
1894
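/*
 * Reclaim used descriptors in order and return how many of the requested
 * 'need' slots are still missing afterwards (<= 0 means enough are free).
 */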
1895 static __rte_always_inline int
1896 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1897 {
1898         uint16_t nb_used, nb_clean, nb_descs;
1899
1900         nb_descs = vq->vq_free_cnt + need;
1901         nb_used = virtqueue_nused(vq);
1902         nb_clean = RTE_MIN(need, (int)nb_used);
1903
1904         virtio_xmit_cleanup_inorder(vq, nb_clean);
1905
1906         return nb_descs - vq->vq_free_cnt;
1907 }
1908
1909 uint16_t
1910 virtio_xmit_pkts_inorder(void *tx_queue,
1911                         struct rte_mbuf **tx_pkts,
1912                         uint16_t nb_pkts)
1913 {
1914         struct virtnet_tx *txvq = tx_queue;
1915         struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1916         struct virtio_hw *hw = vq->hw;
1917         uint16_t hdr_size = hw->vtnet_hdr_size;
1918         uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1919         struct rte_mbuf *inorder_pkts[nb_pkts];
1920         int need;
1921
1922         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1923                 return nb_tx;
1924
1925         if (unlikely(nb_pkts < 1))
1926                 return nb_pkts;
1927
1928         VIRTQUEUE_DUMP(vq);
1929         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1930         nb_used = virtqueue_nused(vq);
1931
1932         if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1933                 virtio_xmit_cleanup_inorder(vq, nb_used);
1934
1935         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1936                 struct rte_mbuf *txm = tx_pkts[nb_tx];
1937                 int slots;
1938
1939                 /* optimize ring usage */
1940                 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1941                      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1942                      rte_mbuf_refcnt_read(txm) == 1 &&
1943                      RTE_MBUF_DIRECT(txm) &&
1944                      txm->nb_segs == 1 &&
1945                      rte_pktmbuf_headroom(txm) >= hdr_size &&
1946                      rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1947                                 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1948                         inorder_pkts[nb_inorder_pkts] = txm;
1949                         nb_inorder_pkts++;
1950
1951                         continue;
1952                 }
1953
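                /*
                 * This packet cannot use the in-order fast path: flush the
                 * batch collected so far first, so the ring is filled in
                 * packet order.
                 */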
1954                 if (nb_inorder_pkts) {
1955                         need = nb_inorder_pkts - vq->vq_free_cnt;
1956                         if (unlikely(need > 0)) {
1957                                 need = virtio_xmit_try_cleanup_inorder(vq,
1958                                                                        need);
1959                                 if (unlikely(need > 0)) {
1960                                         PMD_TX_LOG(ERR,
1961                                                 "No free tx descriptors to "
1962                                                 "transmit");
1963                                         break;
1964                                 }
1965                         }
1966                         virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
1967                                                         nb_inorder_pkts);
1968                         nb_inorder_pkts = 0;
1969                 }
1970
1971                 slots = txm->nb_segs + 1;
1972                 need = slots - vq->vq_free_cnt;
1973                 if (unlikely(need > 0)) {
1974                         need = virtio_xmit_try_cleanup_inorder(vq, slots);
1975
1976                         if (unlikely(need > 0)) {
1977                                 PMD_TX_LOG(ERR,
1978                                         "No free tx descriptors to transmit");
1979                                 break;
1980                         }
1981                 }
1982                 /* Enqueue Packet buffers */
1983                 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
1984
1985                 virtio_update_packet_stats(&txvq->stats, txm);
1986         }
1987
1988         /* Transmit all inorder packets */
1989         if (nb_inorder_pkts) {
1990                 need = nb_inorder_pkts - vq->vq_free_cnt;
1991                 if (unlikely(need > 0)) {
1992                         need = virtio_xmit_try_cleanup_inorder(vq,
1993                                                                   need);
1994                         if (unlikely(need > 0)) {
1995                                 PMD_TX_LOG(ERR,
1996                                         "No free tx descriptors to transmit");
1997                                 nb_inorder_pkts = vq->vq_free_cnt;
1998                                 nb_tx -= need;
1999                         }
2000                 }
2001
2002                 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2003                                                 nb_inorder_pkts);
2004         }
2005
2006         txvq->stats.packets += nb_tx;
2007
2008         if (likely(nb_tx)) {
2009                 vq_update_avail_idx(vq);
2010
2011                 if (unlikely(virtqueue_kick_prepare(vq))) {
2012                         virtqueue_notify(vq);
2013                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2014                 }
2015         }
2016
2017         VIRTQUEUE_DUMP(vq);
2018
2019         return nb_tx;
2020 }
2021
2022 __rte_weak uint16_t
2023 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2024                             struct rte_mbuf **rx_pkts __rte_unused,
2025                             uint16_t nb_pkts __rte_unused)
2026 {
2027         return 0;
2028 }
2029
2030 __rte_weak uint16_t
2031 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2032                             struct rte_mbuf **tx_pkts __rte_unused,
2033                             uint16_t nb_pkts __rte_unused)
2034 {
2035         return 0;
2036 }