/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_net.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
#include "virtio_ring.h"

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif

int
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
        struct virtnet_rx *rxvq = rxq;
        struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);

        return virtqueue_nused(vq) >= offset;
}

void
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
{
        vq->vq_free_cnt += num;
        vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
}

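/*
 * Return the descriptor chain starting at desc_idx to the free list.
 * Unless the chain is indirect, walk the VRING_DESC_F_NEXT links to find
 * its tail, then splice the freed chain onto the existing free list kept
 * in vq_desc_head_idx/vq_desc_tail_idx.
 */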
void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
        struct vring_desc *dp, *dp_tail;
        struct vq_desc_extra *dxp;
        uint16_t desc_idx_last = desc_idx;

        dp  = &vq->vq_split.ring.desc[desc_idx];
        dxp = &vq->vq_descx[desc_idx];
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
        if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
                while (dp->flags & VRING_DESC_F_NEXT) {
                        desc_idx_last = dp->next;
                        dp = &vq->vq_split.ring.desc[dp->next];
                }
        }
        dxp->ndescs = 0;

        /*
         * We must append the existing free chain, if any, to the end of
         * newly freed chain. If the virtqueue was completely used, then
         * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
         */
        if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
                vq->vq_desc_head_idx = desc_idx;
        } else {
                dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
                dp_tail->next = desc_idx;
        }

        vq->vq_desc_tail_idx = desc_idx_last;
        dp->next = VQ_RING_DESC_CHAIN_END;
}

void
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
        uint32_t s = mbuf->pkt_len;
        struct rte_ether_addr *ea;

        stats->bytes += s;

        if (s == 64) {
                stats->size_bins[1]++;
        } else if (s > 64 && s < 1024) {
                uint32_t bin;

                /* count zeros, and offset into correct bin */
                bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
                stats->size_bins[bin]++;
        } else {
                if (s < 64)
                        stats->size_bins[0]++;
                else if (s < 1519)
                        stats->size_bins[6]++;
                else
                        stats->size_bins[7]++;
        }

        ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
        if (rte_is_multicast_ether_addr(ea)) {
                if (rte_is_broadcast_ether_addr(ea))
                        stats->broadcast++;
                else
                        stats->multicast++;
        }
}

static inline void
virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
{
        VIRTIO_DUMP_PACKET(m, m->data_len);

        virtio_update_packet_stats(&rxvq->stats, m);
}

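/*
 * Dequeue up to num used descriptors from a packed virtqueue. Stops early
 * when the next descriptor has not yet been marked used by the device.
 * Returns the number of mbufs placed in rx_pkts; their buffer lengths are
 * stored in len[].
 */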
static uint16_t
virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
                                  struct rte_mbuf **rx_pkts,
                                  uint32_t *len,
                                  uint16_t num)
{
        struct rte_mbuf *cookie;
        uint16_t used_idx;
        uint16_t id;
        struct vring_packed_desc *desc;
        uint16_t i;

        desc = vq->vq_packed.ring.desc;

        for (i = 0; i < num; i++) {
                used_idx = vq->vq_used_cons_idx;
                /* desc_is_used has a load-acquire or rte_io_rmb inside
                 * to wait for a used descriptor in the virtqueue.
                 */
                if (!desc_is_used(&desc[used_idx], vq))
                        return i;
                len[i] = desc[used_idx].len;
                id = desc[used_idx].id;
                cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
                if (unlikely(cookie == NULL)) {
                        PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
                                vq->vq_used_cons_idx);
                        break;
                }
                rte_prefetch0(cookie);
                rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
                rx_pkts[i] = cookie;

                vq->vq_free_cnt++;
                vq->vq_used_cons_idx++;
                if (vq->vq_used_cons_idx >= vq->vq_nentries) {
                        vq->vq_used_cons_idx -= vq->vq_nentries;
                        vq->vq_packed.used_wrap_counter ^= 1;
                }
        }

        return i;
}

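/*
 * Split ring variant: read used elements from the used ring, recover the
 * mbuf cookie saved at enqueue time and return each descriptor chain to
 * the free list.
 */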
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
                           uint32_t *len, uint16_t num)
{
        struct vring_used_elem *uep;
        struct rte_mbuf *cookie;
        uint16_t used_idx, desc_idx;
        uint16_t i;

        /* Caller does the check */
        for (i = 0; i < num; i++) {
                used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
                uep = &vq->vq_split.ring.used->ring[used_idx];
                desc_idx = (uint16_t)uep->id;
                len[i] = uep->len;
                cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

                if (unlikely(cookie == NULL)) {
                        PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
                                vq->vq_used_cons_idx);
                        break;
                }

                rte_prefetch0(cookie);
                rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
                rx_pkts[i] = cookie;
                vq->vq_used_cons_idx++;
                vq_ring_free_chain(vq, desc_idx);
                vq->vq_descx[desc_idx].cookie = NULL;
        }

        return i;
}

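/*
 * In-order variant: with VIRTIO_F_IN_ORDER the device consumes descriptors
 * in the order they were made available, so the descriptor index equals the
 * used ring index and the whole batch can be freed with a single call to
 * vq_ring_free_inorder().
 */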
static uint16_t
virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
                        struct rte_mbuf **rx_pkts,
                        uint32_t *len,
                        uint16_t num)
{
        struct vring_used_elem *uep;
        struct rte_mbuf *cookie;
        uint16_t used_idx = 0;
        uint16_t i;

        if (unlikely(num == 0))
                return 0;

        for (i = 0; i < num; i++) {
                used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
                /* Desc idx same as used idx */
                uep = &vq->vq_split.ring.used->ring[used_idx];
                len[i] = uep->len;
                cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;

                if (unlikely(cookie == NULL)) {
                        PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
                                vq->vq_used_cons_idx);
                        break;
                }

                rte_prefetch0(cookie);
                rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
                rx_pkts[i] = cookie;
                vq->vq_used_cons_idx++;
                vq->vq_descx[used_idx].cookie = NULL;
        }

        vq_ring_free_inorder(vq, used_idx, i);
        return i;
}

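/*
 * Refill consecutive Rx descriptors with mbufs (in-order path). Each
 * buffer address starts vtnet_hdr_size bytes before the mbuf data so the
 * virtio-net header lands in the mbuf headroom.
 */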
static inline int
virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
                        struct rte_mbuf **cookies,
                        uint16_t num)
{
        struct vq_desc_extra *dxp;
        struct virtio_hw *hw = vq->hw;
        struct vring_desc *start_dp;
        uint16_t head_idx, idx, i = 0;

        if (unlikely(vq->vq_free_cnt == 0))
                return -ENOSPC;
        if (unlikely(vq->vq_free_cnt < num))
                return -EMSGSIZE;

        head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
        start_dp = vq->vq_split.ring.desc;

        while (i < num) {
                idx = head_idx & (vq->vq_nentries - 1);
                dxp = &vq->vq_descx[idx];
                dxp->cookie = (void *)cookies[i];
                dxp->ndescs = 1;

                start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
                        RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
                start_dp[idx].len = cookies[i]->buf_len -
                        RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
                start_dp[idx].flags = VRING_DESC_F_WRITE;

                vq_update_avail_ring(vq, idx);
                head_idx++;
                i++;
        }

        vq->vq_desc_head_idx += num;
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
        return 0;
}

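/*
 * Refill Rx descriptors following the free-list order (split ring,
 * devices that may complete out of order).
 */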
static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
                                uint16_t num)
{
        struct vq_desc_extra *dxp;
        struct virtio_hw *hw = vq->hw;
        struct vring_desc *start_dp = vq->vq_split.ring.desc;
        uint16_t idx, i;

        if (unlikely(vq->vq_free_cnt == 0))
                return -ENOSPC;
        if (unlikely(vq->vq_free_cnt < num))
                return -EMSGSIZE;

        if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
                return -EFAULT;

        for (i = 0; i < num; i++) {
                idx = vq->vq_desc_head_idx;
                dxp = &vq->vq_descx[idx];
                dxp->cookie = (void *)cookie[i];
                dxp->ndescs = 1;

                start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
                        RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
                start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
                        hw->vtnet_hdr_size;
                start_dp[idx].flags = VRING_DESC_F_WRITE;
                vq->vq_desc_head_idx = start_dp[idx].next;
                vq_update_avail_ring(vq, idx);
                if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
                        vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
                        break;
                }
        }

        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);

        return 0;
}

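/*
 * Fill one packed-ring descriptor with an Rx mbuf and flip its AVAIL/USED
 * flags from the cached wrap state; virtqueue_store_flags_packed() orders
 * the flags store after the address/length stores when weak barriers are
 * in use.
 */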
static inline void
virtqueue_refill_single_packed(struct virtqueue *vq,
                               struct vring_packed_desc *dp,
                               struct rte_mbuf *cookie)
{
        uint16_t flags = vq->vq_packed.cached_flags;
        struct virtio_hw *hw = vq->hw;

        dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
        dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;

        virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);

        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
                vq->vq_packed.cached_flags ^=
                        VRING_PACKED_DESC_F_AVAIL_USED;
                flags = vq->vq_packed.cached_flags;
        }
}

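/*
 * Initial refill of a packed Rx ring: descriptor ids have not been
 * assigned by the device yet, so ring slot idx and vq_descx idx coincide.
 */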
static inline int
virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
                                     struct rte_mbuf **cookie, uint16_t num)
{
        struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
        struct vq_desc_extra *dxp;
        uint16_t idx;
        int i;

        if (unlikely(vq->vq_free_cnt == 0))
                return -ENOSPC;
        if (unlikely(vq->vq_free_cnt < num))
                return -EMSGSIZE;

        for (i = 0; i < num; i++) {
                idx = vq->vq_avail_idx;
                dxp = &vq->vq_descx[idx];
                dxp->cookie = (void *)cookie[i];
                dxp->ndescs = 1;

                virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
        }
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
        return 0;
}

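/*
 * Runtime refill of a packed Rx ring: reuse the id the device left in the
 * used descriptor so the cookie lands in the matching vq_descx entry.
 */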
static inline int
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
                                     struct rte_mbuf **cookie, uint16_t num)
{
        struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
        struct vq_desc_extra *dxp;
        uint16_t idx, did;
        int i;

        if (unlikely(vq->vq_free_cnt == 0))
                return -ENOSPC;
        if (unlikely(vq->vq_free_cnt < num))
                return -EMSGSIZE;

        for (i = 0; i < num; i++) {
                idx = vq->vq_avail_idx;
                did = start_dp[idx].id;
                dxp = &vq->vq_descx[did];
                dxp->cookie = (void *)cookie[i];
                dxp->ndescs = 1;

                virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
        }
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
        return 0;
}

/* When doing TSO, the IP length is not included in the pseudo header
 * checksum of the packet given to the PMD, but for virtio it is
 * expected.
 */
static void
virtio_tso_fix_cksum(struct rte_mbuf *m)
{
        /* common case: header is not fragmented */
        if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
                        m->l4_len)) {
                struct rte_ipv4_hdr *iph;
                struct rte_ipv6_hdr *ip6h;
                struct rte_tcp_hdr *th;
                uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
                uint32_t tmp;

                iph = rte_pktmbuf_mtod_offset(m,
                                        struct rte_ipv4_hdr *, m->l2_len);
                th = RTE_PTR_ADD(iph, m->l3_len);
                if ((iph->version_ihl >> 4) == 4) {
                        iph->hdr_checksum = 0;
                        iph->hdr_checksum = rte_ipv4_cksum(iph);
                        ip_len = iph->total_length;
                        ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
                                m->l3_len);
                } else {
                        ip6h = (struct rte_ipv6_hdr *)iph;
                        ip_paylen = ip6h->payload_len;
                }

                /* calculate the new phdr checksum not including ip_paylen */
                prev_cksum = th->cksum;
                tmp = prev_cksum;
                tmp += ip_paylen;
                tmp = (tmp & 0xffff) + (tmp >> 16);
                new_cksum = tmp;

                /* replace it in the packet */
                th->cksum = new_cksum;
        }
}

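/*
 * Enqueue num single-segment packets on a split ring in order. The
 * virtio-net header is prepended inside the mbuf headroom, so each packet
 * consumes exactly one descriptor.
 */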
static inline void
virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
                        struct rte_mbuf **cookies,
                        uint16_t num)
{
        struct vq_desc_extra *dxp;
        struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct vring_desc *start_dp;
        struct virtio_net_hdr *hdr;
        uint16_t idx;
        int16_t head_size = vq->hw->vtnet_hdr_size;
        uint16_t i = 0;

        idx = vq->vq_desc_head_idx;
        start_dp = vq->vq_split.ring.desc;

        while (i < num) {
                idx = idx & (vq->vq_nentries - 1);
                dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
                dxp->cookie = (void *)cookies[i];
                dxp->ndescs = 1;
                virtio_update_packet_stats(&txvq->stats, cookies[i]);

                hdr = rte_pktmbuf_mtod_offset(cookies[i],
                                struct virtio_net_hdr *, -head_size);

                /* if offload disabled, hdr is not zeroed yet, do it now */
                if (!vq->hw->has_tx_offload)
                        virtqueue_clear_net_hdr(hdr);
                else
                        virtqueue_xmit_offload(hdr, cookies[i]);

                start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
                start_dp[idx].len = cookies[i]->data_len + head_size;
                start_dp[idx].flags = 0;

                vq_update_avail_ring(vq, idx);

                idx++;
                i++;
        }

        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
        vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}

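/*
 * Fast Tx path for packed rings: a single-segment packet with the
 * virtio-net header prepended in the mbuf headroom, consuming one
 * descriptor.
 */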
static inline void
virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
                                   struct rte_mbuf *cookie,
                                   int in_order)
{
        struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct vring_packed_desc *dp;
        struct vq_desc_extra *dxp;
        uint16_t idx, id, flags;
        int16_t head_size = vq->hw->vtnet_hdr_size;
        struct virtio_net_hdr *hdr;

        id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
        idx = vq->vq_avail_idx;
        dp = &vq->vq_packed.ring.desc[idx];

        dxp = &vq->vq_descx[id];
        dxp->ndescs = 1;
        dxp->cookie = cookie;

        flags = vq->vq_packed.cached_flags;

        /* prepend cannot fail, checked by caller */
        hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
                                      -head_size);

        /* if offload disabled, hdr is not zeroed yet, do it now */
        if (!vq->hw->has_tx_offload)
                virtqueue_clear_net_hdr(hdr);
        else
                virtqueue_xmit_offload(hdr, cookie);

        dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
        dp->len = cookie->data_len + head_size;
        dp->id = id;

        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
                vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
        }

        vq->vq_free_cnt--;

        if (!in_order) {
                vq->vq_desc_head_idx = dxp->next;
                if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
                        vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
        }

        virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
}

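/*
 * Generic split-ring Tx enqueue. Depending on negotiated features, the
 * virtio-net header is either prepended in the mbuf headroom (can_push),
 * placed in the reserved region referenced by an indirect descriptor list
 * (use_indirect), or given its own descriptor chained in front of the
 * data segments.
 */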
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                        uint16_t needed, int use_indirect, int can_push,
                        int in_order)
{
        struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
        struct vq_desc_extra *dxp;
        struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct vring_desc *start_dp;
        uint16_t seg_num = cookie->nb_segs;
        uint16_t head_idx, idx;
        int16_t head_size = vq->hw->vtnet_hdr_size;
        bool prepend_header = false;
        struct virtio_net_hdr *hdr;

        head_idx = vq->vq_desc_head_idx;
        idx = head_idx;
        if (in_order)
                dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
        else
                dxp = &vq->vq_descx[idx];
        dxp->cookie = (void *)cookie;
        dxp->ndescs = needed;

        start_dp = vq->vq_split.ring.desc;

        if (can_push) {
                /* prepend cannot fail, checked by caller */
                hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
                                              -head_size);
                prepend_header = true;

                /* if offload disabled, it is not zeroed below, do it now */
                if (!vq->hw->has_tx_offload)
                        virtqueue_clear_net_hdr(hdr);
        } else if (use_indirect) {
                /* setup tx ring slot to point to indirect
                 * descriptor list stored in reserved region.
                 *
                 * the first slot in indirect ring is already preset
                 * to point to the header in reserved region
                 */
                start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
                        RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
                start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
                start_dp[idx].flags = VRING_DESC_F_INDIRECT;
                hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

                /* loop below will fill in rest of the indirect elements */
                start_dp = txr[idx].tx_indir;
                idx = 1;
        } else {
                /* setup first tx ring slot to point to header
                 * stored in reserved region.
                 */
                start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
                        RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
                start_dp[idx].len   = vq->hw->vtnet_hdr_size;
                start_dp[idx].flags = VRING_DESC_F_NEXT;
                hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

                idx = start_dp[idx].next;
        }

        if (vq->hw->has_tx_offload)
                virtqueue_xmit_offload(hdr, cookie);

        do {
                start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
                start_dp[idx].len = cookie->data_len;
                if (prepend_header) {
                        start_dp[idx].addr -= head_size;
                        start_dp[idx].len += head_size;
                        prepend_header = false;
                }
                start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
                idx = start_dp[idx].next;
        } while ((cookie = cookie->next) != NULL);

        if (use_indirect)
                idx = vq->vq_split.ring.desc[head_idx].next;

        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);

        vq->vq_desc_head_idx = idx;
        vq_update_avail_ring(vq, head_idx);

        if (!in_order) {
                if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
                        vq->vq_desc_tail_idx = idx;
        }
}

void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
        struct virtio_hw *hw = dev->data->dev_private;

        if (hw->cvq) {
                rte_spinlock_init(&hw->cvq->lock);
                VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
        }
}

int
virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
                        unsigned int socket_id __rte_unused,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
{
        uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtqueue *vq = hw->vqs[vq_idx];
        struct virtnet_rx *rxvq;
        uint16_t rx_free_thresh;
        uint16_t buf_size;
        const char *error;

        PMD_INIT_FUNC_TRACE();

        if (rx_conf->rx_deferred_start) {
                PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
                return -EINVAL;
        }

        buf_size = virtio_rx_mem_pool_buf_size(mp);
        if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
                                     hw->rx_ol_scatter, &error)) {
                PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
                             queue_idx, error);
                return -EINVAL;
        }

        rx_free_thresh = rx_conf->rx_free_thresh;
        if (rx_free_thresh == 0)
                rx_free_thresh =
                        RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);

        if (rx_free_thresh & 0x3) {
                PMD_INIT_LOG(ERR, "rx_free_thresh must be a multiple of four."
                        " (rx_free_thresh=%u port=%u queue=%u)",
                        rx_free_thresh, dev->data->port_id, queue_idx);
                return -EINVAL;
        }

        if (rx_free_thresh >= vq->vq_nentries) {
                PMD_INIT_LOG(ERR, "rx_free_thresh must be less than the "
                        "number of RX entries (%u)."
                        " (rx_free_thresh=%u port=%u queue=%u)",
                        vq->vq_nentries,
                        rx_free_thresh, dev->data->port_id, queue_idx);
                return -EINVAL;
        }
        vq->vq_free_thresh = rx_free_thresh;

        /*
         * For the split ring vectorized path the number of descriptors
         * must be equal to the ring size.
         */
        if (nb_desc > vq->vq_nentries ||
            (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) {
                nb_desc = vq->vq_nentries;
        }
        vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

        rxvq = &vq->rxq;
        rxvq->queue_id = queue_idx;
        rxvq->mpool = mp;
        dev->data->rx_queues[queue_idx] = rxvq;

        return 0;
}

int
virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
{
        uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtqueue *vq = hw->vqs[vq_idx];
        struct virtnet_rx *rxvq = &vq->rxq;
        struct rte_mbuf *m;
        uint16_t desc_idx;
        int error, nbufs, i;
        bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);

        PMD_INIT_FUNC_TRACE();

        /* Allocate blank mbufs for each rx descriptor */
        nbufs = 0;

        if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
                for (desc_idx = 0; desc_idx < vq->vq_nentries;
                     desc_idx++) {
                        vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
                        vq->vq_split.ring.desc[desc_idx].flags =
                                VRING_DESC_F_WRITE;
                }

                virtio_rxq_vec_setup(rxvq);
        }

        memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
        for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
                vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;

        if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
                while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
                        virtio_rxq_rearm_vec(rxvq);
                        nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
                }
        } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
                if (!virtqueue_full(vq)) {
                        uint16_t free_cnt = vq->vq_free_cnt;
                        struct rte_mbuf *pkts[free_cnt];

                        if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
                                free_cnt)) {
                                error = virtqueue_enqueue_refill_inorder(vq,
                                                pkts,
                                                free_cnt);
                                if (unlikely(error)) {
                                        for (i = 0; i < free_cnt; i++)
                                                rte_pktmbuf_free(pkts[i]);
                                } else {
                                        nbufs += free_cnt;
                                }
                        }

                        vq_update_avail_idx(vq);
                }
        } else {
                while (!virtqueue_full(vq)) {
                        m = rte_mbuf_raw_alloc(rxvq->mpool);
                        if (m == NULL)
                                break;

                        /* Enqueue allocated buffers */
                        if (virtio_with_packed_queue(vq->hw))
                                error = virtqueue_enqueue_recv_refill_packed_init(vq,
                                                &m, 1);
                        else
                                error = virtqueue_enqueue_recv_refill(vq,
                                                &m, 1);
                        if (error) {
                                rte_pktmbuf_free(m);
                                break;
                        }
                        nbufs++;
                }

                if (!virtio_with_packed_queue(vq->hw))
                        vq_update_avail_idx(vq);
        }

        PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

        VIRTQUEUE_DUMP(vq);

        return 0;
}

/*
 * struct rte_eth_dev *dev: Used to update dev
 * uint16_t nb_desc: Defaults to values read from config space
 * unsigned int socket_id: Used to allocate memzone
 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
 * uint16_t queue_idx: Just used as an index in dev txq list
 */
int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
                        unsigned int socket_id __rte_unused,
                        const struct rte_eth_txconf *tx_conf)
{
        uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtqueue *vq = hw->vqs[vq_idx];
        struct virtnet_tx *txvq;
        uint16_t tx_free_thresh;

        PMD_INIT_FUNC_TRACE();

        if (tx_conf->tx_deferred_start) {
                PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
                return -EINVAL;
        }

        if (nb_desc == 0 || nb_desc > vq->vq_nentries)
                nb_desc = vq->vq_nentries;
        vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

        txvq = &vq->txq;
        txvq->queue_id = queue_idx;

        tx_free_thresh = tx_conf->tx_free_thresh;
        if (tx_free_thresh == 0)
                tx_free_thresh =
                        RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);

        if (tx_free_thresh >= (vq->vq_nentries - 3)) {
                PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
                        "number of TX entries minus 3 (%u)."
                        " (tx_free_thresh=%u port=%u queue=%u)",
                        vq->vq_nentries - 3,
                        tx_free_thresh, dev->data->port_id, queue_idx);
                return -EINVAL;
        }

        vq->vq_free_thresh = tx_free_thresh;

        dev->data->tx_queues[queue_idx] = txvq;
        return 0;
}

int
virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
                                uint16_t queue_idx)
{
        uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtqueue *vq = hw->vqs[vq_idx];

        PMD_INIT_FUNC_TRACE();

        if (!virtio_with_packed_queue(hw)) {
                if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
                        vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
        }

        VIRTQUEUE_DUMP(vq);

        return 0;
}

static inline void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
        int error;

        /*
         * Requeue the discarded mbuf. This should always be
         * successful since it was just dequeued.
         */
        if (virtio_with_packed_queue(vq->hw))
                error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
        else
                error = virtqueue_enqueue_recv_refill(vq, &m, 1);

        if (unlikely(error)) {
                PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
                rte_pktmbuf_free(m);
        }
}

static inline void
virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
{
        int error;

        error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
        if (unlikely(error)) {
                PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
                rte_pktmbuf_free(m);
        }
}

/* Optionally fill offload information in structure */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
{
        struct rte_net_hdr_lens hdr_lens;
        uint32_t hdrlen, ptype;
        int l4_supported = 0;

        /* nothing to do */
        if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
                return 0;

        m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;

        ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
        m->packet_type = ptype;
        if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
            (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
            (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
                l4_supported = 1;

        if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
                if (hdr->csum_start <= hdrlen && l4_supported) {
                        m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
                } else {
                        /* Unknown proto or tunnel, do sw cksum. We can assume
                         * the cksum field is in the first segment since the
                         * buffers we provided to the host are large enough.
                         * In case of SCTP, this will be wrong since it's a CRC
                         * but there's nothing we can do.
                         */
                        uint16_t csum = 0, off;

                        if (rte_raw_cksum_mbuf(m, hdr->csum_start,
                                rte_pktmbuf_pkt_len(m) - hdr->csum_start,
                                &csum) < 0)
                                return -EINVAL;
                        if (likely(csum != 0xffff))
                                csum = ~csum;
                        off = hdr->csum_offset + hdr->csum_start;
                        if (rte_pktmbuf_data_len(m) >= off + 1)
                                *rte_pktmbuf_mtod_offset(m, uint16_t *,
                                        off) = csum;
                }
        } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
                m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
        }

        /* GSO request, save required information in mbuf */
        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                /* Check unsupported modes */
                if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
                    (hdr->gso_size == 0)) {
                        return -EINVAL;
                }

                /* Update mss lengths in mbuf */
                m->tso_segsz = hdr->gso_size;
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        m->ol_flags |= PKT_RX_LRO |
                                PKT_RX_L4_CKSUM_NONE;
                        break;
                default:
                        return -EINVAL;
                }
        }

        return 0;
}

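/*
 * Receive bursts are trimmed so that each burst ends on a cache line
 * boundary of the descriptor ring; this keeps consecutive bursts from
 * sharing a cache line of descriptors.
 */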
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct virtnet_rx *rxvq = rx_queue;
        struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
        uint16_t nb_used, num, nb_rx;
        uint32_t len[VIRTIO_MBUF_BURST_SZ];
        struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
        int error;
        uint32_t i, nb_enqueued;
        uint32_t hdr_size;
        struct virtio_net_hdr *hdr;

        nb_rx = 0;
        if (unlikely(hw->started == 0))
                return nb_rx;

        nb_used = virtqueue_nused(vq);

        num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
        if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
                num = VIRTIO_MBUF_BURST_SZ;
        if (likely(num > DESC_PER_CACHELINE))
                num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

        num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
        PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);

        nb_enqueued = 0;
        hdr_size = hw->vtnet_hdr_size;

        for (i = 0; i < num; i++) {
                rxm = rcv_pkts[i];

                PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

                if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
                        PMD_RX_LOG(ERR, "Packet drop");
                        nb_enqueued++;
                        virtio_discard_rxbuf(vq, rxm);
                        rxvq->stats.errors++;
                        continue;
                }

                rxm->port = rxvq->port_id;
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->ol_flags = 0;
                rxm->vlan_tci = 0;

                rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
                rxm->data_len = (uint16_t)(len[i] - hdr_size);

                hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
                        RTE_PKTMBUF_HEADROOM - hdr_size);

                if (hw->vlan_strip)
                        rte_vlan_strip(rxm);

                if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
                        virtio_discard_rxbuf(vq, rxm);
                        rxvq->stats.errors++;
                        continue;
                }

                virtio_rx_stats_updated(rxvq, rxm);

                rx_pkts[nb_rx++] = rxm;
        }

        rxvq->stats.packets += nb_rx;

        /* Allocate new mbufs for the used descriptors */
        if (likely(!virtqueue_full(vq))) {
                uint16_t free_cnt = vq->vq_free_cnt;
                struct rte_mbuf *new_pkts[free_cnt];

                if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
                                                free_cnt) == 0)) {
                        error = virtqueue_enqueue_recv_refill(vq, new_pkts,
                                        free_cnt);
                        if (unlikely(error)) {
                                for (i = 0; i < free_cnt; i++)
                                        rte_pktmbuf_free(new_pkts[i]);
                        }
                        nb_enqueued += free_cnt;
                } else {
                        struct rte_eth_dev *dev =
                                &rte_eth_devices[rxvq->port_id];
                        dev->data->rx_mbuf_alloc_failed += free_cnt;
                }
        }

        if (likely(nb_enqueued)) {
                vq_update_avail_idx(vq);

                if (unlikely(virtqueue_kick_prepare(vq))) {
                        virtqueue_notify(vq);
                        PMD_RX_LOG(DEBUG, "Notified");
                }
        }

        return nb_rx;
}

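/* Packed-ring receive, non-mergeable: one descriptor per packet. */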
uint16_t
virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
{
        struct virtnet_rx *rxvq = rx_queue;
        struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
        uint16_t num, nb_rx;
        uint32_t len[VIRTIO_MBUF_BURST_SZ];
        struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
        int error;
        uint32_t i, nb_enqueued;
        uint32_t hdr_size;
        struct virtio_net_hdr *hdr;

        nb_rx = 0;
        if (unlikely(hw->started == 0))
                return nb_rx;

        num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
        if (likely(num > DESC_PER_CACHELINE))
                num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

        num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
        PMD_RX_LOG(DEBUG, "dequeue:%d", num);

        nb_enqueued = 0;
        hdr_size = hw->vtnet_hdr_size;

        for (i = 0; i < num; i++) {
                rxm = rcv_pkts[i];

                PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

                if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
                        PMD_RX_LOG(ERR, "Packet drop");
                        nb_enqueued++;
                        virtio_discard_rxbuf(vq, rxm);
                        rxvq->stats.errors++;
                        continue;
                }

                rxm->port = rxvq->port_id;
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->ol_flags = 0;
                rxm->vlan_tci = 0;

                rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
                rxm->data_len = (uint16_t)(len[i] - hdr_size);

                hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
                        RTE_PKTMBUF_HEADROOM - hdr_size);

                if (hw->vlan_strip)
                        rte_vlan_strip(rxm);

                if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
                        virtio_discard_rxbuf(vq, rxm);
                        rxvq->stats.errors++;
                        continue;
                }

                virtio_rx_stats_updated(rxvq, rxm);

                rx_pkts[nb_rx++] = rxm;
        }

        rxvq->stats.packets += nb_rx;

        /* Allocate new mbufs for the used descriptors */
        if (likely(!virtqueue_full(vq))) {
                uint16_t free_cnt = vq->vq_free_cnt;
                struct rte_mbuf *new_pkts[free_cnt];

                if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
                                                free_cnt) == 0)) {
                        error = virtqueue_enqueue_recv_refill_packed(vq,
                                        new_pkts, free_cnt);
                        if (unlikely(error)) {
                                for (i = 0; i < free_cnt; i++)
                                        rte_pktmbuf_free(new_pkts[i]);
                        }
                        nb_enqueued += free_cnt;
                } else {
                        struct rte_eth_dev *dev =
                                &rte_eth_devices[rxvq->port_id];
                        dev->data->rx_mbuf_alloc_failed += free_cnt;
                }
        }

        if (likely(nb_enqueued)) {
                if (unlikely(virtqueue_kick_prepare_packed(vq))) {
                        virtqueue_notify(vq);
                        PMD_RX_LOG(DEBUG, "Notified");
                }
        }

        return nb_rx;
}

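/*
 * In-order receive with optional mergeable Rx buffers: a packet may span
 * several consecutive descriptors, announced via num_buffers in the
 * virtio_net_hdr_mrg_rxbuf header.
 */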
uint16_t
virtio_recv_pkts_inorder(void *rx_queue,
                        struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
{
        struct virtnet_rx *rxvq = rx_queue;
        struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
        struct rte_mbuf *prev = NULL;
        uint16_t nb_used, num, nb_rx;
        uint32_t len[VIRTIO_MBUF_BURST_SZ];
        struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
        int error;
        uint32_t nb_enqueued;
        uint32_t seg_num;
        uint32_t seg_res;
        uint32_t hdr_size;
        int32_t i;

        nb_rx = 0;
        if (unlikely(hw->started == 0))
                return nb_rx;

        nb_used = virtqueue_nused(vq);
        nb_used = RTE_MIN(nb_used, nb_pkts);
        nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);

        PMD_RX_LOG(DEBUG, "used:%d", nb_used);

        nb_enqueued = 0;
        seg_num = 1;
        seg_res = 0;
        hdr_size = hw->vtnet_hdr_size;

        num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);

        for (i = 0; i < num; i++) {
                struct virtio_net_hdr_mrg_rxbuf *header;

                PMD_RX_LOG(DEBUG, "dequeue:%d", num);
                PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

                rxm = rcv_pkts[i];

                if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
                        PMD_RX_LOG(ERR, "Packet drop");
                        nb_enqueued++;
                        virtio_discard_rxbuf_inorder(vq, rxm);
                        rxvq->stats.errors++;
                        continue;
                }

                header = (struct virtio_net_hdr_mrg_rxbuf *)
                         ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
                         - hdr_size);

                if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
                        seg_num = header->num_buffers;
                        if (seg_num == 0)
                                seg_num = 1;
                } else {
                        seg_num = 1;
                }

                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->nb_segs = seg_num;
                rxm->ol_flags = 0;
                rxm->vlan_tci = 0;
                rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
                rxm->data_len = (uint16_t)(len[i] - hdr_size);

                rxm->port = rxvq->port_id;

                rx_pkts[nb_rx] = rxm;
                prev = rxm;

                if (vq->hw->has_rx_offload &&
                                virtio_rx_offload(rxm, &header->hdr) < 0) {
                        virtio_discard_rxbuf_inorder(vq, rxm);
                        rxvq->stats.errors++;
                        continue;
                }

                if (hw->vlan_strip)
                        rte_vlan_strip(rx_pkts[nb_rx]);

                seg_res = seg_num - 1;

                /* Merge remaining segments */
                while (seg_res != 0 && i < (num - 1)) {
                        i++;

                        rxm = rcv_pkts[i];
                        rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
                        rxm->pkt_len = (uint32_t)(len[i]);
                        rxm->data_len = (uint16_t)(len[i]);

                        rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

                        prev->next = rxm;
                        prev = rxm;
                        seg_res -= 1;
                }

                if (!seg_res) {
                        virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
                        nb_rx++;
                }
        }

        /* The last packet may still need more segments merged */
        while (seg_res != 0) {
                uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
                                        VIRTIO_MBUF_BURST_SZ);

                if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
                        num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
                                                           rcv_cnt);
                        uint16_t extra_idx = 0;

                        rcv_cnt = num;
                        while (extra_idx < rcv_cnt) {
                                rxm = rcv_pkts[extra_idx];
                                rxm->data_off =
                                        RTE_PKTMBUF_HEADROOM - hdr_size;
                                rxm->pkt_len = (uint32_t)(len[extra_idx]);
                                rxm->data_len = (uint16_t)(len[extra_idx]);
                                prev->next = rxm;
                                prev = rxm;
                                rx_pkts[nb_rx]->pkt_len += len[extra_idx];
                                extra_idx += 1;
                        }
                        seg_res -= rcv_cnt;

                        if (!seg_res) {
                                virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
                                nb_rx++;
                        }
                } else {
                        PMD_RX_LOG(ERR,
                                        "Not enough segments for packet.");
                        rte_pktmbuf_free(rx_pkts[nb_rx]);
                        rxvq->stats.errors++;
                        break;
                }
        }

        rxvq->stats.packets += nb_rx;

        /* Allocate new mbufs for the used descriptors */
        if (likely(!virtqueue_full(vq))) {
                /* free_cnt may include mrg descs */
                uint16_t free_cnt = vq->vq_free_cnt;
                struct rte_mbuf *new_pkts[free_cnt];

                if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
                        error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
                                        free_cnt);
                        if (unlikely(error)) {
                                for (i = 0; i < free_cnt; i++)
                                        rte_pktmbuf_free(new_pkts[i]);
                        }
                        nb_enqueued += free_cnt;
                } else {
                        struct rte_eth_dev *dev =
                                &rte_eth_devices[rxvq->port_id];
                        dev->data->rx_mbuf_alloc_failed += free_cnt;
                }
        }

        if (likely(nb_enqueued)) {
                vq_update_avail_idx(vq);

                if (unlikely(virtqueue_kick_prepare(vq))) {
                        virtqueue_notify(vq);
                        PMD_RX_LOG(DEBUG, "Notified");
                }
        }

        return nb_rx;
}

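/*
 * Split-ring receive with mergeable Rx buffers (VIRTIO_NET_F_MRG_RXBUF):
 * the segments of one packet are chained into a multi-segment mbuf.
 */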
1383 uint16_t
1384 virtio_recv_mergeable_pkts(void *rx_queue,
1385                         struct rte_mbuf **rx_pkts,
1386                         uint16_t nb_pkts)
1387 {
1388         struct virtnet_rx *rxvq = rx_queue;
1389         struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1390         struct virtio_hw *hw = vq->hw;
1391         struct rte_mbuf *rxm;
1392         struct rte_mbuf *prev = NULL;
1393         uint16_t nb_used, num, nb_rx = 0;
1394         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1395         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1396         int error;
1397         uint32_t nb_enqueued = 0;
1398         uint32_t seg_num = 0;
1399         uint32_t seg_res = 0;
1400         uint32_t hdr_size = hw->vtnet_hdr_size;
1401         int32_t i;
1402
1403         if (unlikely(hw->started == 0))
1404                 return nb_rx;
1405
1406         nb_used = virtqueue_nused(vq);
1407
1408         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1409
1410         num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1411         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1412                 num = VIRTIO_MBUF_BURST_SZ;
1413         if (likely(num > DESC_PER_CACHELINE))
1414                 num = num - ((vq->vq_used_cons_idx + num) %
1415                                 DESC_PER_CACHELINE);
1416
1417
1418         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1419
1420         for (i = 0; i < num; i++) {
1421                 struct virtio_net_hdr_mrg_rxbuf *header;
1422
1423                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1424                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1425
1426                 rxm = rcv_pkts[i];
1427
1428                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1429                         PMD_RX_LOG(ERR, "Packet drop");
1430                         nb_enqueued++;
1431                         virtio_discard_rxbuf(vq, rxm);
1432                         rxvq->stats.errors++;
1433                         continue;
1434                 }
1435
1436                 header = (struct virtio_net_hdr_mrg_rxbuf *)
1437                          ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1438                          - hdr_size);
1439                 seg_num = header->num_buffers;
1440                 if (seg_num == 0)
1441                         seg_num = 1;

                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->nb_segs = seg_num;
                rxm->ol_flags = 0;
                rxm->vlan_tci = 0;
                rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
                rxm->data_len = (uint16_t)(len[i] - hdr_size);

                rxm->port = rxvq->port_id;

                rx_pkts[nb_rx] = rxm;
                prev = rxm;

                if (hw->has_rx_offload &&
                                virtio_rx_offload(rxm, &header->hdr) < 0) {
                        virtio_discard_rxbuf(vq, rxm);
                        rxvq->stats.errors++;
                        continue;
                }

                if (hw->vlan_strip)
                        rte_vlan_strip(rx_pkts[nb_rx]);

                seg_res = seg_num - 1;

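                /*
                 * The tail segments of this packet may not all be part of
                 * the current dequeued burst; whatever is still missing
                 * when the burst runs out (seg_res != 0) is collected by
                 * the follow-up loop after this one.
                 */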
                /* Merge remaining segments */
                while (seg_res != 0 && i < (num - 1)) {
                        i++;

                        rxm = rcv_pkts[i];
                        rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
                        rxm->pkt_len = (uint32_t)(len[i]);
                        rxm->data_len = (uint16_t)(len[i]);

                        rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

                        prev->next = rxm;
                        prev = rxm;
                        seg_res -= 1;
                }

                if (!seg_res) {
                        virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
                        nb_rx++;
                }
        }

        /* The last packet still needs its remaining segments merged */
        while (seg_res != 0) {
                uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
                                        VIRTIO_MBUF_BURST_SZ);

                if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
                        num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
                                                           rcv_cnt);
                        uint16_t extra_idx = 0;

                        rcv_cnt = num;
                        while (extra_idx < rcv_cnt) {
                                rxm = rcv_pkts[extra_idx];
                                rxm->data_off =
                                        RTE_PKTMBUF_HEADROOM - hdr_size;
                                rxm->pkt_len = (uint32_t)(len[extra_idx]);
                                rxm->data_len = (uint16_t)(len[extra_idx]);
                                prev->next = rxm;
                                prev = rxm;
                                rx_pkts[nb_rx]->pkt_len += len[extra_idx];
                                extra_idx += 1;
                        }
                        seg_res -= rcv_cnt;

                        if (!seg_res) {
                                virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
                                nb_rx++;
                        }
                } else {
                        PMD_RX_LOG(ERR,
                                        "Not enough segments for packet.");
                        rte_pktmbuf_free(rx_pkts[nb_rx]);
                        rxvq->stats.errors++;
                        break;
                }
        }

        rxvq->stats.packets += nb_rx;

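        /*
         * Refill: bulk-allocate one mbuf per free descriptor slot in a
         * single mempool operation. If requeueing fails, the mbufs are
         * returned to the pool; on allocation failure the port's
         * rx_mbuf_alloc_failed counter is bumped instead.
         */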
        /* Allocate new mbufs for the used descriptors */
        if (likely(!virtqueue_full(vq))) {
                /* free_cnt may include mrg descs */
                uint16_t free_cnt = vq->vq_free_cnt;
                struct rte_mbuf *new_pkts[free_cnt];

                if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
                        error = virtqueue_enqueue_recv_refill(vq, new_pkts,
                                        free_cnt);
                        if (unlikely(error)) {
                                for (i = 0; i < free_cnt; i++)
                                        rte_pktmbuf_free(new_pkts[i]);
                        }
                        nb_enqueued += free_cnt;
                } else {
                        struct rte_eth_dev *dev =
                                &rte_eth_devices[rxvq->port_id];
                        dev->data->rx_mbuf_alloc_failed += free_cnt;
                }
        }

        if (likely(nb_enqueued)) {
                vq_update_avail_idx(vq);

                if (unlikely(virtqueue_kick_prepare(vq))) {
                        virtqueue_notify(vq);
                        PMD_RX_LOG(DEBUG, "Notified");
                }
        }

        return nb_rx;
}

uint16_t
virtio_recv_mergeable_pkts_packed(void *rx_queue,
                        struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
{
        struct virtnet_rx *rxvq = rx_queue;
        struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
        struct rte_mbuf *prev = NULL;
        uint16_t num, nb_rx = 0;
        uint32_t len[VIRTIO_MBUF_BURST_SZ];
        struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
        uint32_t nb_enqueued = 0;
        uint32_t seg_num = 0;
        uint32_t seg_res = 0;
        uint32_t hdr_size = hw->vtnet_hdr_size;
        int32_t i;
        int error;

        if (unlikely(hw->started == 0))
                return nb_rx;

        num = nb_pkts;
        if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
                num = VIRTIO_MBUF_BURST_SZ;
        if (likely(num > DESC_PER_CACHELINE))
                num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

        num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);

        for (i = 0; i < num; i++) {
                struct virtio_net_hdr_mrg_rxbuf *header;

                PMD_RX_LOG(DEBUG, "dequeue:%d", num);
                PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

                rxm = rcv_pkts[i];

                if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
                        PMD_RX_LOG(ERR, "Packet drop");
                        nb_enqueued++;
                        virtio_discard_rxbuf(vq, rxm);
                        rxvq->stats.errors++;
                        continue;
                }

                header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
                          rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
                seg_num = header->num_buffers;

                if (seg_num == 0)
                        seg_num = 1;

                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->nb_segs = seg_num;
                rxm->ol_flags = 0;
                rxm->vlan_tci = 0;
                rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
                rxm->data_len = (uint16_t)(len[i] - hdr_size);

                rxm->port = rxvq->port_id;
                rx_pkts[nb_rx] = rxm;
                prev = rxm;

                if (hw->has_rx_offload &&
                                virtio_rx_offload(rxm, &header->hdr) < 0) {
                        virtio_discard_rxbuf(vq, rxm);
                        rxvq->stats.errors++;
                        continue;
                }

                if (hw->vlan_strip)
                        rte_vlan_strip(rx_pkts[nb_rx]);

                seg_res = seg_num - 1;

                /* Merge remaining segments */
                while (seg_res != 0 && i < (num - 1)) {
                        i++;

                        rxm = rcv_pkts[i];
                        rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
                        rxm->pkt_len = (uint32_t)(len[i]);
                        rxm->data_len = (uint16_t)(len[i]);

                        rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

                        prev->next = rxm;
                        prev = rxm;
                        seg_res -= 1;
                }

                if (!seg_res) {
                        virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
                        nb_rx++;
                }
        }

        /* The last packet still needs its remaining segments merged */
        while (seg_res != 0) {
                uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
                                        VIRTIO_MBUF_BURST_SZ);
                uint16_t extra_idx = 0;

                rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
                                len, rcv_cnt);
                if (unlikely(rcv_cnt == 0)) {
                        PMD_RX_LOG(ERR, "Not enough segments for packet.");
                        rte_pktmbuf_free(rx_pkts[nb_rx]);
                        rxvq->stats.errors++;
                        break;
                }

                while (extra_idx < rcv_cnt) {
                        rxm = rcv_pkts[extra_idx];

                        rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
                        rxm->pkt_len = (uint32_t)(len[extra_idx]);
                        rxm->data_len = (uint16_t)(len[extra_idx]);

                        prev->next = rxm;
                        prev = rxm;
                        rx_pkts[nb_rx]->pkt_len += len[extra_idx];
                        extra_idx += 1;
                }
                seg_res -= rcv_cnt;
                if (!seg_res) {
                        virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
                        nb_rx++;
                }
        }

        rxvq->stats.packets += nb_rx;

        /* Allocate new mbufs for the used descriptors */
        if (likely(!virtqueue_full(vq))) {
                /* free_cnt may include mrg descs */
                uint16_t free_cnt = vq->vq_free_cnt;
                struct rte_mbuf *new_pkts[free_cnt];

                if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
                        error = virtqueue_enqueue_recv_refill_packed(vq,
                                        new_pkts, free_cnt);
                        if (unlikely(error)) {
                                for (i = 0; i < free_cnt; i++)
                                        rte_pktmbuf_free(new_pkts[i]);
                        }
                        nb_enqueued += free_cnt;
                } else {
                        struct rte_eth_dev *dev =
                                &rte_eth_devices[rxvq->port_id];
                        dev->data->rx_mbuf_alloc_failed += free_cnt;
                }
        }

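        /*
         * Unlike the split ring, the packed ring has no avail index to
         * publish, so no vq_update_avail_idx() is needed here; descriptor
         * availability was already signalled per descriptor by the refill.
         */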
        if (likely(nb_enqueued)) {
                if (unlikely(virtqueue_kick_prepare_packed(vq))) {
                        virtqueue_notify(vq);
                        PMD_RX_LOG(DEBUG, "Notified");
                }
        }

        return nb_rx;
}

uint16_t
virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
{
        uint16_t nb_tx;
        int error;

        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                struct rte_mbuf *m = tx_pkts[nb_tx];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
                error = rte_validate_tx_offload(m);
                if (unlikely(error)) {
                        rte_errno = -error;
                        break;
                }
#endif

                /* Do VLAN tag insertion */
                if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
                        error = rte_vlan_insert(&m);
                        /* rte_vlan_insert() may change pointer
                         * even in the case of failure
                         */
                        tx_pkts[nb_tx] = m;

                        if (unlikely(error)) {
                                rte_errno = -error;
                                break;
                        }
                }

                error = rte_net_intel_cksum_prepare(m);
                if (unlikely(error)) {
                        rte_errno = -error;
                        break;
                }

                if (m->ol_flags & PKT_TX_TCP_SEG)
                        virtio_tso_fix_cksum(m);
        }

        return nb_tx;
}

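/*
 * The function above is the port's tx_pkt_prepare callback: software VLAN
 * insertion, pseudo-header checksum preparation, and a TSO checksum fix-up
 * are done there so the transmit paths can assume well-formed packets.
 * A typical caller goes through the ethdev API; a minimal sketch (the
 * handle_bad_pkt() error handler is hypothetical):
 *
 *     uint16_t n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *     if (n != nb_pkts)
 *             handle_bad_pkt(pkts[n], rte_errno);
 *     n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 */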
uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
{
        struct virtnet_tx *txvq = tx_queue;
        struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_tx = 0;
        bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);

        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;

        if (unlikely(nb_pkts < 1))
                return nb_pkts;

        PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

        if (nb_pkts > vq->vq_free_cnt)
                virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
                                           in_order);

        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                int can_push = 0, use_indirect = 0, slots, need;

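                /*
                 * can_push: the virtio-net header is prepended into the
                 * mbuf headroom, so header and data go out in a single
                 * descriptor (requires ANY_LAYOUT or VERSION_1, a direct,
                 * unshared, single-segment mbuf with enough aligned
                 * headroom). use_indirect: the whole chain is described
                 * by one indirect descriptor table instead.
                 */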
                /* optimize ring usage */
                if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
                    rte_mbuf_refcnt_read(txm) == 1 &&
                    RTE_MBUF_DIRECT(txm) &&
                    txm->nb_segs == 1 &&
                    rte_pktmbuf_headroom(txm) >= hdr_size &&
                    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
                           __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
                        can_push = 1;
                else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
                         txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
                        use_indirect = 1;
                /* How many main ring entries are needed for this Tx?
                 * indirect   => 1
                 * any_layout => number of segments
                 * default    => number of segments + 1
                 */
                slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
                need = slots - vq->vq_free_cnt;
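                /*
                 * E.g. a 3-segment mbuf needs 1 slot with use_indirect,
                 * 3 with can_push, and 4 otherwise (one extra for the
                 * separate virtio-net header descriptor).
                 */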
                /* A positive value means free vring descriptors are still needed */
                if (unlikely(need > 0)) {
                        virtio_xmit_cleanup_packed(vq, need, in_order);
                        need = slots - vq->vq_free_cnt;
                        if (unlikely(need > 0)) {
                                PMD_TX_LOG(ERR,
                                           "No free tx descriptors to transmit");
                                break;
                        }
                }

                /* Enqueue Packet buffers */
                if (can_push)
                        virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
                else
                        virtqueue_enqueue_xmit_packed(txvq, txm, slots,
                                                      use_indirect, 0,
                                                      in_order);

                virtio_update_packet_stats(&txvq->stats, txm);
        }

        txvq->stats.packets += nb_tx;

        if (likely(nb_tx)) {
                if (unlikely(virtqueue_kick_prepare_packed(vq))) {
                        virtqueue_notify(vq);
                        PMD_TX_LOG(DEBUG, "Notified backend after xmit");
                }
        }

        return nb_tx;
}

uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct virtnet_tx *txvq = tx_queue;
        struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_used, nb_tx = 0;

        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;

        if (unlikely(nb_pkts < 1))
                return nb_pkts;

        PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

        nb_used = virtqueue_nused(vq);

        if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
                virtio_xmit_cleanup(vq, nb_used);

        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                int can_push = 0, use_indirect = 0, slots, need;

                /* optimize ring usage */
                if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
                    rte_mbuf_refcnt_read(txm) == 1 &&
                    RTE_MBUF_DIRECT(txm) &&
                    txm->nb_segs == 1 &&
                    rte_pktmbuf_headroom(txm) >= hdr_size &&
                    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
                                   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
                        can_push = 1;
                else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
                         txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
                        use_indirect = 1;

                /* How many main ring entries are needed for this Tx?
                 * any_layout => number of segments
                 * indirect   => 1
                 * default    => number of segments + 1
                 */
                slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
                need = slots - vq->vq_free_cnt;

                /* A positive value means free vring descriptors are still needed */
                if (unlikely(need > 0)) {
                        nb_used = virtqueue_nused(vq);

                        need = RTE_MIN(need, (int)nb_used);

                        virtio_xmit_cleanup(vq, need);
                        need = slots - vq->vq_free_cnt;
                        if (unlikely(need > 0)) {
                                PMD_TX_LOG(ERR,
                                           "No free tx descriptors to transmit");
                                break;
                        }
                }

                /* Enqueue Packet buffers */
                virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
                        can_push, 0);

                virtio_update_packet_stats(&txvq->stats, txm);
        }

        txvq->stats.packets += nb_tx;

        if (likely(nb_tx)) {
                vq_update_avail_idx(vq);

                if (unlikely(virtqueue_kick_prepare(vq))) {
                        virtqueue_notify(vq);
                        PMD_TX_LOG(DEBUG, "Notified backend after xmit");
                }
        }

        return nb_tx;
}

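/*
 * Try to reclaim up to 'need' descriptors from the used ring, in order.
 * Returns how many of the requested descriptors are still unavailable
 * afterwards; a value <= 0 means enough descriptors are now free.
 */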
static __rte_always_inline int
virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
{
        uint16_t nb_used, nb_clean, nb_descs;

        nb_descs = vq->vq_free_cnt + need;
        nb_used = virtqueue_nused(vq);
        nb_clean = RTE_MIN(need, (int)nb_used);

        virtio_xmit_cleanup_inorder(vq, nb_clean);

        return nb_descs - vq->vq_free_cnt;
}

uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
                        struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
{
        struct virtnet_tx *txvq = tx_queue;
        struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
        struct rte_mbuf *inorder_pkts[nb_pkts];
        int need;

        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;

        if (unlikely(nb_pkts < 1))
                return nb_pkts;

        VIRTQUEUE_DUMP(vq);
        PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
        nb_used = virtqueue_nused(vq);

        if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
                virtio_xmit_cleanup_inorder(vq, nb_used);

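        /*
         * Packets whose virtio-net header fits in the mbuf headroom are
         * batched in inorder_pkts and flushed with a single
         * virtqueue_enqueue_xmit_inorder() call; any packet that cannot
         * be batched forces a flush first so transmit order is preserved.
         */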
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                int slots;

                /* optimize ring usage */
                if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
                     rte_mbuf_refcnt_read(txm) == 1 &&
                     RTE_MBUF_DIRECT(txm) &&
                     txm->nb_segs == 1 &&
                     rte_pktmbuf_headroom(txm) >= hdr_size &&
                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
                                __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
                        inorder_pkts[nb_inorder_pkts] = txm;
                        nb_inorder_pkts++;

                        continue;
                }

                if (nb_inorder_pkts) {
                        need = nb_inorder_pkts - vq->vq_free_cnt;
                        if (unlikely(need > 0)) {
                                need = virtio_xmit_try_cleanup_inorder(vq,
                                                                       need);
                                if (unlikely(need > 0)) {
                                        PMD_TX_LOG(ERR,
                                                "No free tx descriptors to transmit");
                                        break;
                                }
                        }
                        virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
                                                        nb_inorder_pkts);
                        nb_inorder_pkts = 0;
                }

                slots = txm->nb_segs + 1;
                need = slots - vq->vq_free_cnt;
                if (unlikely(need > 0)) {
                        need = virtio_xmit_try_cleanup_inorder(vq, slots);

                        if (unlikely(need > 0)) {
                                PMD_TX_LOG(ERR,
                                        "No free tx descriptors to transmit");
                                break;
                        }
                }
                /* Enqueue Packet buffers */
                virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);

                virtio_update_packet_stats(&txvq->stats, txm);
        }

        /* Transmit all inorder packets */
        if (nb_inorder_pkts) {
                need = nb_inorder_pkts - vq->vq_free_cnt;
                if (unlikely(need > 0)) {
                        need = virtio_xmit_try_cleanup_inorder(vq, need);
                        if (unlikely(need > 0)) {
                                PMD_TX_LOG(ERR,
                                        "No free tx descriptors to transmit");
                                nb_inorder_pkts = vq->vq_free_cnt;
                                nb_tx -= need;
                        }
                }

                virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
                                                nb_inorder_pkts);
        }

        txvq->stats.packets += nb_tx;

        if (likely(nb_tx)) {
                vq_update_avail_idx(vq);

                if (unlikely(virtqueue_kick_prepare(vq))) {
                        virtqueue_notify(vq);
                        PMD_TX_LOG(DEBUG, "Notified backend after xmit");
                }
        }

        VIRTQUEUE_DUMP(vq);

        return nb_tx;
}

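/*
 * Weak no-op stubs: architectures that provide vectorized packed-ring
 * datapaths override these at link time; everywhere else the weak
 * definitions below keep the build linking while returning 0 packets.
 */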
__rte_weak uint16_t
virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
                            struct rte_mbuf **rx_pkts __rte_unused,
                            uint16_t nb_pkts __rte_unused)
{
        return 0;
}

__rte_weak uint16_t
virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
                            struct rte_mbuf **tx_pkts __rte_unused,
                            uint16_t nb_pkts __rte_unused)
{
        return 0;
}