net/virtio: add vectorized packed ring Rx
[dpdk.git] / drivers / net / virtio / virtio_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41
42 int
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 {
45         struct virtnet_rx *rxvq = rxq;
46         struct virtqueue *vq = rxvq->vq;
47
48         return VIRTQUEUE_NUSED(vq) >= offset;
49 }
50
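/* In-order variant of descriptor free: slots are reclaimed contiguously,
 * so only the free counter and the tail index need to be advanced.
 */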
51 void
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 {
54         vq->vq_free_cnt += num;
55         vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
56 }
57
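/* Return a descriptor chain of a split ring to the free list, following
 * VRING_DESC_F_NEXT links to locate the chain tail and linking the freed
 * chain after the existing free-list tail.
 */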
58 void
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 {
61         struct vring_desc *dp, *dp_tail;
62         struct vq_desc_extra *dxp;
63         uint16_t desc_idx_last = desc_idx;
64
65         dp  = &vq->vq_split.ring.desc[desc_idx];
66         dxp = &vq->vq_descx[desc_idx];
67         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68         if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69                 while (dp->flags & VRING_DESC_F_NEXT) {
70                         desc_idx_last = dp->next;
71                         dp = &vq->vq_split.ring.desc[dp->next];
72                 }
73         }
74         dxp->ndescs = 0;
75
76         /*
77          * Append the newly freed chain to the end of the existing free
78          * chain, if any. If the virtqueue was completely used, the free
79          * list is empty and the head becomes the newly freed descriptor.
80          */
81         if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82                 vq->vq_desc_head_idx = desc_idx;
83         } else {
84                 dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85                 dp_tail->next = desc_idx;
86         }
87
88         vq->vq_desc_tail_idx = desc_idx_last;
89         dp->next = VQ_RING_DESC_CHAIN_END;
90 }
91
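/* Update byte counters, packet-size histogram bins and the
 * multicast/broadcast counters for a single mbuf.
 */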
92 void
93 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
94 {
95         uint32_t s = mbuf->pkt_len;
96         struct rte_ether_addr *ea;
97
98         stats->bytes += s;
99
100         if (s == 64) {
101                 stats->size_bins[1]++;
102         } else if (s > 64 && s < 1024) {
103                 uint32_t bin;
104
105                 /* count leading zeros to index the correct size bin */
106                 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
107                 stats->size_bins[bin]++;
108         } else {
109                 if (s < 64)
110                         stats->size_bins[0]++;
111                 else if (s < 1519)
112                         stats->size_bins[6]++;
113                 else
114                         stats->size_bins[7]++;
115         }
116
117         ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
118         if (rte_is_multicast_ether_addr(ea)) {
119                 if (rte_is_broadcast_ether_addr(ea))
120                         stats->broadcast++;
121                 else
122                         stats->multicast++;
123         }
124 }
125
126 static inline void
127 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
128 {
129         VIRTIO_DUMP_PACKET(m, m->data_len);
130
131         virtio_update_packet_stats(&rxvq->stats, m);
132 }
133
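/* Dequeue up to 'num' used descriptors from a packed ring, returning the
 * attached mbufs; stops early as soon as a descriptor is not yet marked
 * used by the device.
 */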
134 static uint16_t
135 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
136                                   struct rte_mbuf **rx_pkts,
137                                   uint32_t *len,
138                                   uint16_t num)
139 {
140         struct rte_mbuf *cookie;
141         uint16_t used_idx;
142         uint16_t id;
143         struct vring_packed_desc *desc;
144         uint16_t i;
145
146         desc = vq->vq_packed.ring.desc;
147
148         for (i = 0; i < num; i++) {
149                 used_idx = vq->vq_used_cons_idx;
150                 /* desc_is_used has a load-acquire or rte_cio_rmb inside
151                  * and waits for a used descriptor in the virtqueue.
152                  */
153                 if (!desc_is_used(&desc[used_idx], vq))
154                         return i;
155                 len[i] = desc[used_idx].len;
156                 id = desc[used_idx].id;
157                 cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
158                 if (unlikely(cookie == NULL)) {
159                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
160                                 vq->vq_used_cons_idx);
161                         break;
162                 }
163                 rte_prefetch0(cookie);
164                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
165                 rx_pkts[i] = cookie;
166
167                 vq->vq_free_cnt++;
168                 vq->vq_used_cons_idx++;
169                 if (vq->vq_used_cons_idx >= vq->vq_nentries) {
170                         vq->vq_used_cons_idx -= vq->vq_nentries;
171                         vq->vq_packed.used_wrap_counter ^= 1;
172                 }
173         }
174
175         return i;
176 }
177
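/* Dequeue up to 'num' entries from the used ring of a split virtqueue and
 * return the freed descriptor chains to the free list.
 */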
178 static uint16_t
179 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
180                            uint32_t *len, uint16_t num)
181 {
182         struct vring_used_elem *uep;
183         struct rte_mbuf *cookie;
184         uint16_t used_idx, desc_idx;
185         uint16_t i;
186
187         /* Caller ensures num does not exceed the number of used entries */
188         for (i = 0; i < num ; i++) {
189                 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
190                 uep = &vq->vq_split.ring.used->ring[used_idx];
191                 desc_idx = (uint16_t) uep->id;
192                 len[i] = uep->len;
193                 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
194
195                 if (unlikely(cookie == NULL)) {
196                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
197                                 vq->vq_used_cons_idx);
198                         break;
199                 }
200
201                 rte_prefetch0(cookie);
202                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
203                 rx_pkts[i]  = cookie;
204                 vq->vq_used_cons_idx++;
205                 vq_ring_free_chain(vq, desc_idx);
206                 vq->vq_descx[desc_idx].cookie = NULL;
207         }
208
209         return i;
210 }
211
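/* In-order dequeue for split rings: the descriptor index equals the used
 * index, so freed slots are reclaimed in a single batch at the end.
 */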
212 static uint16_t
213 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
214                         struct rte_mbuf **rx_pkts,
215                         uint32_t *len,
216                         uint16_t num)
217 {
218         struct vring_used_elem *uep;
219         struct rte_mbuf *cookie;
220         uint16_t used_idx = 0;
221         uint16_t i;
222
223         if (unlikely(num == 0))
224                 return 0;
225
226         for (i = 0; i < num; i++) {
227                 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
228                 /* Desc idx same as used idx */
229                 uep = &vq->vq_split.ring.used->ring[used_idx];
230                 len[i] = uep->len;
231                 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
232
233                 if (unlikely(cookie == NULL)) {
234                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
235                                 vq->vq_used_cons_idx);
236                         break;
237                 }
238
239                 rte_prefetch0(cookie);
240                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
241                 rx_pkts[i]  = cookie;
242                 vq->vq_used_cons_idx++;
243                 vq->vq_descx[used_idx].cookie = NULL;
244         }
245
246         vq_ring_free_inorder(vq, used_idx, i);
247         return i;
248 }
249
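/* Refill an in-order split ring with receive buffers; each mbuf occupies
 * exactly one descriptor starting at the current head index.
 */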
250 static inline int
251 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
252                         struct rte_mbuf **cookies,
253                         uint16_t num)
254 {
255         struct vq_desc_extra *dxp;
256         struct virtio_hw *hw = vq->hw;
257         struct vring_desc *start_dp;
258         uint16_t head_idx, idx, i = 0;
259
260         if (unlikely(vq->vq_free_cnt == 0))
261                 return -ENOSPC;
262         if (unlikely(vq->vq_free_cnt < num))
263                 return -EMSGSIZE;
264
265         head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
266         start_dp = vq->vq_split.ring.desc;
267
268         while (i < num) {
269                 idx = head_idx & (vq->vq_nentries - 1);
270                 dxp = &vq->vq_descx[idx];
271                 dxp->cookie = (void *)cookies[i];
272                 dxp->ndescs = 1;
273
274                 start_dp[idx].addr =
275                                 VIRTIO_MBUF_ADDR(cookies[i], vq) +
276                                 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
277                 start_dp[idx].len =
278                                 cookies[i]->buf_len -
279                                 RTE_PKTMBUF_HEADROOM +
280                                 hw->vtnet_hdr_size;
281                 start_dp[idx].flags =  VRING_DESC_F_WRITE;
282
283                 vq_update_avail_ring(vq, idx);
284                 head_idx++;
285                 i++;
286         }
287
288         vq->vq_desc_head_idx += num;
289         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
290         return 0;
291 }
292
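/* Refill a split ring with receive buffers by walking the descriptor
 * free list; each mbuf occupies one write-only descriptor.
 */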
293 static inline int
294 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
295                                 uint16_t num)
296 {
297         struct vq_desc_extra *dxp;
298         struct virtio_hw *hw = vq->hw;
299         struct vring_desc *start_dp = vq->vq_split.ring.desc;
300         uint16_t idx, i;
301
302         if (unlikely(vq->vq_free_cnt == 0))
303                 return -ENOSPC;
304         if (unlikely(vq->vq_free_cnt < num))
305                 return -EMSGSIZE;
306
307         if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
308                 return -EFAULT;
309
310         for (i = 0; i < num; i++) {
311                 idx = vq->vq_desc_head_idx;
312                 dxp = &vq->vq_descx[idx];
313                 dxp->cookie = (void *)cookie[i];
314                 dxp->ndescs = 1;
315
316                 start_dp[idx].addr =
317                         VIRTIO_MBUF_ADDR(cookie[i], vq) +
318                         RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
319                 start_dp[idx].len =
320                         cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
321                         hw->vtnet_hdr_size;
322                 start_dp[idx].flags = VRING_DESC_F_WRITE;
323                 vq->vq_desc_head_idx = start_dp[idx].next;
324                 vq_update_avail_ring(vq, idx);
325                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
326                         vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
327                         break;
328                 }
329         }
330
331         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
332
333         return 0;
334 }
335
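/* Refill a packed ring with receive buffers. Address and length are
 * written first; the flags are stored last through
 * virtqueue_store_flags_packed() so the device never sees a partially
 * initialized descriptor.
 */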
336 static inline int
337 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
338                                      struct rte_mbuf **cookie, uint16_t num)
339 {
340         struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
341         uint16_t flags = vq->vq_packed.cached_flags;
342         struct virtio_hw *hw = vq->hw;
343         struct vq_desc_extra *dxp;
344         uint16_t idx;
345         int i;
346
347         if (unlikely(vq->vq_free_cnt == 0))
348                 return -ENOSPC;
349         if (unlikely(vq->vq_free_cnt < num))
350                 return -EMSGSIZE;
351
352         for (i = 0; i < num; i++) {
353                 idx = vq->vq_avail_idx;
354                 dxp = &vq->vq_descx[idx];
355                 dxp->cookie = (void *)cookie[i];
356                 dxp->ndescs = 1;
357
358                 start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
359                                 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
360                 start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
361                                         + hw->vtnet_hdr_size;
362
363                 vq->vq_desc_head_idx = dxp->next;
364                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
365                         vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
366
367                 virtqueue_store_flags_packed(&start_dp[idx], flags,
368                                              hw->weak_barriers);
369
370                 if (++vq->vq_avail_idx >= vq->vq_nentries) {
371                         vq->vq_avail_idx -= vq->vq_nentries;
372                         vq->vq_packed.cached_flags ^=
373                                 VRING_PACKED_DESC_F_AVAIL_USED;
374                         flags = vq->vq_packed.cached_flags;
375                 }
376         }
377         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
378         return 0;
379 }
380
381 /* When doing TSO, the IP payload length is not included in the pseudo
382  * header checksum of the packet given to the PMD, but virtio expects it
383  * to be included.
384  */
385 static void
386 virtio_tso_fix_cksum(struct rte_mbuf *m)
387 {
388         /* common case: header is not fragmented */
389         if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
390                         m->l4_len)) {
391                 struct rte_ipv4_hdr *iph;
392                 struct rte_ipv6_hdr *ip6h;
393                 struct rte_tcp_hdr *th;
394                 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
395                 uint32_t tmp;
396
397                 iph = rte_pktmbuf_mtod_offset(m,
398                                         struct rte_ipv4_hdr *, m->l2_len);
399                 th = RTE_PTR_ADD(iph, m->l3_len);
400                 if ((iph->version_ihl >> 4) == 4) {
401                         iph->hdr_checksum = 0;
402                         iph->hdr_checksum = rte_ipv4_cksum(iph);
403                         ip_len = iph->total_length;
404                         ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
405                                 m->l3_len);
406                 } else {
407                         ip6h = (struct rte_ipv6_hdr *)iph;
408                         ip_paylen = ip6h->payload_len;
409                 }
410
411                 /* calculate the new phdr checksum not including ip_paylen */
412                 prev_cksum = th->cksum;
413                 tmp = prev_cksum;
414                 tmp += ip_paylen;
415                 tmp = (tmp & 0xffff) + (tmp >> 16);
416                 new_cksum = tmp;
417
418                 /* replace it in the packet */
419                 th->cksum = new_cksum;
420         }
421 }
422
423
424
425
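/* Transmit path for in-order split rings: each packet uses a single
 * descriptor, with the virtio-net header prepended in the mbuf headroom.
 */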
426 static inline void
427 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
428                         struct rte_mbuf **cookies,
429                         uint16_t num)
430 {
431         struct vq_desc_extra *dxp;
432         struct virtqueue *vq = txvq->vq;
433         struct vring_desc *start_dp;
434         struct virtio_net_hdr *hdr;
435         uint16_t idx;
436         int16_t head_size = vq->hw->vtnet_hdr_size;
437         uint16_t i = 0;
438
439         idx = vq->vq_desc_head_idx;
440         start_dp = vq->vq_split.ring.desc;
441
442         while (i < num) {
443                 idx = idx & (vq->vq_nentries - 1);
444                 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
445                 dxp->cookie = (void *)cookies[i];
446                 dxp->ndescs = 1;
447                 virtio_update_packet_stats(&txvq->stats, cookies[i]);
448
449                 hdr = rte_pktmbuf_mtod_offset(cookies[i],
450                                 struct virtio_net_hdr *, -head_size);
451
452                 /* if offload disabled, hdr is not zeroed yet, do it now */
453                 if (!vq->hw->has_tx_offload)
454                         virtqueue_clear_net_hdr(hdr);
455                 else
456                         virtqueue_xmit_offload(hdr, cookies[i], true);
457
458                 start_dp[idx].addr  =
459                         VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
460                 start_dp[idx].len   = cookies[i]->data_len + head_size;
461                 start_dp[idx].flags = 0;
462
463
464                 vq_update_avail_ring(vq, idx);
465
466                 idx++;
467                 i++;
468         }
469
470         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
471         vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
472 }
473
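/* Fast transmit path for packed rings: single-segment packets whose
 * virtio-net header fits in the mbuf headroom, consuming one descriptor.
 */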
474 static inline void
475 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
476                                    struct rte_mbuf *cookie,
477                                    int in_order)
478 {
479         struct virtqueue *vq = txvq->vq;
480         struct vring_packed_desc *dp;
481         struct vq_desc_extra *dxp;
482         uint16_t idx, id, flags;
483         int16_t head_size = vq->hw->vtnet_hdr_size;
484         struct virtio_net_hdr *hdr;
485
486         id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
487         idx = vq->vq_avail_idx;
488         dp = &vq->vq_packed.ring.desc[idx];
489
490         dxp = &vq->vq_descx[id];
491         dxp->ndescs = 1;
492         dxp->cookie = cookie;
493
494         flags = vq->vq_packed.cached_flags;
495
496         /* prepend cannot fail, checked by caller */
497         hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
498                                       -head_size);
499
500         /* if offload disabled, hdr is not zeroed yet, do it now */
501         if (!vq->hw->has_tx_offload)
502                 virtqueue_clear_net_hdr(hdr);
503         else
504                 virtqueue_xmit_offload(hdr, cookie, true);
505
506         dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
507         dp->len  = cookie->data_len + head_size;
508         dp->id   = id;
509
510         if (++vq->vq_avail_idx >= vq->vq_nentries) {
511                 vq->vq_avail_idx -= vq->vq_nentries;
512                 vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
513         }
514
515         vq->vq_free_cnt--;
516
517         if (!in_order) {
518                 vq->vq_desc_head_idx = dxp->next;
519                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
520                         vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
521         }
522
523         virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
524 }
525
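/* Generic transmit path for split rings: the virtio-net header is either
 * pushed into the mbuf headroom (can_push), placed in the reserved region
 * referenced by an indirect descriptor (use_indirect), or given its own
 * descriptor, followed by one descriptor per packet segment.
 */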
526 static inline void
527 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
528                         uint16_t needed, int use_indirect, int can_push,
529                         int in_order)
530 {
531         struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
532         struct vq_desc_extra *dxp;
533         struct virtqueue *vq = txvq->vq;
534         struct vring_desc *start_dp;
535         uint16_t seg_num = cookie->nb_segs;
536         uint16_t head_idx, idx;
537         int16_t head_size = vq->hw->vtnet_hdr_size;
538         bool prepend_header = false;
539         struct virtio_net_hdr *hdr;
540
541         head_idx = vq->vq_desc_head_idx;
542         idx = head_idx;
543         if (in_order)
544                 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
545         else
546                 dxp = &vq->vq_descx[idx];
547         dxp->cookie = (void *)cookie;
548         dxp->ndescs = needed;
549
550         start_dp = vq->vq_split.ring.desc;
551
552         if (can_push) {
553                 /* prepend cannot fail, checked by caller */
554                 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
555                                               -head_size);
556                 prepend_header = true;
557
558                 /* if offload disabled, it is not zeroed below, do it now */
559                 if (!vq->hw->has_tx_offload)
560                         virtqueue_clear_net_hdr(hdr);
561         } else if (use_indirect) {
562                 /* setup tx ring slot to point to indirect
563                  * descriptor list stored in reserved region.
564                  *
565                  * the first slot in indirect ring is already preset
566                  * to point to the header in reserved region
567                  */
568                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
569                         RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
570                 start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
571                 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
572                 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
573
574                 /* loop below will fill in rest of the indirect elements */
575                 start_dp = txr[idx].tx_indir;
576                 idx = 1;
577         } else {
578                 /* setup first tx ring slot to point to header
579                  * stored in reserved region.
580                  */
581                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
582                         RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
583                 start_dp[idx].len   = vq->hw->vtnet_hdr_size;
584                 start_dp[idx].flags = VRING_DESC_F_NEXT;
585                 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
586
587                 idx = start_dp[idx].next;
588         }
589
590         virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
591
592         do {
593                 start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
594                 start_dp[idx].len   = cookie->data_len;
595                 if (prepend_header) {
596                         start_dp[idx].addr -= head_size;
597                         start_dp[idx].len += head_size;
598                         prepend_header = false;
599                 }
600                 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
601                 idx = start_dp[idx].next;
602         } while ((cookie = cookie->next) != NULL);
603
604         if (use_indirect)
605                 idx = vq->vq_split.ring.desc[head_idx].next;
606
607         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
608
609         vq->vq_desc_head_idx = idx;
610         vq_update_avail_ring(vq, head_idx);
611
612         if (!in_order) {
613                 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
614                         vq->vq_desc_tail_idx = idx;
615         }
616 }
617
618 void
619 virtio_dev_cq_start(struct rte_eth_dev *dev)
620 {
621         struct virtio_hw *hw = dev->data->dev_private;
622
623         if (hw->cvq && hw->cvq->vq) {
624                 rte_spinlock_init(&hw->cvq->lock);
625                 VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
626         }
627 }
628
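/* Ethdev Rx queue setup: validate rx_free_thresh and attach the mempool
 * to the receive virtqueue; descriptors are populated later in
 * virtio_dev_rx_queue_setup_finish().
 */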
629 int
630 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
631                         uint16_t queue_idx,
632                         uint16_t nb_desc,
633                         unsigned int socket_id __rte_unused,
634                         const struct rte_eth_rxconf *rx_conf,
635                         struct rte_mempool *mp)
636 {
637         uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
638         struct virtio_hw *hw = dev->data->dev_private;
639         struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
640         struct virtnet_rx *rxvq;
641         uint16_t rx_free_thresh;
642
643         PMD_INIT_FUNC_TRACE();
644
645         if (rx_conf->rx_deferred_start) {
646                 PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
647                 return -EINVAL;
648         }
649
650         rx_free_thresh = rx_conf->rx_free_thresh;
651         if (rx_free_thresh == 0)
652                 rx_free_thresh =
653                         RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
654
655         if (rx_free_thresh & 0x3) {
656                 RTE_LOG(ERR, PMD, "rx_free_thresh must be a multiple of four."
657                         " (rx_free_thresh=%u port=%u queue=%u)\n",
658                         rx_free_thresh, dev->data->port_id, queue_idx);
659                 return -EINVAL;
660         }
661
662         if (rx_free_thresh >= vq->vq_nentries) {
663                 RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
664                         "number of RX entries (%u)."
665                         " (rx_free_thresh=%u port=%u queue=%u)\n",
666                         vq->vq_nentries,
667                         rx_free_thresh, dev->data->port_id, queue_idx);
668                 return -EINVAL;
669         }
670         vq->vq_free_thresh = rx_free_thresh;
671
672         if (nb_desc == 0 || nb_desc > vq->vq_nentries)
673                 nb_desc = vq->vq_nentries;
674         vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
675
676         rxvq = &vq->rxq;
677         rxvq->queue_id = queue_idx;
678         rxvq->mpool = mp;
679         dev->data->rx_queues[queue_idx] = rxvq;
680
681         return 0;
682 }
683
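/* Second stage of Rx queue setup: populate the ring with receive mbufs
 * using the refill variant that matches the negotiated features
 * (vectorized, in-order, packed or split).
 */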
684 int
685 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
686 {
687         uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
688         struct virtio_hw *hw = dev->data->dev_private;
689         struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
690         struct virtnet_rx *rxvq = &vq->rxq;
691         struct rte_mbuf *m;
692         uint16_t desc_idx;
693         int error, nbufs, i;
694         bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
695
696         PMD_INIT_FUNC_TRACE();
697
698         /* Allocate blank mbufs for each rx descriptor */
699         nbufs = 0;
700
701         if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
702                 for (desc_idx = 0; desc_idx < vq->vq_nentries;
703                      desc_idx++) {
704                         vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
705                         vq->vq_split.ring.desc[desc_idx].flags =
706                                 VRING_DESC_F_WRITE;
707                 }
708
709                 virtio_rxq_vec_setup(rxvq);
710         }
711
712         memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
713         for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
714              desc_idx++) {
715                 vq->sw_ring[vq->vq_nentries + desc_idx] =
716                         &rxvq->fake_mbuf;
717         }
718
719         if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
720                 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
721                         virtio_rxq_rearm_vec(rxvq);
722                         nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
723                 }
724         } else if (!vtpci_packed_queue(vq->hw) && in_order) {
725                 if ((!virtqueue_full(vq))) {
726                         uint16_t free_cnt = vq->vq_free_cnt;
727                         struct rte_mbuf *pkts[free_cnt];
728
729                         if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
730                                 free_cnt)) {
731                                 error = virtqueue_enqueue_refill_inorder(vq,
732                                                 pkts,
733                                                 free_cnt);
734                                 if (unlikely(error)) {
735                                         for (i = 0; i < free_cnt; i++)
736                                                 rte_pktmbuf_free(pkts[i]);
737                                 }
738                         }
739
740                         nbufs += free_cnt;
741                         vq_update_avail_idx(vq);
742                 }
743         } else {
744                 while (!virtqueue_full(vq)) {
745                         m = rte_mbuf_raw_alloc(rxvq->mpool);
746                         if (m == NULL)
747                                 break;
748
749                         /* Enqueue allocated buffers */
750                         if (vtpci_packed_queue(vq->hw))
751                                 error = virtqueue_enqueue_recv_refill_packed(vq,
752                                                 &m, 1);
753                         else
754                                 error = virtqueue_enqueue_recv_refill(vq,
755                                                 &m, 1);
756                         if (error) {
757                                 rte_pktmbuf_free(m);
758                                 break;
759                         }
760                         nbufs++;
761                 }
762
763                 if (!vtpci_packed_queue(vq->hw))
764                         vq_update_avail_idx(vq);
765         }
766
767         PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
768
769         VIRTQUEUE_DUMP(vq);
770
771         return 0;
772 }
773
774 /*
775  * struct rte_eth_dev *dev: Ethernet device whose Tx queue is configured
776  * uint16_t queue_idx: Index of the queue in the device Tx queue list
777  * uint16_t nb_desc: Defaults to the value read from config space
778  * unsigned int socket_id: Used to allocate the memzone
779  * const struct rte_eth_txconf *tx_conf: Used to set up the tx engine
780  */
781 int
782 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
783                         uint16_t queue_idx,
784                         uint16_t nb_desc,
785                         unsigned int socket_id __rte_unused,
786                         const struct rte_eth_txconf *tx_conf)
787 {
788         uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
789         struct virtio_hw *hw = dev->data->dev_private;
790         struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
791         struct virtnet_tx *txvq;
792         uint16_t tx_free_thresh;
793
794         PMD_INIT_FUNC_TRACE();
795
796         if (tx_conf->tx_deferred_start) {
797                 PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
798                 return -EINVAL;
799         }
800
801         if (nb_desc == 0 || nb_desc > vq->vq_nentries)
802                 nb_desc = vq->vq_nentries;
803         vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
804
805         txvq = &vq->txq;
806         txvq->queue_id = queue_idx;
807
808         tx_free_thresh = tx_conf->tx_free_thresh;
809         if (tx_free_thresh == 0)
810                 tx_free_thresh =
811                         RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
812
813         if (tx_free_thresh >= (vq->vq_nentries - 3)) {
814                 PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
815                         "number of TX entries minus 3 (%u)."
816                         " (tx_free_thresh=%u port=%u queue=%u)\n",
817                         vq->vq_nentries - 3,
818                         tx_free_thresh, dev->data->port_id, queue_idx);
819                 return -EINVAL;
820         }
821
822         vq->vq_free_thresh = tx_free_thresh;
823
824         dev->data->tx_queues[queue_idx] = txvq;
825         return 0;
826 }
827
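/* Second stage of Tx queue setup: for in-order split rings, link the last
 * descriptor back to index 0 so the chain wraps around.
 */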
828 int
829 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
830                                 uint16_t queue_idx)
831 {
832         uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
833         struct virtio_hw *hw = dev->data->dev_private;
834         struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
835
836         PMD_INIT_FUNC_TRACE();
837
838         if (!vtpci_packed_queue(hw)) {
839                 if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
840                         vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
841         }
842
843         VIRTQUEUE_DUMP(vq);
844
845         return 0;
846 }
847
848 static inline void
849 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
850 {
851         int error;
852         /*
853          * Requeue the discarded mbuf. This should always be
854          * successful since it was just dequeued.
855          */
856         if (vtpci_packed_queue(vq->hw))
857                 error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
858         else
859                 error = virtqueue_enqueue_recv_refill(vq, &m, 1);
860
861         if (unlikely(error)) {
862                 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
863                 rte_pktmbuf_free(m);
864         }
865 }
866
867 static inline void
868 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
869 {
870         int error;
871
872         error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
873         if (unlikely(error)) {
874                 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
875                 rte_pktmbuf_free(m);
876         }
877 }
878
879 /* Optionally fill Rx offload information in the mbuf from the virtio net header */
880 static inline int
881 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
882 {
883         struct rte_net_hdr_lens hdr_lens;
884         uint32_t hdrlen, ptype;
885         int l4_supported = 0;
886
887         /* nothing to do */
888         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
889                 return 0;
890
891         m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
892
893         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
894         m->packet_type = ptype;
895         if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
896             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
897             (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
898                 l4_supported = 1;
899
900         if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
901                 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
902                 if (hdr->csum_start <= hdrlen && l4_supported) {
903                         m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
904                 } else {
905                         /* Unknown proto or tunnel, do sw cksum. We can assume
906                          * the cksum field is in the first segment since the
907                          * buffers we provided to the host are large enough.
908                          * In case of SCTP, this will be wrong since it's a CRC
909                          * but there's nothing we can do.
910                          */
911                         uint16_t csum = 0, off;
912
913                         rte_raw_cksum_mbuf(m, hdr->csum_start,
914                                 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
915                                 &csum);
916                         if (likely(csum != 0xffff))
917                                 csum = ~csum;
918                         off = hdr->csum_offset + hdr->csum_start;
919                         if (rte_pktmbuf_data_len(m) >= off + 1)
920                                 *rte_pktmbuf_mtod_offset(m, uint16_t *,
921                                         off) = csum;
922                 }
923         } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
924                 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
925         }
926
927         /* GSO request, save required information in mbuf */
928         if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
929                 /* Check unsupported modes */
930                 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
931                     (hdr->gso_size == 0)) {
932                         return -EINVAL;
933                 }
934
935                 /* Update mss length in mbuf */
936                 m->tso_segsz = hdr->gso_size;
937                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
938                         case VIRTIO_NET_HDR_GSO_TCPV4:
939                         case VIRTIO_NET_HDR_GSO_TCPV6:
940                                 m->ol_flags |= PKT_RX_LRO | \
941                                         PKT_RX_L4_CKSUM_NONE;
942                                 break;
943                         default:
944                                 return -EINVAL;
945                 }
946         }
947
948         return 0;
949 }
950
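/* Number of split-ring descriptors that fit in one cache line; used below
 * to trim burst sizes so they end on a descriptor cache-line boundary.
 */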
951 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
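/* Receive burst for split rings without mergeable buffers: one used
 * descriptor per packet, followed by a bulk refill of the ring.
 */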
952 uint16_t
953 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
954 {
955         struct virtnet_rx *rxvq = rx_queue;
956         struct virtqueue *vq = rxvq->vq;
957         struct virtio_hw *hw = vq->hw;
958         struct rte_mbuf *rxm;
959         uint16_t nb_used, num, nb_rx;
960         uint32_t len[VIRTIO_MBUF_BURST_SZ];
961         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
962         int error;
963         uint32_t i, nb_enqueued;
964         uint32_t hdr_size;
965         struct virtio_net_hdr *hdr;
966
967         nb_rx = 0;
968         if (unlikely(hw->started == 0))
969                 return nb_rx;
970
971         nb_used = VIRTQUEUE_NUSED(vq);
972
973         virtio_rmb(hw->weak_barriers);
974
975         num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
976         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
977                 num = VIRTIO_MBUF_BURST_SZ;
978         if (likely(num > DESC_PER_CACHELINE))
979                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
980
981         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
982         PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
983
984         nb_enqueued = 0;
985         hdr_size = hw->vtnet_hdr_size;
986
987         for (i = 0; i < num ; i++) {
988                 rxm = rcv_pkts[i];
989
990                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
991
992                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
993                         PMD_RX_LOG(ERR, "Packet drop");
994                         nb_enqueued++;
995                         virtio_discard_rxbuf(vq, rxm);
996                         rxvq->stats.errors++;
997                         continue;
998                 }
999
1000                 rxm->port = rxvq->port_id;
1001                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1002                 rxm->ol_flags = 0;
1003                 rxm->vlan_tci = 0;
1004
1005                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1006                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1007
1008                 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1009                         RTE_PKTMBUF_HEADROOM - hdr_size);
1010
1011                 if (hw->vlan_strip)
1012                         rte_vlan_strip(rxm);
1013
1014                 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1015                         virtio_discard_rxbuf(vq, rxm);
1016                         rxvq->stats.errors++;
1017                         continue;
1018                 }
1019
1020                 virtio_rx_stats_updated(rxvq, rxm);
1021
1022                 rx_pkts[nb_rx++] = rxm;
1023         }
1024
1025         rxvq->stats.packets += nb_rx;
1026
1027         /* Allocate new mbufs to refill the used descriptors */
1028         if (likely(!virtqueue_full(vq))) {
1029                 uint16_t free_cnt = vq->vq_free_cnt;
1030                 struct rte_mbuf *new_pkts[free_cnt];
1031
1032                 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1033                                                 free_cnt) == 0)) {
1034                         error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1035                                         free_cnt);
1036                         if (unlikely(error)) {
1037                                 for (i = 0; i < free_cnt; i++)
1038                                         rte_pktmbuf_free(new_pkts[i]);
1039                         }
1040                         nb_enqueued += free_cnt;
1041                 } else {
1042                         struct rte_eth_dev *dev =
1043                                 &rte_eth_devices[rxvq->port_id];
1044                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1045                 }
1046         }
1047
1048         if (likely(nb_enqueued)) {
1049                 vq_update_avail_idx(vq);
1050
1051                 if (unlikely(virtqueue_kick_prepare(vq))) {
1052                         virtqueue_notify(vq);
1053                         PMD_RX_LOG(DEBUG, "Notified");
1054                 }
1055         }
1056
1057         return nb_rx;
1058 }
1059
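/* Receive burst for packed rings without mergeable buffers. */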
1060 uint16_t
1061 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1062                         uint16_t nb_pkts)
1063 {
1064         struct virtnet_rx *rxvq = rx_queue;
1065         struct virtqueue *vq = rxvq->vq;
1066         struct virtio_hw *hw = vq->hw;
1067         struct rte_mbuf *rxm;
1068         uint16_t num, nb_rx;
1069         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1070         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1071         int error;
1072         uint32_t i, nb_enqueued;
1073         uint32_t hdr_size;
1074         struct virtio_net_hdr *hdr;
1075
1076         nb_rx = 0;
1077         if (unlikely(hw->started == 0))
1078                 return nb_rx;
1079
1080         num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1081         if (likely(num > DESC_PER_CACHELINE))
1082                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1083
1084         num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1085         PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1086
1087         nb_enqueued = 0;
1088         hdr_size = hw->vtnet_hdr_size;
1089
1090         for (i = 0; i < num; i++) {
1091                 rxm = rcv_pkts[i];
1092
1093                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1094
1095                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1096                         PMD_RX_LOG(ERR, "Packet drop");
1097                         nb_enqueued++;
1098                         virtio_discard_rxbuf(vq, rxm);
1099                         rxvq->stats.errors++;
1100                         continue;
1101                 }
1102
1103                 rxm->port = rxvq->port_id;
1104                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1105                 rxm->ol_flags = 0;
1106                 rxm->vlan_tci = 0;
1107
1108                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1109                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1110
1111                 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1112                         RTE_PKTMBUF_HEADROOM - hdr_size);
1113
1114                 if (hw->vlan_strip)
1115                         rte_vlan_strip(rxm);
1116
1117                 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1118                         virtio_discard_rxbuf(vq, rxm);
1119                         rxvq->stats.errors++;
1120                         continue;
1121                 }
1122
1123                 virtio_rx_stats_updated(rxvq, rxm);
1124
1125                 rx_pkts[nb_rx++] = rxm;
1126         }
1127
1128         rxvq->stats.packets += nb_rx;
1129
1130         /* Allocate new mbufs to refill the used descriptors */
1131         if (likely(!virtqueue_full(vq))) {
1132                 uint16_t free_cnt = vq->vq_free_cnt;
1133                 struct rte_mbuf *new_pkts[free_cnt];
1134
1135                 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1136                                                 free_cnt) == 0)) {
1137                         error = virtqueue_enqueue_recv_refill_packed(vq,
1138                                         new_pkts, free_cnt);
1139                         if (unlikely(error)) {
1140                                 for (i = 0; i < free_cnt; i++)
1141                                         rte_pktmbuf_free(new_pkts[i]);
1142                         }
1143                         nb_enqueued += free_cnt;
1144                 } else {
1145                         struct rte_eth_dev *dev =
1146                                 &rte_eth_devices[rxvq->port_id];
1147                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1148                 }
1149         }
1150
1151         if (likely(nb_enqueued)) {
1152                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1153                         virtqueue_notify(vq);
1154                         PMD_RX_LOG(DEBUG, "Notified");
1155                 }
1156         }
1157
1158         return nb_rx;
1159 }
1160
1161
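/* Receive burst for in-order split rings; mergeable packets are
 * reassembled by chaining the follow-up buffers onto the head mbuf.
 */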
1162 uint16_t
1163 virtio_recv_pkts_inorder(void *rx_queue,
1164                         struct rte_mbuf **rx_pkts,
1165                         uint16_t nb_pkts)
1166 {
1167         struct virtnet_rx *rxvq = rx_queue;
1168         struct virtqueue *vq = rxvq->vq;
1169         struct virtio_hw *hw = vq->hw;
1170         struct rte_mbuf *rxm;
1171         struct rte_mbuf *prev = NULL;
1172         uint16_t nb_used, num, nb_rx;
1173         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1174         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1175         int error;
1176         uint32_t nb_enqueued;
1177         uint32_t seg_num;
1178         uint32_t seg_res;
1179         uint32_t hdr_size;
1180         int32_t i;
1181
1182         nb_rx = 0;
1183         if (unlikely(hw->started == 0))
1184                 return nb_rx;
1185
1186         nb_used = VIRTQUEUE_NUSED(vq);
1187         nb_used = RTE_MIN(nb_used, nb_pkts);
1188         nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1189
1190         virtio_rmb(hw->weak_barriers);
1191
1192         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1193
1194         nb_enqueued = 0;
1195         seg_num = 1;
1196         seg_res = 0;
1197         hdr_size = hw->vtnet_hdr_size;
1198
1199         num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1200
1201         for (i = 0; i < num; i++) {
1202                 struct virtio_net_hdr_mrg_rxbuf *header;
1203
1204                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1205                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1206
1207                 rxm = rcv_pkts[i];
1208
1209                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1210                         PMD_RX_LOG(ERR, "Packet drop");
1211                         nb_enqueued++;
1212                         virtio_discard_rxbuf_inorder(vq, rxm);
1213                         rxvq->stats.errors++;
1214                         continue;
1215                 }
1216
1217                 header = (struct virtio_net_hdr_mrg_rxbuf *)
1218                          ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1219                          - hdr_size);
1220
1221                 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1222                         seg_num = header->num_buffers;
1223                         if (seg_num == 0)
1224                                 seg_num = 1;
1225                 } else {
1226                         seg_num = 1;
1227                 }
1228
1229                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1230                 rxm->nb_segs = seg_num;
1231                 rxm->ol_flags = 0;
1232                 rxm->vlan_tci = 0;
1233                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1234                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1235
1236                 rxm->port = rxvq->port_id;
1237
1238                 rx_pkts[nb_rx] = rxm;
1239                 prev = rxm;
1240
1241                 if (vq->hw->has_rx_offload &&
1242                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1243                         virtio_discard_rxbuf_inorder(vq, rxm);
1244                         rxvq->stats.errors++;
1245                         continue;
1246                 }
1247
1248                 if (hw->vlan_strip)
1249                         rte_vlan_strip(rx_pkts[nb_rx]);
1250
1251                 seg_res = seg_num - 1;
1252
1253                 /* Merge remaining segments */
1254                 while (seg_res != 0 && i < (num - 1)) {
1255                         i++;
1256
1257                         rxm = rcv_pkts[i];
1258                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1259                         rxm->pkt_len = (uint32_t)(len[i]);
1260                         rxm->data_len = (uint16_t)(len[i]);
1261
1262                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1263
1264                         prev->next = rxm;
1265                         prev = rxm;
1266                         seg_res -= 1;
1267                 }
1268
1269                 if (!seg_res) {
1270                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1271                         nb_rx++;
1272                 }
1273         }
1274
1275         /* Last packet still needs its remaining segments merged */
1276         while (seg_res != 0) {
1277                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1278                                         VIRTIO_MBUF_BURST_SZ);
1279
1280                 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1281                         virtio_rmb(hw->weak_barriers);
1282                         num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1283                                                            rcv_cnt);
1284                         uint16_t extra_idx = 0;
1285
1286                         rcv_cnt = num;
1287                         while (extra_idx < rcv_cnt) {
1288                                 rxm = rcv_pkts[extra_idx];
1289                                 rxm->data_off =
1290                                         RTE_PKTMBUF_HEADROOM - hdr_size;
1291                                 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1292                                 rxm->data_len = (uint16_t)(len[extra_idx]);
1293                                 prev->next = rxm;
1294                                 prev = rxm;
1295                                 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1296                                 extra_idx += 1;
1297                         }
1298                         seg_res -= rcv_cnt;
1299
1300                         if (!seg_res) {
1301                                 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1302                                 nb_rx++;
1303                         }
1304                 } else {
1305                         PMD_RX_LOG(ERR,
1306                                         "Not enough segments for packet.");
1307                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1308                         rxvq->stats.errors++;
1309                         break;
1310                 }
1311         }
1312
1313         rxvq->stats.packets += nb_rx;
1314
1315         /* Allocate new mbufs to refill the used descriptors */
1316
1317         if (likely(!virtqueue_full(vq))) {
1318                 /* free_cnt may include mrg descs */
1319                 uint16_t free_cnt = vq->vq_free_cnt;
1320                 struct rte_mbuf *new_pkts[free_cnt];
1321
1322                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1323                         error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1324                                         free_cnt);
1325                         if (unlikely(error)) {
1326                                 for (i = 0; i < free_cnt; i++)
1327                                         rte_pktmbuf_free(new_pkts[i]);
1328                         }
1329                         nb_enqueued += free_cnt;
1330                 } else {
1331                         struct rte_eth_dev *dev =
1332                                 &rte_eth_devices[rxvq->port_id];
1333                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1334                 }
1335         }
1336
1337         if (likely(nb_enqueued)) {
1338                 vq_update_avail_idx(vq);
1339
1340                 if (unlikely(virtqueue_kick_prepare(vq))) {
1341                         virtqueue_notify(vq);
1342                         PMD_RX_LOG(DEBUG, "Notified");
1343                 }
1344         }
1345
1346         return nb_rx;
1347 }
1348
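/* Receive burst for split rings with VIRTIO_NET_F_MRG_RXBUF: a packet may
 * span several descriptors, as indicated by num_buffers in the header.
 */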
1349 uint16_t
1350 virtio_recv_mergeable_pkts(void *rx_queue,
1351                         struct rte_mbuf **rx_pkts,
1352                         uint16_t nb_pkts)
1353 {
1354         struct virtnet_rx *rxvq = rx_queue;
1355         struct virtqueue *vq = rxvq->vq;
1356         struct virtio_hw *hw = vq->hw;
1357         struct rte_mbuf *rxm;
1358         struct rte_mbuf *prev = NULL;
1359         uint16_t nb_used, num, nb_rx = 0;
1360         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1361         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1362         int error;
1363         uint32_t nb_enqueued = 0;
1364         uint32_t seg_num = 0;
1365         uint32_t seg_res = 0;
1366         uint32_t hdr_size = hw->vtnet_hdr_size;
1367         int32_t i;
1368
1369         if (unlikely(hw->started == 0))
1370                 return nb_rx;
1371
1372         nb_used = VIRTQUEUE_NUSED(vq);
1373
1374         virtio_rmb(hw->weak_barriers);
1375
1376         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1377
1378         num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1379         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1380                 num = VIRTIO_MBUF_BURST_SZ;
1381         if (likely(num > DESC_PER_CACHELINE))
1382                 num = num - ((vq->vq_used_cons_idx + num) %
1383                                 DESC_PER_CACHELINE);
1384
1385
1386         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1387
1388         for (i = 0; i < num; i++) {
1389                 struct virtio_net_hdr_mrg_rxbuf *header;
1390
1391                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1392                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1393
1394                 rxm = rcv_pkts[i];
1395
1396                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1397                         PMD_RX_LOG(ERR, "Packet drop");
1398                         nb_enqueued++;
1399                         virtio_discard_rxbuf(vq, rxm);
1400                         rxvq->stats.errors++;
1401                         continue;
1402                 }
1403
1404                 header = (struct virtio_net_hdr_mrg_rxbuf *)
1405                          ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1406                          - hdr_size);
1407                 seg_num = header->num_buffers;
1408                 if (seg_num == 0)
1409                         seg_num = 1;
1410
1411                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1412                 rxm->nb_segs = seg_num;
1413                 rxm->ol_flags = 0;
1414                 rxm->vlan_tci = 0;
1415                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1416                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1417
1418                 rxm->port = rxvq->port_id;
1419
1420                 rx_pkts[nb_rx] = rxm;
1421                 prev = rxm;
1422
1423                 if (hw->has_rx_offload &&
1424                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1425                         virtio_discard_rxbuf(vq, rxm);
1426                         rxvq->stats.errors++;
1427                         continue;
1428                 }
1429
1430                 if (hw->vlan_strip)
1431                         rte_vlan_strip(rx_pkts[nb_rx]);
1432
1433                 seg_res = seg_num - 1;
1434
1435                 /* Merge remaining segments */
1436                 while (seg_res != 0 && i < (num - 1)) {
1437                         i++;
1438
1439                         rxm = rcv_pkts[i];
1440                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1441                         rxm->pkt_len = (uint32_t)(len[i]);
1442                         rxm->data_len = (uint16_t)(len[i]);
1443
1444                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1445
1446                         prev->next = rxm;
1447                         prev = rxm;
1448                         seg_res -= 1;
1449                 }
1450
1451                 if (!seg_res) {
1452                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1453                         nb_rx++;
1454                 }
1455         }
1456
1457         /* The last packet still needs its remaining segments merged */
1458         while (seg_res != 0) {
1459                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1460                                         VIRTIO_MBUF_BURST_SZ);
1461
1462                 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1463                         virtio_rmb(hw->weak_barriers);
1464                         num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1465                                                            rcv_cnt);
1466                         uint16_t extra_idx = 0;
1467
1468                         rcv_cnt = num;
1469                         while (extra_idx < rcv_cnt) {
1470                                 rxm = rcv_pkts[extra_idx];
1471                                 rxm->data_off =
1472                                         RTE_PKTMBUF_HEADROOM - hdr_size;
1473                                 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1474                                 rxm->data_len = (uint16_t)(len[extra_idx]);
1475                                 prev->next = rxm;
1476                                 prev = rxm;
1477                                 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1478                                 extra_idx += 1;
1479                         }
1480                         seg_res -= rcv_cnt;
1481
1482                         if (!seg_res) {
1483                                 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1484                                 nb_rx++;
1485                         }
1486                 } else {
1487                         PMD_RX_LOG(ERR,
1488                                         "Not enough segments for packet.");
1489                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1490                         rxvq->stats.errors++;
1491                         break;
1492                 }
1493         }
1494
1495         rxvq->stats.packets += nb_rx;
1496
1497         /* Allocate new mbufs for the used descriptors */
1498         if (likely(!virtqueue_full(vq))) {
1499                 /* free_cnt may include mrg descs */
1500                 uint16_t free_cnt = vq->vq_free_cnt;
1501                 struct rte_mbuf *new_pkts[free_cnt];
1502
1503                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1504                         error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1505                                         free_cnt);
1506                         if (unlikely(error)) {
1507                                 for (i = 0; i < free_cnt; i++)
1508                                         rte_pktmbuf_free(new_pkts[i]);
1509                         }
1510                         nb_enqueued += free_cnt;
1511                 } else {
1512                         struct rte_eth_dev *dev =
1513                                 &rte_eth_devices[rxvq->port_id];
1514                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1515                 }
1516         }
1517
1518         if (likely(nb_enqueued)) {
1519                 vq_update_avail_idx(vq);
1520
1521                 if (unlikely(virtqueue_kick_prepare(vq))) {
1522                         virtqueue_notify(vq);
1523                         PMD_RX_LOG(DEBUG, "Notified");
1524                 }
1525         }
1526
1527         return nb_rx;
1528 }
1529
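/*
 * Receive burst of mergeable buffers on a packed virtqueue.
 *
 * The first descriptor of each packet carries the virtio-net header whose
 * num_buffers field tells how many descriptors the packet spans; the extra
 * descriptors are chained onto the head mbuf. Segments that arrive after the
 * current burst are fetched with additional dequeues. Finally the ring is
 * refilled from the mempool and the device is notified if required.
 */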
1530 uint16_t
1531 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1532                         struct rte_mbuf **rx_pkts,
1533                         uint16_t nb_pkts)
1534 {
1535         struct virtnet_rx *rxvq = rx_queue;
1536         struct virtqueue *vq = rxvq->vq;
1537         struct virtio_hw *hw = vq->hw;
1538         struct rte_mbuf *rxm;
1539         struct rte_mbuf *prev = NULL;
1540         uint16_t num, nb_rx = 0;
1541         uint32_t len[VIRTIO_MBUF_BURST_SZ];
1542         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1543         uint32_t nb_enqueued = 0;
1544         uint32_t seg_num = 0;
1545         uint32_t seg_res = 0;
1546         uint32_t hdr_size = hw->vtnet_hdr_size;
1547         int32_t i;
1548         int error;
1549
1550         if (unlikely(hw->started == 0))
1551                 return nb_rx;
1552
1553
1554         num = nb_pkts;
1555         if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1556                 num = VIRTIO_MBUF_BURST_SZ;
1557         if (likely(num > DESC_PER_CACHELINE))
1558                 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1559
1560         num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1561
1562         for (i = 0; i < num; i++) {
1563                 struct virtio_net_hdr_mrg_rxbuf *header;
1564
1565                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1566                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1567
1568                 rxm = rcv_pkts[i];
1569
1570                 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1571                         PMD_RX_LOG(ERR, "Packet drop");
1572                         nb_enqueued++;
1573                         virtio_discard_rxbuf(vq, rxm);
1574                         rxvq->stats.errors++;
1575                         continue;
1576                 }
1577
1578                 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1579                           rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1580                 seg_num = header->num_buffers;
1581
1582                 if (seg_num == 0)
1583                         seg_num = 1;
1584
1585                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1586                 rxm->nb_segs = seg_num;
1587                 rxm->ol_flags = 0;
1588                 rxm->vlan_tci = 0;
1589                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1590                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1591
1592                 rxm->port = rxvq->port_id;
1593                 rx_pkts[nb_rx] = rxm;
1594                 prev = rxm;
1595
1596                 if (hw->has_rx_offload &&
1597                                 virtio_rx_offload(rxm, &header->hdr) < 0) {
1598                         virtio_discard_rxbuf(vq, rxm);
1599                         rxvq->stats.errors++;
1600                         continue;
1601                 }
1602
1603                 if (hw->vlan_strip)
1604                         rte_vlan_strip(rx_pkts[nb_rx]);
1605
1606                 seg_res = seg_num - 1;
1607
1608                 /* Merge remaining segments */
1609                 while (seg_res != 0 && i < (num - 1)) {
1610                         i++;
1611
1612                         rxm = rcv_pkts[i];
1613                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1614                         rxm->pkt_len = (uint32_t)(len[i]);
1615                         rxm->data_len = (uint16_t)(len[i]);
1616
1617                         rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1618
1619                         prev->next = rxm;
1620                         prev = rxm;
1621                         seg_res -= 1;
1622                 }
1623
1624                 if (!seg_res) {
1625                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1626                         nb_rx++;
1627                 }
1628         }
1629
1630         /* The last packet still needs its remaining segments merged */
1631         while (seg_res != 0) {
1632                 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1633                                         VIRTIO_MBUF_BURST_SZ);
1634                 uint16_t extra_idx = 0;
1635
1636                 rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1637                                 len, rcv_cnt);
1638                 if (unlikely(rcv_cnt == 0)) {
1639                         PMD_RX_LOG(ERR, "Not enough segments for packet.");
1640                         rte_pktmbuf_free(rx_pkts[nb_rx]);
1641                         rxvq->stats.errors++;
1642                         break;
1643                 }
1644
1645                 while (extra_idx < rcv_cnt) {
1646                         rxm = rcv_pkts[extra_idx];
1647
1648                         rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1649                         rxm->pkt_len = (uint32_t)(len[extra_idx]);
1650                         rxm->data_len = (uint16_t)(len[extra_idx]);
1651
1652                         prev->next = rxm;
1653                         prev = rxm;
1654                         rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1655                         extra_idx += 1;
1656                 }
1657                 seg_res -= rcv_cnt;
1658                 if (!seg_res) {
1659                         virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1660                         nb_rx++;
1661                 }
1662         }
1663
1664         rxvq->stats.packets += nb_rx;
1665
1666         /* Allocate new mbufs for the used descriptors */
1667         if (likely(!virtqueue_full(vq))) {
1668                 /* free_cnt may include mrg descs */
1669                 uint16_t free_cnt = vq->vq_free_cnt;
1670                 struct rte_mbuf *new_pkts[free_cnt];
1671
1672                 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1673                         error = virtqueue_enqueue_recv_refill_packed(vq,
1674                                         new_pkts, free_cnt);
1675                         if (unlikely(error)) {
1676                                 for (i = 0; i < free_cnt; i++)
1677                                         rte_pktmbuf_free(new_pkts[i]);
1678                         }
1679                         nb_enqueued += free_cnt;
1680                 } else {
1681                         struct rte_eth_dev *dev =
1682                                 &rte_eth_devices[rxvq->port_id];
1683                         dev->data->rx_mbuf_alloc_failed += free_cnt;
1684                 }
1685         }
1686
1687         if (likely(nb_enqueued)) {
1688                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1689                         virtqueue_notify(vq);
1690                         PMD_RX_LOG(DEBUG, "Notified");
1691                 }
1692         }
1693
1694         return nb_rx;
1695 }
1696
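/*
 * Tx prepare callback shared by the Tx paths: optionally validates the
 * requested offloads (debug builds only), performs software VLAN insertion
 * for PKT_TX_VLAN_PKT, prepares checksums for the device and fixes up the
 * TCP checksum of TSO packets. Returns the number of packets successfully
 * prepared; rte_errno is set for the first failing packet.
 */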
1697 uint16_t
1698 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1699                         uint16_t nb_pkts)
1700 {
1701         uint16_t nb_tx;
1702         int error;
1703
1704         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1705                 struct rte_mbuf *m = tx_pkts[nb_tx];
1706
1707 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1708                 error = rte_validate_tx_offload(m);
1709                 if (unlikely(error)) {
1710                         rte_errno = -error;
1711                         break;
1712                 }
1713 #endif
1714
1715                 /* Do VLAN tag insertion */
1716                 if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1717                         error = rte_vlan_insert(&m);
1718                         /* rte_vlan_insert() may change pointer
1719                          * even in the case of failure
1720                          */
1721                         tx_pkts[nb_tx] = m;
1722
1723                         if (unlikely(error)) {
1724                                 rte_errno = -error;
1725                                 break;
1726                         }
1727                 }
1728
1729                 error = rte_net_intel_cksum_prepare(m);
1730                 if (unlikely(error)) {
1731                         rte_errno = -error;
1732                         break;
1733                 }
1734
1735                 if (m->ol_flags & PKT_TX_TCP_SEG)
1736                         virtio_tso_fix_cksum(m);
1737         }
1738
1739         return nb_tx;
1740 }
1741
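/*
 * Tx burst for the packed virtqueue. When the mbuf layout allows it, the
 * virtio-net header is pushed into the packet's own headroom (can_push),
 * saving one descriptor per packet; otherwise the header gets a descriptor
 * of its own. Used descriptors are reclaimed on demand whenever the ring
 * runs short of free slots.
 */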
1742 uint16_t
1743 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1744                         uint16_t nb_pkts)
1745 {
1746         struct virtnet_tx *txvq = tx_queue;
1747         struct virtqueue *vq = txvq->vq;
1748         struct virtio_hw *hw = vq->hw;
1749         uint16_t hdr_size = hw->vtnet_hdr_size;
1750         uint16_t nb_tx = 0;
1751         bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
1752
1753         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1754                 return nb_tx;
1755
1756         if (unlikely(nb_pkts < 1))
1757                 return nb_pkts;
1758
1759         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1760
1761         if (nb_pkts > vq->vq_free_cnt)
1762                 virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1763                                            in_order);
1764
1765         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1766                 struct rte_mbuf *txm = tx_pkts[nb_tx];
1767                 int can_push = 0, slots, need;
1768
1769                 /* optimize ring usage */
1770                 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1771                       vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1772                     rte_mbuf_refcnt_read(txm) == 1 &&
1773                     RTE_MBUF_DIRECT(txm) &&
1774                     txm->nb_segs == 1 &&
1775                     rte_pktmbuf_headroom(txm) >= hdr_size &&
1776                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1777                            __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1778                         can_push = 1;
1779
1780                 /* How many main ring entries are needed for this Tx?
1781                  * any_layout => number of segments
1782                  * default    => number of segments + 1
1783                  */
1784                 slots = txm->nb_segs + !can_push;
1785                 need = slots - vq->vq_free_cnt;
1786
1787                 /* A positive value means more free vring descriptors are needed */
1788                 if (unlikely(need > 0)) {
1789                         virtio_xmit_cleanup_packed(vq, need, in_order);
1790                         need = slots - vq->vq_free_cnt;
1791                         if (unlikely(need > 0)) {
1792                                 PMD_TX_LOG(ERR,
1793                                            "No free tx descriptors to transmit");
1794                                 break;
1795                         }
1796                 }
1797
1798                 /* Enqueue Packet buffers */
1799                 if (can_push)
1800                         virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1801                 else
1802                         virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
1803                                                       in_order);
1804
1805                 virtio_update_packet_stats(&txvq->stats, txm);
1806         }
1807
1808         txvq->stats.packets += nb_tx;
1809
1810         if (likely(nb_tx)) {
1811                 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1812                         virtqueue_notify(vq);
1813                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1814                 }
1815         }
1816
1817         return nb_tx;
1818 }
1819
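/*
 * Tx burst for the split virtqueue. Single-segment packets with enough
 * headroom have the virtio-net header pushed in front of the data; when
 * indirect descriptors are negotiated, multi-segment packets consume only a
 * single ring slot. Used entries are cleaned up lazily, and the device is
 * notified once the burst has been enqueued.
 */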
1820 uint16_t
1821 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1822 {
1823         struct virtnet_tx *txvq = tx_queue;
1824         struct virtqueue *vq = txvq->vq;
1825         struct virtio_hw *hw = vq->hw;
1826         uint16_t hdr_size = hw->vtnet_hdr_size;
1827         uint16_t nb_used, nb_tx = 0;
1828
1829         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1830                 return nb_tx;
1831
1832         if (unlikely(nb_pkts < 1))
1833                 return nb_pkts;
1834
1835         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1836         nb_used = VIRTQUEUE_NUSED(vq);
1837
1838         virtio_rmb(hw->weak_barriers);
1839         if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1840                 virtio_xmit_cleanup(vq, nb_used);
1841
1842         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1843                 struct rte_mbuf *txm = tx_pkts[nb_tx];
1844                 int can_push = 0, use_indirect = 0, slots, need;
1845
1846                 /* optimize ring usage */
1847                 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1848                       vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1849                     rte_mbuf_refcnt_read(txm) == 1 &&
1850                     RTE_MBUF_DIRECT(txm) &&
1851                     txm->nb_segs == 1 &&
1852                     rte_pktmbuf_headroom(txm) >= hdr_size &&
1853                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1854                                    __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1855                         can_push = 1;
1856                 else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1857                          txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1858                         use_indirect = 1;
1859
1860                 /* How many main ring entries are needed for this Tx?
1861                  * any_layout => number of segments
1862                  * indirect   => 1
1863                  * default    => number of segments + 1
1864                  */
1865                 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1866                 need = slots - vq->vq_free_cnt;
1867
1868                 /* A positive value means more free vring descriptors are needed */
1869                 if (unlikely(need > 0)) {
1870                         nb_used = VIRTQUEUE_NUSED(vq);
1871                         virtio_rmb(hw->weak_barriers);
1872                         need = RTE_MIN(need, (int)nb_used);
1873
1874                         virtio_xmit_cleanup(vq, need);
1875                         need = slots - vq->vq_free_cnt;
1876                         if (unlikely(need > 0)) {
1877                                 PMD_TX_LOG(ERR,
1878                                            "No free tx descriptors to transmit");
1879                                 break;
1880                         }
1881                 }
1882
1883                 /* Enqueue Packet buffers */
1884                 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1885                         can_push, 0);
1886
1887                 virtio_update_packet_stats(&txvq->stats, txm);
1888         }
1889
1890         txvq->stats.packets += nb_tx;
1891
1892         if (likely(nb_tx)) {
1893                 vq_update_avail_idx(vq);
1894
1895                 if (unlikely(virtqueue_kick_prepare(vq))) {
1896                         virtqueue_notify(vq);
1897                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1898                 }
1899         }
1900
1901         return nb_tx;
1902 }
1903
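/*
 * Try to reclaim enough in-order used descriptors to provide 'need'
 * additional free slots. Returns the number of descriptors still missing
 * after the cleanup (a value <= 0 means the request can now be satisfied).
 */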
1904 static __rte_always_inline int
1905 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1906 {
1907         uint16_t nb_used, nb_clean, nb_descs;
1908         struct virtio_hw *hw = vq->hw;
1909
1910         nb_descs = vq->vq_free_cnt + need;
1911         nb_used = VIRTQUEUE_NUSED(vq);
1912         virtio_rmb(hw->weak_barriers);
1913         nb_clean = RTE_MIN(need, (int)nb_used);
1914
1915         virtio_xmit_cleanup_inorder(vq, nb_clean);
1916
1917         return nb_descs - vq->vq_free_cnt;
1918 }
1919
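/*
 * Tx burst for devices negotiating VIRTIO_F_IN_ORDER. Packets whose header
 * can be pushed into the mbuf headroom are batched and enqueued together
 * through the in-order fast path; any other packet first flushes the pending
 * batch and then takes the generic enqueue path with an extra header
 * descriptor.
 */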
1920 uint16_t
1921 virtio_xmit_pkts_inorder(void *tx_queue,
1922                         struct rte_mbuf **tx_pkts,
1923                         uint16_t nb_pkts)
1924 {
1925         struct virtnet_tx *txvq = tx_queue;
1926         struct virtqueue *vq = txvq->vq;
1927         struct virtio_hw *hw = vq->hw;
1928         uint16_t hdr_size = hw->vtnet_hdr_size;
1929         uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1930         struct rte_mbuf *inorder_pkts[nb_pkts];
1931         int need;
1932
1933         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1934                 return nb_tx;
1935
1936         if (unlikely(nb_pkts < 1))
1937                 return nb_pkts;
1938
1939         VIRTQUEUE_DUMP(vq);
1940         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1941         nb_used = VIRTQUEUE_NUSED(vq);
1942
1943         virtio_rmb(hw->weak_barriers);
1944         if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1945                 virtio_xmit_cleanup_inorder(vq, nb_used);
1946
1947         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1948                 struct rte_mbuf *txm = tx_pkts[nb_tx];
1949                 int slots;
1950
1951                 /* optimize ring usage */
1952                 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1953                      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1954                      rte_mbuf_refcnt_read(txm) == 1 &&
1955                      RTE_MBUF_DIRECT(txm) &&
1956                      txm->nb_segs == 1 &&
1957                      rte_pktmbuf_headroom(txm) >= hdr_size &&
1958                      rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1959                                 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1960                         inorder_pkts[nb_inorder_pkts] = txm;
1961                         nb_inorder_pkts++;
1962
1963                         continue;
1964                 }
1965
1966                 if (nb_inorder_pkts) {
1967                         need = nb_inorder_pkts - vq->vq_free_cnt;
1968                         if (unlikely(need > 0)) {
1969                                 need = virtio_xmit_try_cleanup_inorder(vq,
1970                                                                        need);
1971                                 if (unlikely(need > 0)) {
1972                                         PMD_TX_LOG(ERR,
1973                                                 "No free tx descriptors to "
1974                                                 "transmit");
1975                                         break;
1976                                 }
1977                         }
1978                         virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
1979                                                         nb_inorder_pkts);
1980                         nb_inorder_pkts = 0;
1981                 }
1982
1983                 slots = txm->nb_segs + 1;
1984                 need = slots - vq->vq_free_cnt;
1985                 if (unlikely(need > 0)) {
1986                         need = virtio_xmit_try_cleanup_inorder(vq, slots);
1987
1988                         if (unlikely(need > 0)) {
1989                                 PMD_TX_LOG(ERR,
1990                                         "No free tx descriptors to transmit");
1991                                 break;
1992                         }
1993                 }
1994                 /* Enqueue Packet buffers */
1995                 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
1996
1997                 virtio_update_packet_stats(&txvq->stats, txm);
1998         }
1999
2000         /* Transmit all inorder packets */
2001         if (nb_inorder_pkts) {
2002                 need = nb_inorder_pkts - vq->vq_free_cnt;
2003                 if (unlikely(need > 0)) {
2004                         need = virtio_xmit_try_cleanup_inorder(vq,
2005                                                                   need);
2006                         if (unlikely(need > 0)) {
2007                                 PMD_TX_LOG(ERR,
2008                                         "No free tx descriptors to transmit");
2009                                 nb_inorder_pkts = vq->vq_free_cnt;
2010                                 nb_tx -= need;
2011                         }
2012                 }
2013
2014                 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2015                                                 nb_inorder_pkts);
2016         }
2017
2018         txvq->stats.packets += nb_tx;
2019
2020         if (likely(nb_tx)) {
2021                 vq_update_avail_idx(vq);
2022
2023                 if (unlikely(virtqueue_kick_prepare(vq))) {
2024                         virtqueue_notify(vq);
2025                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2026                 }
2027         }
2028
2029         VIRTQUEUE_DUMP(vq);
2030
2031         return nb_tx;
2032 }
2033
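/*
 * Stub used when the driver is built without AVX512 support: the vectorized
 * packed ring Rx path is unavailable, so no packets are ever returned.
 */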
2034 #ifndef CC_AVX512_SUPPORT
2035 uint16_t
2036 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2037                             struct rte_mbuf **rx_pkts __rte_unused,
2038                             uint16_t nb_pkts __rte_unused)
2039 {
2040         return 0;
2041 }
2042 #endif /* ifndef CC_AVX512_SUPPORT */