virtio: use any layout on Tx
[dpdk.git] / drivers / net / virtio / virtio_rxtx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdint.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <errno.h>
39
40 #include <rte_cycles.h>
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_branch_prediction.h>
44 #include <rte_mempool.h>
45 #include <rte_malloc.h>
46 #include <rte_mbuf.h>
47 #include <rte_ether.h>
48 #include <rte_ethdev.h>
49 #include <rte_prefetch.h>
50 #include <rte_string_fns.h>
51 #include <rte_errno.h>
52 #include <rte_byteorder.h>
53
54 #include "virtio_logs.h"
55 #include "virtio_ethdev.h"
56 #include "virtio_pci.h"
57 #include "virtqueue.h"
58 #include "virtio_rxtx.h"
59
60 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
61 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
62 #else
63 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
64 #endif
65
66
67 #define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
68         ETH_TXQ_FLAGS_NOOFFLOADS)
69
70 #ifdef RTE_MACHINE_CPUFLAG_SSSE3
71 static int use_simple_rxtx;
72 #endif
73
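/*
 * Return the descriptor chain starting at desc_idx to the free list: credit
 * vq_free_cnt with the descriptor count recorded at enqueue time, walk to
 * the end of the chain (an indirect descriptor occupies a single slot and is
 * not walked), and append the chain after the current free-list tail, or
 * make it the new head if the free list was empty.
 */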
74 static void
75 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
76 {
77         struct vring_desc *dp, *dp_tail;
78         struct vq_desc_extra *dxp;
79         uint16_t desc_idx_last = desc_idx;
80
81         dp  = &vq->vq_ring.desc[desc_idx];
82         dxp = &vq->vq_descx[desc_idx];
83         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
84         if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
85                 while (dp->flags & VRING_DESC_F_NEXT) {
86                         desc_idx_last = dp->next;
87                         dp = &vq->vq_ring.desc[dp->next];
88                 }
89         }
90         dxp->ndescs = 0;
91
92         /*
93          * Append the newly freed chain to the end of the existing free
94          * chain, if any. If the virtqueue was completely used, the free
95          * chain is empty and the freed chain becomes the new head.
96          */
97         if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
98                 vq->vq_desc_head_idx = desc_idx;
99         } else {
100                 dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
101                 dp_tail->next = desc_idx;
102         }
103
104         vq->vq_desc_tail_idx = desc_idx_last;
105         dp->next = VQ_RING_DESC_CHAIN_END;
106 }
107
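/*
 * Dequeue up to num used RX descriptors: for each used-ring entry, recover
 * the mbuf cookie saved at enqueue time, record the length written by the
 * device, and return the descriptor chain to the free list. Returns how many
 * mbufs were actually dequeued.
 */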
108 static uint16_t
109 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
110                            uint32_t *len, uint16_t num)
111 {
112         struct vring_used_elem *uep;
113         struct rte_mbuf *cookie;
114         uint16_t used_idx, desc_idx;
115         uint16_t i;
116
117         /*  Caller does the check */
118         for (i = 0; i < num ; i++) {
119                 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
120                 uep = &vq->vq_ring.used->ring[used_idx];
121                 desc_idx = (uint16_t) uep->id;
122                 len[i] = uep->len;
123                 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
124
125                 if (unlikely(cookie == NULL)) {
126                         PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
127                                 vq->vq_used_cons_idx);
128                         break;
129                 }
130
131                 rte_prefetch0(cookie);
132                 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
133                 rx_pkts[i]  = cookie;
134                 vq->vq_used_cons_idx++;
135                 vq_ring_free_chain(vq, desc_idx);
136                 vq->vq_descx[desc_idx].cookie = NULL;
137         }
138
139         return i;
140 }
141
142 #ifndef DEFAULT_TX_FREE_THRESH
143 #define DEFAULT_TX_FREE_THRESH 32
144 #endif
145
146 /* Cleanup from completed transmits. */
147 static void
148 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
149 {
150         uint16_t i, used_idx, desc_idx;
151         for (i = 0; i < num; i++) {
152                 struct vring_used_elem *uep;
153                 struct vq_desc_extra *dxp;
154
155                 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
156                 uep = &vq->vq_ring.used->ring[used_idx];
157
158                 desc_idx = (uint16_t) uep->id;
159                 dxp = &vq->vq_descx[desc_idx];
160                 vq->vq_used_cons_idx++;
161                 vq_ring_free_chain(vq, desc_idx);
162
163                 if (dxp->cookie != NULL) {
164                         rte_pktmbuf_free(dxp->cookie);
165                         dxp->cookie = NULL;
166                 }
167         }
168 }
169
170
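/*
 * Post a single receive buffer: the descriptor covers the mbuf buffer
 * starting vtnet_hdr_size bytes before the data headroom, so the device
 * writes the virtio-net header immediately ahead of the packet data. The
 * slot is marked device-writable and exposed on the avail ring.
 */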
171 static inline int
172 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
173 {
174         struct vq_desc_extra *dxp;
175         struct virtio_hw *hw = vq->hw;
176         struct vring_desc *start_dp;
177         uint16_t needed = 1;
178         uint16_t head_idx, idx;
179
180         if (unlikely(vq->vq_free_cnt == 0))
181                 return -ENOSPC;
182         if (unlikely(vq->vq_free_cnt < needed))
183                 return -EMSGSIZE;
184
185         head_idx = vq->vq_desc_head_idx;
186         if (unlikely(head_idx >= vq->vq_nentries))
187                 return -EFAULT;
188
189         idx = head_idx;
190         dxp = &vq->vq_descx[idx];
191         dxp->cookie = (void *)cookie;
192         dxp->ndescs = needed;
193
194         start_dp = vq->vq_ring.desc;
195         start_dp[idx].addr =
196                 (uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM
197                 - hw->vtnet_hdr_size);
198         start_dp[idx].len =
199                 cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
200         start_dp[idx].flags =  VRING_DESC_F_WRITE;
201         idx = start_dp[idx].next;
202         vq->vq_desc_head_idx = idx;
203         if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
204                 vq->vq_desc_tail_idx = idx;
205         vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
206         vq_update_avail_ring(vq, head_idx);
207
208         return 0;
209 }
210
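/*
 * Enqueue one packet for transmission. Three descriptor layouts are
 * possible, chosen by the caller:
 *  - can_push:     the device accepts any descriptor layout, so a zeroed
 *                  virtio-net header is prepended into the mbuf headroom and
 *                  the packet needs only one descriptor per segment;
 *  - use_indirect: a single main-ring slot points at a per-slot indirect
 *                  table in the reserved memzone whose first entry is the
 *                  preset header, followed by one entry per segment;
 *  - default:      the first main-ring slot points at the preset header in
 *                  the reserved region, followed by one slot per segment.
 */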
211 static int
212 virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
213                        uint16_t needed, int use_indirect, int can_push)
214 {
215         struct vq_desc_extra *dxp;
216         struct vring_desc *start_dp;
217         uint16_t seg_num = cookie->nb_segs;
218         uint16_t head_idx, idx;
219         uint16_t head_size = txvq->hw->vtnet_hdr_size;
220         unsigned long offs;
221
222         if (unlikely(txvq->vq_free_cnt == 0))
223                 return -ENOSPC;
224         if (unlikely(txvq->vq_free_cnt < needed))
225                 return -EMSGSIZE;
226         head_idx = txvq->vq_desc_head_idx;
227         if (unlikely(head_idx >= txvq->vq_nentries))
228                 return -EFAULT;
229
230         idx = head_idx;
231         dxp = &txvq->vq_descx[idx];
232         dxp->cookie = (void *)cookie;
233         dxp->ndescs = needed;
234
235         start_dp = txvq->vq_ring.desc;
236
237         if (can_push) {
238                 /* prepend a zeroed transmit header (no offloads) */
239                 void *hdr = rte_pktmbuf_prepend(cookie, head_size);
240
241                 memset(hdr, 0, head_size);
242         } else if (use_indirect) {
243                 /* Set up the tx ring slot to point to an indirect
244                  * descriptor list stored in the reserved region.
245                  *
246                  * The first slot of that indirect list is already preset
247                  * to point to the header in the reserved region.
248                  */
249                 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
250
251                 offs = idx * sizeof(struct virtio_tx_region)
252                         + offsetof(struct virtio_tx_region, tx_indir);
253
254                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem + offs;
255                 start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
256                 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
257
258                 /* the loop below fills in the rest of the indirect descriptors */
259                 start_dp = txr[idx].tx_indir;
260                 idx = 1;
261         } else {
262                 /* Set up the first tx ring slot to point to the header
263                  * stored in the reserved region.
264                  */
265                 offs = idx * sizeof(struct virtio_tx_region)
266                         + offsetof(struct virtio_tx_region, tx_hdr);
267
268                 start_dp[idx].addr  = txvq->virtio_net_hdr_mem + offs;
269                 start_dp[idx].len   = txvq->hw->vtnet_hdr_size;
270                 start_dp[idx].flags = VRING_DESC_F_NEXT;
271                 idx = start_dp[idx].next;
272         }
273
274         do {
275                 start_dp[idx].addr  = rte_mbuf_data_dma_addr(cookie);
276                 start_dp[idx].len   = cookie->data_len;
277                 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
278                 idx = start_dp[idx].next;
279         } while ((cookie = cookie->next) != NULL);
280
281         start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
282
283         if (use_indirect)
284                 idx = txvq->vq_ring.desc[head_idx].next;
285
286         txvq->vq_desc_head_idx = idx;
287         if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
288                 txvq->vq_desc_tail_idx = idx;
289         txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
290         vq_update_avail_ring(txvq, head_idx);
291
292         return 0;
293 }
294
295 static inline struct rte_mbuf *
296 rte_rxmbuf_alloc(struct rte_mempool *mp)
297 {
298         struct rte_mbuf *m;
299
300         m = __rte_mbuf_raw_alloc(mp);
301         __rte_mbuf_sanity_check_raw(m, 0);
302
303         return m;
304 }
305
306 static void
307 virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
308 {
309         struct rte_mbuf *m;
310         int i, nbufs, error, size = vq->vq_nentries;
311         struct vring *vr = &vq->vq_ring;
312         uint8_t *ring_mem = vq->vq_ring_virt_mem;
313
314         PMD_INIT_FUNC_TRACE();
315
316         /*
317          * Reinitialise since virtio port might have been stopped and restarted
318          */
319         memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
320         vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
321         vq->vq_used_cons_idx = 0;
322         vq->vq_desc_head_idx = 0;
323         vq->vq_avail_idx = 0;
324         vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
325         vq->vq_free_cnt = vq->vq_nentries;
326         memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
327
328         vring_desc_init(vr->desc, size);
329
330         /*
331          * Disable device(host) interrupting guest
332          */
333         virtqueue_disable_intr(vq);
334
335         /* Only rx virtqueue needs mbufs to be allocated at initialization */
336         if (queue_type == VTNET_RQ) {
337                 if (vq->mpool == NULL)
338                         rte_exit(EXIT_FAILURE,
339                         "Cannot allocate initial mbufs for rx virtqueue");
340
341                 /* Allocate blank mbufs for each rx descriptor */
342                 nbufs = 0;
343                 error = ENOSPC;
344
345 #ifdef RTE_MACHINE_CPUFLAG_SSSE3
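                /* For the simple (vectorized) RX path, pre-fill the avail
                 * ring with the identity mapping and mark every descriptor
                 * device-writable up front.
                 */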
346                 if (use_simple_rxtx)
347                         for (i = 0; i < vq->vq_nentries; i++) {
348                                 vq->vq_ring.avail->ring[i] = i;
349                                 vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
350                         }
351 #endif
352                 memset(&vq->fake_mbuf, 0, sizeof(vq->fake_mbuf));
353                 for (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++)
354                         vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf;
355
356                 while (!virtqueue_full(vq)) {
357                         m = rte_rxmbuf_alloc(vq->mpool);
358                         if (m == NULL)
359                                 break;
360
361                         /*******************************************
362                          *        Enqueue allocated buffers        *
363                          *******************************************/
364 #ifdef RTE_MACHINE_CPUFLAG_SSSE3
365                         if (use_simple_rxtx)
366                                 error = virtqueue_enqueue_recv_refill_simple(vq, m);
367                         else
368 #endif
369                                 error = virtqueue_enqueue_recv_refill(vq, m);
370                         if (error) {
371                                 rte_pktmbuf_free(m);
372                                 break;
373                         }
374                         nbufs++;
375                 }
376
377                 vq_update_avail_idx(vq);
378
379                 PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
380         } else if (queue_type == VTNET_TQ) {
381 #ifdef RTE_MACHINE_CPUFLAG_SSSE3
382                 if (use_simple_rxtx) {
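                        /* The simple TX path splits the ring in half: each
                         * descriptor in the upper half permanently points at
                         * a per-slot virtio-net header in the reserved region
                         * and chains to its matching data descriptor in the
                         * lower half, so transmit only has to fill in the
                         * data slot.
                         */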
383                         int mid_idx  = vq->vq_nentries >> 1;
384                         for (i = 0; i < mid_idx; i++) {
385                                 vq->vq_ring.avail->ring[i] = i + mid_idx;
386                                 vq->vq_ring.desc[i + mid_idx].next = i;
387                                 vq->vq_ring.desc[i + mid_idx].addr =
388                                         vq->virtio_net_hdr_mem +
389                                                 i * vq->hw->vtnet_hdr_size;
390                                 vq->vq_ring.desc[i + mid_idx].len =
391                                         vq->hw->vtnet_hdr_size;
392                                 vq->vq_ring.desc[i + mid_idx].flags =
393                                         VRING_DESC_F_NEXT;
394                                 vq->vq_ring.desc[i].flags = 0;
395                         }
396                         for (i = mid_idx; i < vq->vq_nentries; i++)
397                                 vq->vq_ring.avail->ring[i] = i;
398                 }
399 #endif
400         }
401 }
402
403 void
404 virtio_dev_cq_start(struct rte_eth_dev *dev)
405 {
406         struct virtio_hw *hw = dev->data->dev_private;
407
408         if (hw->cvq) {
409                 virtio_dev_vring_start(hw->cvq, VTNET_CQ);
410                 VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
411         }
412 }
413
414 void
415 virtio_dev_rxtx_start(struct rte_eth_dev *dev)
416 {
417         /*
418          * Start receive and transmit vrings
419          * -    Setup vring structure for all queues
420          * -    Initialize descriptor for the rx vring
421          * -    Allocate blank mbufs for each rx descriptor
422          *
423          */
424         int i;
425
426         PMD_INIT_FUNC_TRACE();
427
428         /* Start rx vring. */
429         for (i = 0; i < dev->data->nb_rx_queues; i++) {
430                 virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ);
431                 VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
432         }
433
434         /* Start tx vring. */
435         for (i = 0; i < dev->data->nb_tx_queues; i++) {
436                 virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ);
437                 VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
438         }
439 }
440
441 int
442 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
443                         uint16_t queue_idx,
444                         uint16_t nb_desc,
445                         unsigned int socket_id,
446                         __rte_unused const struct rte_eth_rxconf *rx_conf,
447                         struct rte_mempool *mp)
448 {
449         uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
450         struct virtqueue *vq;
451         int ret;
452
453         PMD_INIT_FUNC_TRACE();
454         ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
455                         nb_desc, socket_id, &vq);
456         if (ret < 0) {
457                 PMD_INIT_LOG(ERR, "rvq initialization failed");
458                 return ret;
459         }
460
461         /* Use the supplied mempool for rx mbuf allocation */
462         vq->mpool = mp;
463
464         dev->data->rx_queues[queue_idx] = vq;
465
466 #ifdef RTE_MACHINE_CPUFLAG_SSSE3
467         virtio_rxq_vec_setup(vq);
468 #endif
469
470         return 0;
471 }
472
473 void
474 virtio_dev_rx_queue_release(void *rxq)
475 {
476         virtio_dev_queue_release(rxq);
477 }
478
479 /*
480  * struct rte_eth_dev *dev: device whose tx queue list is updated
481  * uint16_t queue_idx: index into the device's tx queue list
482  * uint16_t nb_desc: ring size; defaults to the value read from config space
483  * unsigned int socket_id: NUMA socket on which to allocate the memzone
484  * const struct rte_eth_txconf *tx_conf: used to set up the tx engine
485  */
486 int
487 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
488                         uint16_t queue_idx,
489                         uint16_t nb_desc,
490                         unsigned int socket_id,
491                         const struct rte_eth_txconf *tx_conf)
492 {
493         uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
494
495 #ifdef RTE_MACHINE_CPUFLAG_SSSE3
496         struct virtio_hw *hw = dev->data->dev_private;
497 #endif
498         struct virtqueue *vq;
499         uint16_t tx_free_thresh;
500         int ret;
501
502         PMD_INIT_FUNC_TRACE();
503
504         if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS)
505             != ETH_TXQ_FLAGS_NOXSUMS) {
506                 PMD_INIT_LOG(ERR, "TX checksum offload not supported");
507                 return -EINVAL;
508         }
509
510 #ifdef RTE_MACHINE_CPUFLAG_SSSE3
511         /* Use simple rx/tx func if single segment and no offloads */
512         if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
513              !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
514                 PMD_INIT_LOG(INFO, "Using simple rx/tx path");
515                 dev->tx_pkt_burst = virtio_xmit_pkts_simple;
516                 dev->rx_pkt_burst = virtio_recv_pkts_vec;
517                 use_simple_rxtx = 1;
518         }
519 #endif
520
521         ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
522                         nb_desc, socket_id, &vq);
523         if (ret < 0) {
524                 PMD_INIT_LOG(ERR, "tvq initialization failed");
525                 return ret;
526         }
527
528         tx_free_thresh = tx_conf->tx_free_thresh;
529         if (tx_free_thresh == 0)
530                 tx_free_thresh =
531                         RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
532
533         if (tx_free_thresh >= (vq->vq_nentries - 3)) {
534                 RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
535                         "number of TX entries minus 3 (%u)."
536                         " (tx_free_thresh=%u port=%u queue=%u)\n",
537                         vq->vq_nentries - 3,
538                         tx_free_thresh, dev->data->port_id, queue_idx);
539                 return -EINVAL;
540         }
541
542         vq->vq_free_thresh = tx_free_thresh;
543
544         dev->data->tx_queues[queue_idx] = vq;
545         return 0;
546 }
547
548 void
549 virtio_dev_tx_queue_release(void *txq)
550 {
551         virtio_dev_queue_release(txq);
552 }
553
554 static void
555 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
556 {
557         int error;
558         /*
559          * Requeue the discarded mbuf. This should always be
560          * successful since it was just dequeued.
561          */
562         error = virtqueue_enqueue_recv_refill(vq, m);
563         if (unlikely(error)) {
564                 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf\n");
565                 rte_pktmbuf_free(m);
566         }
567 }
568
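/*
 * Per-queue packet statistics: the size histogram bins are <64, 64, 65-127,
 * 128-255, 256-511, 512-1023, 1024-1518 and >=1519 bytes; multicast and
 * broadcast destinations are counted from the Ethernet header.
 */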
569 static void
570 virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf)
571 {
572         uint32_t s = mbuf->pkt_len;
573         struct ether_addr *ea;
574
575         if (s == 64) {
576                 vq->size_bins[1]++;
577         } else if (s > 64 && s < 1024) {
578                 uint32_t bin;
579
580                 /* use the count of leading zeros to index the power-of-two bin */
581                 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
582                 vq->size_bins[bin]++;
583         } else {
584                 if (s < 64)
585                         vq->size_bins[0]++;
586                 else if (s < 1519)
587                         vq->size_bins[6]++;
588                 else if (s >= 1519)
589                         vq->size_bins[7]++;
590         }
591
592         ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
593         if (is_multicast_ether_addr(ea)) {
594                 if (is_broadcast_ether_addr(ea))
595                         vq->broadcast++;
596                 else
597                         vq->multicast++;
598         }
599 }
600
601 #define VIRTIO_MBUF_BURST_SZ 64
602 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
603 uint16_t
604 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
605 {
606         struct virtqueue *rxvq = rx_queue;
607         struct virtio_hw *hw;
608         struct rte_mbuf *rxm, *new_mbuf;
609         uint16_t nb_used, num, nb_rx;
610         uint32_t len[VIRTIO_MBUF_BURST_SZ];
611         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
612         int error;
613         uint32_t i, nb_enqueued;
614         uint32_t hdr_size;
615
616         nb_used = VIRTQUEUE_NUSED(rxvq);
617
618         virtio_rmb();
619
620         num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
621         num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
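        /* Trim the burst so that the used-ring consumer index ends on a
         * DESC_PER_CACHELINE boundary and the next burst starts cache-line
         * aligned.
         */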
622         if (likely(num > DESC_PER_CACHELINE))
623                 num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
624
625         if (num == 0)
626                 return 0;
627
628         num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
629         PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
630
631         hw = rxvq->hw;
632         nb_rx = 0;
633         nb_enqueued = 0;
634         hdr_size = hw->vtnet_hdr_size;
635
636         for (i = 0; i < num ; i++) {
637                 rxm = rcv_pkts[i];
638
639                 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
640
641                 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
642                         PMD_RX_LOG(ERR, "Packet drop");
643                         nb_enqueued++;
644                         virtio_discard_rxbuf(rxvq, rxm);
645                         rxvq->errors++;
646                         continue;
647                 }
648
649                 rxm->port = rxvq->port_id;
650                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
651                 rxm->ol_flags = 0;
652                 rxm->vlan_tci = 0;
653
654                 rxm->nb_segs = 1;
655                 rxm->next = NULL;
656                 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
657                 rxm->data_len = (uint16_t)(len[i] - hdr_size);
658
659                 if (hw->vlan_strip)
660                         rte_vlan_strip(rxm);
661
662                 VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
663
664                 rx_pkts[nb_rx++] = rxm;
665
666                 rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len;
667                 virtio_update_packet_stats(rxvq, rxm);
668         }
669
670         rxvq->packets += nb_rx;
671
672         /* Allocate new mbuf for the used descriptor */
673         error = ENOSPC;
674         while (likely(!virtqueue_full(rxvq))) {
675                 new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
676                 if (unlikely(new_mbuf == NULL)) {
677                         struct rte_eth_dev *dev
678                                 = &rte_eth_devices[rxvq->port_id];
679                         dev->data->rx_mbuf_alloc_failed++;
680                         break;
681                 }
682                 error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
683                 if (unlikely(error)) {
684                         rte_pktmbuf_free(new_mbuf);
685                         break;
686                 }
687                 nb_enqueued++;
688         }
689
690         if (likely(nb_enqueued)) {
691                 vq_update_avail_idx(rxvq);
692
693                 if (unlikely(virtqueue_kick_prepare(rxvq))) {
694                         virtqueue_notify(rxvq);
695                         PMD_RX_LOG(DEBUG, "Notified");
696                 }
697         }
698
699         return nb_rx;
700 }
701
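/*
 * Receive path used with VIRTIO_NET_F_MRG_RXBUF: the virtio-net header of
 * the first buffer carries num_buffers, the number of descriptors the device
 * consumed for this packet. The remaining buffers are dequeued and chained
 * onto the first mbuf; they carry no header of their own, so their data
 * starts at the beginning of the receive buffer.
 */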
702 uint16_t
703 virtio_recv_mergeable_pkts(void *rx_queue,
704                         struct rte_mbuf **rx_pkts,
705                         uint16_t nb_pkts)
706 {
707         struct virtqueue *rxvq = rx_queue;
708         struct virtio_hw *hw;
709         struct rte_mbuf *rxm, *new_mbuf;
710         uint16_t nb_used, num, nb_rx;
711         uint32_t len[VIRTIO_MBUF_BURST_SZ];
712         struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
713         struct rte_mbuf *prev;
714         int error;
715         uint32_t i, nb_enqueued;
716         uint32_t seg_num;
717         uint16_t extra_idx;
718         uint32_t seg_res;
719         uint32_t hdr_size;
720
721         nb_used = VIRTQUEUE_NUSED(rxvq);
722
723         virtio_rmb();
724
725         if (nb_used == 0)
726                 return 0;
727
728         PMD_RX_LOG(DEBUG, "used:%d", nb_used);
729
730         hw = rxvq->hw;
731         nb_rx = 0;
732         i = 0;
733         nb_enqueued = 0;
734         seg_num = 0;
735         extra_idx = 0;
736         seg_res = 0;
737         hdr_size = hw->vtnet_hdr_size;
738
739         while (i < nb_used) {
740                 struct virtio_net_hdr_mrg_rxbuf *header;
741
742                 if (nb_rx == nb_pkts)
743                         break;
744
745                 num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1);
746                 if (num != 1)
747                         continue;
748
749                 i++;
750
751                 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
752                 PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);
753
754                 rxm = rcv_pkts[0];
755
756                 if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
757                         PMD_RX_LOG(ERR, "Packet drop");
758                         nb_enqueued++;
759                         virtio_discard_rxbuf(rxvq, rxm);
760                         rxvq->errors++;
761                         continue;
762                 }
763
764                 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
765                         RTE_PKTMBUF_HEADROOM - hdr_size);
766                 seg_num = header->num_buffers;
767
768                 if (seg_num == 0)
769                         seg_num = 1;
770
771                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
772                 rxm->nb_segs = seg_num;
773                 rxm->next = NULL;
774                 rxm->ol_flags = 0;
775                 rxm->vlan_tci = 0;
776                 rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
777                 rxm->data_len = (uint16_t)(len[0] - hdr_size);
778
779                 rxm->port = rxvq->port_id;
780                 rx_pkts[nb_rx] = rxm;
781                 prev = rxm;
782
783                 seg_res = seg_num - 1;
784
785                 while (seg_res != 0) {
786                         /*
787                          * Get extra segments for the current incomplete packet.
788                          */
789                         uint16_t  rcv_cnt =
790                                 RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
791                         if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) {
792                                 uint32_t rx_num =
793                                         virtqueue_dequeue_burst_rx(rxvq,
794                                         rcv_pkts, len, rcv_cnt);
795                                 i += rx_num;
796                                 rcv_cnt = rx_num;
797                         } else {
798                                 PMD_RX_LOG(ERR,
799                                         "Not enough segments for packet.");
800                                 nb_enqueued++;
801                                 virtio_discard_rxbuf(rxvq, rxm);
802                                 rxvq->errors++;
803                                 break;
804                         }
805
806                         extra_idx = 0;
807
808                         while (extra_idx < rcv_cnt) {
809                                 rxm = rcv_pkts[extra_idx];
810
811                                 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
812                                 rxm->next = NULL;
813                                 rxm->pkt_len = (uint32_t)(len[extra_idx]);
814                                 rxm->data_len = (uint16_t)(len[extra_idx]);
815
816                                 if (prev)
817                                         prev->next = rxm;
818
819                                 prev = rxm;
820                                 rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
821                                 extra_idx++;
822                         }
823                         seg_res -= rcv_cnt;
824                 }
825
826                 if (hw->vlan_strip)
827                         rte_vlan_strip(rx_pkts[nb_rx]);
828
829                 VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
830                         rx_pkts[nb_rx]->data_len);
831
832                 rxvq->bytes += rx_pkts[nb_rx]->pkt_len;
833                 virtio_update_packet_stats(rxvq, rx_pkts[nb_rx]);
834                 nb_rx++;
835         }
836
837         rxvq->packets += nb_rx;
838
839         /* Allocate new mbuf for the used descriptor */
840         error = ENOSPC;
841         while (likely(!virtqueue_full(rxvq))) {
842                 new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
843                 if (unlikely(new_mbuf == NULL)) {
844                         struct rte_eth_dev *dev
845                                 = &rte_eth_devices[rxvq->port_id];
846                         dev->data->rx_mbuf_alloc_failed++;
847                         break;
848                 }
849                 error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
850                 if (unlikely(error)) {
851                         rte_pktmbuf_free(new_mbuf);
852                         break;
853                 }
854                 nb_enqueued++;
855         }
856
857         if (likely(nb_enqueued)) {
858                 vq_update_avail_idx(rxvq);
859
860                 if (unlikely(virtqueue_kick_prepare(rxvq))) {
861                         virtqueue_notify(rxvq);
862                         PMD_RX_LOG(DEBUG, "Notified");
863                 }
864         }
865
866         return nb_rx;
867 }
868
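/*
 * Transmit burst: reclaim completed descriptors once the used count exceeds
 * the free threshold, pick a descriptor layout per packet (header pushed into
 * the mbuf, indirect descriptors, or the default header-plus-segments chain),
 * enqueue, and finally expose the new avail index and kick the device if it
 * has not suppressed notifications.
 */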
869 uint16_t
870 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
871 {
872         struct virtqueue *txvq = tx_queue;
873         struct virtio_hw *hw = txvq->hw;
874         uint16_t hdr_size = hw->vtnet_hdr_size;
875         uint16_t nb_used, nb_tx;
876         int error;
877
878         if (unlikely(nb_pkts < 1))
879                 return nb_pkts;
880
881         PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
882         nb_used = VIRTQUEUE_NUSED(txvq);
883
884         virtio_rmb();
885         if (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh))
886                 virtio_xmit_cleanup(txvq, nb_used);
887
888         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
889                 struct rte_mbuf *txm = tx_pkts[nb_tx];
890                 int can_push = 0, use_indirect = 0, slots, need;
891
892                 /* Do VLAN tag insertion */
893                 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
894                         error = rte_vlan_insert(&txm);
895                         if (unlikely(error)) {
896                                 rte_pktmbuf_free(txm);
897                                 continue;
898                         }
899                 }
900
901                 /* optimize ring usage: push the header into the mbuf if the device accepts any layout, else try indirect descriptors */
902                 if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) &&
903                     rte_mbuf_refcnt_read(txm) == 1 &&
904                     txm->nb_segs == 1 &&
905                     rte_pktmbuf_headroom(txm) >= hdr_size &&
906                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
907                                    __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
908                         can_push = 1;
909                 else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
910                          txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
911                         use_indirect = 1;
912
913                 /* How many main ring entries are needed for this Tx?
914                  * any_layout => number of segments
915                  * indirect   => 1
916                  * default    => number of segments + 1
917                  */
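                /* e.g. a 3-segment mbuf needs 3 slots with any_layout,
                 * 1 slot with indirect descriptors, and 4 otherwise.
                 */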
918                 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
919                 need = slots - txvq->vq_free_cnt;
920
921                 /* A positive value means we lack free descriptors and must reclaim completed ones */
922                 if (unlikely(need > 0)) {
923                         nb_used = VIRTQUEUE_NUSED(txvq);
924                         virtio_rmb();
925                         need = RTE_MIN(need, (int)nb_used);
926
927                         virtio_xmit_cleanup(txvq, need);
928                         need = slots - txvq->vq_free_cnt;
929                         if (unlikely(need > 0)) {
930                                 PMD_TX_LOG(ERR,
931                                            "No free tx descriptors to transmit");
932                                 break;
933                         }
934                 }
935
936                 /* Enqueue Packet buffers */
937                 error = virtqueue_enqueue_xmit(txvq, txm, slots,
938                                                use_indirect, can_push);
939                 if (unlikely(error)) {
940                         if (error == ENOSPC)
941                                 PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0");
942                         else if (error == EMSGSIZE)
943                                 PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1");
944                         else
945                                 PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d", error);
946                         break;
947                 }
948
949                 txvq->bytes += txm->pkt_len;
950                 virtio_update_packet_stats(txvq, txm);
951         }
952
953         txvq->packets += nb_tx;
954
955         if (likely(nb_tx)) {
956                 vq_update_avail_idx(txvq);
957
958                 if (unlikely(virtqueue_kick_prepare(txvq))) {
959                         virtqueue_notify(txvq);
960                         PMD_TX_LOG(DEBUG, "Notified backend after xmit");
961                 }
962         }
963
964         return nb_tx;
965 }