mbuf: replace data pointer by an offset
[dpdk.git] / lib/librte_pmd_virtio/virtio_rxtx.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtqueue.h"

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif

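/*
 * Return a completed descriptor chain starting at desc_idx to the free
 * list: credit its descriptors back to vq_free_cnt, walk the chain to
 * find its last descriptor, and splice the chain onto the tail of the
 * existing free list.
 */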
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp  = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the newly freed chain to the end of the existing
	 * free chain, if any. If the virtqueue was completely used, the
	 * free list is empty and the tail index is VQ_RING_DESC_CHAIN_END.
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

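/*
 * Dequeue up to num buffers from the used ring. Each used element
 * carries the head descriptor index of a posted chain; the mbuf that
 * was stashed in vq_descx[] at enqueue time is returned through
 * rx_pkts and the descriptor chain is recycled.
 */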
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t) uep->id;
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}

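/*
 * Reclaim a single transmitted packet: consume one used ring entry
 * and return its descriptor chain to the free list.
 */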
static void
virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
{
	struct vring_used_elem *uep;
	uint16_t used_idx, desc_idx;

	used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	uep = &vq->vq_ring.used->ring[used_idx];
	desc_idx = (uint16_t) uep->id;
	vq->vq_used_cons_idx++;
	vq_ring_free_chain(vq, desc_idx);
}

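/*
 * Post one rx mbuf as a single device-writable descriptor. The buffer
 * address is rewound by vtnet_hdr_size so the host writes the virtio
 * net header immediately before the packet data; once the header is
 * accounted for, the payload starts at RTE_PKTMBUF_HEADROOM.
 */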
static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp;
	uint16_t needed = 1;
	uint16_t head_idx, idx;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < needed))
		return -EMSGSIZE;

	head_idx = vq->vq_desc_head_idx;
	if (unlikely(head_idx >= vq->vq_nentries))
		return -EFAULT;

	idx = head_idx;
	dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_ring.desc;
	start_dp[idx].addr =
		(uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM
		- hw->vtnet_hdr_size);
	start_dp[idx].len =
		cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
	start_dp[idx].flags = VRING_DESC_F_WRITE;
	idx = start_dp[idx].next;
	vq->vq_desc_head_idx = idx;
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_tail_idx = idx;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
	vq_update_avail_ring(vq, head_idx);

	return 0;
}

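/*
 * Enqueue one packet as a chain of 1 + nb_segs descriptors: the first
 * points at this slot's virtio net header inside virtio_net_hdr_mem,
 * the rest at the mbuf data segments. The mbuf is stashed in
 * vq_descx[] so it can be freed once the host has consumed it.
 */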
static int
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
{
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->nb_segs;
	uint16_t needed = 1 + seg_num;
	uint16_t head_idx, idx;
	uint16_t head_size = txvq->hw->vtnet_hdr_size;

	if (unlikely(txvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(txvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	head_idx = txvq->vq_desc_head_idx;
	if (unlikely(head_idx >= txvq->vq_nentries))
		return -EFAULT;

	idx = head_idx;
	dxp = &txvq->vq_descx[idx];
	if (dxp->cookie != NULL)
		rte_pktmbuf_free(dxp->cookie);
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = txvq->vq_ring.desc;
	start_dp[idx].addr =
		txvq->virtio_net_hdr_mem + idx * head_size;
	start_dp[idx].len = (uint32_t)head_size;
	start_dp[idx].flags = VRING_DESC_F_NEXT;

	for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
		idx = start_dp[idx].next;
		start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
		start_dp[idx].len   = cookie->data_len;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		cookie = cookie->next;
	}

	start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
	idx = start_dp[idx].next;
	txvq->vq_desc_head_idx = idx;
	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		txvq->vq_desc_tail_idx = idx;
	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
	vq_update_avail_ring(txvq, head_idx);

	return 0;
}

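/*
 * Allocate a raw mbuf for rx; its packet fields are initialized later,
 * when a frame is actually received into it.
 */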
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);

	return m;
}

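/*
 * (Re)initialize a virtqueue after start or restart: zero the ring
 * memory, rebuild the free descriptor chain, pre-fill an rx queue with
 * blank mbufs, and program the ring's page frame number into the
 * device.
 */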
static void
virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
{
	struct rte_mbuf *m;
	int i, nbufs, error, size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Reinitialise since virtio port might have been stopped and restarted
	 */
	memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, vq->vq_alignment);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	/* Chain all the descriptors in the ring with an END */
	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = (uint16_t)(i + 1);
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;

	/*
	 * Disable device (host) interrupting guest
	 */
	virtqueue_disable_intr(vq);

	/* Only rx virtqueue needs mbufs to be allocated at initialization */
	if (queue_type == VTNET_RQ) {
		if (vq->mpool == NULL)
			rte_exit(EXIT_FAILURE,
				"Cannot allocate initial mbufs for rx virtqueue");

		/* Allocate blank mbufs for each rx descriptor */
		nbufs = 0;
		error = ENOSPC;
		while (!virtqueue_full(vq)) {
			m = rte_rxmbuf_alloc(vq->mpool);
			if (m == NULL)
				break;

			/* Enqueue allocated buffers */
			error = virtqueue_enqueue_recv_refill(vq, m);
			if (error) {
				rte_pktmbuf_free(m);
				break;
			}
			nbufs++;
		}

		vq_update_avail_idx(vq);

		PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

		VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
			vq->vq_queue_index);
		VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
			vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	} else if (queue_type == VTNET_TQ) {
		VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
			vq->vq_queue_index);
		VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
			vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	} else {
		VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL,
			vq->vq_queue_index);
		VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
			vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	}
}

void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw
		= VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	virtio_dev_vring_start(hw->cvq, VTNET_CQ);
	VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
}

void
virtio_dev_rxtx_start(struct rte_eth_dev *dev)
{
	/*
	 * Start receive and transmit vrings
	 * - Setup vring structure for all queues
	 * - Initialize descriptors for the rx vring
	 * - Allocate blank mbufs for each rx descriptor
	 */
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Start rx vring. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
	}

	/* Start tx vring. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
	}
}

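/*
 * Virtio interleaves the virtqueues of a queue pair: the receive ring
 * for ethdev queue N is virtqueue 2 * N + VTNET_SQ_RQ_QUEUE_IDX and
 * the transmit ring is virtqueue 2 * N + VTNET_SQ_TQ_QUEUE_IDX.
 */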
int
virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			__rte_unused const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtqueue *vq;
	int ret;

	PMD_INIT_FUNC_TRACE();
	ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
			nb_desc, socket_id, &vq);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "rvq initialization failed");
		return ret;
	}

	/* Use the mempool provided by the caller for rx mbuf allocation */
	vq->mpool = mp;

	dev->data->rx_queues[queue_idx] = vq;
	return 0;
}

/*
 * struct rte_eth_dev *dev: Used to update dev
 * uint16_t queue_idx: Just used as an index in dev txq list
 * uint16_t nb_desc: Defaults to values read from config space
 * unsigned int socket_id: Used to allocate memzone
 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
 */
int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtqueue *vq;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS)
	    != ETH_TXQ_FLAGS_NOOFFLOADS) {
		PMD_INIT_LOG(ERR, "TX checksum offload not supported");
		return -EINVAL;
	}

	ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
			nb_desc, socket_id, &vq);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "tvq initialization failed");
		return ret;
	}

	dev->data->tx_queues[queue_idx] = vq;
	return 0;
}

static void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = virtqueue_enqueue_recv_refill(vq, m);
	if (unlikely(error)) {
		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf\n");
		rte_pktmbuf_free(m);
	}
}

#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))
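
/*
 * Receive burst. The burst size is first capped by nb_pkts and
 * VIRTIO_MBUF_BURST_SZ, then trimmed so it ends on a used-ring cache
 * line boundary (DESC_PER_CACHELINE entries per line), so this poll
 * and the next do not share a partially consumed cache line.
 */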
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *rxvq = rx_queue;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued = 0;
	const uint32_t hdr_size = sizeof(struct virtio_net_hdr);

	nb_used = VIRTQUEUE_NUSED(rxvq);

	rmb();

	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	if (num == 0)
		return 0;

	num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(rxvq, rxm);
			rxvq->errors++;
			continue;
		}

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;

		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		VIRTIO_DUMP_PACKET(rxm, rxm->data_len);

		rx_pkts[nb_rx++] = rxm;
		rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len;
	}

	rxvq->packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	error = ENOSPC;
	while (likely(!virtqueue_full(rxvq))) {
		new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			struct rte_eth_dev *dev
				= &rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}
		error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
		if (unlikely(error)) {
			rte_pktmbuf_free(new_mbuf);
			break;
		}
		nb_enqueued++;
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare(rxvq))) {
			virtqueue_notify(rxvq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	vq_update_avail_idx(rxvq);

	return nb_rx;
}

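/*
 * Receive burst for devices negotiating VIRTIO_NET_F_MRG_RXBUF: one
 * packet may span several descriptors. num_buffers is read from the
 * virtio net header that the host wrote into the first mbuf's
 * headroom, and that many additional mbufs are dequeued and chained
 * onto the first.
 */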
uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtqueue *rxvq = rx_queue;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *prev;
	int error;
	uint32_t i = 0, nb_enqueued = 0;
	uint32_t seg_num = 0;
	uint16_t extra_idx = 0;
	uint32_t seg_res = 0;
	const uint32_t hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);

	nb_used = VIRTQUEUE_NUSED(rxvq);

	rmb();

	if (nb_used == 0)
		return 0;

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	while (i < nb_used) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		if (nb_rx == nb_pkts)
			break;

		num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1);
		if (num != 1)
			continue;

		i++;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);

		rxm = rcv_pkts[0];

		if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(rxvq, rxm);
			rxvq->errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);
		seg_num = header->num_buffers;

		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->next = NULL;
		rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
		rxm->data_len = (uint16_t)(len[0] - hdr_size);

		rxm->port = rxvq->port_id;
		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		seg_res = seg_num - 1;

		while (seg_res != 0) {
			/*
			 * Get extra segments for the current incomplete
			 * packet.
			 */
			uint32_t rcv_cnt =
				RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
			if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) {
				uint32_t rx_num =
					virtqueue_dequeue_burst_rx(rxvq,
					rcv_pkts, len, rcv_cnt);
				i += rx_num;
				rcv_cnt = rx_num;
			} else {
				PMD_RX_LOG(ERR,
					"Not enough segments for packet");
				nb_enqueued++;
				virtio_discard_rxbuf(rxvq, rxm);
				rxvq->errors++;
				break;
			}

			extra_idx = 0;

			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];

				rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->next = NULL;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);

				if (prev)
					prev->next = rxm;

				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
				extra_idx++;
			}
			seg_res -= rcv_cnt;
		}

		VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
			rx_pkts[nb_rx]->data_len);

		rxvq->bytes += rx_pkts[nb_rx]->pkt_len;
		nb_rx++;
	}

	rxvq->packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	error = ENOSPC;
	while (likely(!virtqueue_full(rxvq))) {
		new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			struct rte_eth_dev *dev
				= &rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}
		error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
		if (unlikely(error)) {
			rte_pktmbuf_free(new_mbuf);
			break;
		}
		nb_enqueued++;
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare(rxvq))) {
			virtqueue_notify(rxvq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	vq_update_avail_idx(rxvq);

	return nb_rx;
}

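/*
 * Transmit burst. Before enqueuing each packet, just enough completed
 * descriptors are reclaimed from the used ring to cover the packet's
 * header descriptor plus one descriptor per segment.
 */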
uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	struct rte_mbuf *txm;
	uint16_t nb_used, nb_tx, num;
	int error;

	nb_tx = 0;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = VIRTQUEUE_NUSED(txvq);

	rmb();

	num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);

	while (nb_tx < nb_pkts) {
		/* One descriptor for the virtio header plus one per segment. */
		int need = tx_pkts[nb_tx]->nb_segs + 1 - txvq->vq_free_cnt;
		int deq_cnt = RTE_MIN(need, (int)num);

		num -= (deq_cnt > 0) ? deq_cnt : 0;
		while (deq_cnt > 0) {
			virtqueue_dequeue_pkt_tx(txvq);
			deq_cnt--;
		}

		if (tx_pkts[nb_tx]->nb_segs + 1 <= txvq->vq_free_cnt) {
			txm = tx_pkts[nb_tx];
			/* Enqueue packet buffers */
			error = virtqueue_enqueue_xmit(txvq, txm);
			if (unlikely(error)) {
				if (error == -ENOSPC)
					PMD_TX_LOG(ERR, "virtqueue_enqueue: free count = 0");
				else if (error == -EMSGSIZE)
					PMD_TX_LOG(ERR, "virtqueue_enqueue: free count < needed");
				else
					PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d", error);
				break;
			}
			nb_tx++;
			txvq->bytes += txm->pkt_len;
		} else {
			PMD_TX_LOG(ERR, "No free tx descriptors to transmit");
			break;
		}
	}
	vq_update_avail_idx(txvq);

	txvq->packets += nb_tx;

	if (unlikely(virtqueue_kick_prepare(txvq))) {
		virtqueue_notify(txvq);
		PMD_TX_LOG(DEBUG, "Notified backend after xmit");
	}

	return nb_tx;
}