/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtqueue.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);

	return m;
}
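
/*
 * Note: the raw allocator is used here rather than rte_pktmbuf_alloc(),
 * presumably because the rx paths below rewrite the mbuf header fields
 * themselves, making the extra reset done by the packet allocator redundant.
 */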

static void
virtio_dev_vring_start(struct rte_eth_dev *dev, struct virtqueue *vq, int queue_type)
{
	struct rte_mbuf *m;
	int i, nbufs, error, size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];

	PMD_INIT_FUNC_TRACE();

	/*
	 * Reinitialise since virtio port might have been stopped and restarted
	 */
	memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, vq->vq_alignment);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = vq->vq_nentries - 1;
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	/* Chain all the descriptors in the ring with an END */
	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = (uint16_t)(i + 1);
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
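
	/*
	 * The descriptor table now forms one free chain: vq_desc_head_idx
	 * points at entry 0, each entry's next field points at the following
	 * entry, and VQ_RING_DESC_CHAIN_END marks the last entry.
	 */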

	/*
	 * Disable device (host) interrupting the guest
	 */
	virtqueue_disable_intr(vq);

	rte_snprintf(vq_name, sizeof(vq_name), "port_%d_rx_vq",
			dev->data->port_id);
	PMD_INIT_LOG(DEBUG, "vq name: %s\n", vq->vq_name);

	/* Only rx virtqueue needs mbufs to be allocated at initialization */
	if (queue_type == VTNET_RQ) {
		if (vq->mpool == NULL)
			rte_exit(EXIT_FAILURE,
				"Cannot allocate initial mbufs for rx virtqueue\n");

		/* Allocate blank mbufs for each rx descriptor */
		nbufs = 0;
		while (!virtqueue_full(vq)) {
			m = rte_rxmbuf_alloc(vq->mpool);
			if (m == NULL)
				break;

			/******************************************
			*         Enqueue allocated buffers       *
			*******************************************/
			error = virtqueue_enqueue_recv_refill(vq, m);
			if (error) {
				rte_pktmbuf_free_seg(m);
				break;
			}
			nbufs++;
		}

		vq_update_avail_idx(vq);

		PMD_INIT_LOG(DEBUG, "Allocated %d bufs\n", nbufs);
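
		/*
		 * Legacy virtio-PCI handshake: VIRTIO_PCI_QUEUE_SEL selects the
		 * queue, then VIRTIO_PCI_QUEUE_PFN is written with the guest
		 * physical page frame number of the ring, hence the shift by
		 * VIRTIO_PCI_QUEUE_ADDR_SHIFT (12 for 4 KiB pages).
		 */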
		VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL, VTNET_SQ_RQ_QUEUE_IDX);
		VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
			vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	} else if (queue_type == VTNET_TQ) {
		VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL, VTNET_SQ_TQ_QUEUE_IDX);
		VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN,
			vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	}
}

void
virtio_dev_rxtx_start(struct rte_eth_dev *dev)
{
	/*
	 * Start receive and transmit vrings
	 * - Setup vring structure for all queues
	 * - Initialize descriptor for the rx vring
	 * - Allocate blank mbufs for each rx descriptor
	 */
	PMD_INIT_FUNC_TRACE();

	/* Start rx vring: by default we have 1 rx virtqueue. */
	virtio_dev_vring_start(dev, dev->data->rx_queues[0], VTNET_RQ);
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[0]);

	/* Start tx vring: by default we have 1 tx virtqueue. */
	virtio_dev_vring_start(dev, dev->data->tx_queues[0], VTNET_TQ);
	VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[0]);
}

int
virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			__rte_unused const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	uint8_t vtpci_queue_idx = VTNET_SQ_RQ_QUEUE_IDX;
	struct virtqueue *vq;
	int ret;

	PMD_INIT_FUNC_TRACE();
	ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
			nb_desc, socket_id, &vq);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "rvq initialization failed\n");
		return ret;
	}

	/* Use the caller-provided mempool for rx mbuf allocation */
	vq->mpool = mp;

	dev->data->rx_queues[queue_idx] = vq;
	return 0;
}

/*
 * struct rte_eth_dev *dev: Used to update dev
 * uint16_t nb_desc: Defaults to values read from config space
 * unsigned int socket_id: Used to allocate memzone
 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
 * uint16_t queue_idx: Just used as an index in dev txq list
 */
int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			__rte_unused const struct rte_eth_txconf *tx_conf)
{
	uint8_t vtpci_queue_idx = VTNET_SQ_TQ_QUEUE_IDX;
	struct virtqueue *vq;
	int ret;

	PMD_INIT_FUNC_TRACE();
	ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
			nb_desc, socket_id, &vq);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "tvq initialization failed\n");
		return ret;
	}

	dev->data->tx_queues[queue_idx] = vq;
	return 0;
}

static void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = virtqueue_enqueue_recv_refill(vq, m);
	if (unlikely(error)) {
		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf\n");
		rte_pktmbuf_free_seg(m);
	}
}

#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))
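
/*
 * Receive bursts are trimmed below so that they end on a cache-line
 * boundary of the used ring (DESC_PER_CACHELINE entries per line); the
 * next poll then starts on a fresh cache line instead of re-reading a
 * partially consumed one.
 */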
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *rxvq = rx_queue;
	struct virtio_hw *hw = rxvq->hw;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued = 0;

	nb_used = VIRTQUEUE_NUSED(rxvq);

	/* read barrier: see the used-ring entries only after the used index */
	rte_rmb();

	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	if (num == 0)
		return 0;

	num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);

	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);

		if (unlikely(len[i] < (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop\n");
			nb_enqueued++;
			virtio_discard_rxbuf(rxvq, rxm);
			hw->eth_stats.ierrors++;
			continue;
		}

		rxm->pkt.in_port = rxvq->port_id;
		rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
		rxm->pkt.nb_segs = 1;
		rxm->pkt.next = NULL;
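
		/*
		 * The length reported by the host includes the leading
		 * struct virtio_net_hdr, so it is subtracted to obtain the
		 * Ethernet frame length; the minimum-length check above has
		 * already guaranteed that len[i] covers the header.
		 */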
		rxm->pkt.pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
		rxm->pkt.data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));

		VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);

		rx_pkts[nb_rx++] = rxm;
		hw->eth_stats.ibytes += len[i] - sizeof(struct virtio_net_hdr);
	}

	hw->eth_stats.ipackets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	while (likely(!virtqueue_full(rxvq))) {
		new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			hw->eth_stats.rx_nombuf++;
			break;
		}
		error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
		if (unlikely(error)) {
			rte_pktmbuf_free_seg(new_mbuf);
			break;
		}
		nb_enqueued++;
	}
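
	/*
	 * Kick the host only if it has not suppressed notifications:
	 * virtqueue_kick_prepare() reports whether VRING_USED_F_NO_NOTIFY is
	 * clear in the used-ring flags published by the host.
	 */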
	if (likely(nb_enqueued)) {
		vq_update_avail_idx(rxvq);

		if (unlikely(virtqueue_kick_prepare(rxvq))) {
			virtqueue_notify(rxvq);
			PMD_RX_LOG(DEBUG, "Notified\n");
		}
	}

	return nb_rx;
}

uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	struct rte_mbuf *txm;
	uint16_t nb_used, nb_tx, num;
	int error;
	struct virtio_hw *hw;

	nb_tx = 0;
	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = VIRTQUEUE_NUSED(txvq);

	/* read barrier: see the used-ring entries only after the used index */
	rte_rmb();

	hw = txvq->hw;
	num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
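
	/*
	 * Completed tx descriptors are reclaimed lazily: each iteration frees
	 * at most one used chain, and only when the ring is full; 'num' bounds
	 * the reclaims to the used entries counted above.
	 */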
	while (nb_tx < nb_pkts) {
		if (virtqueue_full(txvq) && num) {
			virtqueue_dequeue_pkt_tx(txvq);
			num--;
		}

		if (!virtqueue_full(txvq)) {
			txm = tx_pkts[nb_tx];
			/* Enqueue Packet buffers */
			error = virtqueue_enqueue_xmit(txvq, txm);
			if (unlikely(error)) {
				if (error == ENOSPC)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0\n");
				else if (error == EMSGSIZE)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1\n");
				else
					PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d\n", error);
				break;
			}
			nb_tx++;
			hw->eth_stats.obytes += txm->pkt.data_len;
		} else {
			PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
			break;
		}
	}

	vq_update_avail_idx(txvq);

	hw->eth_stats.opackets += nb_tx;

	if (unlikely(virtqueue_kick_prepare(txvq))) {
		virtqueue_notify(txvq);
		PMD_TX_LOG(DEBUG, "Notified backend after xmit\n");
	}

	return nb_tx;
}