/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
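/*
 * virtual_pmd: a minimal in-memory ethdev used by the unit tests (e.g. the
 * link bonding tests). There is no hardware behind it: RX and TX are backed
 * by rte_rings so tests can inject and inspect packets, and each driver
 * callback can be switched at run time between a "success" and a "fail"
 * variant to exercise error paths.
 */
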
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"
#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

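/*
 * Per-port private data: a writable copy of the ops table plus the two
 * rings that stand in for the device's receive and transmit hardware.
 */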
struct virtual_ethdev_private {
	struct eth_dev_ops dev_ops;
	struct rte_eth_stats eth_stats;

	struct rte_ring *rx_queue;
	struct rte_ring *tx_queue;

	int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	void *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	/* Drain and free any mbufs still sitting in the RX and TX rings */
	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));

	return 0;
}

static void
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	void *pkt = NULL;

	/* Drain the TX ring; rte_ring_dequeue() returns -ENOENT once the
	 * ring is empty (comparing against -ENOBUFS here would mean the
	 * loop body never runs and nothing is freed). */
	while (rte_ring_dequeue(dev_private->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
}

static void
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}

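/*
 * Default set of driver callbacks. Each port gets its own writable copy of
 * this table (see virtual_ethdev_create()), so the setters below can switch
 * individual callbacks to their *_fail counterparts on a per-port basis.
 */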
static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

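/*
 * The following setters let a test flip an individual driver callback (and,
 * further below, the RX/TX burst functions) between the success and fail
 * implementations at run time.
 */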
void
virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_start = virtual_ethdev_start_success;
	else
		dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
	else
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
	else
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		dev_ops->link_update = virtual_ethdev_link_update_fail;
}

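/*
 * Burst functions: RX dequeues mbufs from the port's rx_queue ring (filled
 * by virtual_ethdev_add_mbufs_to_rx_queue()), TX enqueues to tx_queue (read
 * back by virtual_ethdev_get_mbufs_from_tx_queue()), updating the port's
 * statistics as a real PMD would.
 */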
static uint16_t
virtual_ethdev_rx_burst_success(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts, NULL);

	/* increment ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increment ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	/* only "transmit" (enqueue to the TX ring) while the link is up */
	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue,
				(void **)bufs, nb_pkts, NULL);

	/* increment opackets count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

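/*
 * Fail-path TX: the configured tx_burst_fail_count tail packets of each
 * burst are reported as not transmitted (and stay owned by the caller),
 * while the leading packets are freed as if sent, so they can never be
 * read back from the TX ring.
 */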
static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opackets count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free the "transmitted" packets in the burst */
		for (i = 0; i < successfully_txd; i++) {
			if (bufs[i] != NULL)
				rte_pktmbuf_free(bufs[i]);

			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}

void
virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}

void
virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

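/*
 * Set the link state and fire an LSC event so registered callbacks run,
 * mimicking a link status change interrupt from real hardware.
 */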
void
virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	_rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
			NULL, NULL);
}

int
virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

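/*
 * Sketch of how a test typically drives this PMD (illustrative only; the
 * mbuf pool creation, rte_eth_dev_configure() and queue setup are elided):
 *
 *	struct ether_addr mac = { .addr_bytes = { 0x02, 0, 0, 0, 0, 0x01 } };
 *	int port = virtual_ethdev_create("net_virt0", &mac,
 *			rte_socket_id(), 0);
 *
 *	virtual_ethdev_add_mbufs_to_rx_queue(port, pkts, nb_pkts);
 *	nb_rx = rte_eth_rx_burst(port, 0, rx_pkts, MAX_PKT_BURST);
 */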
int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */
	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;
	id_table->device_id = 0xBEEF;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0,
			socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST,
			socket_id, 0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST,
			socket_id, 0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->device.numa_node = socket_id;
	pci_dev->device.name = eth_dev->data->name;
	pci_drv->driver.name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	eth_dev->device = &pci_dev->device;
	eth_dev->device->driver = &pci_drv->driver;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	/* Copy default device operation functions so this port's callbacks
	 * can be overridden without affecting other virtual ports */
	dev_private->dev_ops = virtual_ethdev_default_dev_ops;
	eth_dev->dev_ops = &dev_private->dev_ops;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	return eth_dev->data->port_id;

err:
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(id_table);
	rte_free(dev_private);

	return -1;
}