/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>

#include <sys/eventfd.h>
#include <sys/param.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

/*
 * Calculate the number of buffers needed per port.
 */
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES * RTE_TEST_RX_DESC_DEFAULT) + \
            (nb_switching_cores * MAX_PKT_BURST) + \
            (nb_switching_cores * RTE_TEST_TX_DESC_DEFAULT) + \
            (nb_switching_cores * MBUF_CACHE_SIZE))
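/*
 * Note on the budget above: it reserves enough mbufs to fill every RX
 * queue's descriptor ring (MAX_QUEUES * RTE_TEST_RX_DESC_DEFAULT) plus,
 * per switching core, one burst in flight, one TX ring's worth of
 * descriptors, and the mempool cache, so the data cores should not
 * exhaust the pool under normal forwarding load.
 */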
#define MBUF_CACHE_SIZE 128
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

#define MAX_PKT_BURST 32    /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100    /* TX drain every ~100us */

/* Defines how long we wait between retries on RX */
#define BURST_RX_WAIT_US 15

#define BURST_RX_RETRIES 4    /* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Config_core_flag status definitions. */
#define REQUEST_DEV_REMOVAL 1
#define ACK_DEV_REMOVAL 0
/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

/* Get first 4 bytes in mbuf headroom. */
#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
        + sizeof(struct rte_mbuf)))
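/*
 * MBUF_HEADROOM_UINT32() reinterprets the first four bytes immediately
 * following the rte_mbuf structure (i.e. the start of the headroom) as a
 * uint32_t, presumably so per-packet metadata can be stashed there without
 * touching the mbuf fields themselves.
 */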
#define INVALID_PORT_ID 0xFF

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 20

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
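/*
 * MAC_ADDR_CMP masks the low 48 bits of a 64-bit load so that a 6-byte
 * Ethernet address can be compared in a single integer comparison rather
 * than byte by byte.
 */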
#define CMD_LINE_OPT_NB_DEVICES "nb-devices"
#define CMD_LINE_OPT_RX_RETRY "rx-retry"
#define CMD_LINE_OPT_RX_RETRY_DELAY "rx-retry-delay"
#define CMD_LINE_OPT_RX_RETRY_NUM "rx-retry-num"
#define CMD_LINE_OPT_STATS "stats"
#define CMD_LINE_OPT_DEV_BASENAME "dev-basename"
/* mask of enabled ports */
static uint32_t enabled_port_mask;

/* Number of switching cores enabled. */
static uint32_t nb_switching_cores;

/* Number of devices/queues to support. */

/* Max ring descriptors: ixgbe, i40e and e1000 all support 4096. */
#define MAX_RING_DESC 4096

struct vpool {
    struct rte_mempool *pool;
    struct rte_ring *ring;
} vpool_array[MAX_QUEUES+MAX_QUEUES];
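/*
 * Note: vpool_array is sized to MAX_QUEUES + MAX_QUEUES, presumably one
 * pool/ring pair per RX queue plus one per TX queue.
 */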
uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;
/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";

static unsigned lcore_ids[RTE_MAX_LCORE];
uint8_t ports[RTE_MAX_ETHPORTS];

static unsigned nb_ports; /**< The number of ports specified on the command line */
/* ethernet addresses of ports */
struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* heads for the main used and free linked lists for the data path. */
static struct virtio_net_data_ll *ll_root_used;
static struct virtio_net_data_ll *ll_root_free;

/*
 * Array of data core structures containing information on
 * individual core linked lists.
 */
static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
    unsigned len;
    struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

struct device_statistics dev_statistics[MAX_DEVICES];
/*
 * Set character device basename.
 */
us_vhost_parse_basename(const char *q_arg)

    if (strlen(q_arg) >= MAX_BASENAME_SZ)
        return -1;

    snprintf((char *)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
/*
 * Parse the portmask provided at run time.
 */
parse_portmask(const char *portmask)

    /* parse hexadecimal string */
    pm = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))

/*
 * Parse num options at run time.
 */
parse_num_opt(const char *q_arg, uint32_t max_valid_value)

    /* parse unsigned int string */
    num = strtoul(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))

    if (num > max_valid_value)
tep_termination_usage(const char *prgname)

    RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
    "    --nb-devices [1-64]: the number of virtIO devices\n"
    "    -p PORTMASK: Set mask for ports to be used by application\n"
    "    --rx-retry [0|1]: disable/enable(default) retries on RX."
    " Enable retry if destination queue is full\n"
    "    --rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX."
    " This only takes effect if retries on RX are enabled\n"
    "    --rx-retry-num [0-N]: the number of retries on RX."
    " This only takes effect if retries on RX are enabled\n"
    "    --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
    "    --dev-basename: The basename to be used for the character device.\n",
/*
 * Parse the arguments given in the command line of the application.
 */
tep_termination_parse_args(int argc, char **argv)

    const char *prgname = argv[0];
    static struct option long_option[] = {
        {CMD_LINE_OPT_NB_DEVICES, required_argument, NULL, 0},
        {CMD_LINE_OPT_RX_RETRY, required_argument, NULL, 0},
        {CMD_LINE_OPT_RX_RETRY_DELAY, required_argument, NULL, 0},
        {CMD_LINE_OPT_RX_RETRY_NUM, required_argument, NULL, 0},
        {CMD_LINE_OPT_STATS, required_argument, NULL, 0},
        {CMD_LINE_OPT_DEV_BASENAME, required_argument, NULL, 0},

    /* Parse command line */
    while ((opt = getopt_long(argc, argv, "p:",
            long_option, &option_index)) != EOF) {

            enabled_port_mask = parse_portmask(optarg);
            if (enabled_port_mask == 0) {
                RTE_LOG(INFO, VHOST_CONFIG,
                    "Invalid portmask\n");
                tep_termination_usage(prgname);

            if (!strncmp(long_option[option_index].name,
                    CMD_LINE_OPT_NB_DEVICES,
                    sizeof(CMD_LINE_OPT_NB_DEVICES))) {
                ret = parse_num_opt(optarg, MAX_DEVICES);
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for nb-devices [0-%d]\n",
                    tep_termination_usage(prgname);

            /* Enable/disable retries on RX. */
            if (!strncmp(long_option[option_index].name,
                    CMD_LINE_OPT_RX_RETRY,
                    sizeof(CMD_LINE_OPT_RX_RETRY))) {
                ret = parse_num_opt(optarg, 1);
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for rx-retry [0|1]\n");
                    tep_termination_usage(prgname);

            /* Specify the retry delay time (in microseconds) on RX. */
            if (!strncmp(long_option[option_index].name,
                    CMD_LINE_OPT_RX_RETRY_DELAY,
                    sizeof(CMD_LINE_OPT_RX_RETRY_DELAY))) {
                ret = parse_num_opt(optarg, INT32_MAX);
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for rx-retry-delay [0-N]\n");
                    tep_termination_usage(prgname);
                burst_rx_delay_time = ret;

            /* Specify the number of retries on RX. */
            if (!strncmp(long_option[option_index].name,
                    CMD_LINE_OPT_RX_RETRY_NUM,
                    sizeof(CMD_LINE_OPT_RX_RETRY_NUM))) {
                ret = parse_num_opt(optarg, INT32_MAX);
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for rx-retry-num [0-N]\n");
                    tep_termination_usage(prgname);
                burst_rx_retry_num = ret;

            /* Enable/disable stats. */
            if (!strncmp(long_option[option_index].name,
                    sizeof(CMD_LINE_OPT_STATS))) {
                ret = parse_num_opt(optarg, INT32_MAX);
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for stats [0..N]\n");
                    tep_termination_usage(prgname);

            /* Set character device basename. */
            if (!strncmp(long_option[option_index].name,
                    CMD_LINE_OPT_DEV_BASENAME,
                    sizeof(CMD_LINE_OPT_DEV_BASENAME))) {
                if (us_vhost_parse_basename(optarg) == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for character "
                        "device basename (Max %d characters)\n",
                    tep_termination_usage(prgname);

            /* Invalid option - print options. */
            tep_termination_usage(prgname);

    for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
        if (enabled_port_mask & (1 << i))
            ports[nb_ports++] = (uint8_t)i;
    if ((nb_ports == 0) || (nb_ports > MAX_SUP_PORTS)) {
        RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", nb_ports,
/*
 * Update the global variable nb_ports and the array ports[] according to
 * the number of ports in the system, and return the number of valid ports.
 */
check_ports_num(unsigned max_nb_ports)

    unsigned valid_nb_ports = nb_ports;

    if (nb_ports > max_nb_ports) {
        RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number (%u) "
            "exceeds total system port number (%u)\n",
            nb_ports, max_nb_ports);
        nb_ports = max_nb_ports;

    for (portid = 0; portid < nb_ports; portid++) {
        if (ports[portid] >= max_nb_ports) {
            RTE_LOG(INFO, VHOST_PORT,
                "\nSpecified port ID (%u) exceeds max "
                "system port ID (%u)\n",
                ports[portid], (max_nb_ports - 1));
            ports[portid] = INVALID_PORT_ID;

    return valid_nb_ports;
/*
 * This function routes the TX packet to the correct interface. This may be
 * a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)

    struct mbuf_table *tx_q;
    struct rte_mbuf **m_table;
    unsigned len, ret = 0;
    const uint16_t lcore_id = rte_lcore_id();
    struct virtio_net *dev = vdev->dev;

    LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n",

    /* Add packet to the port tx queue */
    tx_q = &lcore_tx_queue[lcore_id];

    tx_q->m_table[len] = m;

    dev_statistics[dev->device_fh].tx_total++;
    dev_statistics[dev->device_fh].tx++;

    if (unlikely(len == MAX_PKT_BURST)) {
        m_table = (struct rte_mbuf **)tx_q->m_table;
        /* Free any buffers not handled by TX and update
         * the port stats.
         */
        if (unlikely(ret < len)) {
            do {
                rte_pktmbuf_free(m_table[ret]);
            } while (++ret < len);
/*
 * This function is called by each data core. It handles all
 * RX/TX registered with the core. For TX the specific lcore
 * linked list is used. For RX, MAC addresses are compared
 * with all devices in the main linked list.
 */
switch_worker(__rte_unused void *arg)

    struct rte_mempool *mbuf_pool = arg;
    struct virtio_net *dev = NULL;
    struct vhost_dev *vdev = NULL;
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
    struct virtio_net_data_ll *dev_ll;
    struct mbuf_table *tx_q;
    volatile struct lcore_ll_info *lcore_ll;
    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
        / US_PER_S * BURST_TX_DRAIN_US;
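    /*
     * drain_tsc converts BURST_TX_DRAIN_US into TSC cycles: the TSC rate is
     * rounded up to a whole number of cycles per microsecond and then
     * multiplied by the drain interval (~100 us).
     */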
    uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
    const uint16_t lcore_id = rte_lcore_id();
    const uint16_t num_cores = (uint16_t)rte_lcore_count();
    uint16_t rx_count = 0;

    RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
    lcore_ll = lcore_info[lcore_id].lcore_ll;

    tx_q = &lcore_tx_queue[lcore_id];
    for (i = 0; i < num_cores; i++) {
        if (lcore_ids[i] == lcore_id) {

        cur_tsc = rte_rdtsc();

        /*
         * TX burst queue drain
         */
        diff_tsc = cur_tsc - prev_tsc;
        if (unlikely(diff_tsc > drain_tsc)) {

            LOG_DEBUG(VHOST_DATA, "TX queue drained after "
                "timeout with burst size %u\n",

            if (unlikely(ret < tx_q->len)) {
                do {
                    rte_pktmbuf_free(tx_q->m_table[ret]);
                } while (++ret < tx_q->len);
        rte_prefetch0(lcore_ll->ll_root_used);

        /*
         * Inform the configuration core that we have exited
         * the linked list and that no devices are
         * in use if requested.
         */
        if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
            lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;

        dev_ll = lcore_ll->ll_root_used;

        while (dev_ll != NULL) {

            if (unlikely(vdev->remove)) {
                dev_ll = dev_ll->next;
                vdev->ready = DEVICE_SAFE_REMOVE;

            if (likely(vdev->ready == DEVICE_RX)) {
                /* Handle guest RX */
                rx_count = rte_eth_rx_burst(ports[0],
                    vdev->rx_q, pkts_burst, MAX_PKT_BURST);

                /*
                 * If retry is enabled and the queue is full,
                 * we wait and retry to avoid packet loss.
                 * Here MAX_PKT_BURST must be less than the
                 * virtio queue size.
                 */
                if (enable_retry && unlikely(rx_count >
                        rte_vring_available_entries(dev, VIRTIO_RXQ))) {
                    for (retry = 0; retry < burst_rx_retry_num;
                        rte_delay_us(burst_rx_delay_time);
                        if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))

                    &dev_statistics[dev->device_fh].rx_total_atomic,
                    &dev_statistics[dev->device_fh].rx_atomic, ret_count);

                while (likely(rx_count)) {
                    rte_pktmbuf_free(pkts_burst[rx_count]);

            if (likely(!vdev->remove)) {
                tx_count = rte_vhost_dequeue_burst(dev,
                    VIRTIO_TXQ, mbuf_pool,
                    pkts_burst, MAX_PKT_BURST);
                /* If this is the first received packet we need to learn the MAC address. */
                if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
                    rte_pktmbuf_free(pkts_burst[--tx_count]);

                virtio_tx_route(vdev, pkts_burst[--tx_count]);

            /* move to the next device in the list */
            dev_ll = dev_ll->next;
/*
 * Add an entry to a used linked list. A free entry must first be found
 * in the free linked list using get_data_ll_free_entry().
 */
add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
    struct virtio_net_data_ll *ll_dev)

    struct virtio_net_data_ll *ll = *ll_root_addr;

    /* Set next as NULL and use a compiler barrier to avoid reordering. */
    rte_compiler_barrier();
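    /*
     * The barrier keeps the next-pointer update from being reordered past
     * the point where the entry becomes reachable, since the data cores
     * walk these lists concurrently without taking a lock.
     */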
    /* If ll == NULL then this is the first device. */

        /* Increment to the tail of the linked list. */
        while (ll->next != NULL)

        *ll_root_addr = ll_dev;

/*
 * Remove an entry from a used linked list. The entry must then be added to
 * the free linked list using put_data_ll_free_entry().
 */
rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
    struct virtio_net_data_ll *ll_dev,
    struct virtio_net_data_ll *ll_dev_last)

    struct virtio_net_data_ll *ll = *ll_root_addr;

    if (unlikely((ll == NULL) || (ll_dev == NULL)))

        *ll_root_addr = ll_dev->next;

    if (likely(ll_dev_last != NULL))
        ll_dev_last->next = ll_dev->next;
    else
        RTE_LOG(ERR, VHOST_CONFIG,
            "Remove entry from ll failed.\n");
/*
 * Find and return an entry from the free linked list.
 */
static struct virtio_net_data_ll *
get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)

    struct virtio_net_data_ll *ll_free = *ll_root_addr;
    struct virtio_net_data_ll *ll_dev;

    *ll_root_addr = ll_free->next;

/*
 * Place an entry back on to the free linked list.
 */
put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
    struct virtio_net_data_ll *ll_dev)

    struct virtio_net_data_ll *ll_free = *ll_root_addr;

    ll_dev->next = ll_free;
    *ll_root_addr = ll_dev;
/*
 * Creates a linked list of a given size.
 */
static struct virtio_net_data_ll *
alloc_data_ll(uint32_t size)

    struct virtio_net_data_ll *ll_new;

    /* Malloc and then chain the linked list. */
    ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
    if (ll_new == NULL) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "Failed to allocate memory for ll_new.\n");

    for (i = 0; i < size - 1; i++) {
        ll_new[i].vdev = NULL;
        ll_new[i].next = &ll_new[i+1];

    ll_new[i].next = NULL;
/*
 * Create the main linked list along with each individual core's
 * linked list. A used and a free list are created to manage entries.
 */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        lcore_info[lcore].lcore_ll =
            malloc(sizeof(struct lcore_ll_info));
        if (lcore_info[lcore].lcore_ll == NULL) {
            RTE_LOG(ERR, VHOST_CONFIG,
                "Failed to allocate memory for lcore_ll.\n");

        lcore_info[lcore].lcore_ll->device_num = 0;
        lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
        lcore_info[lcore].lcore_ll->ll_root_used = NULL;
        if (nb_devices % nb_switching_cores)
            lcore_info[lcore].lcore_ll->ll_root_free =
                alloc_data_ll((nb_devices / nb_switching_cores)
                    + 1);
        else
            lcore_info[lcore].lcore_ll->ll_root_free =
                alloc_data_ll(nb_devices / nb_switching_cores);

    /* Allocate devices up to a maximum of MAX_DEVICES. */
    ll_root_free = alloc_data_ll(MIN((nb_devices), MAX_DEVICES));
/*
 * Remove a device from the specific data core linked list and
 * from the main linked list. Synchronization occurs through the use
 * of the lcore dev_removal_flag. The device is made volatile here
 * to avoid re-ordering of dev->remove=1, which can cause an infinite
 * loop in the rte_pause loop.
 */
destroy_device(volatile struct virtio_net *dev)

    struct virtio_net_data_ll *ll_lcore_dev_cur;
    struct virtio_net_data_ll *ll_main_dev_cur;
    struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
    struct virtio_net_data_ll *ll_main_dev_last = NULL;
    struct vhost_dev *vdev;

    dev->flags &= ~VIRTIO_DEV_RUNNING;

    vdev = (struct vhost_dev *)dev->priv;

    /* Set the remove flag. */
    while (vdev->ready != DEVICE_SAFE_REMOVE)

    /* Search for the entry to be removed from the lcore ll. */
    ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
    while (ll_lcore_dev_cur != NULL) {
        if (ll_lcore_dev_cur->vdev == vdev) {

        ll_lcore_dev_last = ll_lcore_dev_cur;
        ll_lcore_dev_cur = ll_lcore_dev_cur->next;

    if (ll_lcore_dev_cur == NULL) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "(%"PRIu64") Failed to find the device to be destroyed.\n",
    /* Search for entry to be removed from main ll */
    ll_main_dev_cur = ll_root_used;
    ll_main_dev_last = NULL;
    while (ll_main_dev_cur != NULL) {
        if (ll_main_dev_cur->vdev == vdev) {

        ll_main_dev_last = ll_main_dev_cur;
        ll_main_dev_cur = ll_main_dev_cur->next;

    /* Remove entries from the lcore and main ll. */
    rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
        ll_lcore_dev_cur, ll_lcore_dev_last);
    rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);

    /* Set the dev_removal_flag on each lcore. */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        lcore_info[lcore].lcore_ll->dev_removal_flag =

    /*
     * Once each core has set the dev_removal_flag to
     * ACK_DEV_REMOVAL we can be sure that they can no longer access
     * the device removed from the linked lists and that the devices
     * are no longer in use.
     */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        while (lcore_info[lcore].lcore_ll->dev_removal_flag

    /* Add the entries back to the lcore and main free ll. */
    put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free,
    put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);

    /* Decrement the number of devices on the lcore. */
    lcore_info[vdev->coreid].lcore_ll->device_num--;

    RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed "
        "from data core\n", dev->device_fh);
/*
 * A new device is added to a data core. First the device is added
 * to the main linked list and then allocated to a specific data core.
 */
new_device(struct virtio_net *dev)

    struct virtio_net_data_ll *ll_dev;
    int lcore, core_add = 0;
    uint32_t device_num_min = nb_devices;
    struct vhost_dev *vdev;

    vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
        RTE_LOG(INFO, VHOST_DATA,
            "(%"PRIu64") Couldn't allocate memory for vhost dev\n",

    /* Add device to main ll */
    ll_dev = get_data_ll_free_entry(&ll_root_free);
    if (ll_dev == NULL) {
        RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in"
            " the linked list. Device limit of %d devices per core"
            " has been reached.\n", dev->device_fh, nb_devices);
        if (vdev->regions_hpa)
            rte_free(vdev->regions_hpa);

    add_data_ll_entry(&ll_root_used, ll_dev);
    vdev->rx_q = dev->device_fh;
    /* reset ready flag */
    vdev->ready = DEVICE_MAC_LEARNING;

    /* Find a suitable lcore to add the device. */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
            device_num_min = lcore_info[lcore].lcore_ll->device_num;

    /* Add device to lcore ll */
    ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
    if (ll_dev == NULL) {
        RTE_LOG(INFO, VHOST_DATA,
            "(%"PRIu64") Failed to add device to data core\n",
        vdev->ready = DEVICE_SAFE_REMOVE;
        rte_free(vdev->regions_hpa);

    vdev->coreid = core_add;

    add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,

    /* Initialize device stats */
    memset(&dev_statistics[dev->device_fh], 0,
        sizeof(struct device_statistics));

    /* Disable notifications. */
    rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
    rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
    lcore_info[vdev->coreid].lcore_ll->device_num++;
    dev->flags |= VIRTIO_DEV_RUNNING;

    RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n",
        dev->device_fh, vdev->coreid);
/*
 * These callbacks allow devices to be added to the data core when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops = {
    .new_device = new_device,
    .destroy_device = destroy_device,
/*
 * This is a thread that will wake up after a period to print stats if the
 * user has enabled them.
 */

    struct virtio_net_data_ll *dev_ll;
    uint64_t tx_dropped, rx_dropped;
    uint64_t tx, tx_total, rx, rx_total;
    const char clr[] = { 27, '[', '2', 'J', '\0' };
    const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
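    /*
     * The arrays above hold ANSI escape sequences: ESC "[2J" clears the
     * screen and ESC "[1;1H" moves the cursor to the top-left corner
     * before the statistics are redrawn.
     */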
        /* Clear screen and move to top left */
        printf("%s%s", clr, top_left);

        printf("\nDevice statistics ================================");

        dev_ll = ll_root_used;
        while (dev_ll != NULL) {
            device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
            tx_total = dev_statistics[device_fh].tx_total;
            tx = dev_statistics[device_fh].tx;
            tx_dropped = tx_total - tx;

            rx_total = rte_atomic64_read(
                &dev_statistics[device_fh].rx_total_atomic);
            rx = rte_atomic64_read(
                &dev_statistics[device_fh].rx_atomic);
            rx_dropped = rx_total - rx;

            printf("\nStatistics for device %"PRIu32" ----------"
                "\nTX total: %"PRIu64""
                "\nTX dropped: %"PRIu64""
                "\nTX successful: %"PRIu64""
                "\nRX total: %"PRIu64""
                "\nRX dropped: %"PRIu64""
                "\nRX successful: %"PRIu64"",

            dev_ll = dev_ll->next;

        printf("\n================================================\n");
/*
 * Main function, does initialisation and calls the per-lcore functions. The
 * CUSE device is also registered here to handle the IOCTLs.
 */
main(int argc, char *argv[])

    struct rte_mempool *mbuf_pool = NULL;
    unsigned lcore_id, core_id = 0;
    unsigned nb_ports, valid_nb_ports;
    static pthread_t tid;

    ret = rte_eal_init(argc, argv);
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    /* parse app arguments */
    ret = tep_termination_parse_args(argc, argv);
        rte_exit(EXIT_FAILURE, "Invalid argument\n");

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
        if (rte_lcore_is_enabled(lcore_id))
            lcore_ids[core_id++] = lcore_id;

    /* set the number of switching cores available */
    nb_switching_cores = rte_lcore_count() - 1;

    /* Get the number of physical ports. */
    nb_ports = rte_eth_dev_count();
    if (nb_ports > RTE_MAX_ETHPORTS)
        nb_ports = RTE_MAX_ETHPORTS;

    /*
     * Update the global variable nb_ports and the global array ports[],
     * and get the number of valid ports according to the number of
     * system ports.
     */
    valid_nb_ports = check_ports_num(nb_ports);

    if ((valid_nb_ports == 0) || (valid_nb_ports > MAX_SUP_PORTS)) {
        rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", nb_ports,

    /* Create the mbuf pool. */
    mbuf_pool = rte_mempool_create(
        MBUF_SIZE, MBUF_CACHE_SIZE,
        sizeof(struct rte_pktmbuf_pool_private),
        rte_pktmbuf_pool_init, NULL,
        rte_pktmbuf_init, NULL,
        rte_socket_id(), 0);
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
    for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
        vpool_array[queue_id].pool = mbuf_pool;

    /* Set log level. */
    rte_set_log_level(LOG_LEVEL);

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) {
        /* skip ports that are not enabled */
        if ((enabled_port_mask & (1 << portid)) == 0) {
            RTE_LOG(INFO, VHOST_PORT,
                "Skipping disabled port %d\n", portid);

    /* Initialise all linked lists. */
    if (init_data_ll() == -1)
        rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");

    /* Initialize device stats */
    memset(&dev_statistics, 0, sizeof(dev_statistics));

    /* Enable stats if the user option is set. */
    pthread_create(&tid, NULL, (void *)print_stats, NULL);

    /* Launch all data cores. */
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        rte_eal_remote_launch(switch_worker,
            mbuf_pool, lcore_id);
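    /*
     * Remove mergeable RX buffers from the feature set offered to the
     * guest, presumably because the RX path in this example does not
     * handle packets split across multiple descriptors.
     */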
    rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

    /* Register CUSE device to handle IOCTLs. */
    ret = rte_vhost_driver_register((char *)&dev_basename);
        rte_exit(EXIT_FAILURE, "CUSE device setup failure.\n");

    rte_vhost_driver_callback_register(&virtio_net_device_ops);

    /* Start CUSE session. */
    rte_vhost_driver_session_start();