/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <getopt.h>
#include <inttypes.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>

#include "main.h"
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

/*
 * Calculate the number of buffers needed per port
 */
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES * RTE_TEST_RX_DESC_DEFAULT) +\
				(nb_switching_cores * MAX_PKT_BURST) +\
				(nb_switching_cores * \
				RTE_TEST_TX_DESC_DEFAULT) +\
				(nb_switching_cores * MBUF_CACHE_SIZE))
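/*
 * Worked example of the sizing above (MAX_QUEUES assumed to be 512 for
 * illustration; it is defined outside this file): 512 queues * 1024 RX
 * descriptors = 524,288 mbufs to keep every RX ring full, plus, with 3
 * switching cores, 3 * 32 in-flight burst buffers, 3 * 512 TX descriptors
 * and 3 * 128 per-core cache entries, i.e. 526,304 mbufs per port. The pool
 * must cover every place a buffer can sit at the same time.
 */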
#define MBUF_CACHE_SIZE 128
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

#define MAX_PKT_BURST 32	/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

/* Defines how long we wait between retries on RX */
#define BURST_RX_WAIT_US 15

#define BURST_RX_RETRIES 4	/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600
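/* 0x2600 is 9728 decimal, a common upper bound for jumbo frames on Intel NICs. */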
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Config_core_flag status definitions. */
#define REQUEST_DEV_REMOVAL 1
#define ACK_DEV_REMOVAL 0

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512
/* Get first 4 bytes in mbuf headroom. */
#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
		+ sizeof(struct rte_mbuf)))
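/*
 * The headroom begins immediately after the rte_mbuf header, so the macro
 * aliases its first four bytes. A sketch of how an application can use it to
 * stash a per-packet tag without touching packet data (illustrative only,
 * not from the original):
 *
 *	MBUF_HEADROOM_UINT32(m) = queue_id;        // store a tag
 *	uint32_t tag = MBUF_HEADROOM_UINT32(m);    // read it back
 */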
#define INVALID_PORT_ID 0xFF

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 20

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
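/*
 * A MAC address is 6 bytes, so when one is loaded into a uint64_t the top
 * two bytes are junk; masking both sides with the low 48 bits makes the
 * comparison well defined (illustrative sketch, not from the original):
 *
 *	if ((mac_a & MAC_ADDR_CMP) == (mac_b & MAC_ADDR_CMP))
 *		;	// same MAC address
 */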
#define CMD_LINE_OPT_NB_DEVICES "nb-devices"
#define CMD_LINE_OPT_RX_RETRY "rx-retry"
#define CMD_LINE_OPT_RX_RETRY_DELAY "rx-retry-delay"
#define CMD_LINE_OPT_RX_RETRY_NUM "rx-retry-num"
#define CMD_LINE_OPT_STATS "stats"
#define CMD_LINE_OPT_DEV_BASENAME "dev-basename"
/* mask of enabled ports */
static uint32_t enabled_port_mask;

/* Number of switching cores enabled */
static uint32_t nb_switching_cores;

/* number of devices/queues to support */
uint32_t nb_devices;
/* max ring descriptor, ixgbe, i40e, e1000 all are 4096. */
#define MAX_RING_DESC 4096

struct vpool {
	struct rte_mempool *pool;
	struct rte_ring *ring;
	uint32_t buf_size;
} vpool_array[MAX_QUEUES+MAX_QUEUES];
/* Enable stats. */
uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;
/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
static unsigned lcore_ids[RTE_MAX_LCORE];
uint8_t ports[RTE_MAX_ETHPORTS];

static unsigned nb_ports; /**< The number of ports specified in command line */

/* ethernet addresses of ports */
struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* heads for the main used and free linked lists for the data path. */
static struct virtio_net_data_ll *ll_root_used;
static struct virtio_net_data_ll *ll_root_free;
/*
 * Array of data core structures containing information on
 * individual core linked lists.
 */
static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

struct device_statistics dev_statistics[MAX_DEVICES];
/*
 * Set character device basename.
 */
static int
us_vhost_parse_basename(const char *q_arg)
{
	/* basename must fit within the buffer */
	if (strlen(q_arg) >= MAX_BASENAME_SZ)
		return -1;
	else
		snprintf((char *)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
/*
 * Display usage
 */
static void
tep_termination_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"	--nb-devices [1-64]: The number of virtIO devices\n"
	"	-p PORTMASK: Set mask for ports to be used by application\n"
	"	--rx-retry [0|1]: disable/enable(default) retries on rx."
	"		Enable retry if destination queue is full\n"
	"	--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX."
	"		Takes effect only if retries on rx are enabled\n"
	"	--rx-retry-num [0-N]: the number of retries on rx."
	"		Takes effect only if retries on rx are enabled\n"
	"	--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"	--dev-basename: The basename to be used for the character device.\n",
	prgname);
}
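/*
 * Illustrative invocation (the EAL flags and binary name are assumed; only
 * the application options above come from this file):
 *
 *	./tep_termination -c 0xf -n 4 -- -p 0x1 --nb-devices 2 \
 *		--rx-retry 1 --rx-retry-delay 15 --rx-retry-num 4 \
 *		--stats 1 --dev-basename vhost-net
 */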
/*
 * Parse the arguments given in the command line of the application.
 */
static int
tep_termination_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{CMD_LINE_OPT_NB_DEVICES, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY_DELAY, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY_NUM, required_argument, NULL, 0},
		{CMD_LINE_OPT_STATS, required_argument, NULL, 0},
		{CMD_LINE_OPT_DEV_BASENAME, required_argument, NULL, 0},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid portmask\n");
				tep_termination_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_NB_DEVICES,
				sizeof(CMD_LINE_OPT_NB_DEVICES))) {
				ret = parse_num_opt(optarg, MAX_DEVICES);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for nb-devices [0-%d]\n",
						MAX_DEVICES);
					tep_termination_usage(prgname);
					return -1;
				} else
					nb_devices = ret;
			}

			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY,
				sizeof(CMD_LINE_OPT_RX_RETRY))) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-retry [0|1]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					enable_retry = ret;
			}

			/* Specify the retry delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY_DELAY,
				sizeof(CMD_LINE_OPT_RX_RETRY_DELAY))) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-retry-delay [0-N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					burst_rx_delay_time = ret;
			}

			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY_NUM,
				sizeof(CMD_LINE_OPT_RX_RETRY_NUM))) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-retry-num [0-N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					burst_rx_retry_num = ret;
			}

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_STATS,
				sizeof(CMD_LINE_OPT_STATS))) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					enable_stats = ret;
			}

			/* Set character device basename. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_DEV_BASENAME,
				sizeof(CMD_LINE_OPT_DEV_BASENAME))) {
				if (us_vhost_parse_basename(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for character "
						"device basename (Max %d characters)\n",
						MAX_BASENAME_SZ - 1);
					tep_termination_usage(prgname);
					return -1;
				}
			}

			break;

		/* Invalid option - print options. */
		default:
			tep_termination_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[nb_ports++] = (uint8_t)i;
	}

	if ((nb_ports == 0) || (nb_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", nb_ports,
			MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
/*
 * Update the global var NB_PORTS and array PORTS
 * according to system ports number and return valid ports number
 */
static unsigned
check_ports_num(unsigned max_nb_ports)
{
	unsigned valid_nb_ports = nb_ports;
	unsigned portid;

	if (nb_ports > max_nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) "
			"exceeds total system port number(%u)\n",
			nb_ports, max_nb_ports);
		nb_ports = max_nb_ports;
	}

	for (portid = 0; portid < nb_ports; portid++) {
		if (ports[portid] >= max_nb_ports) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID(%u) exceeds max "
				"system port ID(%u)\n",
				ports[portid], (max_nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_nb_ports--;
		}
	}
	return valid_nb_ports;
}
/*
 * This function routes the TX packet to the correct interface. This may be
 * a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct mbuf_table *tx_q;
	struct rte_mbuf **m_table;
	unsigned len, ret = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct virtio_net *dev = vdev->dev;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n",
		dev->device_fh);

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];
	len = tx_q->len;

	tx_q->m_table[len] = m;
	len++;
	if (enable_stats) {
		dev_statistics[dev->device_fh].tx_total++;
		dev_statistics[dev->device_fh].tx++;
	}

	/* Transmit the burst on the physical port once the table is full. */
	if (unlikely(len == MAX_PKT_BURST)) {
		m_table = (struct rte_mbuf **)tx_q->m_table;
		ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
				m_table, (uint16_t)len);
		/* Free any buffers not handled by TX and update
		 * the port stats.
		 */
		if (unlikely(ret < len)) {
			do {
				rte_pktmbuf_free(m_table[ret]);
			} while (++ret < len);
		}
		len = 0;
	}

	tx_q->len = len;
}
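/*
 * Design note: packets are staged in a per-lcore table and handed to the
 * NIC only in bursts of MAX_PKT_BURST, which amortizes the cost of the PMD
 * TX call. A partially filled table is not stranded: switch_worker() below
 * drains it once BURST_TX_DRAIN_US has elapsed.
 */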
/*
 * This function is called by each data core. It handles all
 * RX/TX registered with the core. For TX the specific lcore
 * linked list is used. For RX, MAC addresses are compared
 * with all devices in the main linked list.
 */
static int
switch_worker(__rte_unused void *arg)
{
	struct rte_mempool *mbuf_pool = arg;
	struct virtio_net *dev = NULL;
	struct vhost_dev *vdev = NULL;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct virtio_net_data_ll *dev_ll;
	struct mbuf_table *tx_q;
	volatile struct lcore_ll_info *lcore_ll;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
					/ US_PER_S * BURST_TX_DRAIN_US;
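	/*
	 * Worked example (TSC frequency assumed for illustration): at 2 GHz,
	 * drain_tsc = ceil(2,000,000,000 / 1,000,000) * 100 = 200,000 cycles,
	 * i.e. the TX table is drained roughly every 100 microseconds.
	 */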
	uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
	unsigned i, ret = 0;
	const uint16_t lcore_id = rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t rx_count = 0;
	uint16_t tx_count;
	uint32_t retry = 0;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
	lcore_ll = lcore_info[lcore_id].lcore_ll;
	prev_tsc = 0;

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < num_cores; i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			if (tx_q->len) {
				LOG_DEBUG(VHOST_DATA, "TX queue drained after "
					"timeout with burst size %u\n",
					tx_q->len);
				ret = rte_eth_tx_burst(ports[0],
					(uint16_t)tx_q->txq_id,
					(struct rte_mbuf **)tx_q->m_table,
					(uint16_t)tx_q->len);
				if (unlikely(ret < tx_q->len)) {
					do {
						rte_pktmbuf_free(tx_q->m_table[ret]);
					} while (++ret < tx_q->len);
				}
				tx_q->len = 0;
			}
			prev_tsc = cur_tsc;
		}

		rte_prefetch0(lcore_ll->ll_root_used);

		/*
		 * Inform the configuration core that we have exited
		 * the linked list and that no devices are
		 * in use if requested.
		 */
		if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;

		/* Process devices */
		dev_ll = lcore_ll->ll_root_used;

		while (dev_ll != NULL) {
			vdev = dev_ll->vdev;
			dev = vdev->dev;

			if (unlikely(vdev->remove)) {
				dev_ll = dev_ll->next;
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}
			if (likely(vdev->ready == DEVICE_RX)) {
				/* Handle guest RX */
				rx_count = rte_eth_rx_burst(ports[0],
					vdev->rx_q, pkts_burst, MAX_PKT_BURST);

				if (rx_count) {
					/*
					 * If retry is enabled and the queue is
					 * full then we wait and retry to
					 * avoid packet loss. Here MAX_PKT_BURST
					 * must be less than virtio queue size
					 */
					if (enable_retry && unlikely(rx_count >
						rte_vring_available_entries(dev, VIRTIO_RXQ))) {
						for (retry = 0; retry < burst_rx_retry_num;
							retry++) {
							rte_delay_us(burst_rx_delay_time);
							if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
								break;
						}
					}

					ret_count = rte_vhost_enqueue_burst(dev,
						VIRTIO_RXQ, pkts_burst, rx_count);
					if (enable_stats) {
						rte_atomic64_add(
						&dev_statistics[dev->device_fh].rx_total_atomic,
						rx_count);
						rte_atomic64_add(
						&dev_statistics[dev->device_fh].rx_atomic,
						ret_count);
					}
					while (likely(rx_count)) {
						rx_count--;
						rte_pktmbuf_free(pkts_burst[rx_count]);
					}
				}
			}

			if (likely(!vdev->remove)) {
				/* Handle guest TX */
				tx_count = rte_vhost_dequeue_burst(dev,
						VIRTIO_TXQ, mbuf_pool,
						pkts_burst, MAX_PKT_BURST);
				/* If this is the first received packet we need to learn the MAC */
				if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
					if (vdev->remove) {
						/* Device is leaving; drop the burst. */
						while (tx_count)
							rte_pktmbuf_free(pkts_burst[--tx_count]);
					} else
						/* First burst seen; start handling RX. */
						vdev->ready = DEVICE_RX;
				}
				while (tx_count)
					virtio_tx_route(vdev, pkts_burst[--tx_count]);
			}

			/* move to the next device in the list */
			dev_ll = dev_ll->next;
		}
	}

	return 0;
}
/*
 * Add an entry to a used linked list. A free entry must first be found
 * in the free linked list using get_data_ll_free_entry();
 */
static void
add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev)
{
	struct virtio_net_data_ll *ll = *ll_root_addr;

	/* Set next as NULL and use a compiler barrier to avoid reordering. */
	ll_dev->next = NULL;
	rte_compiler_barrier();

	/* If ll == NULL then this is the first device. */
	if (ll) {
		/* Increment to the tail of the linked list. */
		while (ll->next != NULL)
			ll = ll->next;

		ll->next = ll_dev;
	} else
		*ll_root_addr = ll_dev;
}
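/*
 * Why the barrier matters: data cores walk this list lock-free while the
 * configuration core appends to it. Publishing ll_dev->next = NULL before
 * the entry becomes reachable, with a compiler barrier between the two
 * stores, ensures a concurrent reader never follows a stale next pointer
 * off the new tail.
 */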
/*
 * Remove an entry from a used linked list. The entry must then be added to
 * the free linked list using put_data_ll_free_entry().
 */
static void
rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev,
	struct virtio_net_data_ll *ll_dev_last)
{
	struct virtio_net_data_ll *ll = *ll_root_addr;

	if (unlikely((ll == NULL) || (ll_dev == NULL)))
		return;

	if (ll_dev == ll)
		*ll_root_addr = ll_dev->next;
	else if (likely(ll_dev_last != NULL))
		ll_dev_last->next = ll_dev->next;
	else
		RTE_LOG(ERR, VHOST_CONFIG,
			"Remove entry from ll failed.\n");
}
/*
 * Find and return an entry from the free linked list.
 */
static struct virtio_net_data_ll *
get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
{
	struct virtio_net_data_ll *ll_free = *ll_root_addr;
	struct virtio_net_data_ll *ll_dev;

	if (ll_free == NULL)
		return NULL;

	ll_dev = ll_free;
	*ll_root_addr = ll_free->next;

	return ll_dev;
}
/*
 * Place an entry back on to the free linked list.
 */
static void
put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev)
{
	struct virtio_net_data_ll *ll_free = *ll_root_addr;

	if (ll_dev == NULL)
		return;

	ll_dev->next = ll_free;
	*ll_root_addr = ll_dev;
}
/*
 * Creates a linked list of a given size.
 */
static struct virtio_net_data_ll *
alloc_data_ll(uint32_t size)
{
	struct virtio_net_data_ll *ll_new;
	uint32_t i;

	/* Malloc and then chain the linked list. */
	ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
	if (ll_new == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for ll_new.\n");
		return NULL;
	}

	for (i = 0; i < size - 1; i++) {
		ll_new[i].vdev = NULL;
		ll_new[i].next = &ll_new[i+1];
	}
	ll_new[i].next = NULL;

	return ll_new;
}
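/*
 * The "list" is one contiguous array whose entries point to their right-hand
 * neighbor, e.g. for size = 3:
 *
 *	ll_new[0].next -> &ll_new[1]
 *	ll_new[1].next -> &ll_new[2]
 *	ll_new[2].next -> NULL
 *
 * Allocation is a single malloc; afterwards entries are recycled by
 * relinking through get/put_data_ll_free_entry() rather than by further
 * calls into the allocator.
 */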
/*
 * Create the main linked list along with each individual cores
 * linked list. A used and a free list are created to manage entries.
 */
static int
init_data_ll(void)
{
	int lcore;

	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll =
			malloc(sizeof(struct lcore_ll_info));
		if (lcore_info[lcore].lcore_ll == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to allocate memory for lcore_ll.\n");
			return -1;
		}

		lcore_info[lcore].lcore_ll->device_num = 0;
		lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
		lcore_info[lcore].lcore_ll->ll_root_used = NULL;
		if (nb_devices % nb_switching_cores)
			lcore_info[lcore].lcore_ll->ll_root_free =
				alloc_data_ll((nb_devices / nb_switching_cores)
					+ 1);
		else
			lcore_info[lcore].lcore_ll->ll_root_free =
				alloc_data_ll(nb_devices / nb_switching_cores);
	}

	/* Allocate devices up to a maximum of MAX_DEVICES. */
	ll_root_free = alloc_data_ll(MIN((nb_devices), MAX_DEVICES));

	return 0;
}
/*
 * Remove a device from the specific data core linked list and from the main
 * linked list. Synchronization occurs through the use of the lcore
 * dev_removal_flag. Device is made volatile here to avoid re-ordering of
 * dev->remove=1 which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
	struct virtio_net_data_ll *ll_lcore_dev_cur;
	struct virtio_net_data_ll *ll_main_dev_cur;
	struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
	struct virtio_net_data_ll *ll_main_dev_last = NULL;
	struct vhost_dev *vdev;
	int lcore;

	dev->flags &= ~VIRTIO_DEV_RUNNING;

	vdev = (struct vhost_dev *)dev->priv;

	/* set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE)
		rte_pause();

	/* Search for entry to be removed from lcore ll */
	ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
	while (ll_lcore_dev_cur != NULL) {
		if (ll_lcore_dev_cur->vdev == vdev) {
			break;
		} else {
			ll_lcore_dev_last = ll_lcore_dev_cur;
			ll_lcore_dev_cur = ll_lcore_dev_cur->next;
		}
	}

	if (ll_lcore_dev_cur == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find the device to be destroyed.\n",
			dev->device_fh);
		return;
	}

	/* Search for entry to be removed from main ll */
	ll_main_dev_cur = ll_root_used;
	ll_main_dev_last = NULL;
	while (ll_main_dev_cur != NULL) {
		if (ll_main_dev_cur->vdev == vdev) {
			break;
		} else {
			ll_main_dev_last = ll_main_dev_cur;
			ll_main_dev_cur = ll_main_dev_cur->next;
		}
	}

	/* Remove entries from the lcore and main ll. */
	rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
			ll_lcore_dev_cur, ll_lcore_dev_last);
	rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll->dev_removal_flag =
			REQUEST_DEV_REMOVAL;
	}

	/*
	 * Once each core has set the dev_removal_flag to
	 * ACK_DEV_REMOVAL we can be sure that they can no longer access
	 * the device removed from the linked lists and that the devices
	 * are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].lcore_ll->dev_removal_flag
			!= ACK_DEV_REMOVAL)
			rte_pause();
	}

	/* Add the entries back to the lcore and main free ll. */
	put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free,
			ll_lcore_dev_cur);
	put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);

	/* Decrement number of device on the lcore. */
	lcore_info[vdev->coreid].lcore_ll->device_num--;

	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed "
		"from data core\n", dev->device_fh);

	rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added
 * to the main linked list and then allocated to a specific data core.
 */
static int
new_device(struct virtio_net *dev)
{
	struct virtio_net_data_ll *ll_dev;
	int lcore, core_add = 0;
	uint32_t device_num_min = nb_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%"PRIu64") Couldn't allocate memory for vhost dev\n",
			dev->device_fh);
		return -1;
	}
	vdev->dev = dev;
	dev->priv = vdev;

	/* Add device to main ll */
	ll_dev = get_data_ll_free_entry(&ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in"
			" linked list. Device limit of %d devices per core"
			" has been reached\n", dev->device_fh, nb_devices);
		if (vdev->regions_hpa)
			rte_free(vdev->regions_hpa);
		rte_free(vdev);
		return -1;
	}
	ll_dev->vdev = vdev;
	add_data_ll_entry(&ll_root_used, ll_dev);
	vdev->rx_q = dev->device_fh;

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
			device_num_min = lcore_info[lcore].lcore_ll->device_num;
			core_add = lcore;
		}
	}

	/* Add device to lcore ll */
	ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%"PRIu64") Failed to add device to data core\n",
			dev->device_fh);
		vdev->ready = DEVICE_SAFE_REMOVE;
		destroy_device(dev);
		if (vdev->regions_hpa)
			rte_free(vdev->regions_hpa);
		rte_free(vdev);
		return -1;
	}
	ll_dev->vdev = vdev;
	vdev->coreid = core_add;

	add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
			ll_dev);

	/* Initialize device stats */
	memset(&dev_statistics[dev->device_fh], 0,
		sizeof(struct device_statistics));

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
	lcore_info[vdev->coreid].lcore_ll->device_num++;
	dev->flags |= VIRTIO_DEV_RUNNING;

	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n",
		dev->device_fh, vdev->coreid);

	return 0;
}
/*
 * These callbacks allow devices to be added to the data core when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
};
/*
 * This is a thread that wakes up after a period to print stats if the user
 * has enabled them.
 */
static void
print_stats(void)
{
	struct virtio_net_data_ll *dev_ll;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	uint32_t device_fh;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s", clr, top_left);

		printf("\nDevice statistics ================================");

		dev_ll = ll_root_used;
		while (dev_ll != NULL) {
			device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
			tx_total = dev_statistics[device_fh].tx_total;
			tx = dev_statistics[device_fh].tx;
			tx_dropped = tx_total - tx;

			rx_total = rte_atomic64_read(
				&dev_statistics[device_fh].rx_total_atomic);
			rx = rte_atomic64_read(
				&dev_statistics[device_fh].rx_atomic);
			rx_dropped = rx_total - rx;

			printf("\nStatistics for device %"PRIu32" ----------"
					"\nTX total: %"PRIu64""
					"\nTX dropped: %"PRIu64""
					"\nTX successful: %"PRIu64""
					"\nRX total: %"PRIu64""
					"\nRX dropped: %"PRIu64""
					"\nRX successful: %"PRIu64"",
					device_fh,
					tx_total,
					tx_dropped,
					tx,
					rx_total,
					rx_dropped,
					rx);

			dev_ll = dev_ll->next;
		}
		printf("\n================================================\n");
	}
}
/*
 * Main function, does initialisation and calls the per-lcore functions. The
 * CUSE device is also registered here to handle the IOCTLs.
 */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool = NULL;
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_nb_ports;
	int ret;
	uint8_t portid;
	uint16_t queue_id;
	static pthread_t tid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = tep_termination_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	/* set the number of switching cores available */
	nb_switching_cores = rte_lcore_count() - 1;

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	/*
	 * Update the global var NB_PORTS and global array PORTS
	 * and get value of var VALID_NB_PORTS according to system ports number
	 */
	valid_nb_ports = check_ports_num(nb_ports);

	if ((valid_nb_ports == 0) || (valid_nb_ports > MAX_SUP_PORTS)) {
		rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", nb_ports,
			MAX_SUP_PORTS);
	}

	/* Create the mbuf pool. */
	mbuf_pool = rte_mempool_create(
			"MBUF_POOL",
			NUM_MBUFS_PER_PORT * valid_nb_ports,
			MBUF_SIZE, MBUF_CACHE_SIZE,
			sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL,
			rte_pktmbuf_init, NULL,
			rte_socket_id(), 0);
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
		vpool_array[queue_id].pool = mbuf_pool;

	/* Set log level. */
	rte_set_log_level(LOG_LEVEL);

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		/* port_init() is an assumed helper here: the per-port RX/TX
		 * queue setup is not shown in this listing. */
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}

	/* Initialise all linked lists. */
	if (init_data_ll() == -1)
		rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");

	/* Initialize device stats */
	memset(&dev_statistics, 0, sizeof(dev_statistics));

	/* Enable stats if the user option is set. */
	if (enable_stats)
		pthread_create(&tid, NULL, (void *)print_stats, NULL);

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(switch_worker,
			mbuf_pool, lcore_id);
	}

	rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

	/* Register CUSE device to handle IOCTLs. */
	ret = rte_vhost_driver_register((char *)&dev_basename);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "CUSE device setup failure.\n");

	rte_vhost_driver_callback_register(&virtio_net_device_ops);

	/* Start CUSE session. */
	rte_vhost_driver_session_start();

	return 0;
}