/*
 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>

#include "main.h"
#include "vxlan_setup.h"
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
/*
 * Calculate the number of buffers needed per port
 */
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES * RTE_TEST_RX_DESC_DEFAULT) +\
				(nb_switching_cores * MAX_PKT_BURST) +\
				(nb_switching_cores * \
				RTE_TEST_TX_DESC_DEFAULT) +\
				(nb_switching_cores * MBUF_CACHE_SIZE))
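/*
 * Illustrative sizing only (the values below are hypothetical, not taken
 * from this file): with MAX_QUEUES = 128 and nb_switching_cores = 3, the
 * formula gives 128 * 1024 + 3 * 32 + 3 * 512 + 3 * 128 = 133088 mbufs
 * per port.
 */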
#define MBUF_CACHE_SIZE 128
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
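/*
 * Layout note: each buffer holds the rte_mbuf metadata first, then
 * RTE_PKTMBUF_HEADROOM bytes of headroom, then 2048 bytes of data room,
 * which is enough for a standard Ethernet frame plus the VXLAN/UDP/IP
 * encapsulation added by this application.
 */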
#define MAX_PKT_BURST 32	/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

/* Defines how long we wait between retries on RX */
#define BURST_RX_WAIT_US 15

#define BURST_RX_RETRIES 4	/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2

/* Config_core_flag status definitions. */
#define REQUEST_DEV_REMOVAL 1
#define ACK_DEV_REMOVAL 0
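/*
 * Removal handshake sketch (mirrors destroy_device() and switch_worker()
 * below): the configuration core raises REQUEST_DEV_REMOVAL on each data
 * core and spins until the data core, once safely outside the linked
 * list, writes back the acknowledgement:
 *
 *	if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
 *		lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
 */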
/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512
/* Get first 4 bytes in mbuf headroom. */
#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
		+ sizeof(struct rte_mbuf)))
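/*
 * Usage sketch (illustrative only): the macro expands to an lvalue, so
 * the first four headroom bytes can be read or written directly. "pkt"
 * is a hypothetical struct rte_mbuf pointer:
 *
 *	MBUF_HEADROOM_UINT32(pkt) = 0x01020304;
 *	uint32_t tag = MBUF_HEADROOM_UINT32(pkt);
 */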
#define INVALID_PORT_ID 0xFF

/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072

/* Maximum character device basename size. */
#define MAX_BASENAME_SZ 20

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64

/* Used to compare MAC addresses. */
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
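/*
 * Comparison sketch (illustrative only, following the pattern of the
 * related vhost example): reading a 6-byte address as a uint64_t and
 * masking with MAC_ADDR_CMP keeps only the low 48 bits, so two MACs can
 * be compared with one XOR. "ea" and "eb" are hypothetical struct
 * ether_addr pointers; this relies on the two bytes following each
 * address being readable:
 *
 *	return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
 */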
#define CMD_LINE_OPT_NB_DEVICES "nb-devices"
#define CMD_LINE_OPT_UDP_PORT "udp-port"
#define CMD_LINE_OPT_FILTER_TYPE "filter-type"
#define CMD_LINE_OPT_RX_RETRY "rx-retry"
#define CMD_LINE_OPT_RX_RETRY_DELAY "rx-retry-delay"
#define CMD_LINE_OPT_RX_RETRY_NUM "rx-retry-num"
#define CMD_LINE_OPT_STATS "stats"
#define CMD_LINE_OPT_DEV_BASENAME "dev-basename"
/* mask of enabled ports */
static uint32_t enabled_port_mask;

/* Number of switching cores enabled */
static uint32_t nb_switching_cores;

/* number of devices/queues to support */
uint16_t nb_devices = 2;

/* max ring descriptor, ixgbe, i40e, e1000 all are 4096. */
#define MAX_RING_DESC 4096

/* Per-queue memory pool and ring. */
struct vpool {
	struct rte_mempool *pool;
	struct rte_ring *ring;
} vpool_array[MAX_QUEUES+MAX_QUEUES];
/* UDP tunneling port */
uint16_t udp_port = 4789;

/* RX filter type for tunneling packet */
uint8_t filter_idx = 1;

/* overlay packet operation */
struct ol_switch_ops overlay_options = {
	.port_configure = vxlan_port_init,
	.tunnel_setup = vxlan_link,
	.tunnel_destroy = vxlan_unlink,
	.tx_handle = vxlan_tx_pkts,
	.rx_handle = vxlan_rx_pkts,
	.param_handle = NULL,
};
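/*
 * Dispatch sketch (hypothetical variable names): the data path calls
 * through this table rather than the vxlan_* functions directly, e.g.
 *
 *	sent = overlay_options.tx_handle(port_id, queue_id, pkts, count);
 *
 * so another tunnel type could be supported by swapping in a different
 * ol_switch_ops instance, assuming it provides matching callbacks.
 */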
/* Enable stats. */
uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;
/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Character device basename. Can be set by user. */
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";

static unsigned lcore_ids[RTE_MAX_LCORE];
uint8_t ports[RTE_MAX_ETHPORTS];

static unsigned nb_ports; /**< The number of ports specified in command line */
/* ethernet addresses of ports */
struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* heads for the main used and free linked lists for the data path. */
static struct virtio_net_data_ll *ll_root_used;
static struct virtio_net_data_ll *ll_root_free;

/*
 * Array of data core structures containing information on
 * individual core linked lists.
 */
static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

struct device_statistics dev_statistics[MAX_DEVICES];
/**
 * Set character device basename.
 */
static int
us_vhost_parse_basename(const char *q_arg)
{
	if (strlen(q_arg) >= MAX_BASENAME_SZ)
		return -1;

	snprintf(dev_basename, MAX_BASENAME_SZ, "%s", q_arg);

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (pm == 0)
		return -1;

	return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
/*
 * Display usage.
 */
static void
tep_termination_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"	--udp-port: UDP destination port for VXLAN packet\n"
	"	--nb-devices [1-64]: the number of virtIO devices\n"
	"	--filter-type [1-3]: filter type for tunneling packet\n"
	"		1: Inner MAC and tenant ID\n"
	"		2: Inner MAC and VLAN, and tenant ID\n"
	"		3: Outer MAC, inner MAC and tenant ID\n"
	"	-p PORTMASK: Set mask for ports to be used by application\n"
	"	--rx-retry [0|1]: disable/enable(default) retries on RX."
	" Enable retry if destination queue is full\n"
	"	--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX."
	" This takes effect only if retries on RX are enabled\n"
	"	--rx-retry-num [0-N]: the number of retries on RX."
	" This takes effect only if retries on RX are enabled\n"
	"	--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"	--dev-basename: The basename to be used for the character device\n",
	       prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
tep_termination_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{CMD_LINE_OPT_NB_DEVICES, required_argument, NULL, 0},
		{CMD_LINE_OPT_UDP_PORT, required_argument, NULL, 0},
		{CMD_LINE_OPT_FILTER_TYPE, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY_DELAY, required_argument, NULL, 0},
		{CMD_LINE_OPT_RX_RETRY_NUM, required_argument, NULL, 0},
		{CMD_LINE_OPT_STATS, required_argument, NULL, 0},
		{CMD_LINE_OPT_DEV_BASENAME, required_argument, NULL, 0},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid portmask\n");
				tep_termination_usage(prgname);
				return -1;
			}
			break;

		case 0:
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_NB_DEVICES,
				sizeof(CMD_LINE_OPT_NB_DEVICES))) {
				ret = parse_num_opt(optarg, MAX_DEVICES);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for nb-devices [0-%d]\n",
						MAX_DEVICES);
					tep_termination_usage(prgname);
					return -1;
				} else
					nb_devices = ret;
			}
			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY,
				sizeof(CMD_LINE_OPT_RX_RETRY))) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-retry [0|1]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					enable_retry = ret;
			}
			/* Specify the UDP tunneling port. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_UDP_PORT,
				sizeof(CMD_LINE_OPT_UDP_PORT))) {
				ret = parse_num_opt(optarg, INT16_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for UDP port [0-N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					udp_port = ret;
			}
			/* Specify the retries delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY_DELAY,
				sizeof(CMD_LINE_OPT_RX_RETRY_DELAY))) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-retry-delay [0-N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					burst_rx_delay_time = ret;
			}
			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_RX_RETRY_NUM,
				sizeof(CMD_LINE_OPT_RX_RETRY_NUM))) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for rx-retry-num [0-N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					burst_rx_retry_num = ret;
			}
			/* Specify the filter type. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_FILTER_TYPE,
				sizeof(CMD_LINE_OPT_FILTER_TYPE))) {
				ret = parse_num_opt(optarg, 3);
				if ((ret == -1) || (ret == 0)) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for filter type [1-3]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					filter_idx = ret - 1;
			}
			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_STATS,
				sizeof(CMD_LINE_OPT_STATS))) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					tep_termination_usage(prgname);
					return -1;
				} else
					enable_stats = ret;
			}
			/* Set character device basename. */
			if (!strncmp(long_option[option_index].name,
				CMD_LINE_OPT_DEV_BASENAME,
				sizeof(CMD_LINE_OPT_DEV_BASENAME))) {
				if (us_vhost_parse_basename(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for character "
						"device basename (Max %d characters)\n",
						MAX_BASENAME_SZ);
					tep_termination_usage(prgname);
					return -1;
				}
			}
			break;
		/* Invalid option - print options. */
		default:
			tep_termination_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[nb_ports++] = (uint8_t)i;
	}

	if ((nb_ports == 0) || (nb_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", nb_ports,
			MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
/**
 * Update the global var NB_PORTS and array PORTS
 * according to system ports number and return valid ports number.
 */
static unsigned
check_ports_num(unsigned max_nb_ports)
{
	unsigned valid_nb_ports = nb_ports;
	unsigned portid;

	if (nb_ports > max_nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number (%u) "
			"exceeds total system port number (%u)\n",
			nb_ports, max_nb_ports);
		nb_ports = max_nb_ports;
	}

	for (portid = 0; portid < nb_ports; portid++) {
		if (ports[portid] >= max_nb_ports) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID (%u) exceeds max "
				"system port ID (%u)\n",
				ports[portid], (max_nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_nb_ports--;
		}
	}

	return valid_nb_ports;
}
/*
 * This function routes the TX packet to the correct interface. This may be
 * a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct mbuf_table *tx_q;
	struct rte_mbuf **m_table;
	unsigned len, ret = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct virtio_net *dev = vdev->dev;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n",
		dev->device_fh);

	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];
	len = tx_q->len;

	tx_q->m_table[len] = m;
	len++;
	if (enable_stats) {
		dev_statistics[dev->device_fh].tx_total++;
		dev_statistics[dev->device_fh].tx++;
	}

	if (unlikely(len == MAX_PKT_BURST)) {
		m_table = (struct rte_mbuf **)tx_q->m_table;
		ret = overlay_options.tx_handle(ports[0],
			(uint16_t)tx_q->txq_id, m_table,
			(uint16_t)tx_q->len);

		/*
		 * Free any buffers not handled by TX and update
		 * the port stats.
		 */
		if (unlikely(ret < len)) {
			do {
				rte_pktmbuf_free(m_table[ret]);
			} while (++ret < len);
		}

		len = 0;
	}

	tx_q->len = len;
}
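/*
 * Note (grounded in the code above and in switch_worker() below): packets
 * queued by virtio_tx_route() are not sent immediately. They accumulate in
 * the per-lcore mbuf_table until the burst reaches MAX_PKT_BURST, and any
 * remainder is flushed by the BURST_TX_DRAIN_US timeout in switch_worker().
 */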
/*
 * This function is called by each data core. It handles all
 * RX/TX registered with the core. For TX the specific lcore
 * linked list is used. For RX, MAC addresses are compared
 * with all devices in the main linked list.
 */
static int
switch_worker(__rte_unused void *arg)
{
	struct rte_mempool *mbuf_pool = arg;
	struct virtio_net *dev = NULL;
	struct vhost_dev *vdev = NULL;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct virtio_net_data_ll *dev_ll;
	struct mbuf_table *tx_q;
	volatile struct lcore_ll_info *lcore_ll;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
		/ US_PER_S * BURST_TX_DRAIN_US;
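	/*
	 * Illustrative arithmetic: drain_tsc converts the 100 us drain
	 * interval into TSC cycles. With a hypothetical 2.4 GHz TSC,
	 * ceil(2.4e9 / 1e6) * 100 = 240000 cycles between drains.
	 */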
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, ret_count = 0;
	unsigned i, ret = 0;
	const uint16_t lcore_id = rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t rx_count = 0;
	uint16_t tx_count;
	uint32_t retry = 0;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
	lcore_ll = lcore_info[lcore_id].lcore_ll;

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < num_cores; i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		cur_tsc = rte_rdtsc();
		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			if (tx_q->len) {
				LOG_DEBUG(VHOST_DATA, "TX queue drained after "
					"timeout with burst size %u\n",
					tx_q->len);
				ret = overlay_options.tx_handle(ports[0],
					(uint16_t)tx_q->txq_id,
					(struct rte_mbuf **)tx_q->m_table,
					(uint16_t)tx_q->len);
				if (unlikely(ret < tx_q->len)) {
					do {
						rte_pktmbuf_free(tx_q->m_table[ret]);
					} while (++ret < tx_q->len);
				}

				tx_q->len = 0;
			}

			prev_tsc = cur_tsc;
		}
		rte_prefetch0(lcore_ll->ll_root_used);

		/*
		 * Inform the configuration core that we have exited
		 * the linked list and that no devices are
		 * in use if requested.
		 */
		if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
		/* Process devices */
		dev_ll = lcore_ll->ll_root_used;

		while (dev_ll != NULL) {
			vdev = dev_ll->vdev;
			dev = vdev->dev;

			if (unlikely(vdev->remove)) {
				dev_ll = dev_ll->next;
				overlay_options.tunnel_destroy(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX)) {
				/* Handle guest RX */
				rx_count = rte_eth_rx_burst(ports[0],
					vdev->rx_q, pkts_burst, MAX_PKT_BURST);
				if (rx_count) {
					/*
					 * If retry is enabled and the queue is
					 * full then we wait and retry to
					 * avoid packet loss. Here MAX_PKT_BURST
					 * must be less than the virtio queue size.
					 */
					if (enable_retry && unlikely(rx_count >
						rte_vring_available_entries(dev, VIRTIO_RXQ))) {
						for (retry = 0; retry < burst_rx_retry_num;
							retry++) {
							rte_delay_us(burst_rx_delay_time);
							if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
								break;
						}
					}

					ret_count = overlay_options.rx_handle(dev, pkts_burst, rx_count);
					if (enable_stats) {
						rte_atomic64_add(
							&dev_statistics[dev->device_fh].rx_total_atomic,
							rx_count);
						rte_atomic64_add(
							&dev_statistics[dev->device_fh].rx_atomic, ret_count);
					}

					while (likely(rx_count)) {
						rx_count--;
						rte_pktmbuf_free(pkts_burst[rx_count]);
					}
				}
			if (likely(!vdev->remove)) {
				/* Handle guest TX */
				tx_count = rte_vhost_dequeue_burst(dev,
					VIRTIO_TXQ, mbuf_pool,
					pkts_burst, MAX_PKT_BURST);
				/*
				 * If this is the first received packet we need
				 * to learn the MAC.
				 */
				if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
					if (vdev->remove ||
						(overlay_options.tunnel_setup(vdev, pkts_burst[0]) == -1)) {
						while (tx_count)
							rte_pktmbuf_free(pkts_burst[--tx_count]);
					}
				}
				while (tx_count)
					virtio_tx_route(vdev, pkts_burst[--tx_count]);
			}

			/* move to the next device in the list */
			dev_ll = dev_ll->next;
		}
	}

	return 0;
}
/*
 * Add an entry to a used linked list. A free entry must first be found
 * in the free linked list using get_data_ll_free_entry().
 */
static void
add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev)
{
	struct virtio_net_data_ll *ll = *ll_root_addr;

	/* Set next as NULL and use a compiler barrier to avoid reordering. */
	ll_dev->next = NULL;
	rte_compiler_barrier();

	/* If ll == NULL then this is the first device. */
	if (ll) {
		/* Increment to the tail of the linked list. */
		while (ll->next != NULL)
			ll = ll->next;

		ll->next = ll_dev;
	} else
		*ll_root_addr = ll_dev;
}
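/*
 * Usage sketch (illustrative only): entries always travel in
 * get-free -> fill -> add-used order, mirroring new_device() below.
 * "some_vdev" is a hypothetical device pointer:
 *
 *	struct virtio_net_data_ll *entry;
 *
 *	entry = get_data_ll_free_entry(&ll_root_free);
 *	if (entry != NULL) {
 *		entry->vdev = some_vdev;
 *		add_data_ll_entry(&ll_root_used, entry);
 *	}
 */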
/*
 * Remove an entry from a used linked list. The entry must then be added to
 * the free linked list using put_data_ll_free_entry().
 */
static void
rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev,
	struct virtio_net_data_ll *ll_dev_last)
{
	struct virtio_net_data_ll *ll = *ll_root_addr;

	if (unlikely((ll == NULL) || (ll_dev == NULL)))
		return;

	if (ll_dev == ll)
		*ll_root_addr = ll_dev->next;
	else if (likely(ll_dev_last != NULL))
		ll_dev_last->next = ll_dev->next;
	else
		RTE_LOG(ERR, VHOST_CONFIG,
			"Remove entry from ll failed.\n");
}
735 * Find and return an entry from the free linked list.
737 static struct virtio_net_data_ll *
738 get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
740 struct virtio_net_data_ll *ll_free = *ll_root_addr;
741 struct virtio_net_data_ll *ll_dev;
747 *ll_root_addr = ll_free->next;
/*
 * Place an entry back on to the free linked list.
 */
static void
put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
	struct virtio_net_data_ll *ll_dev)
{
	struct virtio_net_data_ll *ll_free = *ll_root_addr;

	if (ll_dev == NULL)
		return;

	ll_dev->next = ll_free;
	*ll_root_addr = ll_dev;
}
/*
 * Creates a linked list of a given size.
 */
static struct virtio_net_data_ll *
alloc_data_ll(uint32_t size)
{
	struct virtio_net_data_ll *ll_new;
	uint32_t i;

	/* Malloc and then chain the linked list. */
	ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
	if (ll_new == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for ll_new.\n");
		return NULL;
	}

	for (i = 0; i < size - 1; i++) {
		ll_new[i].vdev = NULL;
		ll_new[i].next = &ll_new[i+1];
	}
	ll_new[i].next = NULL;

	return ll_new;
}
/**
 * Create the main linked list along with each individual cores
 * linked list. A used and a free list are created to manage entries.
 */
static int
init_data_ll(void)
{
	int lcore;

	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll =
			malloc(sizeof(struct lcore_ll_info));
		if (lcore_info[lcore].lcore_ll == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to allocate memory for lcore_ll.\n");
			return -1;
		}

		lcore_info[lcore].lcore_ll->device_num = 0;
		lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
		lcore_info[lcore].lcore_ll->ll_root_used = NULL;
		if (nb_devices % nb_switching_cores)
			lcore_info[lcore].lcore_ll->ll_root_free =
				alloc_data_ll((nb_devices / nb_switching_cores)
					+ 1);
		else
			lcore_info[lcore].lcore_ll->ll_root_free =
				alloc_data_ll(nb_devices / nb_switching_cores);
	}

	/* Allocate devices up to a maximum of MAX_DEVICES. */
	ll_root_free = alloc_data_ll(MIN((nb_devices), MAX_DEVICES));

	return 0;
}
/**
 * Remove a device from the specific data core linked list and from the main
 * linked list. Synchronization occurs through the use of the lcore
 * dev_removal_flag. The device is made volatile here to avoid re-ordering of
 * dev->remove=1, which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
	struct virtio_net_data_ll *ll_lcore_dev_cur;
	struct virtio_net_data_ll *ll_main_dev_cur;
	struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
	struct virtio_net_data_ll *ll_main_dev_last = NULL;
	struct vhost_dev *vdev;
	int lcore;

	dev->flags &= ~VIRTIO_DEV_RUNNING;

	vdev = (struct vhost_dev *)dev->priv;

	/* Set the remove flag and wait until the device is safe to remove. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE)
		rte_pause();
	/* Search for the entry to be removed from the lcore ll */
	ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
	while (ll_lcore_dev_cur != NULL) {
		if (ll_lcore_dev_cur->vdev == vdev) {
			break;
		} else {
			ll_lcore_dev_last = ll_lcore_dev_cur;
			ll_lcore_dev_cur = ll_lcore_dev_cur->next;
		}
	}

	if (ll_lcore_dev_cur == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find the device to be destroyed.\n",
			dev->device_fh);
		return;
	}
	/* Search for the entry to be removed from the main ll */
	ll_main_dev_cur = ll_root_used;
	ll_main_dev_last = NULL;
	while (ll_main_dev_cur != NULL) {
		if (ll_main_dev_cur->vdev == vdev) {
			break;
		} else {
			ll_main_dev_last = ll_main_dev_cur;
			ll_main_dev_cur = ll_main_dev_cur->next;
		}
	}

	/* Remove entries from the lcore and main ll. */
	rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
		ll_lcore_dev_cur, ll_lcore_dev_last);
	rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		lcore_info[lcore].lcore_ll->dev_removal_flag =
			REQUEST_DEV_REMOVAL;
	}

	/*
	 * Once each core has set the dev_removal_flag to
	 * ACK_DEV_REMOVAL we can be sure that they can no longer access
	 * the device removed from the linked lists and that the devices
	 * are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].lcore_ll->dev_removal_flag
			!= ACK_DEV_REMOVAL)
			rte_pause();
	}

	/* Add the entries back to the lcore and main free ll. */
	put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free,
		ll_lcore_dev_cur);
	put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);

	/* Decrement the number of devices on the lcore. */
	lcore_info[vdev->coreid].lcore_ll->device_num--;

	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed "
		"from data core\n", dev->device_fh);

	rte_free(vdev);
}
/**
 * A new device is added to a data core. First the device is added
 * to the main linked list and then allocated to a specific data core.
 */
static int
new_device(struct virtio_net *dev)
{
	struct virtio_net_data_ll *ll_dev;
	int lcore, core_add = 0;
	uint32_t device_num_min = nb_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%"PRIu64") Couldn't allocate memory for vhost dev\n",
			dev->device_fh);
		return -1;
	}
	vdev->dev = dev;
	dev->priv = vdev;

	/* Add device to main ll */
	ll_dev = get_data_ll_free_entry(&ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in"
			" linked list. Device limit of %d devices per core"
			" has been reached\n", dev->device_fh, nb_devices);
		if (vdev->regions_hpa)
			rte_free(vdev->regions_hpa);
		rte_free(vdev);
		return -1;
	}
	ll_dev->vdev = vdev;
	add_data_ll_entry(&ll_root_used, ll_dev);
	vdev->rx_q = dev->device_fh;

	/* reset ready flag */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;
	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
			device_num_min = lcore_info[lcore].lcore_ll->device_num;
			core_add = lcore;
		}
	}

	/* Add device to lcore ll */
	ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
	if (ll_dev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%"PRIu64") Failed to add device to data core\n",
			dev->device_fh);
		vdev->ready = DEVICE_SAFE_REMOVE;
		destroy_device(dev);
		rte_free(vdev->regions_hpa);
		rte_free(vdev);
		return -1;
	}
	ll_dev->vdev = vdev;
	vdev->coreid = core_add;

	add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
		ll_dev);

	/* Initialize device stats */
	memset(&dev_statistics[dev->device_fh], 0,
		sizeof(struct device_statistics));
	/* Disable notifications. */
	rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
	lcore_info[vdev->coreid].lcore_ll->device_num++;
	dev->flags |= VIRTIO_DEV_RUNNING;

	RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n",
		dev->device_fh, vdev->coreid);

	return 0;
}
/**
 * These callbacks allow devices to be added to the data core when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
};
/**
 * This is a thread that wakes up after a period to print stats if the user
 * has enabled them.
 */
static void
print_stats(void)
{
	struct virtio_net_data_ll *dev_ll;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	uint32_t device_fh;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s", clr, top_left);

		printf("\nDevice statistics ================================");

		dev_ll = ll_root_used;
		while (dev_ll != NULL) {
			device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
			tx_total = dev_statistics[device_fh].tx_total;
			tx = dev_statistics[device_fh].tx;
			tx_dropped = tx_total - tx;

			rx_total = rte_atomic64_read(
				&dev_statistics[device_fh].rx_total_atomic);
			rx = rte_atomic64_read(
				&dev_statistics[device_fh].rx_atomic);
			rx_dropped = rx_total - rx;

			printf("\nStatistics for device %"PRIu32" ----------"
				"\nTX total: %"PRIu64""
				"\nTX dropped: %"PRIu64""
				"\nTX successful: %"PRIu64""
				"\nRX total: %"PRIu64""
				"\nRX dropped: %"PRIu64""
				"\nRX successful: %"PRIu64"",
				device_fh,
				tx_total,
				tx_dropped,
				tx,
				rx_total,
				rx_dropped,
				rx);

			dev_ll = dev_ll->next;
		}
		printf("\n================================================\n");
	}
}
/**
 * Main function, does initialisation and calls the per-lcore functions. The
 * CUSE device is also registered here to handle the IOCTLs.
 */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool = NULL;
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_nb_ports;
	int ret;
	uint8_t portid;
	uint16_t queue_id;
	static pthread_t tid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;
	/* parse app arguments */
	ret = tep_termination_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;

	/* set the number of switching cores available */
	nb_switching_cores = rte_lcore_count() - 1;

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	/*
	 * Update the global var NB_PORTS and global array PORTS
	 * and get value of var VALID_NB_PORTS according to system ports number
	 */
	valid_nb_ports = check_ports_num(nb_ports);

	if ((valid_nb_ports == 0) || (valid_nb_ports > MAX_SUP_PORTS)) {
		rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", nb_ports,
			MAX_SUP_PORTS);
	}
	/* Create the mbuf pool. */
	mbuf_pool = rte_mempool_create(
			"MBUF_POOL",
			NUM_MBUFS_PER_PORT * valid_nb_ports,
			MBUF_SIZE, MBUF_CACHE_SIZE,
			sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL,
			rte_pktmbuf_init, NULL,
			rte_socket_id(), 0);
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
		vpool_array[queue_id].pool = mbuf_pool;

	/* Set log level. */
	rte_set_log_level(LOG_LEVEL);
	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (overlay_options.port_configure(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
	}

	/* Initialise all linked lists. */
	if (init_data_ll() == -1)
		rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");

	/* Initialize device stats */
	memset(&dev_statistics, 0, sizeof(dev_statistics));

	/* Enable stats if the user option is set. */
	if (enable_stats)
		pthread_create(&tid, NULL, (void *)print_stats, NULL);
	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(switch_worker,
			mbuf_pool, lcore_id);
	}

	rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

	/* Register CUSE device to handle IOCTLs. */
	ret = rte_vhost_driver_register((char *)&dev_basename);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "CUSE device setup failure.\n");

	rte_vhost_driver_callback_register(&virtio_net_device_ops);

	/* Start CUSE session. */
	rte_vhost_driver_session_start();

	return 0;
}