1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/virtio_net.h>
10 #include <linux/virtio_ring.h>
13 #include <sys/eventfd.h>
14 #include <sys/param.h>
17 #include <rte_atomic.h>
18 #include <rte_cycles.h>
19 #include <rte_ethdev.h>
21 #include <rte_string_fns.h>
22 #include <rte_malloc.h>
23 #include <rte_vhost.h>
24 #include <rte_pause.h>
28 #include "vxlan_setup.h"
30 /* the maximum number of external ports supported */
31 #define MAX_SUP_PORTS 1
34 * Calculate the number of buffers needed per port
36 #define NUM_MBUFS_PER_PORT ((MAX_QUEUES * RTE_TEST_RX_DESC_DEFAULT) +\
37 (nb_switching_cores * MAX_PKT_BURST) +\
38 (nb_switching_cores * \
39 RTE_TEST_TX_DESC_DEFAULT) +\
40 (nb_switching_cores * MBUF_CACHE_SIZE))
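/*
 * Note: this macro is expanded where it is used (in main(), when the mbuf
 * pool is created), so the run-time value of nb_switching_cores and the
 * MBUF_CACHE_SIZE definition below are resolved at that point, not here.
 */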
42 #define MBUF_CACHE_SIZE 128
43 #define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
45 #define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
46 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
48 /* Defines how long we wait between retries on RX */
49 #define BURST_RX_WAIT_US 15
51 #define BURST_RX_RETRIES 4 /* Number of retries on RX. */
53 #define JUMBO_FRAME_MAX_SIZE 0x2600
55 /* State of virtio device. */
56 #define DEVICE_MAC_LEARNING 0
58 #define DEVICE_SAFE_REMOVE 2
60 /* Config_core_flag status definitions. */
61 #define REQUEST_DEV_REMOVAL 1
62 #define ACK_DEV_REMOVAL 0
64 /* Configurable number of RX/TX ring descriptors */
65 #define RTE_TEST_RX_DESC_DEFAULT 1024
66 #define RTE_TEST_TX_DESC_DEFAULT 512
68 /* Get first 4 bytes in mbuf headroom. */
69 #define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
70 + sizeof(struct rte_mbuf)))
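/*
 * With no private data area configured, the bytes immediately after
 * struct rte_mbuf are the start of the packet buffer, so this macro
 * yields an lvalue for stashing a 32-bit per-packet tag there,
 * e.g. (illustrative only): MBUF_HEADROOM_UINT32(m) = some_tag;
 * Any such use assumes nothing else claims those four bytes.
 */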
72 #define INVALID_PORT_ID 0xFFFF
74 /* Size of buffers used for snprintfs. */
75 #define MAX_PRINT_BUFF 6072
77 /* Maximum character device basename size. */
78 #define MAX_BASENAME_SZ 20
80 /* Maximum long option length for option parsing. */
81 #define MAX_LONG_OPT_SZ 64
83 /* Used to compare MAC addresses. */
84 #define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
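/*
 * 0xFFFFFFFFFFFF is a 48-bit mask: two 6-byte Ethernet addresses loaded
 * into uint64_t values compare equal when their masked values match,
 * e.g. (illustrative only): (a & MAC_ADDR_CMP) == (b & MAC_ADDR_CMP).
 */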
86 #define CMD_LINE_OPT_NB_DEVICES "nb-devices"
87 #define CMD_LINE_OPT_UDP_PORT "udp-port"
88 #define CMD_LINE_OPT_TX_CHECKSUM "tx-checksum"
89 #define CMD_LINE_OPT_TSO_SEGSZ "tso-segsz"
90 #define CMD_LINE_OPT_FILTER_TYPE "filter-type"
91 #define CMD_LINE_OPT_ENCAP "encap"
92 #define CMD_LINE_OPT_DECAP "decap"
93 #define CMD_LINE_OPT_RX_RETRY "rx-retry"
94 #define CMD_LINE_OPT_RX_RETRY_DELAY "rx-retry-delay"
95 #define CMD_LINE_OPT_RX_RETRY_NUM "rx-retry-num"
96 #define CMD_LINE_OPT_STATS "stats"
97 #define CMD_LINE_OPT_DEV_BASENAME "dev-basename"
99 /* mask of enabled ports */
100 static uint32_t enabled_port_mask;
102 /* Number of switching cores enabled */
103 static uint32_t nb_switching_cores;
105 /* Number of devices/queues to support */
106 uint16_t nb_devices = 2;
108 /* Max ring descriptors: ixgbe, i40e and e1000 all support 4096. */
109 #define MAX_RING_DESC 4096
112 struct rte_mempool *pool;
113 struct rte_ring *ring;
115 } vpool_array[MAX_QUEUES+MAX_QUEUES];
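/*
 * Presumably sized for one entry per RX queue plus one per TX queue
 * (hence MAX_QUEUES + MAX_QUEUES); in this example, main() points the
 * first MAX_QUEUES + 1 entries at the single shared mbuf pool.
 */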
117 /* UDP tunneling port */
118 uint16_t udp_port = 4789;
120 /* enable/disable inner TX checksum */
121 uint8_t tx_checksum = 0;
123 /* TCP segment size */
124 uint16_t tso_segsz = 0;
126 /* enable/disable decapsulation */
127 uint8_t rx_decap = 1;
129 /* enable/disable encapsulation */
130 uint8_t tx_encap = 1;
132 /* RX filter type for tunneling packet */
133 uint8_t filter_idx = 1;
135 /* overlay packet operation */
136 struct ol_switch_ops overlay_options = {
137 .port_configure = vxlan_port_init,
138 .tunnel_setup = vxlan_link,
139 .tunnel_destroy = vxlan_unlink,
140 .tx_handle = vxlan_tx_pkts,
141 .rx_handle = vxlan_rx_pkts,
142 .param_handle = NULL,
146 uint32_t enable_stats = 0;
147 /* Enable retries on RX. */
148 static uint32_t enable_retry = 1;
149 /* Specify timeout (in useconds) between retries on RX. */
150 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
151 /* Specify the number of retries on RX. */
152 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
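/*
 * Worst case, a full RX burst therefore waits up to
 * burst_rx_retry_num * burst_rx_delay_time microseconds
 * (4 * 15 us = 60 us with the defaults) before excess packets are dropped.
 */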
154 /* Character device basename. Can be set by user. */
155 static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
157 static unsigned lcore_ids[RTE_MAX_LCORE];
158 uint16_t ports[RTE_MAX_ETHPORTS];
160 static unsigned nb_ports; /**< The number of ports specified in command line */
162 /* ethernet addresses of ports */
163 struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
165 /* heads for the main used and free linked lists for the data path. */
166 static struct virtio_net_data_ll *ll_root_used;
167 static struct virtio_net_data_ll *ll_root_free;
170 * Array of data core structures containing information on
171 * individual core linked lists.
173 static struct lcore_info lcore_info[RTE_MAX_LCORE];
175 /* Used for queueing bursts of TX packets. */
179 struct rte_mbuf *m_table[MAX_PKT_BURST];
182 /* TX queue for each data core. */
183 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
185 struct device_statistics dev_statistics[MAX_DEVICES];
188 * Set character device basename.
191 us_vhost_parse_basename(const char *q_arg)
193 /* The basename must fit within the fixed-size buffer. */
194 if (strlen(q_arg) >= MAX_BASENAME_SZ)
197 snprintf((char *)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
203 * Parse the portmask provided at run time.
206 parse_portmask(const char *portmask)
211 /* parse hexadecimal string */
212 pm = strtoul(portmask, &end, 16);
213 if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
223 * Parse numeric options at run time.
226 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
231 /* parse unsigned int string */
232 num = strtoul(q_arg, &end, 10);
233 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
236 if (num > max_valid_value)
246 tep_termination_usage(const char *prgname)
248 RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
249 " --udp-port: UDP destination port for VXLAN packet\n"
250 " --nb-devices [1-64]: The number of virtIO devices\n"
251 " --tx-checksum [0|1]: inner Tx checksum offload\n"
252 " --tso-segsz [0-N]: TCP segment size\n"
253 " --decap [0|1]: tunneling packet decapsulation\n"
254 " --encap [0|1]: tunneling packet encapsulation\n"
255 " --filter-type[1-3]: filter type for tunneling packet\n"
256 " 1: Inner MAC and tenant ID\n"
257 " 2: Inner MAC and VLAN, and tenant ID\n"
258 " 3: Outer MAC, Inner MAC and tenant ID\n"
259 " -p PORTMASK: Set mask for ports to be used by application\n"
260 " --rx-retry [0|1]: disable/enable (default) retries on RX."
261 " Retry if the destination queue is full\n"
262 " --rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX."
263 " This only takes effect if RX retries are enabled\n"
264 " --rx-retry-num [0-N]: the number of retries on RX."
265 " This only takes effect if RX retries are enabled\n"
266 " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
267 " --dev-basename: The basename to be used for the character device.\n",
272 * Parse the arguments given in the command line of the application.
275 tep_termination_parse_args(int argc, char **argv)
280 const char *prgname = argv[0];
281 static struct option long_option[] = {
282 {CMD_LINE_OPT_NB_DEVICES, required_argument, NULL, 0},
283 {CMD_LINE_OPT_UDP_PORT, required_argument, NULL, 0},
284 {CMD_LINE_OPT_TX_CHECKSUM, required_argument, NULL, 0},
285 {CMD_LINE_OPT_TSO_SEGSZ, required_argument, NULL, 0},
286 {CMD_LINE_OPT_DECAP, required_argument, NULL, 0},
287 {CMD_LINE_OPT_ENCAP, required_argument, NULL, 0},
288 {CMD_LINE_OPT_FILTER_TYPE, required_argument, NULL, 0},
289 {CMD_LINE_OPT_RX_RETRY, required_argument, NULL, 0},
290 {CMD_LINE_OPT_RX_RETRY_DELAY, required_argument, NULL, 0},
291 {CMD_LINE_OPT_RX_RETRY_NUM, required_argument, NULL, 0},
292 {CMD_LINE_OPT_STATS, required_argument, NULL, 0},
293 {CMD_LINE_OPT_DEV_BASENAME, required_argument, NULL, 0},
297 /* Parse command line */
298 while ((opt = getopt_long(argc, argv, "p:",
299 long_option, &option_index)) != EOF) {
303 enabled_port_mask = parse_portmask(optarg);
304 if (enabled_port_mask == 0) {
305 RTE_LOG(INFO, VHOST_CONFIG,
306 "Invalid portmask\n");
307 tep_termination_usage(prgname);
312 if (!strncmp(long_option[option_index].name,
313 CMD_LINE_OPT_NB_DEVICES,
314 sizeof(CMD_LINE_OPT_NB_DEVICES))) {
315 ret = parse_num_opt(optarg, MAX_DEVICES);
317 RTE_LOG(INFO, VHOST_CONFIG,
318 "Invalid argument for nb-devices [0-%d]\n",
320 tep_termination_usage(prgname);
326 /* Enable/disable retries on RX. */
327 if (!strncmp(long_option[option_index].name,
328 CMD_LINE_OPT_RX_RETRY,
329 sizeof(CMD_LINE_OPT_RX_RETRY))) {
330 ret = parse_num_opt(optarg, 1);
332 RTE_LOG(INFO, VHOST_CONFIG,
333 "Invalid argument for rx-retry [0|1]\n");
334 tep_termination_usage(prgname);
340 if (!strncmp(long_option[option_index].name,
341 CMD_LINE_OPT_TSO_SEGSZ,
342 sizeof(CMD_LINE_OPT_TSO_SEGSZ))) {
343 ret = parse_num_opt(optarg, INT16_MAX);
345 RTE_LOG(INFO, VHOST_CONFIG,
346 "Invalid argument for TCP segment size [0-N]\n");
347 tep_termination_usage(prgname);
353 if (!strncmp(long_option[option_index].name,
354 CMD_LINE_OPT_UDP_PORT,
355 sizeof(CMD_LINE_OPT_UDP_PORT))) {
356 ret = parse_num_opt(optarg, INT16_MAX);
358 RTE_LOG(INFO, VHOST_CONFIG,
359 "Invalid argument for UDP port [0-N]\n");
360 tep_termination_usage(prgname);
366 /* Specify the delay time (in microseconds) between retries on RX. */
367 if (!strncmp(long_option[option_index].name,
368 CMD_LINE_OPT_RX_RETRY_DELAY,
369 sizeof(CMD_LINE_OPT_RX_RETRY_DELAY))) {
370 ret = parse_num_opt(optarg, INT32_MAX);
372 RTE_LOG(INFO, VHOST_CONFIG,
373 "Invalid argument for rx-retry-delay [0-N]\n");
374 tep_termination_usage(prgname);
377 burst_rx_delay_time = ret;
380 /* Specify the retries number on RX. */
381 if (!strncmp(long_option[option_index].name,
382 CMD_LINE_OPT_RX_RETRY_NUM,
383 sizeof(CMD_LINE_OPT_RX_RETRY_NUM))) {
384 ret = parse_num_opt(optarg, INT32_MAX);
386 RTE_LOG(INFO, VHOST_CONFIG,
387 "Invalid argument for rx-retry-num [0-N]\n");
388 tep_termination_usage(prgname);
391 burst_rx_retry_num = ret;
394 if (!strncmp(long_option[option_index].name,
395 CMD_LINE_OPT_TX_CHECKSUM,
396 sizeof(CMD_LINE_OPT_TX_CHECKSUM))) {
397 ret = parse_num_opt(optarg, 1);
399 RTE_LOG(INFO, VHOST_CONFIG,
400 "Invalid argument for tx-checksum [0|1]\n");
401 tep_termination_usage(prgname);
407 if (!strncmp(long_option[option_index].name,
408 CMD_LINE_OPT_FILTER_TYPE,
409 sizeof(CMD_LINE_OPT_FILTER_TYPE))) {
410 ret = parse_num_opt(optarg, 3);
411 if ((ret == -1) || (ret == 0)) {
412 RTE_LOG(INFO, VHOST_CONFIG,
413 "Invalid argument for filter type [1-3]\n");
414 tep_termination_usage(prgname);
417 filter_idx = ret - 1;
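/* The command-line value is 1-based; filter_idx is the zero-based index
 * used when programming the tunnel filter. */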
420 /* Enable/disable decapsulation on RX. */
421 if (!strncmp(long_option[option_index].name,
423 sizeof(CMD_LINE_OPT_DECAP))) {
424 ret = parse_num_opt(optarg, 1);
426 RTE_LOG(INFO, VHOST_CONFIG,
427 "Invalid argument for decap [0|1]\n");
428 tep_termination_usage(prgname);
434 /* Enable/disable encapsulation on TX. */
435 if (!strncmp(long_option[option_index].name,
437 sizeof(CMD_LINE_OPT_ENCAP))) {
438 ret = parse_num_opt(optarg, 1);
440 RTE_LOG(INFO, VHOST_CONFIG,
441 "Invalid argument for encap [0|1]\n");
442 tep_termination_usage(prgname);
448 /* Enable/disable stats. */
449 if (!strncmp(long_option[option_index].name,
451 sizeof(CMD_LINE_OPT_STATS))) {
452 ret = parse_num_opt(optarg, INT32_MAX);
454 RTE_LOG(INFO, VHOST_CONFIG,
455 "Invalid argument for stats [0..N]\n");
456 tep_termination_usage(prgname);
462 /* Set character device basename. */
463 if (!strncmp(long_option[option_index].name,
464 CMD_LINE_OPT_DEV_BASENAME,
465 sizeof(CMD_LINE_OPT_DEV_BASENAME))) {
466 if (us_vhost_parse_basename(optarg) == -1) {
467 RTE_LOG(INFO, VHOST_CONFIG,
468 "Invalid argument for character "
469 "device basename (Max %d characters)\n",
471 tep_termination_usage(prgname);
478 /* Invalid option - print options. */
480 tep_termination_usage(prgname);
485 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
486 if (enabled_port_mask & (1 << i))
487 ports[nb_ports++] = (uint8_t)i;
490 if ((nb_ports == 0) || (nb_ports > MAX_SUP_PORTS)) {
491 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
492 "but only %u port can be enabled\n", nb_ports,
501 * Update the global variable nb_ports and the ports array according to
502 * the number of ports on the system, and return the number of valid ports.
505 check_ports_num(unsigned max_nb_ports)
507 unsigned valid_nb_ports = nb_ports;
510 if (nb_ports > max_nb_ports) {
511 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) "
512 " exceeds total system port number(%u)\n",
513 nb_ports, max_nb_ports);
514 nb_ports = max_nb_ports;
517 for (portid = 0; portid < nb_ports; portid++) {
518 if (ports[portid] >= max_nb_ports) {
519 RTE_LOG(INFO, VHOST_PORT,
520 "\nSpecified port ID(%u) exceeds max "
521 " system port ID(%u)\n",
522 ports[portid], (max_nb_ports - 1));
523 ports[portid] = INVALID_PORT_ID;
527 return valid_nb_ports;
531 * This function routes the TX packet to the correct interface. This may be a local device
532 * or the physical port.
534 static __rte_always_inline void
535 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
537 struct mbuf_table *tx_q;
538 struct rte_mbuf **m_table;
539 unsigned len, ret = 0;
540 const uint16_t lcore_id = rte_lcore_id();
542 RTE_LOG_DP(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
545 /* Add packet to the port tx queue */
546 tx_q = &lcore_tx_queue[lcore_id];
549 tx_q->m_table[len] = m;
552 dev_statistics[vdev->vid].tx_total++;
553 dev_statistics[vdev->vid].tx++;
556 if (unlikely(len == MAX_PKT_BURST)) {
557 m_table = (struct rte_mbuf **)tx_q->m_table;
558 ret = overlay_options.tx_handle(ports[0],
559 (uint16_t)tx_q->txq_id, m_table,
560 (uint16_t)tx_q->len);
562 /* Free any buffers not handled by TX and update the port stats. */
565 if (unlikely(ret < len)) {
567 rte_pktmbuf_free(m_table[ret]);
568 } while (++ret < len);
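/*
 * Packets accumulate in the per-lcore queue until a full MAX_PKT_BURST is
 * reached and flushed here; partially filled queues are flushed by the
 * periodic drain in switch_worker() (roughly every BURST_TX_DRAIN_US).
 */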
579 * This function is called by each data core. It handles all
580 * RX/TX registered with the core. For TX the specific lcore
581 * linked list is used. For RX, MAC addresses are compared
582 * with all devices in the main linked list.
585 switch_worker(__rte_unused void *arg)
587 struct rte_mempool *mbuf_pool = arg;
588 struct vhost_dev *vdev = NULL;
589 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
590 struct virtio_net_data_ll *dev_ll;
591 struct mbuf_table *tx_q;
592 volatile struct lcore_ll_info *lcore_ll;
593 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
594 / US_PER_S * BURST_TX_DRAIN_US;
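/*
 * drain_tsc is BURST_TX_DRAIN_US expressed in TSC cycles: the cycles per
 * microsecond are rounded up so the drain interval is never shorter than
 * intended.
 */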
595 uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
597 const uint16_t lcore_id = rte_lcore_id();
598 const uint16_t num_cores = (uint16_t)rte_lcore_count();
599 uint16_t rx_count = 0;
603 RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
604 lcore_ll = lcore_info[lcore_id].lcore_ll;
607 tx_q = &lcore_tx_queue[lcore_id];
608 for (i = 0; i < num_cores; i++) {
609 if (lcore_ids[i] == lcore_id) {
616 cur_tsc = rte_rdtsc();
618 * TX burst queue drain
620 diff_tsc = cur_tsc - prev_tsc;
621 if (unlikely(diff_tsc > drain_tsc)) {
624 RTE_LOG_DP(DEBUG, VHOST_DATA, "TX queue drained after "
625 "timeout with burst size %u\n",
627 ret = overlay_options.tx_handle(ports[0],
628 (uint16_t)tx_q->txq_id,
629 (struct rte_mbuf **)tx_q->m_table,
630 (uint16_t)tx_q->len);
631 if (unlikely(ret < tx_q->len)) {
633 rte_pktmbuf_free(tx_q->m_table[ret]);
634 } while (++ret < tx_q->len);
644 rte_prefetch0(lcore_ll->ll_root_used);
647 * Inform the configuration core that we have exited
648 * the linked list and that no devices are
649 * in use if requested.
651 if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
652 lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
657 dev_ll = lcore_ll->ll_root_used;
659 while (dev_ll != NULL) {
662 if (unlikely(vdev->remove)) {
663 dev_ll = dev_ll->next;
664 overlay_options.tunnel_destroy(vdev);
665 vdev->ready = DEVICE_SAFE_REMOVE;
668 if (likely(vdev->ready == DEVICE_RX)) {
669 /* Handle guest RX */
670 rx_count = rte_eth_rx_burst(ports[0],
671 vdev->rx_q, pkts_burst, MAX_PKT_BURST);
675 * If retry is enabled and the queue is
676 * full, wait and retry to avoid packet
677 * loss. MAX_PKT_BURST must be smaller
678 * than the virtio queue size.
680 if (enable_retry && unlikely(rx_count >
681 rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))) {
682 for (retry = 0; retry < burst_rx_retry_num;
684 rte_delay_us(burst_rx_delay_time);
685 if (rx_count <= rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))
690 ret_count = overlay_options.rx_handle(vdev->vid, pkts_burst, rx_count);
693 &dev_statistics[vdev->vid].rx_total_atomic,
696 &dev_statistics[vdev->vid].rx_atomic, ret_count);
698 while (likely(rx_count)) {
700 rte_pktmbuf_free(pkts_burst[rx_count]);
706 if (likely(!vdev->remove)) {
708 tx_count = rte_vhost_dequeue_burst(vdev->vid,
709 VIRTIO_TXQ, mbuf_pool,
710 pkts_burst, MAX_PKT_BURST);
711 /* If this is the first received packet we need to learn the MAC */
712 if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
714 (overlay_options.tunnel_setup(vdev, pkts_burst[0]) == -1)) {
716 rte_pktmbuf_free(pkts_burst[--tx_count]);
720 virtio_tx_route(vdev, pkts_burst[--tx_count]);
723 /* move to the next device in the list */
724 dev_ll = dev_ll->next;
732 * Add an entry to a used linked list. A free entry must first be found
733 * in the free linked list using get_data_ll_free_entry().
736 add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
737 struct virtio_net_data_ll *ll_dev)
739 struct virtio_net_data_ll *ll = *ll_root_addr;
741 /* Set next as NULL and use a compiler barrier to avoid reordering. */
743 rte_compiler_barrier();
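/*
 * The entry must be observed with next == NULL before it becomes
 * reachable, since the data cores walk these lists without locks.
 */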
745 /* If ll == NULL then this is the first device. */
747 /* Increment to the tail of the linked list. */
748 while (ll->next != NULL)
753 *ll_root_addr = ll_dev;
758 * Remove an entry from a used linked list. The entry must then be added to
759 * the free linked list using put_data_ll_free_entry().
762 rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
763 struct virtio_net_data_ll *ll_dev,
764 struct virtio_net_data_ll *ll_dev_last)
766 struct virtio_net_data_ll *ll = *ll_root_addr;
768 if (unlikely((ll == NULL) || (ll_dev == NULL)))
772 *ll_root_addr = ll_dev->next;
774 if (likely(ll_dev_last != NULL))
775 ll_dev_last->next = ll_dev->next;
777 RTE_LOG(ERR, VHOST_CONFIG,
778 "Remove entry from ll failed.\n");
782 * Find and return an entry from the free linked list.
784 static struct virtio_net_data_ll *
785 get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
787 struct virtio_net_data_ll *ll_free = *ll_root_addr;
788 struct virtio_net_data_ll *ll_dev;
794 *ll_root_addr = ll_free->next;
800 * Place an entry back on to the free linked list.
803 put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
804 struct virtio_net_data_ll *ll_dev)
806 struct virtio_net_data_ll *ll_free = *ll_root_addr;
811 ll_dev->next = ll_free;
812 *ll_root_addr = ll_dev;
816 * Creates a linked list of a given size.
818 static struct virtio_net_data_ll *
819 alloc_data_ll(uint32_t size)
821 struct virtio_net_data_ll *ll_new;
824 /* Malloc and then chain the linked list. */
825 ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
826 if (ll_new == NULL) {
827 RTE_LOG(ERR, VHOST_CONFIG,
828 "Failed to allocate memory for ll_new.\n");
832 for (i = 0; i < size - 1; i++) {
833 ll_new[i].vdev = NULL;
834 ll_new[i].next = &ll_new[i+1];
836 ll_new[i].next = NULL;
842 * Create the main linked list along with each individual core's
843 * linked list. A used and a free list are created to manage entries.
850 RTE_LCORE_FOREACH_SLAVE(lcore) {
851 lcore_info[lcore].lcore_ll =
852 malloc(sizeof(struct lcore_ll_info));
853 if (lcore_info[lcore].lcore_ll == NULL) {
854 RTE_LOG(ERR, VHOST_CONFIG,
855 "Failed to allocate memory for lcore_ll.\n");
859 lcore_info[lcore].lcore_ll->device_num = 0;
860 lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
861 lcore_info[lcore].lcore_ll->ll_root_used = NULL;
862 if (nb_devices % nb_switching_cores)
863 lcore_info[lcore].lcore_ll->ll_root_free =
864 alloc_data_ll((nb_devices / nb_switching_cores)
867 lcore_info[lcore].lcore_ll->ll_root_free =
868 alloc_data_ll(nb_devices / nb_switching_cores);
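/*
 * When nb_devices does not divide evenly among the switching cores, each
 * per-core free list is sized one entry larger (the branch above) so the
 * remainder devices can still be placed on some core.
 */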
871 /* Allocate devices up to a maximum of MAX_DEVICES. */
872 ll_root_free = alloc_data_ll(MIN((nb_devices), MAX_DEVICES));
878 * Remove a device from the specific data core linked list and
879 * from the main linked list. Synchronization occurs through the use
880 * of the lcore dev_removal_flag.
883 destroy_device(int vid)
885 struct virtio_net_data_ll *ll_lcore_dev_cur;
886 struct virtio_net_data_ll *ll_main_dev_cur;
887 struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
888 struct virtio_net_data_ll *ll_main_dev_last = NULL;
889 struct vhost_dev *vdev = NULL;
892 ll_main_dev_cur = ll_root_used;
893 while (ll_main_dev_cur != NULL) {
894 if (ll_main_dev_cur->vdev->vid == vid) {
895 vdev = ll_main_dev_cur->vdev;
902 /* set the remove flag. */
904 while (vdev->ready != DEVICE_SAFE_REMOVE)
907 /* Search for entry to be removed from lcore ll */
908 ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
909 while (ll_lcore_dev_cur != NULL) {
910 if (ll_lcore_dev_cur->vdev == vdev) {
913 ll_lcore_dev_last = ll_lcore_dev_cur;
914 ll_lcore_dev_cur = ll_lcore_dev_cur->next;
918 if (ll_lcore_dev_cur == NULL) {
919 RTE_LOG(ERR, VHOST_CONFIG,
920 "(%d) Failed to find the dev to be destroyed.\n", vid);
924 /* Search for entry to be removed from main ll */
925 ll_main_dev_cur = ll_root_used;
926 ll_main_dev_last = NULL;
927 while (ll_main_dev_cur != NULL) {
928 if (ll_main_dev_cur->vdev == vdev) {
931 ll_main_dev_last = ll_main_dev_cur;
932 ll_main_dev_cur = ll_main_dev_cur->next;
936 /* Remove entries from the lcore and main ll. */
937 rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
938 ll_lcore_dev_cur, ll_lcore_dev_last);
939 rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
941 /* Set the dev_removal_flag on each lcore. */
942 RTE_LCORE_FOREACH_SLAVE(lcore) {
943 lcore_info[lcore].lcore_ll->dev_removal_flag =
948 * Once each core has set the dev_removal_flag to
949 * ACK_DEV_REMOVAL we can be sure that they can no longer access
950 * the device removed from the linked lists and that the devices
951 * are no longer in use.
953 RTE_LCORE_FOREACH_SLAVE(lcore) {
954 while (lcore_info[lcore].lcore_ll->dev_removal_flag
959 /* Add the entries back to the lcore and main free ll.*/
960 put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free,
962 put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);
964 /* Decrement number of device on the lcore. */
965 lcore_info[vdev->coreid].lcore_ll->device_num--;
967 RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been removed "
968 "from data core\n", vid);
975 * A new device is added to a data core. First the device is added
976 * to the main linked list and then allocated to a specific data core.
981 struct virtio_net_data_ll *ll_dev;
982 int lcore, core_add = 0;
983 uint32_t device_num_min = nb_devices;
984 struct vhost_dev *vdev;
986 vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
988 RTE_LOG(INFO, VHOST_DATA,
989 "(%d) Couldn't allocate memory for vhost dev\n", vid);
993 /* Add device to main ll */
994 ll_dev = get_data_ll_free_entry(&ll_root_free);
995 if (ll_dev == NULL) {
996 RTE_LOG(INFO, VHOST_DATA, "(%d) No free entry found in"
997 " linked list. Device limit of %d devices per core"
998 " has been reached\n", vid, nb_devices);
999 if (vdev->regions_hpa)
1000 rte_free(vdev->regions_hpa);
1004 ll_dev->vdev = vdev;
1005 add_data_ll_entry(&ll_root_used, ll_dev);
1008 /* reset ready flag */
1009 vdev->ready = DEVICE_MAC_LEARNING;
1012 /* Find a suitable lcore to add the device. */
1013 RTE_LCORE_FOREACH_SLAVE(lcore) {
1014 if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
1015 device_num_min = lcore_info[lcore].lcore_ll->device_num;
1019 /* Add device to lcore ll */
1020 ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
1021 if (ll_dev == NULL) {
1022 RTE_LOG(INFO, VHOST_DATA,
1023 "(%d) Failed to add device to data core\n",
1025 vdev->ready = DEVICE_SAFE_REMOVE;
1026 destroy_device(vid);
1027 rte_free(vdev->regions_hpa);
1031 ll_dev->vdev = vdev;
1032 vdev->coreid = core_add;
1034 add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used,
1037 /* Initialize device stats */
1038 memset(&dev_statistics[vid], 0,
1039 sizeof(struct device_statistics));
1041 /* Disable notifications. */
1042 rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1043 rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
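/*
 * The data cores poll the virtio rings continuously, so guest-to-host
 * kick notifications are unnecessary overhead and are disabled here.
 */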
1044 lcore_info[vdev->coreid].lcore_ll->device_num++;
1046 RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been added to data core %d\n",
1053 * These callbacks allow devices to be added to the data core when
1054 * configuration has fully completed.
1056 static const struct vhost_device_ops virtio_net_device_ops = {
1057 .new_device = new_device,
1058 .destroy_device = destroy_device,
1062 * This is a thread that wakes up periodically to print statistics if the user has enabled them.
1068 struct virtio_net_data_ll *dev_ll;
1069 uint64_t tx_dropped, rx_dropped;
1070 uint64_t tx, tx_total, rx, rx_total, rx_ip_csum, rx_l4_csum;
1072 const char clr[] = { 27, '[', '2', 'J', '\0' };
1073 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
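/* ANSI escape sequences: clear the screen and move the cursor to 1,1. */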
1076 sleep(enable_stats);
1078 /* Clear screen and move to top left */
1079 printf("%s%s", clr, top_left);
1081 printf("\nDevice statistics ================================");
1083 dev_ll = ll_root_used;
1084 while (dev_ll != NULL) {
1085 vid = dev_ll->vdev->vid;
1086 tx_total = dev_statistics[vid].tx_total;
1087 tx = dev_statistics[vid].tx;
1088 tx_dropped = tx_total - tx;
1090 rx_total = rte_atomic64_read(
1091 &dev_statistics[vid].rx_total_atomic);
1092 rx = rte_atomic64_read(
1093 &dev_statistics[vid].rx_atomic);
1094 rx_dropped = rx_total - rx;
1095 rx_ip_csum = rte_atomic64_read(
1096 &dev_statistics[vid].rx_bad_ip_csum);
1097 rx_l4_csum = rte_atomic64_read(
1098 &dev_statistics[vid].rx_bad_l4_csum);
1100 printf("\nStatistics for device %d ----------"
1101 "\nTX total: %"PRIu64""
1102 "\nTX dropped: %"PRIu64""
1103 "\nTX successful: %"PRIu64""
1104 "\nRX total: %"PRIu64""
1105 "\nRX bad IP csum: %"PRIu64""
1106 "\nRX bad L4 csum: %"PRIu64""
1107 "\nRX dropped: %"PRIu64""
1108 "\nRX successful: %"PRIu64"",
1119 dev_ll = dev_ll->next;
1121 printf("\n================================================\n");
1126 * Main function, does initialisation and calls the per-lcore functions.
1129 main(int argc, char *argv[])
1131 struct rte_mempool *mbuf_pool = NULL;
1132 unsigned lcore_id, core_id = 0;
1133 unsigned nb_ports, valid_nb_ports;
1137 static pthread_t tid;
1138 char thread_name[RTE_MAX_THREAD_NAME_LEN];
1141 ret = rte_eal_init(argc, argv);
1143 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1147 /* parse app arguments */
1148 ret = tep_termination_parse_args(argc, argv);
1150 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1152 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
1153 if (rte_lcore_is_enabled(lcore_id))
1154 lcore_ids[core_id++] = lcore_id;
1156 /* set the number of switching cores available */
1157 nb_switching_cores = rte_lcore_count()-1;
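/* One lcore is reserved as the master (configuration) core; the rest run
 * switch_worker(). */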
1159 /* Get the number of physical ports. */
1160 nb_ports = rte_eth_dev_count();
1163 * Update the global variable nb_ports and the global ports array, and
1164 * compute valid_nb_ports from the number of ports on the system.
1166 valid_nb_ports = check_ports_num(nb_ports);
1168 if ((valid_nb_ports == 0) || (valid_nb_ports > MAX_SUP_PORTS)) {
1169 rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
1170 "but only %u port can be enabled\n", nb_ports,
1173 /* Create the mbuf pool. */
1174 mbuf_pool = rte_pktmbuf_pool_create(
1176 NUM_MBUFS_PER_PORT * valid_nb_ports,
1181 if (mbuf_pool == NULL)
1182 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1184 for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
1185 vpool_array[queue_id].pool = mbuf_pool;
1187 /* initialize all ports */
1188 for (portid = 0; portid < nb_ports; portid++) {
1189 /* skip ports that are not enabled */
1190 if ((enabled_port_mask & (1 << portid)) == 0) {
1191 RTE_LOG(INFO, VHOST_PORT,
1192 "Skipping disabled port %d\n", portid);
1195 if (overlay_options.port_configure(portid, mbuf_pool) != 0)
1196 rte_exit(EXIT_FAILURE,
1197 "Cannot initialize network ports\n");
1200 /* Initialise all linked lists. */
1201 if (init_data_ll() == -1)
1202 rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");
1204 /* Initialize device stats */
1205 memset(&dev_statistics, 0, sizeof(dev_statistics));
1207 /* Enable stats if the user option is set. */
1209 ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
1211 rte_exit(EXIT_FAILURE, "Cannot create print-stats thread\n");
1212 snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
1213 ret = rte_thread_setname(tid, thread_name);
1215 RTE_LOG(DEBUG, VHOST_CONFIG, "Cannot set print-stats name\n");
1218 /* Launch all data cores. */
1219 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1220 rte_eal_remote_launch(switch_worker,
1221 mbuf_pool, lcore_id);
1224 ret = rte_vhost_driver_register((char *)&dev_basename, 0);
1226 rte_exit(EXIT_FAILURE, "failed to register vhost driver.\n");
1228 rte_vhost_driver_disable_features(dev_basename,
1229 1ULL << VIRTIO_NET_F_MRG_RXBUF);
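/*
 * Presumably because this example's RX path does not handle mergeable RX
 * buffers, the VIRTIO_NET_F_MRG_RXBUF feature is masked out before the
 * vhost driver is started.
 */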
1231 ret = rte_vhost_driver_callback_register(dev_basename,
1232 &virtio_net_device_ops);
1234 rte_exit(EXIT_FAILURE,
1235 "failed to register vhost driver callbacks.\n");
1238 if (rte_vhost_driver_start(dev_basename) < 0) {
1239 rte_exit(EXIT_FAILURE,
1240 "failed to start vhost driver.\n");
1243 RTE_LCORE_FOREACH_SLAVE(lcore_id)
1244 rte_eal_wait_lcore(lcore_id);