[dpdk.git] / examples / vhost / main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <arpa/inet.h>
6 #include <getopt.h>
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/virtio_net.h>
10 #include <linux/virtio_ring.h>
11 #include <signal.h>
12 #include <stdint.h>
13 #include <sys/eventfd.h>
14 #include <sys/param.h>
15 #include <unistd.h>
16
17 #include <rte_atomic.h>
18 #include <rte_cycles.h>
19 #include <rte_ethdev.h>
20 #include <rte_log.h>
21 #include <rte_string_fns.h>
22 #include <rte_malloc.h>
23 #include <rte_vhost.h>
24 #include <rte_ip.h>
25 #include <rte_tcp.h>
26 #include <rte_pause.h>
27
28 #include "main.h"
29
30 #ifndef MAX_QUEUES
31 #define MAX_QUEUES 128
32 #endif
33
34 /* the maximum number of external ports supported */
35 #define MAX_SUP_PORTS 1
36
37 #define MBUF_CACHE_SIZE 128
38 #define MBUF_DATA_SIZE  RTE_MBUF_DEFAULT_BUF_SIZE
39
40 #define BURST_TX_DRAIN_US 100   /* TX drain every ~100us */
41
42 #define BURST_RX_WAIT_US 15     /* Defines how long we wait between retries on RX */
43 #define BURST_RX_RETRIES 4              /* Number of retries on RX. */
44
45 #define JUMBO_FRAME_MAX_SIZE    0x2600
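/* 0x2600 = 9728 bytes: large enough for a 9000-byte jumbo frame plus
 * Ethernet, VLAN and virtio-net headers. Used as max_rx_pkt_len when
 * mergeable buffers are enabled (see the --mergeable handling below). */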
46
47 /* State of virtio device. */
48 #define DEVICE_MAC_LEARNING 0
49 #define DEVICE_RX                       1
50 #define DEVICE_SAFE_REMOVE      2
51
52 /* Configurable number of RX/TX ring descriptors */
53 #define RTE_TEST_RX_DESC_DEFAULT 1024
54 #define RTE_TEST_TX_DESC_DEFAULT 512
55
56 #define INVALID_PORT_ID 0xFF
57
58 /* Maximum long option length for option parsing. */
59 #define MAX_LONG_OPT_SZ 64
60
61 /* mask of enabled ports */
62 static uint32_t enabled_port_mask = 0;
63
64 /* Promiscuous mode */
65 static uint32_t promiscuous;
66
67 /* Number of devices/queues to support. */
68 static uint32_t num_queues = 0;
69 static uint32_t num_devices;
70
71 static struct rte_mempool *mbuf_pool;
72 static int mergeable;
73
74 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
75 typedef enum {
76         VM2VM_DISABLED = 0,
77         VM2VM_SOFTWARE = 1,
78         VM2VM_HARDWARE = 2,
79         VM2VM_LAST
80 } vm2vm_type;
81 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
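/* VM2VM_SOFTWARE switches packets between local devices by MAC lookup in
 * software; VM2VM_HARDWARE relies on the NIC's VMDQ loopback instead (see
 * the enable_loop_back setup in main()). */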
82
83 /* Enable stats. */
84 static uint32_t enable_stats = 0;
85 /* Enable retries on RX. */
86 static uint32_t enable_retry = 1;
87
88 /* Disable TX checksum offload */
89 static uint32_t enable_tx_csum;
90
91 /* Disable TSO offload */
92 static uint32_t enable_tso;
93
94 static int client_mode;
95 static int dequeue_zero_copy;
96
97 static int builtin_net_driver;
98
99 /* Specify the timeout (in microseconds) between retries on RX. */
100 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
101 /* Specify the number of retries on RX. */
102 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
103
104 /* Socket file paths. Can be set by user */
105 static char *socket_files;
106 static int nb_sockets;
107
108 /* Empty VMDQ configuration structure. Filled in programmatically. */
109 static struct rte_eth_conf vmdq_conf_default = {
110         .rxmode = {
111                 .mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
112                 .split_hdr_size = 0,
113                 /*
114                  * VLAN strip is necessary for 1G NICs such as the I350;
115                  * without it, IPv4 forwarding in the guest cannot forward
116                  * packets from one virtio device to another.
117                  */
118                 .offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
119         },
120
121         .txmode = {
122                 .mq_mode = ETH_MQ_TX_NONE,
123                 .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
124                              DEV_TX_OFFLOAD_TCP_CKSUM |
125                              DEV_TX_OFFLOAD_VLAN_INSERT |
126                              DEV_TX_OFFLOAD_MULTI_SEGS |
127                              DEV_TX_OFFLOAD_TCP_TSO),
128         },
129         .rx_adv_conf = {
130                 /*
131                  * should be overridden separately in code with
132                  * appropriate values
133                  */
134                 .vmdq_rx_conf = {
135                         .nb_queue_pools = ETH_8_POOLS,
136                         .enable_default_pool = 0,
137                         .default_pool = 0,
138                         .nb_pool_maps = 0,
139                         .pool_map = {{0, 0},},
140                 },
141         },
142 };
143
144
145 static unsigned lcore_ids[RTE_MAX_LCORE];
146 static uint16_t ports[RTE_MAX_ETHPORTS];
147 static unsigned num_ports = 0; /**< The number of ports specified on the command line */
148 static uint16_t num_pf_queues, num_vmdq_queues;
149 static uint16_t vmdq_pool_base, vmdq_queue_base;
150 static uint16_t queues_per_pool;
151
152 const uint16_t vlan_tags[] = {
153         1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
154         1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
155         1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
156         1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
157         1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
158         1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
159         1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
160         1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
161 };
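/* One VLAN tag per VMDQ pool: pool i is mapped to vlan_tags[i] in
 * get_eth_conf(), and a vhost device with id vid uses vlan_tags[vid]. */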
162
163 /* ethernet addresses of ports */
164 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
165
166 static struct vhost_dev_tailq_list vhost_dev_list =
167         TAILQ_HEAD_INITIALIZER(vhost_dev_list);
168
169 static struct lcore_info lcore_info[RTE_MAX_LCORE];
170
171 /* Used for queueing bursts of TX packets. */
172 struct mbuf_table {
173         unsigned len;
174         unsigned txq_id;
175         struct rte_mbuf *m_table[MAX_PKT_BURST];
176 };
177
178 /* TX queue for each data core. */
179 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
180
181 #define MBUF_TABLE_DRAIN_TSC    ((rte_get_tsc_hz() + US_PER_S - 1) \
182                                  / US_PER_S * BURST_TX_DRAIN_US)
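/* Cycles-per-microsecond is rounded up before scaling, so the drain
 * interval is never shorter than BURST_TX_DRAIN_US. */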
183 #define VLAN_HLEN       4
184
185 /*
186  * Builds up the correct configuration for VMDQ VLAN pool map
187  * according to the pool & queue limits.
188  */
189 static inline int
190 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
191 {
192         struct rte_eth_vmdq_rx_conf conf;
193         struct rte_eth_vmdq_rx_conf *def_conf =
194                 &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
195         unsigned i;
196
197         memset(&conf, 0, sizeof(conf));
198         conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
199         conf.nb_pool_maps = num_devices;
200         conf.enable_loop_back = def_conf->enable_loop_back;
201         conf.rx_mode = def_conf->rx_mode;
202
203         for (i = 0; i < conf.nb_pool_maps; i++) {
204                 conf.pool_map[i].vlan_id = vlan_tags[i];
205                 conf.pool_map[i].pools = (1UL << i);
206         }
207
208         rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf));
209         rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
210                    sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf));
211         return 0;
212 }
213
214 /*
215  * Initialises a given port using global settings and with the rx buffers
216  * coming from the global mbuf_pool.
217  */
218 static inline int
219 port_init(uint16_t port)
220 {
221         struct rte_eth_dev_info dev_info;
222         struct rte_eth_conf port_conf;
223         struct rte_eth_rxconf *rxconf;
224         struct rte_eth_txconf *txconf;
225         int16_t rx_rings, tx_rings;
226         uint16_t rx_ring_size, tx_ring_size;
227         int retval;
228         uint16_t q;
229
230         /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
231         rte_eth_dev_info_get(port, &dev_info);
232
233         rxconf = &dev_info.default_rxconf;
234         txconf = &dev_info.default_txconf;
235         rxconf->rx_drop_en = 1;
236
237         /* Configure the number of supported virtio devices based on VMDQ limits. */
238         num_devices = dev_info.max_vmdq_pools;
239
240         rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
241         tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
242
243         /*
244          * When dequeue zero copy is enabled, guest Tx used vring will be
245          * updated only when corresponding mbuf is freed. Thus, the nb_tx_desc
246          * (tx_ring_size here) must be small enough so that the driver will
247          * hit the free threshold easily and free mbufs timely. Otherwise,
248          * guest Tx vring would be starved.
249          */
250         if (dequeue_zero_copy)
251                 tx_ring_size = 64;
252
253         tx_rings = (uint16_t)rte_lcore_count();
254
255         /* Get port configuration. */
256         retval = get_eth_conf(&port_conf, num_devices);
257         if (retval < 0)
258                 return retval;
259         /* NIC queues are divided into pf queues and vmdq queues.  */
260         num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
261         queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
262         num_vmdq_queues = num_devices * queues_per_pool;
263         num_queues = num_pf_queues + num_vmdq_queues;
264         vmdq_queue_base = dev_info.vmdq_queue_base;
265         vmdq_pool_base  = dev_info.vmdq_pool_base;
266         printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
267                 num_pf_queues, num_devices, queues_per_pool);
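        /* Illustrative split (hypothetical NIC): 128 Rx queues in total with
         * 64 VMDQ pools of 2 queues each leaves 0 PF queues; the VMDQ
         * queue/pool base offsets come from the driver via dev_info. */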
268
269         if (!rte_eth_dev_is_valid_port(port))
270                 return -1;
271
272         rx_rings = (uint16_t)dev_info.max_rx_queues;
273         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
274                 port_conf.txmode.offloads |=
275                         DEV_TX_OFFLOAD_MBUF_FAST_FREE;
276         /* Configure ethernet device. */
277         retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
278         if (retval != 0) {
279                 RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
280                         port, strerror(-retval));
281                 return retval;
282         }
283
284         retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
285                 &tx_ring_size);
286         if (retval != 0) {
287                 RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
288                         "for port %u: %s.\n", port, strerror(-retval));
289                 return retval;
290         }
291         if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
292                 RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
293                         "for Rx queues on port %u.\n", port);
294                 return -1;
295         }
296
297         /* Setup the queues. */
298         rxconf->offloads = port_conf.rxmode.offloads;
299         for (q = 0; q < rx_rings; q++) {
300                 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
301                                                 rte_eth_dev_socket_id(port),
302                                                 rxconf,
303                                                 mbuf_pool);
304                 if (retval < 0) {
305                         RTE_LOG(ERR, VHOST_PORT,
306                                 "Failed to setup rx queue %u of port %u: %s.\n",
307                                 q, port, strerror(-retval));
308                         return retval;
309                 }
310         }
311         txconf->offloads = port_conf.txmode.offloads;
312         for (q = 0; q < tx_rings; q++) {
313                 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
314                                                 rte_eth_dev_socket_id(port),
315                                                 txconf);
316                 if (retval < 0) {
317                         RTE_LOG(ERR, VHOST_PORT,
318                                 "Failed to setup tx queue %u of port %u: %s.\n",
319                                 q, port, strerror(-retval));
320                         return retval;
321                 }
322         }
323
324         /* Start the device. */
325         retval = rte_eth_dev_start(port);
326         if (retval < 0) {
327                 RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
328                         port, strerror(-retval));
329                 return retval;
330         }
331
332         if (promiscuous)
333                 rte_eth_promiscuous_enable(port);
334
335         rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
336         RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
337         RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
338                         " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
339                         port,
340                         vmdq_ports_eth_addr[port].addr_bytes[0],
341                         vmdq_ports_eth_addr[port].addr_bytes[1],
342                         vmdq_ports_eth_addr[port].addr_bytes[2],
343                         vmdq_ports_eth_addr[port].addr_bytes[3],
344                         vmdq_ports_eth_addr[port].addr_bytes[4],
345                         vmdq_ports_eth_addr[port].addr_bytes[5]);
346
347         return 0;
348 }
349
350 /*
351  * Set socket file path.
352  */
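/* Paths are stored in a flat array: slot i occupies PATH_MAX bytes at
 * socket_files + i * PATH_MAX (see unregister_drivers() and main()). */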
353 static int
354 us_vhost_parse_socket_path(const char *q_arg)
355 {
356         char *old;
357
358         /* Reject paths too long to fit in a PATH_MAX-sized slot. */
359         if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
360                 return -1;
361
362         old = socket_files;
363         socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
364         if (socket_files == NULL) {
365                 free(old);
366                 return -1;
367         }
368
369         strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
370         nb_sockets++;
371
372         return 0;
373 }
374
375 /*
376  * Parse the portmask provided at run time; returns 0 if the mask is invalid.
377  */
378 static int
379 parse_portmask(const char *portmask)
380 {
381         char *end = NULL;
382         unsigned long pm;
383
384         errno = 0;
385
386         /* parse hexadecimal string */
387         pm = strtoul(portmask, &end, 16);
388         if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
389                 return 0;
390
391         if (pm == 0)
392                 return 0;
393
394         return pm;
395
396 }
397
398 /*
399  * Parse num options at run time.
400  */
401 static int
402 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
403 {
404         char *end = NULL;
405         unsigned long num;
406
407         errno = 0;
408
409         /* parse unsigned int string */
410         num = strtoul(q_arg, &end, 10);
411         if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
412                 return -1;
413
414         if (num > max_valid_value)
415                 return -1;
416
417         return num;
418
419 }
420
421 /*
422  * Display usage
423  */
424 static void
425 us_vhost_usage(const char *prgname)
426 {
427         RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
428         "               --vm2vm [0|1|2]\n"
429         "               --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
430         "               --socket-file <path>\n"
431         "               --nb-devices ND\n"
432         "               -p PORTMASK: Set mask for ports to be used by application\n"
433         "               --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
434         "               --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
435         "               --rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Effective only if rx retries are enabled\n"
436         "               --rx-retry-num [0-N]: the number of retries on rx. Effective only if rx retries are enabled\n"
437         "               --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
438         "               --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
439         "               --socket-file: The path of the socket file.\n"
440         "               --tx-csum [0|1] disable/enable TX checksum offload.\n"
441         "               --tso [0|1] disable/enable TCP segmentation offload.\n"
442         "               --client register a vhost-user socket as client mode.\n"
443         "               --dequeue-zero-copy enables dequeue zero copy\n",
444                prgname);
445 }
446
447 /*
448  * Parse the arguments given in the command line of the application.
449  */
450 static int
451 us_vhost_parse_args(int argc, char **argv)
452 {
453         int opt, ret;
454         int option_index;
455         unsigned i;
456         const char *prgname = argv[0];
457         static struct option long_option[] = {
458                 {"vm2vm", required_argument, NULL, 0},
459                 {"rx-retry", required_argument, NULL, 0},
460                 {"rx-retry-delay", required_argument, NULL, 0},
461                 {"rx-retry-num", required_argument, NULL, 0},
462                 {"mergeable", required_argument, NULL, 0},
463                 {"stats", required_argument, NULL, 0},
464                 {"socket-file", required_argument, NULL, 0},
465                 {"tx-csum", required_argument, NULL, 0},
466                 {"tso", required_argument, NULL, 0},
467                 {"client", no_argument, &client_mode, 1},
468                 {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
469                 {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
470                 {NULL, 0, 0, 0},
471         };
472
473         /* Parse command line */
474         while ((opt = getopt_long(argc, argv, "p:P",
475                         long_option, &option_index)) != EOF) {
476                 switch (opt) {
477                 /* Portmask */
478                 case 'p':
479                         enabled_port_mask = parse_portmask(optarg);
480                         if (enabled_port_mask == 0) {
481                                 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
482                                 us_vhost_usage(prgname);
483                                 return -1;
484                         }
485                         break;
486
487                 case 'P':
488                         promiscuous = 1;
489                         vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
490                                 ETH_VMDQ_ACCEPT_BROADCAST |
491                                 ETH_VMDQ_ACCEPT_MULTICAST;
492
493                         break;
494
495                 case 0:
496                         /* Enable/disable vm2vm comms. */
497                         if (!strncmp(long_option[option_index].name, "vm2vm",
498                                 MAX_LONG_OPT_SZ)) {
499                                 ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
500                                 if (ret == -1) {
501                                         RTE_LOG(INFO, VHOST_CONFIG,
502                                                 "Invalid argument for "
503                                                 "vm2vm [0|1|2]\n");
504                                         us_vhost_usage(prgname);
505                                         return -1;
506                                 } else {
507                                         vm2vm_mode = (vm2vm_type)ret;
508                                 }
509                         }
510
511                         /* Enable/disable retries on RX. */
512                         if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
513                                 ret = parse_num_opt(optarg, 1);
514                                 if (ret == -1) {
515                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
516                                         us_vhost_usage(prgname);
517                                         return -1;
518                                 } else {
519                                         enable_retry = ret;
520                                 }
521                         }
522
523                         /* Enable/disable TX checksum offload. */
524                         if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
525                                 ret = parse_num_opt(optarg, 1);
526                                 if (ret == -1) {
527                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
528                                         us_vhost_usage(prgname);
529                                         return -1;
530                                 } else
531                                         enable_tx_csum = ret;
532                         }
533
534                         /* Enable/disable TSO offload. */
535                         if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
536                                 ret = parse_num_opt(optarg, 1);
537                                 if (ret == -1) {
538                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
539                                         us_vhost_usage(prgname);
540                                         return -1;
541                                 } else
542                                         enable_tso = ret;
543                         }
544
545                         /* Specify the retries delay time (in useconds) on RX. */
546                         if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
547                                 ret = parse_num_opt(optarg, INT32_MAX);
548                                 if (ret == -1) {
549                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
550                                         us_vhost_usage(prgname);
551                                         return -1;
552                                 } else {
553                                         burst_rx_delay_time = ret;
554                                 }
555                         }
556
557                         /* Specify the retries number on RX. */
558                         if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
559                                 ret = parse_num_opt(optarg, INT32_MAX);
560                                 if (ret == -1) {
561                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
562                                         us_vhost_usage(prgname);
563                                         return -1;
564                                 } else {
565                                         burst_rx_retry_num = ret;
566                                 }
567                         }
568
569                         /* Enable/disable RX mergeable buffers. */
570                         if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
571                                 ret = parse_num_opt(optarg, 1);
572                                 if (ret == -1) {
573                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
574                                         us_vhost_usage(prgname);
575                                         return -1;
576                                 } else {
577                                         mergeable = !!ret;
578                                         if (ret) {
579                                                 vmdq_conf_default.rxmode.offloads |=
580                                                         DEV_RX_OFFLOAD_JUMBO_FRAME;
581                                                 vmdq_conf_default.rxmode.max_rx_pkt_len
582                                                         = JUMBO_FRAME_MAX_SIZE;
583                                         }
584                                 }
585                         }
586
587                         /* Enable/disable stats. */
588                         if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
589                                 ret = parse_num_opt(optarg, INT32_MAX);
590                                 if (ret == -1) {
591                                         RTE_LOG(INFO, VHOST_CONFIG,
592                                                 "Invalid argument for stats [0..N]\n");
593                                         us_vhost_usage(prgname);
594                                         return -1;
595                                 } else {
596                                         enable_stats = ret;
597                                 }
598                         }
599
600                         /* Set socket file path. */
601                         if (!strncmp(long_option[option_index].name,
602                                                 "socket-file", MAX_LONG_OPT_SZ)) {
603                                 if (us_vhost_parse_socket_path(optarg) == -1) {
604                                         RTE_LOG(INFO, VHOST_CONFIG,
605                                         "Invalid argument for socket name (Max %d characters)\n",
606                                         PATH_MAX);
607                                         us_vhost_usage(prgname);
608                                         return -1;
609                                 }
610                         }
611
612                         break;
613
614                         /* Invalid option - print options. */
615                 default:
616                         us_vhost_usage(prgname);
617                         return -1;
618                 }
619         }
620
621         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
622                 if (enabled_port_mask & (1 << i))
623                         ports[num_ports++] = i;
624         }
625
626         if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
627                 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
628                         "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
629                 return -1;
630         }
631
632         return 0;
633 }
634
635 /*
636  * Update the global var NUM_PORTS and array PORTS according to the number
637  * of system ports, and return the number of valid ports.
638  */
639 static unsigned check_ports_num(unsigned nb_ports)
640 {
641         unsigned valid_num_ports = num_ports;
642         unsigned portid;
643
644         if (num_ports > nb_ports) {
645                 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number (%u) exceeds total system port number (%u)\n",
646                         num_ports, nb_ports);
647                 num_ports = nb_ports;
648         }
649
650         for (portid = 0; portid < num_ports; portid++) {
651                 if (!rte_eth_dev_is_valid_port(ports[portid])) {
652                         RTE_LOG(INFO, VHOST_PORT,
653                                 "\nSpecified port ID(%u) is not valid\n",
654                                 ports[portid]);
655                         ports[portid] = INVALID_PORT_ID;
656                         valid_num_ports--;
657                 }
658         }
659         return valid_num_ports;
660 }
661
662 static __rte_always_inline struct vhost_dev *
663 find_vhost_dev(struct rte_ether_addr *mac)
664 {
665         struct vhost_dev *vdev;
666
667         TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
668                 if (vdev->ready == DEVICE_RX &&
669                     rte_is_same_ether_addr(mac, &vdev->mac_address))
670                         return vdev;
671         }
672
673         return NULL;
674 }
675
676 /*
677  * This function learns the MAC address of the device and registers it,
678  * along with a VLAN tag, with a VMDQ pool.
679  */
680 static int
681 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
682 {
683         struct rte_ether_hdr *pkt_hdr;
684         int i, ret;
685
686         /* Learn MAC address of guest device from packet */
687         pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
688
689         if (find_vhost_dev(&pkt_hdr->s_addr)) {
690                 RTE_LOG(ERR, VHOST_DATA,
691                         "(%d) device is using a registered MAC!\n",
692                         vdev->vid);
693                 return -1;
694         }
695
696         for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
697                 vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
698
699         /* vlan_tag currently uses the device_id. */
700         vdev->vlan_tag = vlan_tags[vdev->vid];
701
702         /* Print out VMDQ registration info. */
703         RTE_LOG(INFO, VHOST_DATA,
704                 "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
705                 vdev->vid,
706                 vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
707                 vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
708                 vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
709                 vdev->vlan_tag);
710
711         /* Register the MAC address. */
712         ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
713                                 (uint32_t)vdev->vid + vmdq_pool_base);
714         if (ret)
715                 RTE_LOG(ERR, VHOST_DATA,
716                         "(%d) failed to add device MAC address to VMDQ\n",
717                         vdev->vid);
718
719         rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
720
721         /* Set device as ready for RX. */
722         vdev->ready = DEVICE_RX;
723
724         return 0;
725 }
726
727 /*
728  * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
729  * queue before disabling RX on the device.
730  */
731 static inline void
732 unlink_vmdq(struct vhost_dev *vdev)
733 {
734         unsigned i = 0;
735         unsigned rx_count;
736         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
737
738         if (vdev->ready == DEVICE_RX) {
739                 /*clear MAC and VLAN settings*/
740                 rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
741                 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
742                         vdev->mac_address.addr_bytes[i] = 0;
743
744                 vdev->vlan_tag = 0;
745
746                 /*Clear out the receive buffers*/
747                 rx_count = rte_eth_rx_burst(ports[0],
748                                         (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
749
750                 while (rx_count) {
751                         for (i = 0; i < rx_count; i++)
752                                 rte_pktmbuf_free(pkts_burst[i]);
753
754                         rx_count = rte_eth_rx_burst(ports[0],
755                                         (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
756                 }
757
758                 vdev->ready = DEVICE_MAC_LEARNING;
759         }
760 }
761
762 static __rte_always_inline void
763 virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
764             struct rte_mbuf *m)
765 {
766         uint16_t ret;
767
768         if (builtin_net_driver) {
769                 ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
770         } else {
771                 ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
772         }
773
774         if (enable_stats) {
775                 rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
776                 rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
777                 src_vdev->stats.tx_total++;
778                 src_vdev->stats.tx += ret;
779         }
780 }
781
782 /*
783  * Check if the packet destination MAC address is for a local device. If so
784  * then put the packet on that device's RX queue. If not then return.
785  */
786 static __rte_always_inline int
787 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
788 {
789         struct rte_ether_hdr *pkt_hdr;
790         struct vhost_dev *dst_vdev;
791
792         pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
793
794         dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
795         if (!dst_vdev)
796                 return -1;
797
798         if (vdev->vid == dst_vdev->vid) {
799                 RTE_LOG_DP(DEBUG, VHOST_DATA,
800                         "(%d) TX: src and dst MACs are the same; dropping packet.\n",
801                         vdev->vid);
802                 return 0;
803         }
804
805         RTE_LOG_DP(DEBUG, VHOST_DATA,
806                 "(%d) TX: MAC address is local\n", dst_vdev->vid);
807
808         if (unlikely(dst_vdev->remove)) {
809                 RTE_LOG_DP(DEBUG, VHOST_DATA,
810                         "(%d) device is marked for removal\n", dst_vdev->vid);
811                 return 0;
812         }
813
814         virtio_xmit(dst_vdev, vdev, m);
815         return 0;
816 }
817
818 /*
819  * Check if the destination MAC of a packet belongs to a local VM,
820  * and if so, get its VLAN tag and length offset.
821  */
822 static __rte_always_inline int
823 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
824         uint32_t *offset, uint16_t *vlan_tag)
825 {
826         struct vhost_dev *dst_vdev;
827         struct rte_ether_hdr *pkt_hdr =
828                 rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
829
830         dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
831         if (!dst_vdev)
832                 return 0;
833
834         if (vdev->vid == dst_vdev->vid) {
835                 RTE_LOG_DP(DEBUG, VHOST_DATA,
836                         "(%d) TX: src and dst MACs are the same; dropping packet.\n",
837                         vdev->vid);
838                 return -1;
839         }
840
841         /*
842          * HW VLAN strip reduces the packet length by the length of
843          * the VLAN tag, so the packet length needs to be restored
844          * by adding it back.
845          */
846         *offset  = VLAN_HLEN;
847         *vlan_tag = vlan_tags[vdev->vid];
848
849         RTE_LOG_DP(DEBUG, VHOST_DATA,
850                 "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
851                 vdev->vid, dst_vdev->vid, *vlan_tag);
852
853         return 0;
854 }
855
856 static uint16_t
857 get_psd_sum(void *l3_hdr, uint64_t ol_flags)
858 {
859         if (ol_flags & PKT_TX_IPV4)
860                 return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
861         else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
862                 return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
863 }
864
865 static void virtio_tx_offload(struct rte_mbuf *m)
866 {
867         void *l3_hdr;
868         struct rte_ipv4_hdr *ipv4_hdr = NULL;
869         struct rte_tcp_hdr *tcp_hdr = NULL;
870         struct rte_ether_hdr *eth_hdr =
871                 rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
872
873         l3_hdr = (char *)eth_hdr + m->l2_len;
874
875         if (m->ol_flags & PKT_TX_IPV4) {
876                 ipv4_hdr = l3_hdr;
877                 ipv4_hdr->hdr_checksum = 0;
878                 m->ol_flags |= PKT_TX_IP_CKSUM;
879         }
880
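        /* For TSO, the NIC expects the TCP checksum field to be seeded with
         * the pseudo-header checksum; the hardware computes the rest for
         * each segment. */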
881         tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + m->l3_len);
882         tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
883 }
884
885 static inline void
886 free_pkts(struct rte_mbuf **pkts, uint16_t n)
887 {
888         while (n--)
889                 rte_pktmbuf_free(pkts[n]);
890 }
891
892 static __rte_always_inline void
893 do_drain_mbuf_table(struct mbuf_table *tx_q)
894 {
895         uint16_t count;
896
897         count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
898                                  tx_q->m_table, tx_q->len);
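        /* rte_eth_tx_burst() may accept fewer than tx_q->len packets; the
         * leftovers are dropped (freed) here rather than retried. */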
899         if (unlikely(count < tx_q->len))
900                 free_pkts(&tx_q->m_table[count], tx_q->len - count);
901
902         tx_q->len = 0;
903 }
904
905 /*
906  * This function routes the TX packet to the correct interface. This
907  * may be a local device or the physical port.
908  */
909 static __rte_always_inline void
910 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
911 {
912         struct mbuf_table *tx_q;
913         unsigned offset = 0;
914         const uint16_t lcore_id = rte_lcore_id();
915         struct rte_ether_hdr *nh;
916
917
918         nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
919         if (unlikely(rte_is_broadcast_ether_addr(&nh->d_addr))) {
920                 struct vhost_dev *vdev2;
921
922                 TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
923                         if (vdev2 != vdev)
924                                 virtio_xmit(vdev2, vdev, m);
925                 }
926                 goto queue2nic;
927         }
928
929         /* Check if the destination is a local VM. */
930         if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
931                 rte_pktmbuf_free(m);
932                 return;
933         }
934
935         if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
936                 if (unlikely(find_local_dest(vdev, m, &offset,
937                                              &vlan_tag) != 0)) {
938                         rte_pktmbuf_free(m);
939                         return;
940                 }
941         }
942
943         RTE_LOG_DP(DEBUG, VHOST_DATA,
944                 "(%d) TX: MAC address is external\n", vdev->vid);
945
946 queue2nic:
947
948         /* Add packet to the port TX queue. */
949         tx_q = &lcore_tx_queue[lcore_id];
950
951         nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
952         if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
953                 /* Guest has inserted the vlan tag. */
954                 struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
955                 uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
956                 if ((vm2vm_mode == VM2VM_HARDWARE) &&
957                         (vh->vlan_tci != vlan_tag_be))
958                         vh->vlan_tci = vlan_tag_be;
959         } else {
960                 m->ol_flags |= PKT_TX_VLAN_PKT;
961
962                 /*
963                  * Find the right seg to adjust the data len when offset is
964                  * bigger than tail room size.
965                  */
966                 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
967                         if (likely(offset <= rte_pktmbuf_tailroom(m)))
968                                 m->data_len += offset;
969                         else {
970                                 struct rte_mbuf *seg = m;
971
972                                 while ((seg->next != NULL) &&
973                                         (offset > rte_pktmbuf_tailroom(seg)))
974                                         seg = seg->next;
975
976                                 seg->data_len += offset;
977                         }
978                         m->pkt_len += offset;
979                 }
980
981                 m->vlan_tci = vlan_tag;
982         }
983
984         if (m->ol_flags & PKT_TX_TCP_SEG)
985                 virtio_tx_offload(m);
986
987         tx_q->m_table[tx_q->len++] = m;
988         if (enable_stats) {
989                 vdev->stats.tx_total++;
990                 vdev->stats.tx++;
991         }
992
993         if (unlikely(tx_q->len == MAX_PKT_BURST))
994                 do_drain_mbuf_table(tx_q);
995 }
996
997
998 static __rte_always_inline void
999 drain_mbuf_table(struct mbuf_table *tx_q)
1000 {
1001         static uint64_t prev_tsc;
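        /* Note: this static is shared by every lcore that calls this
         * function; a per-lcore timestamp would be more precise, but the
         * shared one only affects how often draining happens. */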
1002         uint64_t cur_tsc;
1003
1004         if (tx_q->len == 0)
1005                 return;
1006
1007         cur_tsc = rte_rdtsc();
1008         if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1009                 prev_tsc = cur_tsc;
1010
1011                 RTE_LOG_DP(DEBUG, VHOST_DATA,
1012                         "TX queue drained after timeout with burst size %u\n",
1013                         tx_q->len);
1014                 do_drain_mbuf_table(tx_q);
1015         }
1016 }
1017
1018 static __rte_always_inline void
1019 drain_eth_rx(struct vhost_dev *vdev)
1020 {
1021         uint16_t rx_count, enqueue_count;
1022         struct rte_mbuf *pkts[MAX_PKT_BURST];
1023
1024         rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1025                                     pkts, MAX_PKT_BURST);
1026         if (!rx_count)
1027                 return;
1028
1029         /*
1030          * When "enable_retry" is set, wait and retry when there are not
1031          * enough free slots in the queue to hold @rx_count packets, to
1032          * diminish packet loss.
1033          */
1034         if (enable_retry &&
1035             unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1036                         VIRTIO_RXQ))) {
1037                 uint32_t retry;
1038
1039                 for (retry = 0; retry < burst_rx_retry_num; retry++) {
1040                         rte_delay_us(burst_rx_delay_time);
1041                         if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1042                                         VIRTIO_RXQ))
1043                                 break;
1044                 }
1045         }
1046
1047         if (builtin_net_driver) {
1048                 enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
1049                                                 pkts, rx_count);
1050         } else {
1051                 enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1052                                                 pkts, rx_count);
1053         }
1054         if (enable_stats) {
1055                 rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
1056                 rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
1057         }
1058
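        /* rte_vhost_enqueue_burst() copies packets into the guest's Rx ring,
         * so the entire burst can be freed here regardless of how many were
         * actually enqueued. */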
1059         free_pkts(pkts, rx_count);
1060 }
1061
1062 static __rte_always_inline void
1063 drain_virtio_tx(struct vhost_dev *vdev)
1064 {
1065         struct rte_mbuf *pkts[MAX_PKT_BURST];
1066         uint16_t count;
1067         uint16_t i;
1068
1069         if (builtin_net_driver) {
1070                 count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
1071                                         pkts, MAX_PKT_BURST);
1072         } else {
1073                 count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
1074                                         mbuf_pool, pkts, MAX_PKT_BURST);
1075         }
1076
1077         /* Setup VMDq for the first packet; on failure drop the burst. */
1078         if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count &&
1079             (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)) {
1080                 free_pkts(pkts, count);
                     return;
1081         }
1082
1083         for (i = 0; i < count; ++i)
1084                 virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1085 }
1086
1087 /*
1088  * Main function of vhost-switch. It basically does:
1089  *
1090  * for each vhost device {
1091  *    - drain_eth_rx()
1092  *
1093  *      Which drains the host eth Rx queue linked to the vhost device,
1094  *      and delivers all of the packets to the guest virtio Rx ring
1095  *      associated with this vhost device.
1096  *
1097  *    - drain_virtio_tx()
1098  *
1099  *      Which drains the guest virtio Tx queue and delivers each packet
1100  *      to its target, which could be another vhost device, or the
1101  *      physical eth dev. The routing is done in "virtio_tx_route".
1102  * }
1103  */
1104 static int
1105 switch_worker(void *arg __rte_unused)
1106 {
1107         unsigned i;
1108         unsigned lcore_id = rte_lcore_id();
1109         struct vhost_dev *vdev;
1110         struct mbuf_table *tx_q;
1111
1112         RTE_LOG(INFO, VHOST_DATA, "Processing on core %u started\n", lcore_id);
1113
1114         tx_q = &lcore_tx_queue[lcore_id];
1115         for (i = 0; i < rte_lcore_count(); i++) {
1116                 if (lcore_ids[i] == lcore_id) {
1117                         tx_q->txq_id = i;
1118                         break;
1119                 }
1120         }
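        /* txq_id is this lcore's index in lcore_ids[]; port_init() created
         * one NIC Tx queue per lcore, so each worker owns a private queue. */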
1121
1122         while (1) {
1123                 drain_mbuf_table(tx_q);
1124
1125                 /*
1126                  * Inform the configuration core that we have exited the
1127                  * linked list and that no devices are in use if requested.
1128                  */
1129                 if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1130                         lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1131
1132                 /*
1133                  * Process vhost devices
1134                  */
1135                 TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1136                               lcore_vdev_entry) {
1137                         if (unlikely(vdev->remove)) {
1138                                 unlink_vmdq(vdev);
1139                                 vdev->ready = DEVICE_SAFE_REMOVE;
1140                                 continue;
1141                         }
1142
1143                         if (likely(vdev->ready == DEVICE_RX))
1144                                 drain_eth_rx(vdev);
1145
1146                         if (likely(!vdev->remove))
1147                                 drain_virtio_tx(vdev);
1148                 }
1149         }
1150
1151         return 0;
1152 }
1153
1154 /*
1155  * Remove a device from the specific data core linked list and from the
1156  * main linked list. Synchronization occurs through the use of the
1157  * lcore dev_removal_flag. The device is accessed as volatile to avoid
1158  * reordering of dev->remove=1, which could cause an infinite rte_pause loop.
1159  */
1160 static void
1161 destroy_device(int vid)
1162 {
1163         struct vhost_dev *vdev = NULL;
1164         int lcore;
1165
1166         TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1167                 if (vdev->vid == vid)
1168                         break;
1169         }
1170         if (!vdev)
1171                 return;
1172         /* Set the remove flag. */
1173         vdev->remove = 1;
1174         while (vdev->ready != DEVICE_SAFE_REMOVE) {
1175                 rte_pause();
1176         }
1177
1178         if (builtin_net_driver)
1179                 vs_vhost_net_remove(vdev);
1180
1181         TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1182                      lcore_vdev_entry);
1183         TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1184
1185
1186         /* Set the dev_removal_flag on each lcore. */
1187         RTE_LCORE_FOREACH_SLAVE(lcore)
1188                 lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1189
1190         /*
1191          * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1192          * we can be sure that they can no longer access the device removed
1193          * from the linked lists and that the devices are no longer in use.
1194          */
1195         RTE_LCORE_FOREACH_SLAVE(lcore) {
1196                 while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1197                         rte_pause();
1198         }
1199
1200         lcore_info[vdev->coreid].device_num--;
1201
1202         RTE_LOG(INFO, VHOST_DATA,
1203                 "(%d) device has been removed from data core\n",
1204                 vdev->vid);
1205
1206         rte_free(vdev);
1207 }
1208
1209 /*
1210  * A new device is added to a data core. First the device is added to the main linked list
1211  * and then allocated to a specific data core.
1212  */
1213 static int
1214 new_device(int vid)
1215 {
1216         int lcore, core_add = 0;
1217         uint32_t device_num_min = num_devices;
1218         struct vhost_dev *vdev;
1219
1220         vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1221         if (vdev == NULL) {
1222                 RTE_LOG(INFO, VHOST_DATA,
1223                         "(%d) couldn't allocate memory for vhost dev\n",
1224                         vid);
1225                 return -1;
1226         }
1227         vdev->vid = vid;
1228
1229         if (builtin_net_driver)
1230                 vs_vhost_net_setup(vdev);
1231
1232         TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
1233         vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1234
1235         /*reset ready flag*/
1236         vdev->ready = DEVICE_MAC_LEARNING;
1237         vdev->remove = 0;
1238
1239         /* Find a suitable lcore to add the device. */
1240         RTE_LCORE_FOREACH_SLAVE(lcore) {
1241                 if (lcore_info[lcore].device_num < device_num_min) {
1242                         device_num_min = lcore_info[lcore].device_num;
1243                         core_add = lcore;
1244                 }
1245         }
1246         vdev->coreid = core_add;
1247
1248         TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1249                           lcore_vdev_entry);
1250         lcore_info[vdev->coreid].device_num++;
1251
1252         /* Disable notifications. */
1253         rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1254         rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1255
1256         RTE_LOG(INFO, VHOST_DATA,
1257                 "(%d) device has been added to data core %d\n",
1258                 vid, vdev->coreid);
1259
1260         return 0;
1261 }
1262
1263 /*
1264  * These callbacks allow devices to be added to the data core when
1265  * configuration has been fully completed.
1266  */
1267 static const struct vhost_device_ops virtio_net_device_ops =
1268 {
1269         .new_device =  new_device,
1270         .destroy_device = destroy_device,
1271 };
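/* These callbacks run in the vhost-user library's session threads, not on
 * the data lcores, hence the dev_removal_flag handshake used above. */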
1272
1273 /*
1274  * This thread wakes up periodically to print stats, if the user has
1275  * enabled them.
1276  */
1277 static void *
1278 print_stats(__rte_unused void *arg)
1279 {
1280         struct vhost_dev *vdev;
1281         uint64_t tx_dropped, rx_dropped;
1282         uint64_t tx, tx_total, rx, rx_total;
1283         const char clr[] = { 27, '[', '2', 'J', '\0' };
1284         const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
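        /* ANSI escape sequences: ESC[2J clears the screen and ESC[1;1H moves
         * the cursor to the top-left corner. */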
1285
1286         while (1) {
1287                 sleep(enable_stats);
1288
1289                 /* Clear screen and move to top left */
1290                 printf("%s%s\n", clr, top_left);
1291                 printf("Device statistics =================================\n");
1292
1293                 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1294                         tx_total   = vdev->stats.tx_total;
1295                         tx         = vdev->stats.tx;
1296                         tx_dropped = tx_total - tx;
1297
1298                         rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
1299                         rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
1300                         rx_dropped = rx_total - rx;
1301
1302                         printf("Statistics for device %d\n"
1303                                 "-----------------------\n"
1304                                 "TX total:              %" PRIu64 "\n"
1305                                 "TX dropped:            %" PRIu64 "\n"
1306                                 "TX successful:         %" PRIu64 "\n"
1307                                 "RX total:              %" PRIu64 "\n"
1308                                 "RX dropped:            %" PRIu64 "\n"
1309                                 "RX successful:         %" PRIu64 "\n",
1310                                 vdev->vid,
1311                                 tx_total, tx_dropped, tx,
1312                                 rx_total, rx_dropped, rx);
1313                 }
1314
1315                 printf("===================================================\n");
1316         }
1317
1318         return NULL;
1319 }
1320
1321 static void
1322 unregister_drivers(int socket_num)
1323 {
1324         int i, ret;
1325
1326         for (i = 0; i < socket_num; i++) {
1327                 ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
1328                 if (ret != 0)
1329                         RTE_LOG(ERR, VHOST_CONFIG,
1330                                 "Failed to unregister the vhost driver for %s.\n",
1331                                 socket_files + i * PATH_MAX);
1332         }
1333 }
1334
1335 /* When we receive an INT signal, unregister the vhost driver. */
1336 static void
1337 sigint_handler(__rte_unused int signum)
1338 {
1339         /* Unregister vhost driver. */
1340         unregister_drivers(nb_sockets);
1341
1342         exit(0);
1343 }
1344
1345 /*
1346  * While creating an mbuf pool, one key thing is to figure out how
1347  * many mbuf entries are enough for our use. FYI, here are some
1348  * guidelines:
1349  *
1350  * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
1351  *
1352  * - For each switch core (a CPU core that does the packet switching),
1353  *   we also need to reserve some mbufs for receiving packets from the
1354  *   virtio Tx queue. How many is enough depends on the usage. It's
1355  *   normally a simple calculation like the following:
1356  *
1357  *       MAX_PKT_BURST * max packet size / mbuf size
1358  *
1359  *   So, we definitely need to allocate more mbufs when TSO is enabled.
1360  *
1361  * - Similarly, for each switching core, we should serve @nr_rx_desc
1362  *   mbufs for receiving the packets from physical NIC device.
1363  *
1364  * - We also need to make sure that, for each switch core, we have
1365  *   allocated enough mbufs to fill up the mbuf cache.
1366  */
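/*
 * A rough worked example with the defaults above (assuming MAX_PKT_BURST
 * is 32 and MBUF_DATA_SIZE is 2176 bytes with a 128-byte headroom):
 *
 *     per-core: (1500 + 2176) * 32 / (2176 - 128) =~ 57 mbufs,
 *     plus nr_rx_desc (1024) => ~1081 mbufs per switch core,
 *     plus MAX_QUEUES (128) * 1024 descriptors per port.
 *
 * These are illustrative numbers only; the actual values depend on the
 * build-time defaults and the command-line options.
 */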
1367 static void
1368 create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1369         uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1370 {
1371         uint32_t nr_mbufs;
1372         uint32_t nr_mbufs_per_core;
1373         uint32_t mtu = 1500;
1374
1375         if (mergeable)
1376                 mtu = 9000;
1377         if (enable_tso)
1378                 mtu = 64 * 1024;
1379
1380         nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
1381                         (mbuf_size - RTE_PKTMBUF_HEADROOM);
1382         nr_mbufs_per_core += nr_rx_desc;
1383         nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1384
1385         nr_mbufs  = nr_queues * nr_rx_desc;
1386         nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1387         nr_mbufs *= nr_port;
1388
1389         mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1390                                             nr_mbuf_cache, 0, mbuf_size,
1391                                             rte_socket_id());
1392         if (mbuf_pool == NULL)
1393                 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1394 }
1395
1396 /*
1397  * Main function, does initialisation and calls the per-lcore functions.
1398  */
1399 int
1400 main(int argc, char *argv[])
1401 {
1402         unsigned lcore_id, core_id = 0;
1403         unsigned nb_ports, valid_num_ports;
1404         int ret, i;
1405         uint16_t portid;
1406         static pthread_t tid;
1407         uint64_t flags = 0;
1408
1409         signal(SIGINT, sigint_handler);
1410
1411         /* init EAL */
1412         ret = rte_eal_init(argc, argv);
1413         if (ret < 0)
1414                 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1415         argc -= ret;
1416         argv += ret;
1417
1418         /* parse app arguments */
1419         ret = us_vhost_parse_args(argc, argv);
1420         if (ret < 0)
1421                 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1422
1423         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1424                 TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1425
1426                 if (rte_lcore_is_enabled(lcore_id))
1427                         lcore_ids[core_id++] = lcore_id;
1428         }
1429
1430         if (rte_lcore_count() > RTE_MAX_LCORE)
1431                 rte_exit(EXIT_FAILURE, "Not enough cores\n");
1432
1433         /* Get the number of physical ports. */
1434         nb_ports = rte_eth_dev_count_avail();
1435
1436         /*
1437          * Update the global var NUM_PORTS and global array PORTS according
1438          * to the number of system ports, and get the number of valid ports.
1439          */
1440         valid_num_ports = check_ports_num(nb_ports);
1441
1442         if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
1443                 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1444                         "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1445                 return -1;
1446         }
1447
1448         /*
1449          * FIXME: here we are trying to allocate mbufs big enough for
1450          * @MAX_QUEUES, but the truth is we're never going to use that
1451          * many queues here. We probably should only do allocation for
1452          * those queues we are going to use.
1453          */
1454         create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1455                          MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1456
1457         if (vm2vm_mode == VM2VM_HARDWARE) {
1458                 /* Enable VT loop back to let L2 switch to do it. */
1459                 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1460                 RTE_LOG(DEBUG, VHOST_CONFIG,
1461                         "Enable loop back for L2 switch in vmdq.\n");
1462         }
1463
1464         /* initialize all ports */
1465         RTE_ETH_FOREACH_DEV(portid) {
1466                 /* skip ports that are not enabled */
1467                 if ((enabled_port_mask & (1 << portid)) == 0) {
1468                         RTE_LOG(INFO, VHOST_PORT,
1469                                 "Skipping disabled port %d\n", portid);
1470                         continue;
1471                 }
1472                 if (port_init(portid) != 0)
1473                         rte_exit(EXIT_FAILURE,
1474                                 "Cannot initialize network ports\n");
1475         }
1476
1477         /* Enable stats if the user option is set. */
1478         if (enable_stats) {
1479                 ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
1480                                         print_stats, NULL);
1481                 if (ret < 0)
1482                         rte_exit(EXIT_FAILURE,
1483                                 "Cannot create print-stats thread\n");
1484         }
1485
1486         /* Launch all data cores. */
1487         RTE_LCORE_FOREACH_SLAVE(lcore_id)
1488                 rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1489
1490         if (client_mode)
1491                 flags |= RTE_VHOST_USER_CLIENT;
1492
1493         if (dequeue_zero_copy)
1494                 flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1495
1496         /* Register vhost user driver to handle vhost messages. */
1497         for (i = 0; i < nb_sockets; i++) {
1498                 char *file = socket_files + i * PATH_MAX;
1499                 ret = rte_vhost_driver_register(file, flags);
1500                 if (ret != 0) {
1501                         unregister_drivers(i);
1502                         rte_exit(EXIT_FAILURE,
1503                                 "vhost driver register failure.\n");
1504                 }
1505
1506                 if (builtin_net_driver)
1507                         rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
1508
1509                 if (mergeable == 0) {
1510                         rte_vhost_driver_disable_features(file,
1511                                 1ULL << VIRTIO_NET_F_MRG_RXBUF);
1512                 }
1513
1514                 if (enable_tx_csum == 0) {
1515                         rte_vhost_driver_disable_features(file,
1516                                 1ULL << VIRTIO_NET_F_CSUM);
1517                 }
1518
1519                 if (enable_tso == 0) {
1520                         rte_vhost_driver_disable_features(file,
1521                                 1ULL << VIRTIO_NET_F_HOST_TSO4);
1522                         rte_vhost_driver_disable_features(file,
1523                                 1ULL << VIRTIO_NET_F_HOST_TSO6);
1524                         rte_vhost_driver_disable_features(file,
1525                                 1ULL << VIRTIO_NET_F_GUEST_TSO4);
1526                         rte_vhost_driver_disable_features(file,
1527                                 1ULL << VIRTIO_NET_F_GUEST_TSO6);
1528                 }
1529
1530                 if (promiscuous) {
1531                         rte_vhost_driver_enable_features(file,
1532                                 1ULL << VIRTIO_NET_F_CTRL_RX);
1533                 }
1534
1535                 ret = rte_vhost_driver_callback_register(file,
1536                         &virtio_net_device_ops);
1537                 if (ret != 0) {
1538                         rte_exit(EXIT_FAILURE,
1539                                 "failed to register vhost driver callbacks.\n");
1540                 }
1541
1542                 if (rte_vhost_driver_start(file) < 0) {
1543                         rte_exit(EXIT_FAILURE,
1544                                 "failed to start vhost driver.\n");
1545                 }
1546         }
1547
1548         RTE_LCORE_FOREACH_SLAVE(lcore_id)
1549                 rte_eal_wait_lcore(lcore_id);
1550
1551         return 0;
1552
1553 }