examples: use new API to create control threads
[dpdk.git] / examples / vhost / main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <arpa/inet.h>
6 #include <getopt.h>
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/virtio_net.h>
10 #include <linux/virtio_ring.h>
11 #include <signal.h>
12 #include <stdint.h>
13 #include <sys/eventfd.h>
14 #include <sys/param.h>
15 #include <unistd.h>
16
17 #include <rte_atomic.h>
18 #include <rte_cycles.h>
19 #include <rte_ethdev.h>
20 #include <rte_log.h>
21 #include <rte_string_fns.h>
22 #include <rte_malloc.h>
23 #include <rte_vhost.h>
24 #include <rte_ip.h>
25 #include <rte_tcp.h>
26 #include <rte_pause.h>
27
28 #include "main.h"
29
30 #ifndef MAX_QUEUES
31 #define MAX_QUEUES 128
32 #endif
33
34 /* the maximum number of external ports supported */
35 #define MAX_SUP_PORTS 1
36
37 #define MBUF_CACHE_SIZE 128
38 #define MBUF_DATA_SIZE  RTE_MBUF_DEFAULT_BUF_SIZE
39
40 #define BURST_TX_DRAIN_US 100   /* TX drain every ~100us */
41
42 #define BURST_RX_WAIT_US 15     /* Defines how long we wait between retries on RX */
43 #define BURST_RX_RETRIES 4              /* Number of retries on RX. */
44
45 #define JUMBO_FRAME_MAX_SIZE    0x2600
46
47 /* State of virtio device. */
48 #define DEVICE_MAC_LEARNING 0
49 #define DEVICE_RX                       1
50 #define DEVICE_SAFE_REMOVE      2
51
52 /* Configurable number of RX/TX ring descriptors */
53 #define RTE_TEST_RX_DESC_DEFAULT 1024
54 #define RTE_TEST_TX_DESC_DEFAULT 512
55
56 #define INVALID_PORT_ID 0xFF
57
58 /* Max number of devices. Limited by vmdq. */
59 #define MAX_DEVICES 64
60
61 /* Size of buffers used for snprintfs. */
62 #define MAX_PRINT_BUFF 6072
63
64 /* Maximum long option length for option parsing. */
65 #define MAX_LONG_OPT_SZ 64
66
67 /* mask of enabled ports */
68 static uint32_t enabled_port_mask = 0;
69
70 /* Promiscuous mode */
71 static uint32_t promiscuous;
72
73 /* Number of devices/queues to support */
74 static uint32_t num_queues = 0;
75 static uint32_t num_devices;
76
77 static struct rte_mempool *mbuf_pool;
78 static int mergeable;
79
80 /* Enable VM2VM communications. If this is disabled then the MAC address comparison is skipped. */
81 typedef enum {
82         VM2VM_DISABLED = 0,
83         VM2VM_SOFTWARE = 1,
84         VM2VM_HARDWARE = 2,
85         VM2VM_LAST
86 } vm2vm_type;
87 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
88
89 /* Enable stats. */
90 static uint32_t enable_stats = 0;
91 /* Enable retries on RX. */
92 static uint32_t enable_retry = 1;
93
94 /* Disable TX checksum offload */
95 static uint32_t enable_tx_csum;
96
97 /* Disable TSO offload */
98 static uint32_t enable_tso;
99
100 static int client_mode;
101 static int dequeue_zero_copy;
102
103 static int builtin_net_driver;
104
105 /* Specify the timeout (in microseconds) between retries on RX. */
106 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
107 /* Specify the number of retries on RX. */
108 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
109
110 /* Socket file paths. Can be set by user */
111 static char *socket_files;
112 static int nb_sockets;
113
114 /* Empty VMDQ configuration structure. Filled in programmatically. */
115 static struct rte_eth_conf vmdq_conf_default = {
116         .rxmode = {
117                 .mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
118                 .split_hdr_size = 0,
119                 .ignore_offload_bitfield = 1,
120                 /*
121                  * VLAN strip is necessary for 1G NICs such as the I350;
122                  * it fixes a bug where IPv4 forwarding in the guest could
123                  * not forward packets from one virtio device to another.
124                  */
125                 .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
126                              DEV_RX_OFFLOAD_VLAN_STRIP),
127         },
128
129         .txmode = {
130                 .mq_mode = ETH_MQ_TX_NONE,
131                 .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
132                              DEV_TX_OFFLOAD_TCP_CKSUM |
133                              DEV_TX_OFFLOAD_VLAN_INSERT |
134                              DEV_TX_OFFLOAD_MULTI_SEGS |
135                              DEV_TX_OFFLOAD_TCP_TSO),
136         },
137         .rx_adv_conf = {
138                 /*
139                  * should be overridden separately in code with
140                  * appropriate values
141                  */
142                 .vmdq_rx_conf = {
143                         .nb_queue_pools = ETH_8_POOLS,
144                         .enable_default_pool = 0,
145                         .default_pool = 0,
146                         .nb_pool_maps = 0,
147                         .pool_map = {{0, 0},},
148                 },
149         },
150 };
151
152
153 static unsigned lcore_ids[RTE_MAX_LCORE];
154 static uint16_t ports[RTE_MAX_ETHPORTS];
155 static unsigned num_ports = 0; /**< The number of ports specified on the command line */
156 static uint16_t num_pf_queues, num_vmdq_queues;
157 static uint16_t vmdq_pool_base, vmdq_queue_base;
158 static uint16_t queues_per_pool;
159
160 const uint16_t vlan_tags[] = {
161         1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
162         1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
163         1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
164         1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
165         1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
166         1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
167         1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
168         1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
169 };
170
171 /* ethernet addresses of ports */
172 static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
173
174 static struct vhost_dev_tailq_list vhost_dev_list =
175         TAILQ_HEAD_INITIALIZER(vhost_dev_list);
176
177 static struct lcore_info lcore_info[RTE_MAX_LCORE];
178
179 /* Used for queueing bursts of TX packets. */
180 struct mbuf_table {
181         unsigned len;
182         unsigned txq_id;
183         struct rte_mbuf *m_table[MAX_PKT_BURST];
184 };
185
186 /* TX queue for each data core. */
187 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
188
189 #define MBUF_TABLE_DRAIN_TSC    ((rte_get_tsc_hz() + US_PER_S - 1) \
190                                  / US_PER_S * BURST_TX_DRAIN_US)
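
/*
 * Worked example with illustrative numbers (not from the original source):
 * with a 2.3 GHz TSC, (2300000000 + 999999) / 1000000 = 2300 cycles per
 * microsecond, so the drain period is 2300 * 100 = 230000 TSC cycles,
 * i.e. roughly 100 us. The "+ US_PER_S - 1" rounds the cycles-per-us
 * figure up, so a slow clock can never yield a zero drain period.
 */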
191 #define VLAN_HLEN       4
192
193 /*
194  * Builds up the correct configuration for VMDQ VLAN pool map
195  * according to the pool & queue limits.
196  */
197 static inline int
198 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
199 {
200         struct rte_eth_vmdq_rx_conf conf;
201         struct rte_eth_vmdq_rx_conf *def_conf =
202                 &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
203         unsigned i;
204
205         memset(&conf, 0, sizeof(conf));
206         conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
207         conf.nb_pool_maps = num_devices;
208         conf.enable_loop_back = def_conf->enable_loop_back;
209         conf.rx_mode = def_conf->rx_mode;
210
211         for (i = 0; i < conf.nb_pool_maps; i++) {
212                 conf.pool_map[i].vlan_id = vlan_tags[i];
213                 conf.pool_map[i].pools = (1UL << i);
214         }
215
216         (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
217         (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
218                    sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
219         return 0;
220 }
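
/*
 * Illustrative example (values derived from the code above): with
 * num_devices = 2, the resulting pool_map is {{1000, 0x1}, {1001, 0x2}},
 * i.e. VLAN 1000 steers to pool 0 and VLAN 1001 to pool 1, giving each
 * virtio device its own VMDQ pool.
 */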
221
222 /*
223  * Validate the device number against the max pool number obtained from
224  * dev_info. If the device number is invalid, print an error message and
225  * return -1. Each device must have its own pool.
226  */
227 static inline int
228 validate_num_devices(uint32_t max_nb_devices)
229 {
230         if (num_devices > max_nb_devices) {
231                 RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
232                 return -1;
233         }
234         return 0;
235 }
236
237 /*
238  * Initialises a given port using global settings and with the rx buffers
239  * coming from the mbuf_pool passed as parameter
240  */
241 static inline int
242 port_init(uint16_t port)
243 {
244         struct rte_eth_dev_info dev_info;
245         struct rte_eth_conf port_conf;
246         struct rte_eth_rxconf *rxconf;
247         struct rte_eth_txconf *txconf;
248         int16_t rx_rings, tx_rings;
249         uint16_t rx_ring_size, tx_ring_size;
250         int retval;
251         uint16_t q;
252
253         /* The max pool number from dev_info is used to validate the pool number specified on the command line */
254         rte_eth_dev_info_get(port, &dev_info);
255
256         rxconf = &dev_info.default_rxconf;
257         txconf = &dev_info.default_txconf;
258         rxconf->rx_drop_en = 1;
259         txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
260
261         /* Configure the number of supported virtio devices based on VMDQ limits. */
262         num_devices = dev_info.max_vmdq_pools;
263
264         rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
265         tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
266
267         /*
268          * When dequeue zero copy is enabled, guest Tx used vring will be
269          * updated only when corresponding mbuf is freed. Thus, the nb_tx_desc
270          * (tx_ring_size here) must be small enough so that the driver will
271          * hit the free threshold easily and free mbufs timely. Otherwise,
272          * guest Tx vring would be starved.
273          */
274         if (dequeue_zero_copy)
275                 tx_ring_size = 64;
276
277         tx_rings = (uint16_t)rte_lcore_count();
278
279         retval = validate_num_devices(MAX_DEVICES);
280         if (retval < 0)
281                 return retval;
282
283         /* Get port configuration. */
284         retval = get_eth_conf(&port_conf, num_devices);
285         if (retval < 0)
286                 return retval;
287         /* NIC queues are divided into pf queues and vmdq queues.  */
288         num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
289         queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
290         num_vmdq_queues = num_devices * queues_per_pool;
291         num_queues = num_pf_queues + num_vmdq_queues;
292         vmdq_queue_base = dev_info.vmdq_queue_base;
293         vmdq_pool_base  = dev_info.vmdq_pool_base;
294         printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
295                 num_pf_queues, num_devices, queues_per_pool);
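
        /*
         * Hypothetical example of the split above: a NIC reporting
         * max_rx_queues = 192, vmdq_queue_num = 128 and max_vmdq_pools = 64
         * yields num_pf_queues = 64 and queues_per_pool = 2; with
         * num_devices = 64, num_vmdq_queues = 128 and num_queues = 192.
         */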
296
297         if (!rte_eth_dev_is_valid_port(port))
298                 return -1;
299
300         rx_rings = (uint16_t)dev_info.max_rx_queues;
301         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
302                 port_conf.txmode.offloads |=
303                         DEV_TX_OFFLOAD_MBUF_FAST_FREE;
304         /* Configure ethernet device. */
305         retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
306         if (retval != 0) {
307                 RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
308                         port, strerror(-retval));
309                 return retval;
310         }
311
312         retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
313                 &tx_ring_size);
314         if (retval != 0) {
315                 RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
316                         "for port %u: %s.\n", port, strerror(-retval));
317                 return retval;
318         }
319         if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
320                 RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
321                         "for Rx queues on port %u.\n", port);
322                 return -1;
323         }
324
325         /* Setup the queues. */
326         rxconf->offloads = port_conf.rxmode.offloads;
327         for (q = 0; q < rx_rings; q++) {
328                 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
329                                                 rte_eth_dev_socket_id(port),
330                                                 rxconf,
331                                                 mbuf_pool);
332                 if (retval < 0) {
333                         RTE_LOG(ERR, VHOST_PORT,
334                                 "Failed to setup rx queue %u of port %u: %s.\n",
335                                 q, port, strerror(-retval));
336                         return retval;
337                 }
338         }
339         txconf->offloads = port_conf.txmode.offloads;
340         for (q = 0; q < tx_rings; q++) {
341                 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
342                                                 rte_eth_dev_socket_id(port),
343                                                 txconf);
344                 if (retval < 0) {
345                         RTE_LOG(ERR, VHOST_PORT,
346                                 "Failed to setup tx queue %u of port %u: %s.\n",
347                                 q, port, strerror(-retval));
348                         return retval;
349                 }
350         }
351
352         /* Start the device. */
353         retval  = rte_eth_dev_start(port);
354         if (retval < 0) {
355                 RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
356                         port, strerror(-retval));
357                 return retval;
358         }
359
360         if (promiscuous)
361                 rte_eth_promiscuous_enable(port);
362
363         rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
364         RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
365         RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
366                         " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
367                         port,
368                         vmdq_ports_eth_addr[port].addr_bytes[0],
369                         vmdq_ports_eth_addr[port].addr_bytes[1],
370                         vmdq_ports_eth_addr[port].addr_bytes[2],
371                         vmdq_ports_eth_addr[port].addr_bytes[3],
372                         vmdq_ports_eth_addr[port].addr_bytes[4],
373                         vmdq_ports_eth_addr[port].addr_bytes[5]);
374
375         return 0;
376 }
377
378 /*
379  * Set socket file path.
380  */
381 static int
382 us_vhost_parse_socket_path(const char *q_arg)
383 {
384         /* parse number string */
385         if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
386                 return -1;
387
388         socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
            if (socket_files == NULL) /* avoid NULL deref on allocation failure */
                    return -1;
389         snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
390         nb_sockets++;
391
392         return 0;
393 }
394
395 /*
396  * Parse the portmask provided at run time.
397  */
398 static int
399 parse_portmask(const char *portmask)
400 {
401         char *end = NULL;
402         unsigned long pm;
403
404         errno = 0;
405
406         /* parse hexadecimal string */
407         pm = strtoul(portmask, &end, 16);
408         if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
409                 return -1;
410
411         if (pm == 0)
412                 return -1;
413
414         return pm;
415
416 }
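
/*
 * Usage note: the portmask is a hexadecimal bitmap of port ids, so e.g.
 * "-p 0x1" enables port 0 only and "-p 0x3" enables ports 0 and 1 (see
 * the enabled_port_mask scan in us_vhost_parse_args()).
 */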
417
418 /*
419  * Parse num options at run time.
420  */
421 static int
422 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
423 {
424         char *end = NULL;
425         unsigned long num;
426
427         errno = 0;
428
429         /* parse unsigned int string */
430         num = strtoul(q_arg, &end, 10);
431         if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
432                 return -1;
433
434         if (num > max_valid_value)
435                 return -1;
436
437         return num;
438
439 }
440
441 /*
442  * Display usage
443  */
444 static void
445 us_vhost_usage(const char *prgname)
446 {
447         RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
448         "               --vm2vm [0|1|2]\n"
449         "               --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
450         "               --socket-file <path>\n"
451         "               --tx-csum [0|1] --tso [0|1]\n"
452         "               -p PORTMASK: Set mask for ports to be used by application\n"
453         "               --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
454         "               --rx-retry [0|1]: disable/enable(default) retries on RX. Enable retry if the destination queue is full\n"
455         "               --rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Takes effect only if RX retries are enabled\n"
456         "               --rx-retry-num [0-N]: the number of retries on RX. Takes effect only if RX retries are enabled\n"
457         "               --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
458         "               --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
459         "               --socket-file: The path of the socket file.\n"
460         "               --tx-csum [0|1] disable/enable TX checksum offload.\n"
461         "               --tso [0|1] disable/enable TCP segmentation offload (TSO).\n"
462         "               --client register a vhost-user socket as client mode.\n"
463         "               --dequeue-zero-copy enables dequeue zero copy\n",
464                prgname);
465 }
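
/*
 * Example invocation (binary name, core list and socket path are
 * illustrative, not mandated by this file):
 *
 *   ./vhost-switch -l 0-3 -n 4 -- -p 0x1 --socket-file /tmp/sock0 --stats 1
 *
 * This runs the switch on one physical port with a single vhost-user
 * socket, printing statistics every second.
 */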
466
467 /*
468  * Parse the arguments given in the command line of the application.
469  */
470 static int
471 us_vhost_parse_args(int argc, char **argv)
472 {
473         int opt, ret;
474         int option_index;
475         unsigned i;
476         const char *prgname = argv[0];
477         static struct option long_option[] = {
478                 {"vm2vm", required_argument, NULL, 0},
479                 {"rx-retry", required_argument, NULL, 0},
480                 {"rx-retry-delay", required_argument, NULL, 0},
481                 {"rx-retry-num", required_argument, NULL, 0},
482                 {"mergeable", required_argument, NULL, 0},
483                 {"stats", required_argument, NULL, 0},
484                 {"socket-file", required_argument, NULL, 0},
485                 {"tx-csum", required_argument, NULL, 0},
486                 {"tso", required_argument, NULL, 0},
487                 {"client", no_argument, &client_mode, 1},
488                 {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
489                 {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
490                 {NULL, 0, 0, 0},
491         };
492
493         /* Parse command line */
494         while ((opt = getopt_long(argc, argv, "p:P",
495                         long_option, &option_index)) != EOF) {
496                 switch (opt) {
497                 /* Portmask */
498                 case 'p':
                            /* parse_portmask() signals errors with -1; an
                             * unsigned "== 0" test would never see it. */
499                         ret = parse_portmask(optarg);
500                         if (ret == -1) {
501                                 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
502                                 us_vhost_usage(prgname);
503                                 return -1;
504                         }
                            enabled_port_mask = ret;
505                         break;
506
507                 case 'P':
508                         promiscuous = 1;
509                         vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
510                                 ETH_VMDQ_ACCEPT_BROADCAST |
511                                 ETH_VMDQ_ACCEPT_MULTICAST;
512
513                         break;
514
515                 case 0:
516                         /* Enable/disable vm2vm comms. */
517                         if (!strncmp(long_option[option_index].name, "vm2vm",
518                                 MAX_LONG_OPT_SZ)) {
519                                 ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
520                                 if (ret == -1) {
521                                         RTE_LOG(INFO, VHOST_CONFIG,
522                                                 "Invalid argument for "
523                                                 "vm2vm [0|1|2]\n");
524                                         us_vhost_usage(prgname);
525                                         return -1;
526                                 } else {
527                                         vm2vm_mode = (vm2vm_type)ret;
528                                 }
529                         }
530
531                         /* Enable/disable retries on RX. */
532                         if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
533                                 ret = parse_num_opt(optarg, 1);
534                                 if (ret == -1) {
535                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
536                                         us_vhost_usage(prgname);
537                                         return -1;
538                                 } else {
539                                         enable_retry = ret;
540                                 }
541                         }
542
543                         /* Enable/disable TX checksum offload. */
544                         if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
545                                 ret = parse_num_opt(optarg, 1);
546                                 if (ret == -1) {
547                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
548                                         us_vhost_usage(prgname);
549                                         return -1;
550                                 } else
551                                         enable_tx_csum = ret;
552                         }
553
554                         /* Enable/disable TSO offload. */
555                         if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
556                                 ret = parse_num_opt(optarg, 1);
557                                 if (ret == -1) {
558                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
559                                         us_vhost_usage(prgname);
560                                         return -1;
561                                 } else
562                                         enable_tso = ret;
563                         }
564
565                         /* Specify the retry delay time (in microseconds) on RX. */
566                         if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
567                                 ret = parse_num_opt(optarg, INT32_MAX);
568                                 if (ret == -1) {
569                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
570                                         us_vhost_usage(prgname);
571                                         return -1;
572                                 } else {
573                                         burst_rx_delay_time = ret;
574                                 }
575                         }
576
577                         /* Specify the retries number on RX. */
578                         if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
579                                 ret = parse_num_opt(optarg, INT32_MAX);
580                                 if (ret == -1) {
581                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
582                                         us_vhost_usage(prgname);
583                                         return -1;
584                                 } else {
585                                         burst_rx_retry_num = ret;
586                                 }
587                         }
588
589                         /* Enable/disable RX mergeable buffers. */
590                         if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
591                                 ret = parse_num_opt(optarg, 1);
592                                 if (ret == -1) {
593                                         RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
594                                         us_vhost_usage(prgname);
595                                         return -1;
596                                 } else {
597                                         mergeable = !!ret;
598                                         if (ret) {
599                                                 vmdq_conf_default.rxmode.offloads |=
600                                                         DEV_RX_OFFLOAD_JUMBO_FRAME;
601                                                 vmdq_conf_default.rxmode.max_rx_pkt_len
602                                                         = JUMBO_FRAME_MAX_SIZE;
603                                         }
604                                 }
605                         }
606
607                         /* Enable/disable stats. */
608                         if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
609                                 ret = parse_num_opt(optarg, INT32_MAX);
610                                 if (ret == -1) {
611                                         RTE_LOG(INFO, VHOST_CONFIG,
612                                                 "Invalid argument for stats [0..N]\n");
613                                         us_vhost_usage(prgname);
614                                         return -1;
615                                 } else {
616                                         enable_stats = ret;
617                                 }
618                         }
619
620                         /* Set socket file path. */
621                         if (!strncmp(long_option[option_index].name,
622                                                 "socket-file", MAX_LONG_OPT_SZ)) {
623                                 if (us_vhost_parse_socket_path(optarg) == -1) {
624                                         RTE_LOG(INFO, VHOST_CONFIG,
625                                         "Invalid argument for socket name (Max %d characters)\n",
626                                         PATH_MAX);
627                                         us_vhost_usage(prgname);
628                                         return -1;
629                                 }
630                         }
631
632                         break;
633
634                         /* Invalid option - print options. */
635                 default:
636                         us_vhost_usage(prgname);
637                         return -1;
638                 }
639         }
640
641         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
642                 if (enabled_port_mask & (1 << i))
643                         ports[num_ports++] = i;
644         }
645
646         if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
647                 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
648                         "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
649                 return -1;
650         }
651
652         return 0;
653 }
654
655 /*
656  * Update the global var NUM_PORTS and array PORTS according to the number
657  * of system ports, and return the number of valid ports.
658  */
659 static unsigned check_ports_num(unsigned nb_ports)
660 {
661         unsigned valid_num_ports = num_ports;
662         unsigned portid;
663
664         if (num_ports > nb_ports) {
665                 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
666                         num_ports, nb_ports);
667                 num_ports = nb_ports;
668         }
669
670         for (portid = 0; portid < num_ports; portid++) {
671                 if (!rte_eth_dev_is_valid_port(ports[portid])) {
672                         RTE_LOG(INFO, VHOST_PORT,
673                                 "\nSpecified port ID(%u) is not valid\n",
674                                 ports[portid]);
675                         ports[portid] = INVALID_PORT_ID;
676                         valid_num_ports--;
677                 }
678         }
679         return valid_num_ports;
680 }
681
682 static __rte_always_inline struct vhost_dev *
683 find_vhost_dev(struct ether_addr *mac)
684 {
685         struct vhost_dev *vdev;
686
687         TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
688                 if (vdev->ready == DEVICE_RX &&
689                     is_same_ether_addr(mac, &vdev->mac_address))
690                         return vdev;
691         }
692
693         return NULL;
694 }
695
696 /*
697  * This function learns the MAC address of the device and registers it,
698  * along with a VLAN tag, in a VMDQ pool.
699  */
700 static int
701 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
702 {
703         struct ether_hdr *pkt_hdr;
704         int i, ret;
705
706         /* Learn MAC address of guest device from packet */
707         pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
708
709         if (find_vhost_dev(&pkt_hdr->s_addr)) {
710                 RTE_LOG(ERR, VHOST_DATA,
711                         "(%d) device is using a registered MAC!\n",
712                         vdev->vid);
713                 return -1;
714         }
715
716         for (i = 0; i < ETHER_ADDR_LEN; i++)
717                 vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
718
719         /* vlan_tag currently uses the device_id. */
720         vdev->vlan_tag = vlan_tags[vdev->vid];
721
722         /* Print out VMDQ registration info. */
723         RTE_LOG(INFO, VHOST_DATA,
724                 "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
725                 vdev->vid,
726                 vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
727                 vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
728                 vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
729                 vdev->vlan_tag);
730
731         /* Register the MAC address. */
732         ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
733                                 (uint32_t)vdev->vid + vmdq_pool_base);
734         if (ret)
735                 RTE_LOG(ERR, VHOST_DATA,
736                         "(%d) failed to add device MAC address to VMDQ\n",
737                         vdev->vid);
738
739         rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
740
741         /* Set device as ready for RX. */
742         vdev->ready = DEVICE_RX;
743
744         return 0;
745 }
746
747 /*
748  * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
749  * queue before disabling RX on the device.
750  */
751 static inline void
752 unlink_vmdq(struct vhost_dev *vdev)
753 {
754         unsigned i = 0;
755         unsigned rx_count;
756         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
757
758         if (vdev->ready == DEVICE_RX) {
759                 /* Clear MAC and VLAN settings. */
760                 rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
761                 for (i = 0; i < 6; i++)
762                         vdev->mac_address.addr_bytes[i] = 0;
763
764                 vdev->vlan_tag = 0;
765
766                 /* Clear out the receive buffers. */
767                 rx_count = rte_eth_rx_burst(ports[0],
768                                         (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
769
770                 while (rx_count) {
771                         for (i = 0; i < rx_count; i++)
772                                 rte_pktmbuf_free(pkts_burst[i]);
773
774                         rx_count = rte_eth_rx_burst(ports[0],
775                                         (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
776                 }
777
778                 vdev->ready = DEVICE_MAC_LEARNING;
779         }
780 }
781
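/*
 * Enqueue a single packet into the Rx virtqueue of a co-located destination
 * device (VM2VM software switching), via either the builtin net driver or
 * the vhost library, and account for it in the Rx/Tx statistics.
 */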
782 static __rte_always_inline void
783 virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
784             struct rte_mbuf *m)
785 {
786         uint16_t ret;
787
788         if (builtin_net_driver) {
789                 ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
790         } else {
791                 ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
792         }
793
794         if (enable_stats) {
795                 rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
796                 rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
797                 src_vdev->stats.tx_total++;
798                 src_vdev->stats.tx += ret;
799         }
800 }
801
802 /*
803  * Check if the packet destination MAC address is for a local device. If so,
804  * put the packet on that device's RX queue. If not, return.
805  */
806 static __rte_always_inline int
807 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
808 {
809         struct ether_hdr *pkt_hdr;
810         struct vhost_dev *dst_vdev;
811
812         pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
813
814         dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
815         if (!dst_vdev)
816                 return -1;
817
818         if (vdev->vid == dst_vdev->vid) {
819                 RTE_LOG_DP(DEBUG, VHOST_DATA,
820                         "(%d) TX: src and dst MAC is same. Dropping packet.\n",
821                         vdev->vid);
822                 return 0;
823         }
824
825         RTE_LOG_DP(DEBUG, VHOST_DATA,
826                 "(%d) TX: MAC address is local\n", dst_vdev->vid);
827
828         if (unlikely(dst_vdev->remove)) {
829                 RTE_LOG_DP(DEBUG, VHOST_DATA,
830                         "(%d) device is marked for removal\n", dst_vdev->vid);
831                 return 0;
832         }
833
834         virtio_xmit(dst_vdev, vdev, m);
835         return 0;
836 }
837
838 /*
839  * Check if the destination MAC of a packet is one local VM,
840  * and get its vlan tag, and offset if it is.
841  */
842 static __rte_always_inline int
843 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
844         uint32_t *offset, uint16_t *vlan_tag)
845 {
846         struct vhost_dev *dst_vdev;
847         struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
848
849         dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
850         if (!dst_vdev)
851                 return 0;
852
853         if (vdev->vid == dst_vdev->vid) {
854                 RTE_LOG_DP(DEBUG, VHOST_DATA,
855                         "(%d) TX: src and dst MAC is same. Dropping packet.\n",
856                         vdev->vid);
857                 return -1;
858         }
859
860         /*
861          * HW VLAN strip will reduce the packet length by the length of
862          * the VLAN tag, so we need to restore the packet length by
863          * adding it back.
864          */
865         *offset  = VLAN_HLEN;
866         *vlan_tag = vlan_tags[vdev->vid];
867
868         RTE_LOG_DP(DEBUG, VHOST_DATA,
869                 "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
870                 vdev->vid, dst_vdev->vid, *vlan_tag);
871
872         return 0;
873 }
874
875 static uint16_t
876 get_psd_sum(void *l3_hdr, uint64_t ol_flags)
877 {
878         if (ol_flags & PKT_TX_IPV4)
879                 return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
880         else /* assume ethertype == ETHER_TYPE_IPv6 */
881                 return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
882 }
883
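/*
 * Prepare a TSO packet before handing it to the NIC: zero the IPv4 header
 * checksum (it is recomputed by hardware) and seed the TCP checksum with
 * the pseudo-header checksum, as NICs generally expect for TSO offload.
 */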
884 static void virtio_tx_offload(struct rte_mbuf *m)
885 {
886         void *l3_hdr;
887         struct ipv4_hdr *ipv4_hdr = NULL;
888         struct tcp_hdr *tcp_hdr = NULL;
889         struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
890
891         l3_hdr = (char *)eth_hdr + m->l2_len;
892
893         if (m->ol_flags & PKT_TX_IPV4) {
894                 ipv4_hdr = l3_hdr;
895                 ipv4_hdr->hdr_checksum = 0;
896                 m->ol_flags |= PKT_TX_IP_CKSUM;
897         }
898
899         tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
900         tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
901 }
902
903 static inline void
904 free_pkts(struct rte_mbuf **pkts, uint16_t n)
905 {
906         while (n--)
907                 rte_pktmbuf_free(pkts[n]);
908 }
909
910 static __rte_always_inline void
911 do_drain_mbuf_table(struct mbuf_table *tx_q)
912 {
913         uint16_t count;
914
915         count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
916                                  tx_q->m_table, tx_q->len);
917         if (unlikely(count < tx_q->len))
918                 free_pkts(&tx_q->m_table[count], tx_q->len - count);
919
920         tx_q->len = 0;
921 }
922
923 /*
924  * This function routes the TX packet to the correct interface. This
925  * may be a local device or the physical port.
926  */
927 static __rte_always_inline void
928 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
929 {
930         struct mbuf_table *tx_q;
931         unsigned offset = 0;
932         const uint16_t lcore_id = rte_lcore_id();
933         struct ether_hdr *nh;
934
935
936         nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
937         if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
938                 struct vhost_dev *vdev2;
939
940                 TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
941                         if (vdev2 != vdev)
942                                 virtio_xmit(vdev2, vdev, m);
943                 }
944                 goto queue2nic;
945         }
946
947         /* Check if the destination is a local VM. */
948         if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
949                 rte_pktmbuf_free(m);
950                 return;
951         }
952
953         if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
954                 if (unlikely(find_local_dest(vdev, m, &offset,
955                                              &vlan_tag) != 0)) {
956                         rte_pktmbuf_free(m);
957                         return;
958                 }
959         }
960
961         RTE_LOG_DP(DEBUG, VHOST_DATA,
962                 "(%d) TX: MAC address is external\n", vdev->vid);
963
964 queue2nic:
965
966         /* Add the packet to the port Tx queue. */
967         tx_q = &lcore_tx_queue[lcore_id];
968
969         nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
970         if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
971                 /* Guest has inserted the vlan tag. */
972                 struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
973                 uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
974                 if ((vm2vm_mode == VM2VM_HARDWARE) &&
975                         (vh->vlan_tci != vlan_tag_be))
976                         vh->vlan_tci = vlan_tag_be;
977         } else {
978                 m->ol_flags |= PKT_TX_VLAN_PKT;
979
980                 /*
981                  * Find the right seg to adjust the data len when offset is
982                  * bigger than tail room size.
983                  */
984                 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
985                         if (likely(offset <= rte_pktmbuf_tailroom(m)))
986                                 m->data_len += offset;
987                         else {
988                                 struct rte_mbuf *seg = m;
989
990                                 while ((seg->next != NULL) &&
991                                         (offset > rte_pktmbuf_tailroom(seg)))
992                                         seg = seg->next;
993
994                                 seg->data_len += offset;
995                         }
996                         m->pkt_len += offset;
997                 }
998
999                 m->vlan_tci = vlan_tag;
1000         }
1001
1002         if (m->ol_flags & PKT_TX_TCP_SEG)
1003                 virtio_tx_offload(m);
1004
1005         tx_q->m_table[tx_q->len++] = m;
1006         if (enable_stats) {
1007                 vdev->stats.tx_total++;
1008                 vdev->stats.tx++;
1009         }
1010
1011         if (unlikely(tx_q->len == MAX_PKT_BURST))
1012                 do_drain_mbuf_table(tx_q);
1013 }
1014
1015
1016 static __rte_always_inline void
1017 drain_mbuf_table(struct mbuf_table *tx_q)
1018 {
1019         static uint64_t prev_tsc;
1020         uint64_t cur_tsc;
1021
1022         if (tx_q->len == 0)
1023                 return;
1024
1025         cur_tsc = rte_rdtsc();
1026         if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1027                 prev_tsc = cur_tsc;
1028
1029                 RTE_LOG_DP(DEBUG, VHOST_DATA,
1030                         "TX queue drained after timeout with burst size %u\n",
1031                         tx_q->len);
1032                 do_drain_mbuf_table(tx_q);
1033         }
1034 }
1035
1036 static __rte_always_inline void
1037 drain_eth_rx(struct vhost_dev *vdev)
1038 {
1039         uint16_t rx_count, enqueue_count;
1040         struct rte_mbuf *pkts[MAX_PKT_BURST];
1041
1042         rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1043                                     pkts, MAX_PKT_BURST);
1044         if (!rx_count)
1045                 return;
1046
1047         /*
1048          * When "enable_retry" is set, we wait and retry when there are
1049          * not enough free slots in the queue to hold @rx_count packets,
1050          * to reduce packet loss.
1051          */
1052         if (enable_retry &&
1053             unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1054                         VIRTIO_RXQ))) {
1055                 uint32_t retry;
1056
1057                 for (retry = 0; retry < burst_rx_retry_num; retry++) {
1058                         rte_delay_us(burst_rx_delay_time);
1059                         if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1060                                         VIRTIO_RXQ))
1061                                 break;
1062                 }
1063         }
1064
1065         if (builtin_net_driver) {
1066                 enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
1067                                                 pkts, rx_count);
1068         } else {
1069                 enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1070                                                 pkts, rx_count);
1071         }
1072         if (enable_stats) {
1073                 rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
1074                 rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
1075         }
1076
1077         free_pkts(pkts, rx_count);
1078 }
1079
1080 static __rte_always_inline void
1081 drain_virtio_tx(struct vhost_dev *vdev)
1082 {
1083         struct rte_mbuf *pkts[MAX_PKT_BURST];
1084         uint16_t count;
1085         uint16_t i;
1086
1087         if (builtin_net_driver) {
1088                 count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
1089                                         pkts, MAX_PKT_BURST);
1090         } else {
1091                 count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
1092                                         mbuf_pool, pkts, MAX_PKT_BURST);
1093         }
1094
1095         /* setup VMDq for the first packet */
1096         if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1097                 if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1) {
1098                         free_pkts(pkts, count);
                             /* The mbufs were just freed; routing them below
                              * would be a use-after-free. */
                             return;
                     }
1099         }
1100
1101         for (i = 0; i < count; ++i)
1102                 virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1103 }
1104
1105 /*
1106  * Main function of vhost-switch. It basically does:
1107  *
1108  * for each vhost device {
1109  *    - drain_eth_rx()
1110  *
1111  *      Which drains the host eth Rx queue linked to the vhost device,
1112  *      and delivers all of the packets to the guest virtio Rx ring
1113  *      associated with this vhost device.
1114  *
1115  *    - drain_virtio_tx()
1116  *
1117  *      Which drains the guest virtio Tx queue and delivers all of the
1118  *      packets to the target, which could be another vhost device, or the
1119  *      physical eth dev. The routing is done in function "virtio_tx_route".
1120  * }
1121  */
1122 static int
1123 switch_worker(void *arg __rte_unused)
1124 {
1125         unsigned i;
1126         unsigned lcore_id = rte_lcore_id();
1127         struct vhost_dev *vdev;
1128         struct mbuf_table *tx_q;
1129
1130         RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1131
1132         tx_q = &lcore_tx_queue[lcore_id];
1133         for (i = 0; i < rte_lcore_count(); i++) {
1134                 if (lcore_ids[i] == lcore_id) {
1135                         tx_q->txq_id = i;
1136                         break;
1137                 }
1138         }
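
        /*
         * Each enabled lcore owns exactly one NIC Tx queue: tx_rings was set
         * to rte_lcore_count() in port_init(), and this lcore's index in
         * lcore_ids[] selects its queue, so no Tx locking is needed.
         */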
1139
1140         while (1) {
1141                 drain_mbuf_table(tx_q);
1142
1143                 /*
1144                  * Inform the configuration core that we have exited the
1145                  * linked list and that no devices are in use if requested.
1146                  */
1147                 if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1148                         lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1149
1150                 /*
1151                  * Process vhost devices
1152                  */
1153                 TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1154                               lcore_vdev_entry) {
1155                         if (unlikely(vdev->remove)) {
1156                                 unlink_vmdq(vdev);
1157                                 vdev->ready = DEVICE_SAFE_REMOVE;
1158                                 continue;
1159                         }
1160
1161                         if (likely(vdev->ready == DEVICE_RX))
1162                                 drain_eth_rx(vdev);
1163
1164                         if (likely(!vdev->remove))
1165                                 drain_virtio_tx(vdev);
1166                 }
1167         }
1168
1169         return 0;
1170 }
1171
1172 /*
1173  * Remove a device from the specific data core linked list and from the
1174  * main linked list. Synchronization occurs through the use of the
1175  * lcore dev_removal_flag. The device is made volatile here to avoid
1176  * re-ordering of dev->remove=1, which can cause an infinite loop in the rte_pause loop.
1177  */
1178 static void
1179 destroy_device(int vid)
1180 {
1181         struct vhost_dev *vdev = NULL;
1182         int lcore;
1183
1184         TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1185                 if (vdev->vid == vid)
1186                         break;
1187         }
1188         if (!vdev)
1189                 return;
1190         /* Set the remove flag. */
1191         vdev->remove = 1;
1192         while (vdev->ready != DEVICE_SAFE_REMOVE) {
1193                 rte_pause();
1194         }
1195
1196         if (builtin_net_driver)
1197                 vs_vhost_net_remove(vdev);
1198
1199         TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1200                      lcore_vdev_entry);
1201         TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1202
1203
1204         /* Set the dev_removal_flag on each lcore. */
1205         RTE_LCORE_FOREACH_SLAVE(lcore)
1206                 lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1207
1208         /*
1209          * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1210          * we can be sure that they can no longer access the device removed
1211          * from the linked lists and that the devices are no longer in use.
1212          */
1213         RTE_LCORE_FOREACH_SLAVE(lcore) {
1214                 while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1215                         rte_pause();
1216         }
1217
1218         lcore_info[vdev->coreid].device_num--;
1219
1220         RTE_LOG(INFO, VHOST_DATA,
1221                 "(%d) device has been removed from data core\n",
1222                 vdev->vid);
1223
1224         rte_free(vdev);
1225 }
1226
1227 /*
1228  * A new device is added to a data core. First the device is added to the
1229  * main linked list and then allocated to a specific data core.
1230  */
1231 static int
1232 new_device(int vid)
1233 {
1234         int lcore, core_add = 0;
1235         uint32_t device_num_min = num_devices;
1236         struct vhost_dev *vdev;
1237
1238         vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1239         if (vdev == NULL) {
1240                 RTE_LOG(INFO, VHOST_DATA,
1241                         "(%d) couldn't allocate memory for vhost dev\n",
1242                         vid);
1243                 return -1;
1244         }
1245         vdev->vid = vid;
1246
1247         if (builtin_net_driver)
1248                 vs_vhost_net_setup(vdev);
1249
1250         TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
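
        /*
         * Each device owns one VMDq pool and reads from that pool's first
         * queue; e.g. with queues_per_pool = 2 and vmdq_queue_base = 64
         * (hypothetical values), vid 1 maps to NIC Rx queue 66.
         */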
1251         vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1252
1253         /* Reset the ready flag. */
1254         vdev->ready = DEVICE_MAC_LEARNING;
1255         vdev->remove = 0;
1256
1257         /* Find a suitable lcore to add the device. */
1258         RTE_LCORE_FOREACH_SLAVE(lcore) {
1259                 if (lcore_info[lcore].device_num < device_num_min) {
1260                         device_num_min = lcore_info[lcore].device_num;
1261                         core_add = lcore;
1262                 }
1263         }
1264         vdev->coreid = core_add;
1265
1266         TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1267                           lcore_vdev_entry);
1268         lcore_info[vdev->coreid].device_num++;
1269
1270         /* Disable notifications. */
1271         rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1272         rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1273
1274         RTE_LOG(INFO, VHOST_DATA,
1275                 "(%d) device has been added to data core %d\n",
1276                 vid, vdev->coreid);
1277
1278         return 0;
1279 }
1280
1281 /*
1282  * These callbacks allow devices to be added to the data core when
1283  * configuration has fully completed.
1284  */
1285 static const struct vhost_device_ops virtio_net_device_ops =
1286 {
1287         .new_device =  new_device,
1288         .destroy_device = destroy_device,
1289 };
1290
1291 /*
1292  * This is a thread that wakes up periodically to print stats if the user
1293  * has enabled them.
1294  */
1295 static void *
1296 print_stats(__rte_unused void *arg)
1297 {
1298         struct vhost_dev *vdev;
1299         uint64_t tx_dropped, rx_dropped;
1300         uint64_t tx, tx_total, rx, rx_total;
1301         const char clr[] = { 27, '[', '2', 'J', '\0' };
1302         const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1303
1304         while (1) {
1305                 sleep(enable_stats);
1306
1307                 /* Clear screen and move to top left */
1308                 printf("%s%s\n", clr, top_left);
1309                 printf("Device statistics =================================\n");
1310
1311                 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1312                         tx_total   = vdev->stats.tx_total;
1313                         tx         = vdev->stats.tx;
1314                         tx_dropped = tx_total - tx;
1315
1316                         rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
1317                         rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
1318                         rx_dropped = rx_total - rx;
1319
1320                         printf("Statistics for device %d\n"
1321                                 "-----------------------\n"
1322                                 "TX total:              %" PRIu64 "\n"
1323                                 "TX dropped:            %" PRIu64 "\n"
1324                                 "TX successful:         %" PRIu64 "\n"
1325                                 "RX total:              %" PRIu64 "\n"
1326                                 "RX dropped:            %" PRIu64 "\n"
1327                                 "RX successful:         %" PRIu64 "\n",
1328                                 vdev->vid,
1329                                 tx_total, tx_dropped, tx,
1330                                 rx_total, rx_dropped, rx);
1331                 }
1332
1333                 printf("===================================================\n");
1334         }
1335
1336         return NULL;
1337 }
1338
1339 static void
1340 unregister_drivers(int socket_num)
1341 {
1342         int i, ret;
1343
1344         for (i = 0; i < socket_num; i++) {
1345                 ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
1346                 if (ret != 0)
1347                         RTE_LOG(ERR, VHOST_CONFIG,
1348                                 "Failed to unregister vhost driver for %s.\n",
1349                                 socket_files + i * PATH_MAX);
1350         }
1351 }
1352
1353 /* When we receive an INT signal, unregister the vhost driver */
1354 static void
1355 sigint_handler(__rte_unused int signum)
1356 {
1357         /* Unregister vhost driver. */
1358         unregister_drivers(nb_sockets);
1359
1360         exit(0);
1361 }
1362
1363 /*
1364  * While creating an mbuf pool, one key thing is to figure out how
1365  * many mbuf entries are enough for our use. FYI, here are some
1366  * guidelines:
1367  *
1368  * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
1369  *
1370  * - For each switch core (a CPU core that does the packet switching), we
1371  *   also need to reserve some mbufs for receiving the packets from the
1372  *   virtio Tx queue. How many are enough depends on the usage. It's
1373  *   normally a simple calculation like the following:
1374  *
1375  *       MAX_PKT_BURST * max packet size / mbuf size
1376  *
1377  *   So, we definitely need to allocate more mbufs when TSO is enabled.
1378  *
1379  * - Similarly, for each switching core, we should reserve @nr_rx_desc
1380  *   mbufs for receiving the packets from the physical NIC device.
1381  *
1382  * - We also need to make sure that, for each switch core, we have
1383  *   allocated enough mbufs to fill up the mbuf cache.
1384  */
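
/*
 * Worked example under assumed defaults (MAX_PKT_BURST = 32, mbuf_size =
 * RTE_MBUF_DEFAULT_BUF_SIZE = 2176, RTE_PKTMBUF_HEADROOM = 128, mtu = 1500,
 * nr_rx_desc = 1024): nr_mbufs_per_core = (1500 + 2176) * 32 / 2048 + 1024
 * = 1081, and with one port, MAX_QUEUES = 128 queues and 3 switch cores,
 * nr_mbufs = 128 * 1024 + 1081 * 3 = 134315 mbufs.
 */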
1385 static void
1386 create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1387         uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1388 {
1389         uint32_t nr_mbufs;
1390         uint32_t nr_mbufs_per_core;
1391         uint32_t mtu = 1500;
1392
1393         if (mergeable)
1394                 mtu = 9000;
1395         if (enable_tso)
1396                 mtu = 64 * 1024;
1397
1398         nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
1399                         (mbuf_size - RTE_PKTMBUF_HEADROOM);
1400         nr_mbufs_per_core += nr_rx_desc;
1401         nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1402
1403         nr_mbufs  = nr_queues * nr_rx_desc;
1404         nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1405         nr_mbufs *= nr_port;
1406
1407         mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1408                                             nr_mbuf_cache, 0, mbuf_size,
1409                                             rte_socket_id());
1410         if (mbuf_pool == NULL)
1411                 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1412 }
1413
1414 /*
1415  * Main function, does initialisation and calls the per-lcore functions.
1416  */
1417 int
1418 main(int argc, char *argv[])
1419 {
1420         unsigned lcore_id, core_id = 0;
1421         unsigned nb_ports, valid_num_ports;
1422         int ret, i;
1423         uint16_t portid;
1424         static pthread_t tid;
1425         uint64_t flags = 0;
1426
1427         signal(SIGINT, sigint_handler);
1428
1429         /* init EAL */
1430         ret = rte_eal_init(argc, argv);
1431         if (ret < 0)
1432                 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1433         argc -= ret;
1434         argv += ret;
1435
1436         /* parse app arguments */
1437         ret = us_vhost_parse_args(argc, argv);
1438         if (ret < 0)
1439                 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1440
1441         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1442                 TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1443
1444                 if (rte_lcore_is_enabled(lcore_id))
1445                         lcore_ids[core_id++] = lcore_id;
1446         }
1447
1448         if (rte_lcore_count() > RTE_MAX_LCORE)
1449                 rte_exit(EXIT_FAILURE, "Not enough cores\n");
1450
1451         /* Get the number of physical ports. */
1452         nb_ports = rte_eth_dev_count_avail();
1453
1454         /*
1455          * Update the global var NUM_PORTS and global array PORTS, and get
1456          * the value of var VALID_NUM_PORTS according to the number of system ports.
1457          */
1458         valid_num_ports = check_ports_num(nb_ports);
1459
1460         if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
1461                 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1462                         "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1463                 return -1;
1464         }
1465
1466         /*
1467          * FIXME: here we are trying to allocate mbufs big enough for
1468          * @MAX_QUEUES, but the truth is we're never going to use that
1469          * many queues here. We probably should only do allocation for
1470          * those queues we are going to use.
1471          */
1472         create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1473                          MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1474
1475         if (vm2vm_mode == VM2VM_HARDWARE) {
1476                 /* Enable VT loop back to let L2 switch to do it. */
1477                 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1478                 RTE_LOG(DEBUG, VHOST_CONFIG,
1479                         "Enable loop back for L2 switch in vmdq.\n");
1480         }
1481
1482         /* initialize all ports */
1483         RTE_ETH_FOREACH_DEV(portid) {
1484                 /* skip ports that are not enabled */
1485                 if ((enabled_port_mask & (1 << portid)) == 0) {
1486                         RTE_LOG(INFO, VHOST_PORT,
1487                                 "Skipping disabled port %d\n", portid);
1488                         continue;
1489                 }
1490                 if (port_init(portid) != 0)
1491                         rte_exit(EXIT_FAILURE,
1492                                 "Cannot initialize network ports\n");
1493         }
1494
1495         /* Enable stats if the user option is set. */
1496         if (enable_stats) {
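                /*
                 * rte_ctrl_thread_create() is the new EAL control-thread API
                 * this example was converted to: it names the thread and keeps
                 * its CPU affinity off the data-plane lcores.
                 */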
1497                 ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
1498                                         print_stats, NULL);
1499                 if (ret < 0)
1500                         rte_exit(EXIT_FAILURE,
1501                                 "Cannot create print-stats thread\n");
1502         }
1503
1504         /* Launch all data cores. */
1505         RTE_LCORE_FOREACH_SLAVE(lcore_id)
1506                 rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1507
1508         if (client_mode)
1509                 flags |= RTE_VHOST_USER_CLIENT;
1510
1511         if (dequeue_zero_copy)
1512                 flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
1513
1514         /* Register vhost user driver to handle vhost messages. */
1515         for (i = 0; i < nb_sockets; i++) {
1516                 char *file = socket_files + i * PATH_MAX;
1517                 ret = rte_vhost_driver_register(file, flags);
1518                 if (ret != 0) {
1519                         unregister_drivers(i);
1520                         rte_exit(EXIT_FAILURE,
1521                                 "vhost driver register failure.\n");
1522                 }
1523
1524                 if (builtin_net_driver)
1525                         rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
1526
1527                 if (mergeable == 0) {
1528                         rte_vhost_driver_disable_features(file,
1529                                 1ULL << VIRTIO_NET_F_MRG_RXBUF);
1530                 }
1531
1532                 if (enable_tx_csum == 0) {
1533                         rte_vhost_driver_disable_features(file,
1534                                 1ULL << VIRTIO_NET_F_CSUM);
1535                 }
1536
1537                 if (enable_tso == 0) {
1538                         rte_vhost_driver_disable_features(file,
1539                                 1ULL << VIRTIO_NET_F_HOST_TSO4);
1540                         rte_vhost_driver_disable_features(file,
1541                                 1ULL << VIRTIO_NET_F_HOST_TSO6);
1542                         rte_vhost_driver_disable_features(file,
1543                                 1ULL << VIRTIO_NET_F_GUEST_TSO4);
1544                         rte_vhost_driver_disable_features(file,
1545                                 1ULL << VIRTIO_NET_F_GUEST_TSO6);
1546                 }
1547
1548                 if (promiscuous) {
1549                         rte_vhost_driver_enable_features(file,
1550                                 1ULL << VIRTIO_NET_F_CTRL_RX);
1551                 }
1552
1553                 ret = rte_vhost_driver_callback_register(file,
1554                         &virtio_net_device_ops);
1555                 if (ret != 0) {
1556                         rte_exit(EXIT_FAILURE,
1557                                 "failed to register vhost driver callbacks.\n");
1558                 }
1559
1560                 if (rte_vhost_driver_start(file) < 0) {
1561                         rte_exit(EXIT_FAILURE,
1562                                 "failed to start vhost driver.\n");
1563                 }
1564         }
1565
1566         RTE_LCORE_FOREACH_SLAVE(lcore_id)
1567                 rte_eal_wait_lcore(lcore_id);
1568
1569         return 0;
1570
1571 }