examples/vhost/main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <arpa/inet.h>
6 #include <getopt.h>
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/virtio_net.h>
10 #include <linux/virtio_ring.h>
11 #include <signal.h>
12 #include <stdint.h>
13 #include <sys/eventfd.h>
14 #include <sys/param.h>
15 #include <unistd.h>
16
17 #include <rte_cycles.h>
18 #include <rte_ethdev.h>
19 #include <rte_log.h>
20 #include <rte_string_fns.h>
21 #include <rte_malloc.h>
22 #include <rte_net.h>
23 #include <rte_vhost.h>
24 #include <rte_ip.h>
25 #include <rte_tcp.h>
26 #include <rte_pause.h>
27 #include <rte_dmadev.h>
28 #include <rte_vhost_async.h>
29
30 #include "main.h"
31
32 #ifndef MAX_QUEUES
33 #define MAX_QUEUES 128
34 #endif
35
36 #define NUM_MBUFS_DEFAULT 0x24000
37
38 /* the maximum number of external ports supported */
39 #define MAX_SUP_PORTS 1
40
41 #define MBUF_CACHE_SIZE 128
42 #define MBUF_DATA_SIZE  RTE_MBUF_DEFAULT_BUF_SIZE
43
44 #define BURST_TX_DRAIN_US 100   /* TX drain every ~100us */
45
46 #define BURST_RX_WAIT_US 15     /* Defines how long we wait between retries on RX */
47 #define BURST_RX_RETRIES 4              /* Number of retries on RX. */
48
49 #define JUMBO_FRAME_MAX_SIZE    0x2600
50 #define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))
51
52 /* State of virtio device. */
53 #define DEVICE_MAC_LEARNING 0
54 #define DEVICE_RX                       1
55 #define DEVICE_SAFE_REMOVE      2
56
57 /* Configurable number of RX/TX ring descriptors */
58 #define RTE_TEST_RX_DESC_DEFAULT 1024
59 #define RTE_TEST_TX_DESC_DEFAULT 512
60
61 #define INVALID_PORT_ID 0xFF
62 #define INVALID_DMA_ID -1
63
64 #define DMA_RING_SIZE 4096
65
66 /* number of mbufs in all pools - if specified on command-line. */
67 static int total_num_mbufs = NUM_MBUFS_DEFAULT;
68
69 struct dma_for_vhost dma_bind[RTE_MAX_VHOST_DEVICE];
70 int16_t dmas_id[RTE_DMADEV_DEFAULT_MAX];
71 static int dma_count;
72
73 /* mask of enabled ports */
74 static uint32_t enabled_port_mask = 0;
75
76 /* Promiscuous mode */
77 static uint32_t promiscuous;
78
79 /* number of devices/queues to support*/
80 static uint32_t num_queues = 0;
81 static uint32_t num_devices;
82
83 static struct rte_mempool *mbuf_pool;
84 static int mergeable;
85
86 /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
87 typedef enum {
88         VM2VM_DISABLED = 0,
89         VM2VM_SOFTWARE = 1,
90         VM2VM_HARDWARE = 2,
91         VM2VM_LAST
92 } vm2vm_type;
93 static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
94
95 /* Enable stats. */
96 static uint32_t enable_stats = 0;
97 /* Enable retries on RX. */
98 static uint32_t enable_retry = 1;
99
100 /* Disable TX checksum offload */
101 static uint32_t enable_tx_csum;
102
103 /* Disable TSO offload */
104 static uint32_t enable_tso;
105
106 static int client_mode;
107
108 static int builtin_net_driver;
109
110 /* Specify timeout (in microseconds) between retries on RX. */
111 static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
112 /* Specify the number of retries on RX. */
113 static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
114
115 /* Socket file paths. Can be set by user */
116 static char *socket_files;
117 static int nb_sockets;
118
119 /* empty VMDq configuration structure. Filled in programmatically */
120 static struct rte_eth_conf vmdq_conf_default = {
121         .rxmode = {
122                 .mq_mode        = RTE_ETH_MQ_RX_VMDQ_ONLY,
123                 .split_hdr_size = 0,
124                 /*
125                  * VLAN strip is necessary for 1G NICs such as the I350;
126                  * without it, IPv4 forwarding in the guest cannot forward
127                  * packets from one virtio device to another.
128                  */
129                 .offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
130         },
131
132         .txmode = {
133                 .mq_mode = RTE_ETH_MQ_TX_NONE,
134                 .offloads = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
135                              RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
136                              RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
137                              RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
138                              RTE_ETH_TX_OFFLOAD_TCP_TSO),
139         },
140         .rx_adv_conf = {
141                 /*
142                  * should be overridden separately in code with
143                  * appropriate values
144                  */
145                 .vmdq_rx_conf = {
146                         .nb_queue_pools = RTE_ETH_8_POOLS,
147                         .enable_default_pool = 0,
148                         .default_pool = 0,
149                         .nb_pool_maps = 0,
150                         .pool_map = {{0, 0},},
151                 },
152         },
153 };
154
155
156 static unsigned lcore_ids[RTE_MAX_LCORE];
157 static uint16_t ports[RTE_MAX_ETHPORTS];
158 static unsigned num_ports = 0; /**< The number of ports specified in command line */
159 static uint16_t num_pf_queues, num_vmdq_queues;
160 static uint16_t vmdq_pool_base, vmdq_queue_base;
161 static uint16_t queues_per_pool;
162
163 const uint16_t vlan_tags[] = {
164         1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
165         1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
166         1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
167         1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
168         1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
169         1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
170         1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
171         1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
172 };
173
174 /* ethernet addresses of ports */
175 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
176
177 static struct vhost_dev_tailq_list vhost_dev_list =
178         TAILQ_HEAD_INITIALIZER(vhost_dev_list);
179
180 static struct lcore_info lcore_info[RTE_MAX_LCORE];
181
182 /* Used for queueing bursts of TX packets. */
183 struct mbuf_table {
184         unsigned len;
185         unsigned txq_id;
186         struct rte_mbuf *m_table[MAX_PKT_BURST];
187 };
188
189 struct vhost_bufftable {
190         uint32_t len;
191         uint64_t pre_tsc;
192         struct rte_mbuf *m_table[MAX_PKT_BURST];
193 };
194
195 /* TX queue for each data core. */
196 struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
197
198 /*
199  * Vhost TX buffer for each data core.
200  * Every data core maintains a TX buffer for every vhost device,
201  * which is used to enqueue packets in batches for higher performance.
202  */
203 struct vhost_bufftable *vhost_txbuff[RTE_MAX_LCORE * RTE_MAX_VHOST_DEVICE];
204
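/*
 * TX drain period converted to TSC cycles: TSC cycles per microsecond
 * (rounded up) multiplied by BURST_TX_DRAIN_US (~100us).
 */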
205 #define MBUF_TABLE_DRAIN_TSC    ((rte_get_tsc_hz() + US_PER_S - 1) \
206                                  / US_PER_S * BURST_TX_DRAIN_US)
207
208 static inline bool
209 is_dma_configured(int16_t dev_id)
210 {
211         int i;
212
213         for (i = 0; i < dma_count; i++)
214                 if (dmas_id[i] == dev_id)
215                         return true;
216         return false;
217 }
218
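/*
 * Parse the "--dmas" argument. Each entry has the form "txd<vid>@<dmadev name>"
 * and binds the named DMA device to the Rx queue of vhost device <vid>,
 * configuring and starting the DMA device the first time it is referenced.
 * Illustrative example (device names are hypothetical):
 *   --dmas [txd0@0000:00:04.0,txd1@0000:00:04.1]
 */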
219 static inline int
220 open_dma(const char *value)
221 {
222         struct dma_for_vhost *dma_info = dma_bind;
223         char *input = strndup(value, strlen(value) + 1);
224         char *addrs = input;
225         char *ptrs[2];
226         char *start, *end, *substr;
227         int64_t vid;
228
229         struct rte_dma_info info;
230         struct rte_dma_conf dev_config = { .nb_vchans = 1 };
231         struct rte_dma_vchan_conf qconf = {
232                 .direction = RTE_DMA_DIR_MEM_TO_MEM,
233                 .nb_desc = DMA_RING_SIZE
234         };
235
236         int dev_id;
237         int ret = 0;
238         uint16_t i = 0;
239         char *dma_arg[RTE_MAX_VHOST_DEVICE];
240         int args_nr;
241
242         while (isblank(*addrs))
243                 addrs++;
244         if (*addrs == '\0') {
245                 ret = -1;
246                 goto out;
247         }
248
249         /* process DMA devices within bracket. */
250         addrs++;
251         substr = strtok(addrs, ";]");
252         if (!substr) {
253                 ret = -1;
254                 goto out;
255         }
256
257         args_nr = rte_strsplit(substr, strlen(substr), dma_arg, RTE_MAX_VHOST_DEVICE, ',');
258         if (args_nr <= 0) {
259                 ret = -1;
260                 goto out;
261         }
262
263         while (i < args_nr) {
264                 char *arg_temp = dma_arg[i];
265                 uint8_t sub_nr;
266
267                 sub_nr = rte_strsplit(arg_temp, strlen(arg_temp), ptrs, 2, '@');
268                 if (sub_nr != 2) {
269                         ret = -1;
270                         goto out;
271                 }
272
273                 start = strstr(ptrs[0], "txd");
274                 if (start == NULL) {
275                         ret = -1;
276                         goto out;
277                 }
278
279                 start += 3;
280                 vid = strtol(start, &end, 0);
281                 if (end == start) {
282                         ret = -1;
283                         goto out;
284                 }
285
286                 dev_id = rte_dma_get_dev_id_by_name(ptrs[1]);
287                 if (dev_id < 0) {
288                         RTE_LOG(ERR, VHOST_CONFIG, "Failed to find DMA %s.\n", ptrs[1]);
289                         ret = -1;
290                         goto out;
291                 }
292
293                 /* DMA device is already configured, so skip */
294                 if (is_dma_configured(dev_id))
295                         goto done;
296
297                 if (rte_dma_info_get(dev_id, &info) != 0) {
298                         RTE_LOG(ERR, VHOST_CONFIG, "Error with rte_dma_info_get()\n");
299                         ret = -1;
300                         goto out;
301                 }
302
303                 if (info.max_vchans < 1) {
304                         RTE_LOG(ERR, VHOST_CONFIG, "No channels available on device %d\n", dev_id);
305                         ret = -1;
306                         goto out;
307                 }
308
309                 if (rte_dma_configure(dev_id, &dev_config) != 0) {
310                         RTE_LOG(ERR, VHOST_CONFIG, "Failed to configure DMA %d.\n", dev_id);
311                         ret = -1;
312                         goto out;
313                 }
314
315                 /* Check the max desc supported by DMA device */
316                 rte_dma_info_get(dev_id, &info);
317                 if (info.nb_vchans != 1) {
318                         RTE_LOG(ERR, VHOST_CONFIG, "No configured queues reported by DMA %d.\n",
319                                         dev_id);
320                         ret = -1;
321                         goto out;
322                 }
323
324                 qconf.nb_desc = RTE_MIN(DMA_RING_SIZE, info.max_desc);
325
326                 if (rte_dma_vchan_setup(dev_id, 0, &qconf) != 0) {
327                         RTE_LOG(ERR, VHOST_CONFIG, "Failed to set up DMA %d.\n", dev_id);
328                         ret = -1;
329                         goto out;
330                 }
331
332                 if (rte_dma_start(dev_id) != 0) {
333                         RTE_LOG(ERR, VHOST_CONFIG, "Failed to start DMA %d.\n", dev_id);
334                         ret = -1;
335                         goto out;
336                 }
337
338                 dmas_id[dma_count++] = dev_id;
339
340 done:
341                 (dma_info + vid)->dmas[VIRTIO_RXQ].dev_id = dev_id;
342                 i++;
343         }
344 out:
345         free(input);
346         return ret;
347 }
348
349 /*
350  * Builds up the correct configuration for VMDQ VLAN pool map
351  * according to the pool & queue limits.
352  */
353 static inline int
354 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
355 {
356         struct rte_eth_vmdq_rx_conf conf;
357         struct rte_eth_vmdq_rx_conf *def_conf =
358                 &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
359         unsigned i;
360
361         memset(&conf, 0, sizeof(conf));
362         conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
363         conf.nb_pool_maps = num_devices;
364         conf.enable_loop_back = def_conf->enable_loop_back;
365         conf.rx_mode = def_conf->rx_mode;
366
367         for (i = 0; i < conf.nb_pool_maps; i++) {
368                 conf.pool_map[i].vlan_id = vlan_tags[ i ];
369                 conf.pool_map[i].pools = (1UL << i);
370         }
371
372         (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
373         (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
374                    sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
375         return 0;
376 }
377
378 /*
379  * Initialises a given port using global settings and with the rx buffers
380  * coming from the global mbuf_pool.
381  */
382 static inline int
383 port_init(uint16_t port)
384 {
385         struct rte_eth_dev_info dev_info;
386         struct rte_eth_conf port_conf;
387         struct rte_eth_rxconf *rxconf;
388         struct rte_eth_txconf *txconf;
389         int16_t rx_rings, tx_rings;
390         uint16_t rx_ring_size, tx_ring_size;
391         int retval;
392         uint16_t q;
393
394         /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
395         retval = rte_eth_dev_info_get(port, &dev_info);
396         if (retval != 0) {
397                 RTE_LOG(ERR, VHOST_PORT,
398                         "Error during getting device (port %u) info: %s\n",
399                         port, strerror(-retval));
400
401                 return retval;
402         }
403
404         rxconf = &dev_info.default_rxconf;
405         txconf = &dev_info.default_txconf;
406         rxconf->rx_drop_en = 1;
407
408         /* Configure the number of supported virtio devices based on VMDQ limits. */
409         num_devices = dev_info.max_vmdq_pools;
410
411         rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
412         tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
413
414         tx_rings = (uint16_t)rte_lcore_count();
415
416         if (mergeable) {
417                 if (dev_info.max_mtu != UINT16_MAX && dev_info.max_rx_pktlen > dev_info.max_mtu)
418                         vmdq_conf_default.rxmode.mtu = dev_info.max_mtu;
419                 else
420                         vmdq_conf_default.rxmode.mtu = MAX_MTU;
421         }
422
423         /* Get port configuration. */
424         retval = get_eth_conf(&port_conf, num_devices);
425         if (retval < 0)
426                 return retval;
427         /* NIC queues are divided into pf queues and vmdq queues.  */
428         num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
429         queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
430         num_vmdq_queues = num_devices * queues_per_pool;
431         num_queues = num_pf_queues + num_vmdq_queues;
432         vmdq_queue_base = dev_info.vmdq_queue_base;
433         vmdq_pool_base  = dev_info.vmdq_pool_base;
434         printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
435                 num_pf_queues, num_devices, queues_per_pool);
436
437         if (!rte_eth_dev_is_valid_port(port))
438                 return -1;
439
440         rx_rings = (uint16_t)dev_info.max_rx_queues;
441         if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
442                 port_conf.txmode.offloads |=
443                         RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
444         /* Configure ethernet device. */
445         retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
446         if (retval != 0) {
447                 RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
448                         port, strerror(-retval));
449                 return retval;
450         }
451
452         retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
453                 &tx_ring_size);
454         if (retval != 0) {
455                 RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
456                         "for port %u: %s.\n", port, strerror(-retval));
457                 return retval;
458         }
459         if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
460                 RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
461                         "for Rx queues on port %u.\n", port);
462                 return -1;
463         }
464
465         /* Setup the queues. */
466         rxconf->offloads = port_conf.rxmode.offloads;
467         for (q = 0; q < rx_rings; q ++) {
468                 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
469                                                 rte_eth_dev_socket_id(port),
470                                                 rxconf,
471                                                 mbuf_pool);
472                 if (retval < 0) {
473                         RTE_LOG(ERR, VHOST_PORT,
474                                 "Failed to setup rx queue %u of port %u: %s.\n",
475                                 q, port, strerror(-retval));
476                         return retval;
477                 }
478         }
479         txconf->offloads = port_conf.txmode.offloads;
480         for (q = 0; q < tx_rings; q ++) {
481                 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
482                                                 rte_eth_dev_socket_id(port),
483                                                 txconf);
484                 if (retval < 0) {
485                         RTE_LOG(ERR, VHOST_PORT,
486                                 "Failed to setup tx queue %u of port %u: %s.\n",
487                                 q, port, strerror(-retval));
488                         return retval;
489                 }
490         }
491
492         /* Start the device. */
493         retval  = rte_eth_dev_start(port);
494         if (retval < 0) {
495                 RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
496                         port, strerror(-retval));
497                 return retval;
498         }
499
500         if (promiscuous) {
501                 retval = rte_eth_promiscuous_enable(port);
502                 if (retval != 0) {
503                         RTE_LOG(ERR, VHOST_PORT,
504                                 "Failed to enable promiscuous mode on port %u: %s\n",
505                                 port, rte_strerror(-retval));
506                         return retval;
507                 }
508         }
509
510         retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
511         if (retval < 0) {
512                 RTE_LOG(ERR, VHOST_PORT,
513                         "Failed to get MAC address on port %u: %s\n",
514                         port, rte_strerror(-retval));
515                 return retval;
516         }
517
518         RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
519         RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
520                 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
521                 port, RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));
522
523         return 0;
524 }
525
526 /*
527  * Set socket file path.
528  */
529 static int
530 us_vhost_parse_socket_path(const char *q_arg)
531 {
532         char *old;
533
534         /* Reject socket paths that do not fit within PATH_MAX. */
535         if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
536                 return -1;
537
538         old = socket_files;
539         socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
540         if (socket_files == NULL) {
541                 free(old);
542                 return -1;
543         }
544
545         strlcpy(socket_files + nb_sockets * PATH_MAX, q_arg, PATH_MAX);
546         nb_sockets++;
547
548         return 0;
549 }
550
551 /*
552  * Parse the portmask provided at run time.
553  */
554 static int
555 parse_portmask(const char *portmask)
556 {
557         char *end = NULL;
558         unsigned long pm;
559
560         errno = 0;
561
562         /* parse hexadecimal string */
563         pm = strtoul(portmask, &end, 16);
564         if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
565                 return 0;
566
567         return pm;
568
569 }
570
571 /*
572  * Parse num options at run time.
573  */
574 static int
575 parse_num_opt(const char *q_arg, uint32_t max_valid_value)
576 {
577         char *end = NULL;
578         unsigned long num;
579
580         errno = 0;
581
582         /* parse unsigned int string */
583         num = strtoul(q_arg, &end, 10);
584         if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
585                 return -1;
586
587         if (num > max_valid_value)
588                 return -1;
589
590         return num;
591
592 }
593
594 /*
595  * Display usage
596  */
597 static void
598 us_vhost_usage(const char *prgname)
599 {
600         RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
601         "               --vm2vm [0|1|2]\n"
602         "               --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
603         "               --socket-file <path>\n"
604         "               --nb-devices ND\n"
605         "               -p PORTMASK: Set mask for ports to be used by application\n"
606         "               --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
607         "               --rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if destination queue is full\n"
608         "               --rx-retry-delay [0-N]: timeout (in microseconds) between retries on Rx. This takes effect only if Rx retries are enabled\n"
609         "               --rx-retry-num [0-N]: the number of retries on Rx. This takes effect only if Rx retries are enabled\n"
610         "               --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
611         "               --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
612         "               --socket-file: The path of the socket file.\n"
613         "               --tx-csum [0|1] disable/enable TX checksum offload.\n"
614         "               --tso [0|1] disable/enable TCP segment offload.\n"
615         "               --client register a vhost-user socket as client mode.\n"
616         "               --dmas register dma channel for specific vhost device.\n"
617         "               --total-num-mbufs [0-N] set the number of mbufs to be allocated in mbuf pools, the default value is 147456.\n",
618                prgname);
619 }
620
621 enum {
622 #define OPT_VM2VM               "vm2vm"
623         OPT_VM2VM_NUM = 256,
624 #define OPT_RX_RETRY            "rx-retry"
625         OPT_RX_RETRY_NUM,
626 #define OPT_RX_RETRY_DELAY      "rx-retry-delay"
627         OPT_RX_RETRY_DELAY_NUM,
628 #define OPT_RX_RETRY_NUMB       "rx-retry-num"
629         OPT_RX_RETRY_NUMB_NUM,
630 #define OPT_MERGEABLE           "mergeable"
631         OPT_MERGEABLE_NUM,
632 #define OPT_STATS               "stats"
633         OPT_STATS_NUM,
634 #define OPT_SOCKET_FILE         "socket-file"
635         OPT_SOCKET_FILE_NUM,
636 #define OPT_TX_CSUM             "tx-csum"
637         OPT_TX_CSUM_NUM,
638 #define OPT_TSO                 "tso"
639         OPT_TSO_NUM,
640 #define OPT_CLIENT              "client"
641         OPT_CLIENT_NUM,
642 #define OPT_BUILTIN_NET_DRIVER  "builtin-net-driver"
643         OPT_BUILTIN_NET_DRIVER_NUM,
644 #define OPT_DMAS                "dmas"
645         OPT_DMAS_NUM,
646 #define OPT_NUM_MBUFS           "total-num-mbufs"
647         OPT_NUM_MBUFS_NUM,
648 };
649
650 /*
651  * Parse the arguments given in the command line of the application.
652  */
653 static int
654 us_vhost_parse_args(int argc, char **argv)
655 {
656         int opt, ret;
657         int option_index;
658         unsigned i;
659         const char *prgname = argv[0];
660         static struct option long_option[] = {
661                 {OPT_VM2VM, required_argument,
662                                 NULL, OPT_VM2VM_NUM},
663                 {OPT_RX_RETRY, required_argument,
664                                 NULL, OPT_RX_RETRY_NUM},
665                 {OPT_RX_RETRY_DELAY, required_argument,
666                                 NULL, OPT_RX_RETRY_DELAY_NUM},
667                 {OPT_RX_RETRY_NUMB, required_argument,
668                                 NULL, OPT_RX_RETRY_NUMB_NUM},
669                 {OPT_MERGEABLE, required_argument,
670                                 NULL, OPT_MERGEABLE_NUM},
671                 {OPT_STATS, required_argument,
672                                 NULL, OPT_STATS_NUM},
673                 {OPT_SOCKET_FILE, required_argument,
674                                 NULL, OPT_SOCKET_FILE_NUM},
675                 {OPT_TX_CSUM, required_argument,
676                                 NULL, OPT_TX_CSUM_NUM},
677                 {OPT_TSO, required_argument,
678                                 NULL, OPT_TSO_NUM},
679                 {OPT_CLIENT, no_argument,
680                                 NULL, OPT_CLIENT_NUM},
681                 {OPT_BUILTIN_NET_DRIVER, no_argument,
682                                 NULL, OPT_BUILTIN_NET_DRIVER_NUM},
683                 {OPT_DMAS, required_argument,
684                                 NULL, OPT_DMAS_NUM},
685                 {OPT_NUM_MBUFS, required_argument,
686                                 NULL, OPT_NUM_MBUFS_NUM},
687                 {NULL, 0, 0, 0},
688         };
689
690         /* Parse command line */
691         while ((opt = getopt_long(argc, argv, "p:P",
692                         long_option, &option_index)) != EOF) {
693                 switch (opt) {
694                 /* Portmask */
695                 case 'p':
696                         enabled_port_mask = parse_portmask(optarg);
697                         if (enabled_port_mask == 0) {
698                                 RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
699                                 us_vhost_usage(prgname);
700                                 return -1;
701                         }
702                         break;
703
704                 case 'P':
705                         promiscuous = 1;
706                         vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
707                                 RTE_ETH_VMDQ_ACCEPT_BROADCAST |
708                                 RTE_ETH_VMDQ_ACCEPT_MULTICAST;
709                         break;
710
711                 case OPT_VM2VM_NUM:
712                         ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
713                         if (ret == -1) {
714                                 RTE_LOG(INFO, VHOST_CONFIG,
715                                         "Invalid argument for "
716                                         "vm2vm [0|1|2]\n");
717                                 us_vhost_usage(prgname);
718                                 return -1;
719                         }
720                         vm2vm_mode = (vm2vm_type)ret;
721                         break;
722
723                 case OPT_RX_RETRY_NUM:
724                         ret = parse_num_opt(optarg, 1);
725                         if (ret == -1) {
726                                 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
727                                 us_vhost_usage(prgname);
728                                 return -1;
729                         }
730                         enable_retry = ret;
731                         break;
732
733                 case OPT_TX_CSUM_NUM:
734                         ret = parse_num_opt(optarg, 1);
735                         if (ret == -1) {
736                                 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
737                                 us_vhost_usage(prgname);
738                                 return -1;
739                         }
740                         enable_tx_csum = ret;
741                         break;
742
743                 case OPT_TSO_NUM:
744                         ret = parse_num_opt(optarg, 1);
745                         if (ret == -1) {
746                                 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
747                                 us_vhost_usage(prgname);
748                                 return -1;
749                         }
750                         enable_tso = ret;
751                         break;
752
753                 case OPT_RX_RETRY_DELAY_NUM:
754                         ret = parse_num_opt(optarg, INT32_MAX);
755                         if (ret == -1) {
756                                 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
757                                 us_vhost_usage(prgname);
758                                 return -1;
759                         }
760                         burst_rx_delay_time = ret;
761                         break;
762
763                 case OPT_RX_RETRY_NUMB_NUM:
764                         ret = parse_num_opt(optarg, INT32_MAX);
765                         if (ret == -1) {
766                                 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
767                                 us_vhost_usage(prgname);
768                                 return -1;
769                         }
770                         burst_rx_retry_num = ret;
771                         break;
772
773                 case OPT_MERGEABLE_NUM:
774                         ret = parse_num_opt(optarg, 1);
775                         if (ret == -1) {
776                                 RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
777                                 us_vhost_usage(prgname);
778                                 return -1;
779                         }
780                         mergeable = !!ret;
781                         break;
782
783                 case OPT_STATS_NUM:
784                         ret = parse_num_opt(optarg, INT32_MAX);
785                         if (ret == -1) {
786                                 RTE_LOG(INFO, VHOST_CONFIG,
787                                         "Invalid argument for stats [0..N]\n");
788                                 us_vhost_usage(prgname);
789                                 return -1;
790                         }
791                         enable_stats = ret;
792                         break;
793
794                 /* Set socket file path. */
795                 case OPT_SOCKET_FILE_NUM:
796                         if (us_vhost_parse_socket_path(optarg) == -1) {
797                                 RTE_LOG(INFO, VHOST_CONFIG,
798                                 "Invalid argument for socket name (Max %d characters)\n",
799                                 PATH_MAX);
800                                 us_vhost_usage(prgname);
801                                 return -1;
802                         }
803                         break;
804
805                 case OPT_DMAS_NUM:
806                         if (open_dma(optarg) == -1) {
807                                 RTE_LOG(INFO, VHOST_CONFIG,
808                                         "Wrong DMA args\n");
809                                 us_vhost_usage(prgname);
810                                 return -1;
811                         }
812                         break;
813
814                 case OPT_NUM_MBUFS_NUM:
815                         ret = parse_num_opt(optarg, INT32_MAX);
816                         if (ret == -1) {
817                                 RTE_LOG(INFO, VHOST_CONFIG,
818                                         "Invalid argument for total-num-mbufs [0..N]\n");
819                                 us_vhost_usage(prgname);
820                                 return -1;
821                         }
822
823                         if (total_num_mbufs < ret)
824                                 total_num_mbufs = ret;
825                         break;
826
827                 case OPT_CLIENT_NUM:
828                         client_mode = 1;
829                         break;
830
831                 case OPT_BUILTIN_NET_DRIVER_NUM:
832                         builtin_net_driver = 1;
833                         break;
834
835                 /* Invalid option - print options. */
836                 default:
837                         us_vhost_usage(prgname);
838                         return -1;
839                 }
840         }
841
842         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
843                 if (enabled_port_mask & (1 << i))
844                         ports[num_ports++] = i;
845         }
846
847         if ((num_ports ==  0) || (num_ports > MAX_SUP_PORTS)) {
848                 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
849                         "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
850                 return -1;
851         }
852
853         return 0;
854 }
855
856 /*
857  * Update the global variable num_ports and the ports[] array according to the
858  * number of ports in the system, and return the number of valid ports.
859  */
860 static unsigned check_ports_num(unsigned nb_ports)
861 {
862         unsigned valid_num_ports = num_ports;
863         unsigned portid;
864
865         if (num_ports > nb_ports) {
866                 RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
867                         num_ports, nb_ports);
868                 num_ports = nb_ports;
869         }
870
871         for (portid = 0; portid < num_ports; portid ++) {
872                 if (!rte_eth_dev_is_valid_port(ports[portid])) {
873                         RTE_LOG(INFO, VHOST_PORT,
874                                 "\nSpecified port ID(%u) is not valid\n",
875                                 ports[portid]);
876                         ports[portid] = INVALID_PORT_ID;
877                         valid_num_ports--;
878                 }
879         }
880         return valid_num_ports;
881 }
882
883 static __rte_always_inline struct vhost_dev *
884 find_vhost_dev(struct rte_ether_addr *mac)
885 {
886         struct vhost_dev *vdev;
887
888         TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
889                 if (vdev->ready == DEVICE_RX &&
890                     rte_is_same_ether_addr(mac, &vdev->mac_address))
891                         return vdev;
892         }
893
894         return NULL;
895 }
896
897 /*
898  * This function learns the MAC address of the device and registers it, along with a
899  * VLAN tag, with the VMDq pool.
900  */
901 static int
902 link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
903 {
904         struct rte_ether_hdr *pkt_hdr;
905         int i, ret;
906
907         /* Learn MAC address of guest device from packet */
908         pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
909
910         if (find_vhost_dev(&pkt_hdr->src_addr)) {
911                 RTE_LOG(ERR, VHOST_DATA,
912                         "(%d) device is using a registered MAC!\n",
913                         vdev->vid);
914                 return -1;
915         }
916
917         for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
918                 vdev->mac_address.addr_bytes[i] =
919                         pkt_hdr->src_addr.addr_bytes[i];
920
921         /* vlan_tag currently uses the device_id. */
922         vdev->vlan_tag = vlan_tags[vdev->vid];
923
924         /* Print out VMDQ registration info. */
925         RTE_LOG(INFO, VHOST_DATA,
926                 "(%d) mac " RTE_ETHER_ADDR_PRT_FMT " and vlan %d registered\n",
927                 vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address),
928                 vdev->vlan_tag);
929
930         /* Register the MAC address. */
931         ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
932                                 (uint32_t)vdev->vid + vmdq_pool_base);
933         if (ret)
934                 RTE_LOG(ERR, VHOST_DATA,
935                         "(%d) failed to add device MAC address to VMDQ\n",
936                         vdev->vid);
937
938         rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);
939
940         /* Set device as ready for RX. */
941         vdev->ready = DEVICE_RX;
942
943         return 0;
944 }
945
946 /*
947  * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
948  * queue before disabling RX on the device.
949  */
950 static inline void
951 unlink_vmdq(struct vhost_dev *vdev)
952 {
953         unsigned i = 0;
954         unsigned rx_count;
955         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
956
957         if (vdev->ready == DEVICE_RX) {
958                 /*clear MAC and VLAN settings*/
959                 rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
960                 for (i = 0; i < 6; i++)
961                         vdev->mac_address.addr_bytes[i] = 0;
962
963                 vdev->vlan_tag = 0;
964
965                 /*Clear out the receive buffers*/
966                 rx_count = rte_eth_rx_burst(ports[0],
967                                         (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
968
969                 while (rx_count) {
970                         for (i = 0; i < rx_count; i++)
971                                 rte_pktmbuf_free(pkts_burst[i]);
972
973                         rx_count = rte_eth_rx_burst(ports[0],
974                                         (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
975                 }
976
977                 vdev->ready = DEVICE_MAC_LEARNING;
978         }
979 }
980
981 static inline void
982 free_pkts(struct rte_mbuf **pkts, uint16_t n)
983 {
984         while (n--)
985                 rte_pktmbuf_free(pkts[n]);
986 }
987
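/*
 * Poll the vhost async channel for enqueue copies completed by the DMA
 * device, free the completed mbufs and decrement the in-flight counter.
 */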
988 static __rte_always_inline void
989 complete_async_pkts(struct vhost_dev *vdev)
990 {
991         struct rte_mbuf *p_cpl[MAX_PKT_BURST];
992         uint16_t complete_count;
993         int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
994
995         complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
996                                         VIRTIO_RXQ, p_cpl, MAX_PKT_BURST, dma_id, 0);
997         if (complete_count) {
998                 free_pkts(p_cpl, complete_count);
999                 __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
1000         }
1001
1002 }
1003
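/*
 * Enqueue a single packet directly to the destination vhost device's Rx
 * virtqueue (used here for the VM2VM broadcast path) and update the stats.
 */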
1004 static __rte_always_inline void
1005 sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
1006             struct rte_mbuf *m)
1007 {
1008         uint16_t ret;
1009
1010         if (builtin_net_driver) {
1011                 ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
1012         } else {
1013                 ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
1014         }
1015
1016         if (enable_stats) {
1017                 __atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
1018                                 __ATOMIC_SEQ_CST);
1019                 __atomic_add_fetch(&dst_vdev->stats.rx_atomic, ret,
1020                                 __ATOMIC_SEQ_CST);
1021                 src_vdev->stats.tx_total++;
1022                 src_vdev->stats.tx += ret;
1023         }
1024 }
1025
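/*
 * Flush the calling lcore's TX buffer for the given vhost device: enqueue
 * the buffered mbufs to its Rx virtqueue via the builtin net driver, the
 * async (DMA) path or the plain sync path; mbufs are freed here except
 * those still owned by the async path.
 */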
1026 static __rte_always_inline void
1027 drain_vhost(struct vhost_dev *vdev)
1028 {
1029         uint16_t ret;
1030         uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
1031         uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
1032         struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
1033
1034         if (builtin_net_driver) {
1035                 ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
1036         } else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
1037                 uint16_t enqueue_fail = 0;
1038                 int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
1039
1040                 complete_async_pkts(vdev);
1041                 ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
1042                 __atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
1043
1044                 enqueue_fail = nr_xmit - ret;
1045                 if (enqueue_fail)
1046                         free_pkts(&m[ret], nr_xmit - ret);
1047         } else {
1048                 ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1049                                                 m, nr_xmit);
1050         }
1051
1052         if (enable_stats) {
1053                 __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
1054                                 __ATOMIC_SEQ_CST);
1055                 __atomic_add_fetch(&vdev->stats.rx_atomic, ret,
1056                                 __ATOMIC_SEQ_CST);
1057         }
1058
1059         if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
1060                 free_pkts(m, nr_xmit);
1061 }
1062
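/*
 * Walk all vhost devices and flush any of this lcore's per-device TX
 * buffers that have been pending longer than the drain period.
 */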
1063 static __rte_always_inline void
1064 drain_vhost_table(void)
1065 {
1066         uint16_t lcore_id = rte_lcore_id();
1067         struct vhost_bufftable *vhost_txq;
1068         struct vhost_dev *vdev;
1069         uint64_t cur_tsc;
1070
1071         TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1072                 if (unlikely(vdev->remove == 1))
1073                         continue;
1074
1075                 vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + vdev->vid];
1076
1077                 cur_tsc = rte_rdtsc();
1078                 if (unlikely(cur_tsc - vhost_txq->pre_tsc
1079                                 > MBUF_TABLE_DRAIN_TSC)) {
1080                         RTE_LOG_DP(DEBUG, VHOST_DATA,
1081                                 "Vhost TX queue drained after timeout with burst size %u\n",
1082                                 vhost_txq->len);
1083                         drain_vhost(vdev);
1084                         vhost_txq->len = 0;
1085                         vhost_txq->pre_tsc = cur_tsc;
1086                 }
1087         }
1088 }
1089
1090 /*
1091  * Check if the packet destination MAC address is for a local device. If so, put
1092  * the packet on that device's Rx queue. If not, return.
1093  */
1094 static __rte_always_inline int
1095 virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
1096 {
1097         struct rte_ether_hdr *pkt_hdr;
1098         struct vhost_dev *dst_vdev;
1099         struct vhost_bufftable *vhost_txq;
1100         uint16_t lcore_id = rte_lcore_id();
1101         pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1102
1103         dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
1104         if (!dst_vdev)
1105                 return -1;
1106
1107         if (vdev->vid == dst_vdev->vid) {
1108                 RTE_LOG_DP(DEBUG, VHOST_DATA,
1109                         "(%d) TX: src and dst MAC is same. Dropping packet.\n",
1110                         vdev->vid);
1111                 return 0;
1112         }
1113
1114         RTE_LOG_DP(DEBUG, VHOST_DATA,
1115                 "(%d) TX: MAC address is local\n", dst_vdev->vid);
1116
1117         if (unlikely(dst_vdev->remove)) {
1118                 RTE_LOG_DP(DEBUG, VHOST_DATA,
1119                         "(%d) device is marked for removal\n", dst_vdev->vid);
1120                 return 0;
1121         }
1122
1123         vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + dst_vdev->vid];
1124         vhost_txq->m_table[vhost_txq->len++] = m;
1125
1126         if (enable_stats) {
1127                 vdev->stats.tx_total++;
1128                 vdev->stats.tx++;
1129         }
1130
1131         if (unlikely(vhost_txq->len == MAX_PKT_BURST)) {
1132                 drain_vhost(dst_vdev);
1133                 vhost_txq->len = 0;
1134                 vhost_txq->pre_tsc = rte_rdtsc();
1135         }
1136         return 0;
1137 }
1138
1139 /*
1140  * Check if the destination MAC of a packet belongs to a local VM; if it does,
1141  * return its VLAN tag and the length offset to apply.
1142  */
1143 static __rte_always_inline int
1144 find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
1145         uint32_t *offset, uint16_t *vlan_tag)
1146 {
1147         struct vhost_dev *dst_vdev;
1148         struct rte_ether_hdr *pkt_hdr =
1149                 rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1150
1151         dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
1152         if (!dst_vdev)
1153                 return 0;
1154
1155         if (vdev->vid == dst_vdev->vid) {
1156                 RTE_LOG_DP(DEBUG, VHOST_DATA,
1157                         "(%d) TX: src and dst MAC is same. Dropping packet.\n",
1158                         vdev->vid);
1159                 return -1;
1160         }
1161
1162         /*
1163          * HW VLAN strip reduces the packet length by the length
1164          * of the VLAN tag, so the packet length needs to be
1165          * restored by adding it back.
1166          */
1167         *offset  = RTE_VLAN_HLEN;
1168         *vlan_tag = vlan_tags[vdev->vid];
1169
1170         RTE_LOG_DP(DEBUG, VHOST_DATA,
1171                 "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
1172                 vdev->vid, dst_vdev->vid, *vlan_tag);
1173
1174         return 0;
1175 }
1176
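/*
 * Prepare an LRO/TSO packet for transmission on the physical port: parse
 * the header lengths, request TCP segmentation offload and pre-compute the
 * pseudo-header checksum expected by the NIC.
 */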
1177 static void virtio_tx_offload(struct rte_mbuf *m)
1178 {
1179         struct rte_net_hdr_lens hdr_lens;
1180         struct rte_ipv4_hdr *ipv4_hdr;
1181         struct rte_tcp_hdr *tcp_hdr;
1182         uint32_t ptype;
1183         void *l3_hdr;
1184
1185         ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1186         m->l2_len = hdr_lens.l2_len;
1187         m->l3_len = hdr_lens.l3_len;
1188         m->l4_len = hdr_lens.l4_len;
1189
1190         l3_hdr = rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
1191         tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
1192                 m->l2_len + m->l3_len);
1193
1194         m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
1195         if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
1196                 m->ol_flags |= RTE_MBUF_F_TX_IPV4;
1197                 m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
1198                 ipv4_hdr = l3_hdr;
1199                 ipv4_hdr->hdr_checksum = 0;
1200                 tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
1201         } else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
1202                 m->ol_flags |= RTE_MBUF_F_TX_IPV6;
1203                 tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
1204         }
1205 }
1206
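/*
 * Transmit the buffered burst on the physical port and free any packets
 * the driver did not accept.
 */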
1207 static __rte_always_inline void
1208 do_drain_mbuf_table(struct mbuf_table *tx_q)
1209 {
1210         uint16_t count;
1211
1212         count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
1213                                  tx_q->m_table, tx_q->len);
1214         if (unlikely(count < tx_q->len))
1215                 free_pkts(&tx_q->m_table[count], tx_q->len - count);
1216
1217         tx_q->len = 0;
1218 }
1219
1220 /*
1221  * This function routes the TX packet to the correct interface. This
1222  * may be a local device or the physical port.
1223  */
1224 static __rte_always_inline void
1225 virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
1226 {
1227         struct mbuf_table *tx_q;
1228         unsigned offset = 0;
1229         const uint16_t lcore_id = rte_lcore_id();
1230         struct rte_ether_hdr *nh;
1231
1232
1233         nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1234         if (unlikely(rte_is_broadcast_ether_addr(&nh->dst_addr))) {
1235                 struct vhost_dev *vdev2;
1236
1237                 TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
1238                         if (vdev2 != vdev)
1239                                 sync_virtio_xmit(vdev2, vdev, m);
1240                 }
1241                 goto queue2nic;
1242         }
1243
1244         /* Check if destination is a local VM. */
1245         if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0))
1246                 return;
1247
1248         if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
1249                 if (unlikely(find_local_dest(vdev, m, &offset,
1250                                              &vlan_tag) != 0)) {
1251                         rte_pktmbuf_free(m);
1252                         return;
1253                 }
1254         }
1255
1256         RTE_LOG_DP(DEBUG, VHOST_DATA,
1257                 "(%d) TX: MAC address is external\n", vdev->vid);
1258
1259 queue2nic:
1260
1261         /*Add packet to the port tx queue*/
1262         tx_q = &lcore_tx_queue[lcore_id];
1263
1264         nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1265         if (unlikely(nh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
1266                 /* Guest has inserted the vlan tag. */
1267                 struct rte_vlan_hdr *vh = (struct rte_vlan_hdr *) (nh + 1);
1268                 uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
1269                 if ((vm2vm_mode == VM2VM_HARDWARE) &&
1270                         (vh->vlan_tci != vlan_tag_be))
1271                         vh->vlan_tci = vlan_tag_be;
1272         } else {
1273                 m->ol_flags |= RTE_MBUF_F_TX_VLAN;
1274
1275                 /*
1276                  * Find the right seg to adjust the data len when offset is
1277                  * bigger than tail room size.
1278                  */
1279                 if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
1280                         if (likely(offset <= rte_pktmbuf_tailroom(m)))
1281                                 m->data_len += offset;
1282                         else {
1283                                 struct rte_mbuf *seg = m;
1284
1285                                 while ((seg->next != NULL) &&
1286                                         (offset > rte_pktmbuf_tailroom(seg)))
1287                                         seg = seg->next;
1288
1289                                 seg->data_len += offset;
1290                         }
1291                         m->pkt_len += offset;
1292                 }
1293
1294                 m->vlan_tci = vlan_tag;
1295         }
1296
1297         if (m->ol_flags & RTE_MBUF_F_RX_LRO)
1298                 virtio_tx_offload(m);
1299
1300         tx_q->m_table[tx_q->len++] = m;
1301         if (enable_stats) {
1302                 vdev->stats.tx_total++;
1303                 vdev->stats.tx++;
1304         }
1305
1306         if (unlikely(tx_q->len == MAX_PKT_BURST))
1307                 do_drain_mbuf_table(tx_q);
1308 }
1309
1310
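/*
 * Flush this lcore's physical-port TX buffer if it has not been drained
 * within the last MBUF_TABLE_DRAIN_TSC cycles (~100us).
 */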
1311 static __rte_always_inline void
1312 drain_mbuf_table(struct mbuf_table *tx_q)
1313 {
1314         static uint64_t prev_tsc;
1315         uint64_t cur_tsc;
1316
1317         if (tx_q->len == 0)
1318                 return;
1319
1320         cur_tsc = rte_rdtsc();
1321         if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1322                 prev_tsc = cur_tsc;
1323
1324                 RTE_LOG_DP(DEBUG, VHOST_DATA,
1325                         "TX queue drained after timeout with burst size %u\n",
1326                         tx_q->len);
1327                 do_drain_mbuf_table(tx_q);
1328         }
1329 }
1330
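/*
 * Receive a burst from the VMDq queue bound to this vhost device on the
 * physical port and enqueue it to the device's Rx virtqueue, optionally
 * retrying while the virtqueue lacks free entries.
 */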
1331 static __rte_always_inline void
1332 drain_eth_rx(struct vhost_dev *vdev)
1333 {
1334         uint16_t rx_count, enqueue_count;
1335         struct rte_mbuf *pkts[MAX_PKT_BURST];
1336
1337         rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1338                                     pkts, MAX_PKT_BURST);
1339
1340         if (!rx_count)
1341                 return;
1342
1343         /*
1344          * When "enable_retry" is set, wait and retry when there are
1345          * not enough free slots in the virtqueue to hold @rx_count
1346          * packets, to reduce packet loss.
1347          */
1348         if (enable_retry &&
1349             unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1350                         VIRTIO_RXQ))) {
1351                 uint32_t retry;
1352
1353                 for (retry = 0; retry < burst_rx_retry_num; retry++) {
1354                         rte_delay_us(burst_rx_delay_time);
1355                         if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1356                                         VIRTIO_RXQ))
1357                                 break;
1358                 }
1359         }
1360
1361         if (builtin_net_driver) {
1362                 enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
1363                                                 pkts, rx_count);
1364         } else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
1365                 uint16_t enqueue_fail = 0;
1366                 int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
1367
1368                 complete_async_pkts(vdev);
1369                 enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
1370                                         VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
1371                 __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
1372
1373                 enqueue_fail = rx_count - enqueue_count;
1374                 if (enqueue_fail)
1375                         free_pkts(&pkts[enqueue_count], enqueue_fail);
1376
1377         } else {
1378                 enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1379                                                 pkts, rx_count);
1380         }
1381
1382         if (enable_stats) {
1383                 __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
1384                                 __ATOMIC_SEQ_CST);
1385                 __atomic_add_fetch(&vdev->stats.rx_atomic, enqueue_count,
1386                                 __ATOMIC_SEQ_CST);
1387         }
1388
1389         if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
1390                 free_pkts(pkts, rx_count);
1391 }
1392
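/*
 * Dequeue a burst from the guest's Tx virtqueue and route each packet to
 * another vhost device or to the physical port; the first packet also
 * triggers MAC learning / VMDq registration for the device.
 */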
1393 static __rte_always_inline void
1394 drain_virtio_tx(struct vhost_dev *vdev)
1395 {
1396         struct rte_mbuf *pkts[MAX_PKT_BURST];
1397         uint16_t count;
1398         uint16_t i;
1399
1400         if (builtin_net_driver) {
1401                 count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
1402                                         pkts, MAX_PKT_BURST);
1403         } else {
1404                 count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
1405                                         mbuf_pool, pkts, MAX_PKT_BURST);
1406         }
1407
1408         /* setup VMDq for the first packet */
1409         if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1410                 if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
1411                         free_pkts(pkts, count);
1412         }
1413
1414         for (i = 0; i < count; ++i)
1415                 virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1416 }
1417
1418 /*
1419  * Main function of vhost-switch. It basically does:
1420  *
1421  * for each vhost device {
1422  *    - drain_eth_rx()
1423  *
1424  *      Which drains the host eth Rx queue linked to the vhost device,
1425  *      and delivers the packets to the guest virtio Rx ring associated with
1426  *      this vhost device.
1427  *
1428  *    - drain_virtio_tx()
1429  *
1430  *      Which drains the guest virtio Tx queue and delivers the packets
1431  *      to the target, which could be another vhost device, or the
1432  *      physical eth dev. The route is done in function "virtio_tx_route".
1433  * }
1434  */
1435 static int
1436 switch_worker(void *arg __rte_unused)
1437 {
1438         unsigned i;
1439         unsigned lcore_id = rte_lcore_id();
1440         struct vhost_dev *vdev;
1441         struct mbuf_table *tx_q;
1442
1443         RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1444
1445         tx_q = &lcore_tx_queue[lcore_id];
1446         for (i = 0; i < rte_lcore_count(); i++) {
1447                 if (lcore_ids[i] == lcore_id) {
1448                         tx_q->txq_id = i;
1449                         break;
1450                 }
1451         }
1452
1453         while(1) {
1454                 drain_mbuf_table(tx_q);
1455                 drain_vhost_table();
1456                 /*
1457                  * Inform the configuration core that we have exited the
1458                  * linked list and that no devices are in use if requested.
1459                  */
1460                 if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1461                         lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1462
1463                 /*
1464                  * Process vhost devices
1465                  */
1466                 TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1467                               lcore_vdev_entry) {
1468                         if (unlikely(vdev->remove)) {
1469                                 unlink_vmdq(vdev);
1470                                 vdev->ready = DEVICE_SAFE_REMOVE;
1471                                 continue;
1472                         }
1473
1474                         if (likely(vdev->ready == DEVICE_RX))
1475                                 drain_eth_rx(vdev);
1476
1477                         if (likely(!vdev->remove))
1478                                 drain_virtio_tx(vdev);
1479                 }
1480         }
1481
1482         return 0;
1483 }
1484
1485 /*
1486  * Remove a device from the specific data core linked list and from the
1487  * main linked list. Synchronization occurs through the use of the
1488  * lcore dev_removal_flag. The device is made volatile here to avoid re-ordering
1489  * of dev->remove=1, which can cause an infinite loop in the rte_pause loop.
1490  */
1491 static void
1492 destroy_device(int vid)
1493 {
1494         struct vhost_dev *vdev = NULL;
1495         int lcore;
1496         uint16_t i;
1497
1498         TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1499                 if (vdev->vid == vid)
1500                         break;
1501         }
1502         if (!vdev)
1503                 return;
1504         /* Set the remove flag. */
1505         vdev->remove = 1;
1506         while (vdev->ready != DEVICE_SAFE_REMOVE) {
1507                 rte_pause();
1508         }
1509
1510         for (i = 0; i < RTE_MAX_LCORE; i++)
1511                 rte_free(vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]);
1512
1513         if (builtin_net_driver)
1514                 vs_vhost_net_remove(vdev);
1515
1516         TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1517                      lcore_vdev_entry);
1518         TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1519
1520
1521         /* Set the dev_removal_flag on each lcore. */
1522         RTE_LCORE_FOREACH_WORKER(lcore)
1523                 lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1524
1525         /*
1526          * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1527          * we can be sure that they can no longer access the device removed
1528          * from the linked lists and that the devices are no longer in use.
1529          */
1530         RTE_LCORE_FOREACH_WORKER(lcore) {
1531                 while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1532                         rte_pause();
1533         }
1534
1535         lcore_info[vdev->coreid].device_num--;
1536
1537         RTE_LOG(INFO, VHOST_DATA,
1538                 "(%d) device has been removed from data core\n",
1539                 vdev->vid);
1540
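             /*
              * If the async data path was enabled on the Rx queue, wait for all
              * in-flight packets to be completed by the DMA device and free them
              * before unregistering the async channel.
              */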
1541         if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
1542                 uint16_t n_pkt = 0;
1543                 int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
1544                 struct rte_mbuf *m_cpl[vdev->pkts_inflight];
1545
1546                 while (vdev->pkts_inflight) {
1547                         n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
1548                                                 m_cpl, vdev->pkts_inflight, dma_id, 0);
1549                         free_pkts(m_cpl, n_pkt);
1550                         __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
1551                 }
1552
1553                 rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
1554                 dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
1555         }
1556
1557         rte_free(vdev);
1558 }
1559
1560 /*
1561  * A new device is added to a data core. First the device is added to the main linked list
1562  * and then allocated to a specific data core.
1563  */
1564 static int
1565 new_device(int vid)
1566 {
1567         int lcore, core_add = 0;
1568         uint16_t i;
1569         uint32_t device_num_min = num_devices;
1570         struct vhost_dev *vdev;
1571         vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1572         if (vdev == NULL) {
1573                 RTE_LOG(INFO, VHOST_DATA,
1574                         "(%d) couldn't allocate memory for vhost dev\n",
1575                         vid);
1576                 return -1;
1577         }
1578         vdev->vid = vid;
1579
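             /*
              * Allocate a Tx buffer table entry for every (lcore, device) pair;
              * it is used to batch packets destined for this vhost device.
              */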
1580         for (i = 0; i < RTE_MAX_LCORE; i++) {
1581                 vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid]
1582                         = rte_zmalloc("vhost bufftable",
1583                                 sizeof(struct vhost_bufftable),
1584                                 RTE_CACHE_LINE_SIZE);
1585
1586                 if (vhost_txbuff[i * RTE_MAX_VHOST_DEVICE + vid] == NULL) {
1587                         RTE_LOG(INFO, VHOST_DATA,
1588                           "(%d) couldn't allocate memory for vhost TX\n", vid);
1589                         return -1;
1590                 }
1591         }
1592
1593         if (builtin_net_driver)
1594                 vs_vhost_net_setup(vdev);
1595
1596         TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
1597         vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1598
1599         /* Reset the ready flag. */
1600         vdev->ready = DEVICE_MAC_LEARNING;
1601         vdev->remove = 0;
1602
1603         /* Find a suitable lcore to add the device. */
1604         RTE_LCORE_FOREACH_WORKER(lcore) {
1605                 if (lcore_info[lcore].device_num < device_num_min) {
1606                         device_num_min = lcore_info[lcore].device_num;
1607                         core_add = lcore;
1608                 }
1609         }
1610         vdev->coreid = core_add;
1611
1612         TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1613                           lcore_vdev_entry);
1614         lcore_info[vdev->coreid].device_num++;
1615
1616         /* Disable notifications. */
1617         rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1618         rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1619
1620         RTE_LOG(INFO, VHOST_DATA,
1621                 "(%d) device has been added to data core %d\n",
1622                 vid, vdev->coreid);
1623
1624         if (dma_bind[vid].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
1625                 int ret;
1626
1627                 ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
1628                 if (ret == 0)
1629                         dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = true;
1630                 return ret;
1631         }
1632
1633         return 0;
1634 }
1635
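     /*
      * Callback invoked when the guest enables or disables a vring. When the Rx
      * queue is disabled and the async data path is in use, clear all packets
      * still in flight on the DMA device so no completions are left pending for
      * a stopped queue.
      */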
1636 static int
1637 vring_state_changed(int vid, uint16_t queue_id, int enable)
1638 {
1639         struct vhost_dev *vdev = NULL;
1640
1641         TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1642                 if (vdev->vid == vid)
1643                         break;
1644         }
1645         if (!vdev)
1646                 return -1;
1647
1648         if (queue_id != VIRTIO_RXQ)
1649                 return 0;
1650
1651         if (dma_bind[vid].dmas[queue_id].async_enabled) {
1652                 if (!enable) {
1653                         uint16_t n_pkt = 0;
1654                         int16_t dma_id = dma_bind[vid].dmas[VIRTIO_RXQ].dev_id;
1655                         struct rte_mbuf *m_cpl[vdev->pkts_inflight];
1656
1657                         while (vdev->pkts_inflight) {
1658                                 n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
1659                                                         m_cpl, vdev->pkts_inflight, dma_id, 0);
1660                                 free_pkts(m_cpl, n_pkt);
1661                                 __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
1662                         }
1663                 }
1664         }
1665
1666         return 0;
1667 }
1668
1669 /*
1670  * These callbacks allow devices to be added to the data core when the
1671  * configuration has been fully completed.
1672  */
1673 static const struct rte_vhost_device_ops virtio_net_device_ops =
1674 {
1675         .new_device =  new_device,
1676         .destroy_device = destroy_device,
1677         .vring_state_changed = vring_state_changed,
1678 };
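
     /*
      * These ops are hooked up per vhost-user socket in main(), e.g.:
      *
      *     rte_vhost_driver_callback_register(file, &virtio_net_device_ops);
      *
      * before rte_vhost_driver_start(file) is called.
      */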
1679
1680 /*
1681  * This thread wakes up periodically to print stats if the user has
1682  * enabled them.
1683  */
1684 static void *
1685 print_stats(__rte_unused void *arg)
1686 {
1687         struct vhost_dev *vdev;
1688         uint64_t tx_dropped, rx_dropped;
1689         uint64_t tx, tx_total, rx, rx_total;
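             /* VT100/ANSI escape sequences used to clear the screen and home the cursor. */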
1690         const char clr[] = { 27, '[', '2', 'J', '\0' };
1691         const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
1692
1693         while (1) {
1694                 sleep(enable_stats);
1695
1696                 /* Clear screen and move to top left */
1697                 printf("%s%s\n", clr, top_left);
1698                 printf("Device statistics =================================\n");
1699
1700                 TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1701                         tx_total   = vdev->stats.tx_total;
1702                         tx         = vdev->stats.tx;
1703                         tx_dropped = tx_total - tx;
1704
1705                         rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
1706                                 __ATOMIC_SEQ_CST);
1707                         rx         = __atomic_load_n(&vdev->stats.rx_atomic,
1708                                 __ATOMIC_SEQ_CST);
1709                         rx_dropped = rx_total - rx;
1710
1711                         printf("Statistics for device %d\n"
1712                                 "-----------------------\n"
1713                                 "TX total:              %" PRIu64 "\n"
1714                                 "TX dropped:            %" PRIu64 "\n"
1715                                 "TX successful:         %" PRIu64 "\n"
1716                                 "RX total:              %" PRIu64 "\n"
1717                                 "RX dropped:            %" PRIu64 "\n"
1718                                 "RX successful:         %" PRIu64 "\n",
1719                                 vdev->vid,
1720                                 tx_total, tx_dropped, tx,
1721                                 rx_total, rx_dropped, rx);
1722                 }
1723
1724                 printf("===================================================\n");
1725
1726                 fflush(stdout);
1727         }
1728
1729         return NULL;
1730 }
1731
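     /*
      * Unregister the vhost driver for each of the 'socket_num' socket files
      * that have been registered so far.
      */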
1732 static void
1733 unregister_drivers(int socket_num)
1734 {
1735         int i, ret;
1736
1737         for (i = 0; i < socket_num; i++) {
1738                 ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
1739                 if (ret != 0)
1740                         RTE_LOG(ERR, VHOST_CONFIG,
1741                                 "Fail to unregister vhost driver for %s.\n",
1742                                 socket_files + i * PATH_MAX);
1743         }
1744 }
1745
1746 /* When we receive an INT signal, unregister the vhost driver. */
1747 static void
1748 sigint_handler(__rte_unused int signum)
1749 {
1750         /* Unregister vhost driver. */
1751         unregister_drivers(nb_sockets);
1752
1753         exit(0);
1754 }
1755
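     /*
      * Mark every per-device DMA binding and every entry of the global DMA id
      * table as invalid, so queues stay on the synchronous data path unless a
      * DMA device is bound to them later.
      */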
1756 static void
1757 reset_dma(void)
1758 {
1759         int i;
1760
1761         for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
1762                 int j;
1763
1764                 for (j = 0; j < RTE_MAX_QUEUES_PER_PORT * 2; j++) {
1765                         dma_bind[i].dmas[j].dev_id = INVALID_DMA_ID;
1766                         dma_bind[i].dmas[j].async_enabled = false;
1767                 }
1768         }
1769
1770         for (i = 0; i < RTE_DMADEV_DEFAULT_MAX; i++)
1771                 dmas_id[i] = INVALID_DMA_ID;
1772 }
1773
1774 /*
1775  * Main function, does initialisation and calls the per-lcore functions.
1776  */
1777 int
1778 main(int argc, char *argv[])
1779 {
1780         unsigned lcore_id, core_id = 0;
1781         unsigned nb_ports, valid_num_ports;
1782         int ret, i;
1783         uint16_t portid;
1784         static pthread_t tid;
1785         uint64_t flags = RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
1786
1787         signal(SIGINT, sigint_handler);
1788
1789         /* init EAL */
1790         ret = rte_eal_init(argc, argv);
1791         if (ret < 0)
1792                 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1793         argc -= ret;
1794         argv += ret;
1795
1796         /* initialize dma structures */
1797         reset_dma();
1798
1799         /* parse app arguments */
1800         ret = us_vhost_parse_args(argc, argv);
1801         if (ret < 0)
1802                 rte_exit(EXIT_FAILURE, "Invalid argument\n");
1803
1804         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1805                 TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1806
1807                 if (rte_lcore_is_enabled(lcore_id))
1808                         lcore_ids[core_id++] = lcore_id;
1809         }
1810
1811         if (rte_lcore_count() > RTE_MAX_LCORE)
1812                 rte_exit(EXIT_FAILURE,"Not enough cores\n");
1813
1814         /* Get the number of physical ports. */
1815         nb_ports = rte_eth_dev_count_avail();
1816
1817         /*
1818          * Update the global variable num_ports and the global array ports,
1819          * and get valid_num_ports according to the number of ports on the system.
1820          */
1821         valid_num_ports = check_ports_num(nb_ports);
1822
1823         if ((valid_num_ports ==  0) || (valid_num_ports > MAX_SUP_PORTS)) {
1824                 RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1825                         "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1826                 return -1;
1827         }
1828
1829         /*
1830          * FIXME: here we are trying to allocate mbufs big enough for
1831          * @MAX_QUEUES, but the truth is we're never going to use that
1832          * many queues here. We probably should only do allocation for
1833          * those queues we are going to use.
1834          */
1835         mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", total_num_mbufs,
1836                                             MBUF_CACHE_SIZE, 0, MBUF_DATA_SIZE,
1837                                             rte_socket_id());
1838         if (mbuf_pool == NULL)
1839                 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1840
1841         if (vm2vm_mode == VM2VM_HARDWARE) {
1842                 /* Enable VT loop back to let L2 switch to do it. */
1843                 vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1844                 RTE_LOG(DEBUG, VHOST_CONFIG,
1845                         "Enable loop back for L2 switch in vmdq.\n");
1846         }
1847
1848         /* initialize all ports */
1849         RTE_ETH_FOREACH_DEV(portid) {
1850                 /* skip ports that are not enabled */
1851                 if ((enabled_port_mask & (1 << portid)) == 0) {
1852                         RTE_LOG(INFO, VHOST_PORT,
1853                                 "Skipping disabled port %d\n", portid);
1854                         continue;
1855                 }
1856                 if (port_init(portid) != 0)
1857                         rte_exit(EXIT_FAILURE,
1858                                 "Cannot initialize network ports\n");
1859         }
1860
1861         /* Enable stats if the user option is set. */
1862         if (enable_stats) {
1863                 ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
1864                                         print_stats, NULL);
1865                 if (ret < 0)
1866                         rte_exit(EXIT_FAILURE,
1867                                 "Cannot create print-stats thread\n");
1868         }
1869
1870         /* Launch all data cores. */
1871         RTE_LCORE_FOREACH_WORKER(lcore_id)
1872                 rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1873
1874         if (client_mode)
1875                 flags |= RTE_VHOST_USER_CLIENT;
1876
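             /* Configure each DMA device used by the vhost async data path. */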
1877         for (i = 0; i < dma_count; i++) {
1878                 if (rte_vhost_async_dma_configure(dmas_id[i], 0) < 0) {
1879                         RTE_LOG(ERR, VHOST_PORT, "Failed to configure DMA in vhost.\n");
1880                         rte_exit(EXIT_FAILURE, "Cannot use given DMA device\n");
1881                 }
1882         }
1883
1884         /* Register vhost user driver to handle vhost messages. */
1885         for (i = 0; i < nb_sockets; i++) {
1886                 char *file = socket_files + i * PATH_MAX;
1887
1888                 if (dma_count)
1889                         flags |= RTE_VHOST_USER_ASYNC_COPY;
1890
1891                 ret = rte_vhost_driver_register(file, flags);
1892                 if (ret != 0) {
1893                         unregister_drivers(i);
1894                         rte_exit(EXIT_FAILURE,
1895                                 "vhost driver register failure.\n");
1896                 }
1897
1898                 if (builtin_net_driver)
1899                         rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
1900
1901                 if (mergeable == 0) {
1902                         rte_vhost_driver_disable_features(file,
1903                                 1ULL << VIRTIO_NET_F_MRG_RXBUF);
1904                 }
1905
1906                 if (enable_tx_csum == 0) {
1907                         rte_vhost_driver_disable_features(file,
1908                                 1ULL << VIRTIO_NET_F_CSUM);
1909                 }
1910
1911                 if (enable_tso == 0) {
1912                         rte_vhost_driver_disable_features(file,
1913                                 1ULL << VIRTIO_NET_F_HOST_TSO4);
1914                         rte_vhost_driver_disable_features(file,
1915                                 1ULL << VIRTIO_NET_F_HOST_TSO6);
1916                         rte_vhost_driver_disable_features(file,
1917                                 1ULL << VIRTIO_NET_F_GUEST_TSO4);
1918                         rte_vhost_driver_disable_features(file,
1919                                 1ULL << VIRTIO_NET_F_GUEST_TSO6);
1920                 }
1921
1922                 if (promiscuous) {
1923                         rte_vhost_driver_enable_features(file,
1924                                 1ULL << VIRTIO_NET_F_CTRL_RX);
1925                 }
1926
1927                 ret = rte_vhost_driver_callback_register(file,
1928                         &virtio_net_device_ops);
1929                 if (ret != 0) {
1930                         rte_exit(EXIT_FAILURE,
1931                                 "failed to register vhost driver callbacks.\n");
1932                 }
1933
1934                 if (rte_vhost_driver_start(file) < 0) {
1935                         rte_exit(EXIT_FAILURE,
1936                                 "failed to start vhost driver.\n");
1937                 }
1938         }
1939
1940         RTE_LCORE_FOREACH_WORKER(lcore_id)
1941                 rte_eal_wait_lcore(lcore_id);
1942
1943         /* clean up the EAL */
1944         rte_eal_cleanup();
1945
1946         return 0;
1947 }