/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#define MAX_QUEUES 1024
 * 1024 queues are required to meet the needs of a large number of vmdq_pools.
 * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
#define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
		RTE_TEST_TX_DESC_DEFAULT))
#define MBUF_CACHE_SIZE 64
#define MAX_PKT_BURST 32
 * Configurable number of RX/TX ring descriptors
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 1024
#define INVALID_PORT_ID 0xFF
/* mask of enabled ports */
static uint32_t enabled_port_mask;
/* number of pools (8 by default, if the user does not specify any) */
static uint32_t num_queues = 8;
static uint32_t num_pools = 8;
static uint8_t rss_enable;
/* empty vmdq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.mq_mode = ETH_MQ_TX_NONE,
		 * should be overridden separately in code with
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.pool_map = {{0, 0},},
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports; /**< The number of ports specified on the command line */
/* array used for printing out statistics */
volatile unsigned long rxPackets[MAX_QUEUES] = {0};
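/*
 * VLAN tags used for VMDQ filtering; get_eth_conf() below maps each of the
 * first num_pools tags to its own pool.
 */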
const uint16_t vlan_tags[] = {
	0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34, 35, 36, 37, 38, 39,
	40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55,
	56, 57, 58, 59, 60, 61, 62, 63,
const uint16_t num_vlans = RTE_DIM(vlan_tags);
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
/* pool mac addr template; a pool's mac addr is: 52 54 00 12 port# pool# */
static struct rte_ether_addr pool_addr_template = {
	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
/* ethernet addresses of ports */
static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
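/*
 * Per-device-class limits: 10G NICs expose more VMDQ queues, pool mappings
 * and pools than 1G NICs.
 */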
#define MAX_QUEUE_NUM_10G 128
#define MAX_QUEUE_NUM_1G 8
#define MAX_POOL_MAP_NUM_10G 64
#define MAX_POOL_MAP_NUM_1G 32
#define MAX_POOL_NUM_10G 64
#define MAX_POOL_NUM_1G 8
 * Builds up the correct configuration for vmdq based on the vlan tags array
 * given above, and determines the queue number and pool map number according to
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
	struct rte_eth_vmdq_rx_conf conf;
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
	conf.nb_pool_maps = num_pools;
	conf.enable_default_pool = 0;
	conf.default_pool = 0; /* set explicit value, even if not used */
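	/*
	 * Map VLAN tags to pools: with nb_pool_maps equal to num_pools,
	 * vlan_tags[i] is steered to pool i, i.e. one VLAN tag per pool.
	 */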
	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << (i % num_pools));
	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
	eth_conf->rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP |
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as a parameter
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	struct rte_eth_conf port_conf;
	uint16_t rxRings, txRings;
	uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
	uint16_t queues_per_pool;
	uint32_t max_nb_pools;
	 * The max pool number from dev_info will be used to validate the pool
	 * number specified on the command line
	retval = rte_eth_dev_info_get(port, &dev_info);
		printf("Error getting device (port %u) info: %s\n",
			port, strerror(-retval));
	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
	 * We allow processing only part of the VMDQ pools, as specified by num_pools in
	if (num_pools > max_nb_pools) {
		printf("num_pools %d > max_nb_pools %d\n",
			num_pools, max_nb_pools);
	retval = get_eth_conf(&port_conf, max_nb_pools);
	 * NIC queues are divided into pf queues and vmdq queues.
	/* There is an assumption here that all ports have the same configuration! */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_pools * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
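	/*
	 * Illustrative example (actual numbers depend on the NIC): a device
	 * reporting 128 vmdq queues and 64 vmdq pools gives queues_per_pool = 2;
	 * with the default num_pools = 8 that yields num_vmdq_queues = 16.
	 */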
213 printf("pf queue num: %u, configured vmdq pool num: %u,"
214 " each vmdq pool has %u queues\n",
215 num_pf_queues, num_pools, queues_per_pool);
216 printf("vmdq queue base: %d pool base %d\n",
217 vmdq_queue_base, vmdq_pool_base);
218 if (!rte_eth_dev_is_valid_port(port))
221 rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
222 port_conf.rx_adv_conf.rss_conf.rss_hf &=
223 dev_info.flow_type_rss_offloads;
224 if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
225 printf("Port %u modified RSS hash function based on hardware support,"
226 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
229 port_conf.rx_adv_conf.rss_conf.rss_hf);
	 * Though in this example, we only receive packets from the first queue
	 * of each pool and send packets through the first rte_lcore_count() tx
	 * queues of the vmdq queues, all queues including pf queues are set up.
	 * This is because VMDQ queues don't always start from zero, and the
	 * PMD layer doesn't support selectively initialising part of the rx/tx
	rxRings = (uint16_t)dev_info.max_rx_queues;
	txRings = (uint16_t)dev_info.max_tx_queues;
	retval = rte_eth_dev_info_get(port, &dev_info);
		printf("Error getting device (port %u) info: %s\n",
			port, strerror(-retval));
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
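	/*
	 * MBUF_FAST_FREE (requested above when the PMD advertises it) lets the
	 * driver assume transmitted mbufs are non-segmented, come from a single
	 * mempool and have a reference count of 1, which speeds up TX cleanup.
	 */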
	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
	if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
			RTE_TEST_TX_DESC_DEFAULT)) {
		printf("Mbuf pool has an insufficient size for port %u.\n",
	rxconf = &dev_info.default_rxconf;
	rxconf->rx_drop_en = 1;
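	/*
	 * With rx_drop_en set, a queue that runs out of RX descriptors drops
	 * incoming packets instead of stalling reception on the port.
	 */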
	txconf = &dev_info.default_txconf;
	txconf->offloads = port_conf.txmode.offloads;
	for (q = 0; q < rxRings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
					rte_eth_dev_socket_id(port),
			printf("initialise rx queue %d failed\n", q);
	for (q = 0; q < txRings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
					rte_eth_dev_socket_id(port),
			printf("initialise tx queue %d failed\n", q);
	retval = rte_eth_dev_start(port);
		printf("port %d start failed\n", port);
	retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
		printf("port %d MAC address get failed: %s\n", port,
			rte_strerror(-retval));
305 printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
306 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
308 vmdq_ports_eth_addr[port].addr_bytes[0],
309 vmdq_ports_eth_addr[port].addr_bytes[1],
310 vmdq_ports_eth_addr[port].addr_bytes[2],
311 vmdq_ports_eth_addr[port].addr_bytes[3],
312 vmdq_ports_eth_addr[port].addr_bytes[4],
313 vmdq_ports_eth_addr[port].addr_bytes[5]);
316 * Set mac for each pool.
317 * There is no default mac for the pools in i40.
318 * Removes this after i40e fixes this issue.
	for (q = 0; q < num_pools; q++) {
		struct rte_ether_addr mac;
		mac = pool_addr_template;
		mac.addr_bytes[4] = port;
		mac.addr_bytes[5] = q;
		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
		retval = rte_eth_dev_mac_addr_add(port, &mac,
			printf("mac addr add failed at pool %d\n", q);
/* Check the num_pools parameter and set it if OK */
vmdq_parse_num_pools(const char *q_arg)
	/* parse number string */
	n = strtol(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
	if (num_pools > num_vlans) {
		printf("num_pools %d > num_vlans %d\n", num_pools, num_vlans);
parse_portmask(const char *portmask)
	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
vmdq_usage(const char *prgname)
	printf("%s [EAL options] -- -p PORTMASK\n"
		"  --nb-pools NP: number of pools\n"
		"  --enable-rss: enable RSS (disabled by default)\n",
/* Parse the arguments given on the command line of the application */
vmdq_parse_args(int argc, char **argv)
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"nb-pools", required_argument, NULL, 0},
		{"enable-rss", 0, NULL, 0},
	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:", long_option,
			&option_index)) != EOF) {
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
			if (!strcmp(long_option[option_index].name,
				if (vmdq_parse_num_pools(optarg) == -1) {
					printf("invalid number of pools\n");
			if (!strcmp(long_option[option_index].name,
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	if (num_ports < 2 || num_ports % 2) {
		printf("Current enabled port number is %u,"
			" but it should be even and at least 2\n", num_ports);
update_mac_address(struct rte_mbuf *m, unsigned dst_port)
	struct rte_ether_hdr *eth;
	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	/* 02:00:00:00:00:xx */
	tmp = &eth->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
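	/*
	 * On a little-endian CPU the 8-byte store above writes the destination
	 * MAC as 02:00:00:00:00:<dst_port> and also clobbers the first two bytes
	 * of the source MAC, which rte_ether_addr_copy() below rewrites in full.
	 */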
	rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], &eth->s_addr);
/* When we receive a HUP signal, print out our stats */
sighup_handler(int signum)
	unsigned int q = vmdq_queue_base;
	for (; q < num_queues; q++) {
		if ((q - vmdq_queue_base) % (num_vmdq_queues / num_pools) == 0)
			printf("\nPool %u: ", (q - vmdq_queue_base) /
				(num_vmdq_queues / num_pools));
		printf("%lu ", rxPackets[q]);
	printf("\nFinished handling signal %d\n", signum);
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT
lcore_main(__rte_unused void *dummy)
	const uint16_t lcore_id = (uint16_t)rte_lcore_id();
	const uint16_t num_cores = (uint16_t)rte_lcore_count();
	uint16_t core_id = 0;
	uint16_t startQueue, endQueue;
	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);
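	/*
	 * Work out this core's index, then its slice of the VMDQ RX queues:
	 * queues are split as evenly as possible across cores, with the first
	 * 'remainder' cores polling one extra queue each.
	 */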
	for (i = 0; i < num_cores; i++)
		if (lcore_ids[i] == lcore_id) {
	if (remainder != 0) {
		if (core_id < remainder) {
			startQueue = (uint16_t)(core_id *
				(num_vmdq_queues / num_cores + 1));
			endQueue = (uint16_t)(startQueue +
				(num_vmdq_queues / num_cores) + 1);
			startQueue = (uint16_t)(core_id *
				(num_vmdq_queues / num_cores) +
			endQueue = (uint16_t)(startQueue +
				(num_vmdq_queues / num_cores));
		startQueue = (uint16_t)(core_id *
			(num_vmdq_queues / num_cores));
		endQueue = (uint16_t)(startQueue +
			(num_vmdq_queues / num_cores));
	/* vmdq queue idx doesn't always start from zero. */
	startQueue += vmdq_queue_base;
	endQueue += vmdq_queue_base;
	printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id,
		(unsigned)lcore_id, startQueue, endQueue - 1);
	if (startQueue == endQueue) {
		printf("lcore %u has nothing to do\n", lcore_id);
		struct rte_mbuf *buf[MAX_PKT_BURST];
		const uint16_t buf_size = RTE_DIM(buf);
		for (p = 0; p < num_ports; p++) {
			const uint8_t sport = ports[p];
			/* 0 <-> 1, 2 <-> 3 etc */
			const uint8_t dport = ports[p ^ 1];
			if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID))
			for (q = startQueue; q < endQueue; q++) {
				const uint16_t rxCount = rte_eth_rx_burst(sport,
				if (unlikely(rxCount == 0))
				rxPackets[q] += rxCount;
				for (i = 0; i < rxCount; i++)
					update_mac_address(buf[i], dport);
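				/*
				 * Forward the burst out of the paired port; each
				 * core transmits on its own queue
				 * (vmdq_queue_base + core_id), so no locking is
				 * needed between cores.
				 */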
				const uint16_t txCount = rte_eth_tx_burst(dport,
						vmdq_queue_base + core_id,
				if (txCount != rxCount) {
					for (i = txCount; i < rxCount; i++)
						rte_pktmbuf_free(buf[i]);
 * Update the global variable num_ports and the array ports according to the
 * number of ports in the system, and return the number of valid ports
static unsigned check_ports_num(unsigned nb_ports)
	unsigned valid_num_ports = num_ports;
	if (num_ports > nb_ports) {
		printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			printf("\nSpecified port ID(%u) is not valid\n",
			ports[portid] = INVALID_PORT_ID;
	return valid_num_ports;
/* Main function, does initialisation and calls the per-lcore functions */
main(int argc, char *argv[])
	struct rte_mempool *mbuf_pool;
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	signal(SIGHUP, sighup_handler);
	ret = rte_eal_init(argc, argv);
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	/* parse app arguments */
	ret = vmdq_parse_args(argc, argv);
		rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");
	nb_ports = rte_eth_dev_count_avail();
	 * Update the global variable num_ports and the global array ports,
	 * and get the value of valid_num_ports according to the number of system ports
	valid_num_ports = check_ports_num(nb_ports);
	if (valid_num_ports < 2 || valid_num_ports % 2) {
		printf("Current valid ports number is %u\n", valid_num_ports);
		rte_exit(EXIT_FAILURE, "Error: the number of valid ports must be even and at least 2\n");
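	/*
	 * Size the pool so that every queue on every port can hold a full ring
	 * of descriptors (see NUM_MBUFS_PER_PORT above).
	 */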
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
	/* call lcore_main() on every lcore */
	rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)