1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
17 #include <rte_common.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
22 #include <rte_launch.h>
23 #include <rte_atomic.h>
24 #include <rte_cycles.h>
25 #include <rte_prefetch.h>
26 #include <rte_lcore.h>
27 #include <rte_per_lcore.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_interrupts.h>
30 #include <rte_random.h>
31 #include <rte_debug.h>
32 #include <rte_ether.h>
33 #include <rte_ethdev.h>
34 #include <rte_mempool.h>
37 #define MAX_QUEUES 1024
/* (continuation of the comment explaining MAX_QUEUES:) */
39 * 1024 queues require to meet the needs of a large number of vmdq_pools.
40 * (RX/TX_queue_nb * RX/TX_ring_descriptors_nb) per port.
/* Per-port mbuf budget: enough to fill every descriptor of every queue. */
42 #define NUM_MBUFS_PER_PORT (MAX_QUEUES * RTE_MAX(RTE_TEST_RX_DESC_DEFAULT, \
43 RTE_TEST_TX_DESC_DEFAULT))
44 #define MBUF_CACHE_SIZE 64
46 #define MAX_PKT_BURST 32
/*
49 * Configurable number of RX/TX ring descriptors
*/
51 #define RTE_TEST_RX_DESC_DEFAULT 1024
52 #define RTE_TEST_TX_DESC_DEFAULT 1024
/* Sentinel stored in ports[] for slots that fail validation. */
54 #define INVALID_PORT_ID 0xFF
56 /* mask of enabled ports */
57 static uint32_t enabled_port_mask;
59 /* number of pools (if user does not specify any, 8 by default) */
60 static uint32_t num_queues = 8;
61 static uint32_t num_pools = 8;
63 /* empty vmdq configuration structure. Filled in programmatically */
64 static const struct rte_eth_conf vmdq_conf_default = {
66 .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
71 .mq_mode = ETH_MQ_TX_NONE,
/* Placeholder VMDQ RX settings below; get_eth_conf() overwrites them. */
75 * should be overridden separately in code with
79 .nb_queue_pools = ETH_8_POOLS,
80 .enable_default_pool = 0,
83 .pool_map = {{0, 0},},
/* Enabled lcore ids, filled from rte_lcore_is_enabled() in main(). */
88 static unsigned lcore_ids[RTE_MAX_LCORE];
89 static uint16_t ports[RTE_MAX_ETHPORTS];
90 static unsigned num_ports; /**< The number of ports specified in command line */
92 /* array used for printing out statistics */
93 volatile unsigned long rxPackets[MAX_QUEUES] = {0};
/* VLAN tags mapped onto pools by get_eth_conf(); one tag per pool map entry. */
95 const uint16_t vlan_tags[] = {
96 0, 1, 2, 3, 4, 5, 6, 7,
97 8, 9, 10, 11, 12, 13, 14, 15,
98 16, 17, 18, 19, 20, 21, 22, 23,
99 24, 25, 26, 27, 28, 29, 30, 31,
100 32, 33, 34, 35, 36, 37, 38, 39,
101 40, 41, 42, 43, 44, 45, 46, 47,
102 48, 49, 50, 51, 52, 53, 54, 55,
103 56, 57, 58, 59, 60, 61, 62, 63,
105 const uint16_t num_vlans = RTE_DIM(vlan_tags);
/* Queue/pool layout discovered from dev_info in port_init(). */
106 static uint16_t num_pf_queues, num_vmdq_queues;
107 static uint16_t vmdq_pool_base, vmdq_queue_base;
108 /* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
109 static struct rte_ether_addr pool_addr_template = {
110 .addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
113 /* ethernet addresses of ports */
114 static struct rte_ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
/* Hardware limits per NIC class (10G vs 1G); not all are used below. */
116 #define MAX_QUEUE_NUM_10G 128
117 #define MAX_QUEUE_NUM_1G 8
118 #define MAX_POOL_MAP_NUM_10G 64
119 #define MAX_POOL_MAP_NUM_1G 32
120 #define MAX_POOL_NUM_10G 64
121 #define MAX_POOL_NUM_1G 8
/*
123 * Builds up the correct configuration for vmdq based on the vlan tags array
124 * given above, and determine the queue number and pool map number according to
 * the valid pool number (see get_eth_conf below).
*/
128 get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
130 struct rte_eth_vmdq_rx_conf conf;
133 conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
134 conf.nb_pool_maps = num_pools;
135 conf.enable_default_pool = 0;
136 conf.default_pool = 0; /* set explicit value, even if not used */
138 for (i = 0; i < conf.nb_pool_maps; i++) {
139 conf.pool_map[i].vlan_id = vlan_tags[i];
140 conf.pool_map[i].pools = (1UL << (i % num_pools));
143 (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
144 (void)(rte_memcpy(ð_conf->rx_adv_conf.vmdq_rx_conf, &conf,
145 sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
150 * Initialises a given port using global settings and with the rx buffers
151 * coming from the mbuf_pool passed as parameter
/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as parameter. Configures every RX/TX
 * queue the device reports, starts the port, and programs one MAC per pool.
 * Returns 0 on success, non-zero on failure (returns not visible in this
 * excerpt).
 */
154 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
156 struct rte_eth_dev_info dev_info;
157 struct rte_eth_rxconf *rxconf;
158 struct rte_eth_txconf *txconf;
159 struct rte_eth_conf port_conf;
160 uint16_t rxRings, txRings;
161 uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT;
162 uint16_t txRingSize = RTE_TEST_TX_DESC_DEFAULT;
165 uint16_t queues_per_pool;
166 uint32_t max_nb_pools;
/*
169 * The max pool number from dev_info will be used to validate the pool
170 * number specified in cmd line
*/
172 retval = rte_eth_dev_info_get(port, &dev_info);
174 printf("Error during getting device (port %u) info: %s\n",
175 port, strerror(-retval));
179 max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
/*
181 * We allow to process part of VMDQ pools specified by num_pools in
 * command line; more pools than the device supports is an error.
*/
184 if (num_pools > max_nb_pools) {
/* NOTE(review): format string lacks a space before "max_nb_pools" and uses
 * %d for uint32_t values — cosmetic, but worth fixing upstream. */
185 printf("num_pools %d >max_nb_pools %d\n",
186 num_pools, max_nb_pools);
/* The device is configured for max_nb_pools; num_pools selects how many
 * of those pools this application actually services. */
189 retval = get_eth_conf(&port_conf, max_nb_pools);
/*
194 * NIC queues are divided into pf queues and vmdq queues.
*/
196 /* There is assumption here all ports have the same configuration! */
197 num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
198 queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
199 num_vmdq_queues = num_pools * queues_per_pool;
200 num_queues = num_pf_queues + num_vmdq_queues;
201 vmdq_queue_base = dev_info.vmdq_queue_base;
202 vmdq_pool_base = dev_info.vmdq_pool_base;
204 printf("pf queue num: %u, configured vmdq pool num: %u,"
205 " each vmdq pool has %u queues\n",
206 num_pf_queues, num_pools, queues_per_pool);
207 printf("vmdq queue base: %d pool base %d\n",
208 vmdq_queue_base, vmdq_pool_base);
209 if (!rte_eth_dev_is_valid_port(port))
/*
213 * Though in this example, we only receive packets from the first queue
214 * of each pool and send packets through first rte_lcore_count() tx
215 * queues of vmdq queues, all queues including pf queues are setup.
216 * This is because VMDQ queues doesn't always start from zero, and the
217 * PMD layer doesn't support selectively initialising part of rx/tx
 * queues.
*/
220 rxRings = (uint16_t)dev_info.max_rx_queues;
221 txRings = (uint16_t)dev_info.max_tx_queues;
/* dev_info is fetched again here — presumably to refresh offload
 * capabilities before configure; TODO confirm this second call is needed. */
223 retval = rte_eth_dev_info_get(port, &dev_info);
225 printf("Error during getting device (port %u) info: %s\n",
226 port, strerror(-retval));
/* Enable fast mbuf free when the hardware supports it. */
230 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
231 port_conf.txmode.offloads |=
232 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
233 retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
/* The driver may round the descriptor counts up or down. */
237 retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rxRingSize,
/* NUM_MBUFS_PER_PORT was sized from the DEFAULT descriptor counts, so a
 * larger adjusted ring would exhaust the mempool. */
241 if (RTE_MAX(rxRingSize, txRingSize) > RTE_MAX(RTE_TEST_RX_DESC_DEFAULT,
242 RTE_TEST_TX_DESC_DEFAULT)) {
243 printf("Mbuf pool has an insufficient size for port %u.\n",
/* Start from the driver's default queue configs; drop packets on RX
 * descriptor exhaustion instead of backpressuring the NIC. */
248 rxconf = &dev_info.default_rxconf;
249 rxconf->rx_drop_en = 1;
250 txconf = &dev_info.default_txconf;
251 txconf->offloads = port_conf.txmode.offloads;
252 for (q = 0; q < rxRings; q++) {
253 retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
254 rte_eth_dev_socket_id(port),
258 printf("initialise rx queue %d failed\n", q);
263 for (q = 0; q < txRings; q++) {
264 retval = rte_eth_tx_queue_setup(port, q, txRingSize,
265 rte_eth_dev_socket_id(port),
268 printf("initialise tx queue %d failed\n", q);
273 retval = rte_eth_dev_start(port);
275 printf("port %d start failed\n", port);
/* Cache the port MAC; lcore_main() writes it as the source address. */
279 retval = rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
281 printf("port %d MAC address get failed: %s\n", port,
282 rte_strerror(-retval));
285 printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
286 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
288 vmdq_ports_eth_addr[port].addr_bytes[0],
289 vmdq_ports_eth_addr[port].addr_bytes[1],
290 vmdq_ports_eth_addr[port].addr_bytes[2],
291 vmdq_ports_eth_addr[port].addr_bytes[3],
292 vmdq_ports_eth_addr[port].addr_bytes[4],
293 vmdq_ports_eth_addr[port].addr_bytes[5]);
/*
296 * Set mac for each pool.
297 * There is no default mac for the pools in i40e.
298 * Remove this after i40e fixes this issue.
*/
300 for (q = 0; q < num_pools; q++) {
301 struct rte_ether_addr mac;
/* Per-pool MAC = template 52:54:00:12:<port>:<pool>. */
302 mac = pool_addr_template;
303 mac.addr_bytes[4] = port;
304 mac.addr_bytes[5] = q;
305 printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
307 mac.addr_bytes[0], mac.addr_bytes[1],
308 mac.addr_bytes[2], mac.addr_bytes[3],
309 mac.addr_bytes[4], mac.addr_bytes[5]);
310 retval = rte_eth_dev_mac_addr_add(port, &mac,
313 printf("mac addr add failed at pool %d\n", q);
321 /* Check num_pools parameter and set it if OK*/
323 vmdq_parse_num_pools(const char *q_arg)
328 /* parse number string */
329 n = strtol(q_arg, &end, 10);
330 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
333 if (num_pools > num_vlans) {
334 printf("num_pools %d > num_vlans %d\n", num_pools, num_vlans);
/*
 * Parse the hexadecimal port mask given with -p on the command line.
 * Returns the mask, or 0 (which the caller treats as invalid) on bad input.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	/* reject values that do not fit the 32-bit enabled_port_mask
	 * (also catches strtoul's ULONG_MAX overflow result on LP64) */
	if (pm > 0xFFFFFFFFUL)
		return 0;

	return pm;
}
/* Display usage */
static void
vmdq_usage(const char *prgname)
{
	/* fixed: removed the stray ']' after PORTMASK in the usage string */
	printf("%s [EAL options] -- -p PORTMASK\n"
	       "  --nb-pools NP: number of pools\n",
	       prgname);
}
370 /* Parse the argument (num_pools) given in the command line of the application */
/*
 * Parse the application (non-EAL) arguments: -p PORTMASK and --nb-pools NP.
 * Fills the globals enabled_port_mask, num_pools, ports[] and num_ports.
 * Returns 0 on success, -1 on invalid arguments (return statements are
 * outside this excerpt).
 */
372 vmdq_parse_args(int argc, char **argv)
377 const char *prgname = argv[0];
378 static struct option long_option[] = {
379 {"nb-pools", required_argument, NULL, 0},
383 /* Parse command line */
384 while ((opt = getopt_long(argc, argv, "p:", long_option,
385 &option_index)) != EOF) {
/* '-p': a zero mask selects no ports, so treat it as invalid */
389 enabled_port_mask = parse_portmask(optarg);
390 if (enabled_port_mask == 0) {
391 printf("invalid portmask\n");
/* '--nb-pools': delegated validation; updates the global num_pools */
397 if (vmdq_parse_num_pools(optarg) == -1) {
398 printf("invalid number of pools\n");
/* Expand the mask into the global ports[] list. */
410 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
411 if (enabled_port_mask & (1 << i))
/* NOTE(review): ports[] is uint16_t but the index is cast to uint8_t;
 * port ids >= 255 would be truncated and id 255 collides with
 * INVALID_PORT_ID (0xFF). Looks like it should be (uint16_t)i — confirm. */
412 ports[num_ports++] = (uint8_t)i;
/* Ports forward in pairs (0<->1, 2<->3, ...): need an even count >= 2. */
415 if (num_ports < 2 || num_ports % 2) {
416 printf("Current enabled port number is %u,"
417 "but it should be even and at least 2\n", num_ports);
425 update_mac_address(struct rte_mbuf *m, unsigned dst_port)
427 struct rte_ether_hdr *eth;
430 eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
432 /* 02:00:00:00:00:xx */
433 tmp = ð->d_addr.addr_bytes[0];
434 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
437 rte_ether_addr_copy(&vmdq_ports_eth_addr[dst_port], ð->s_addr);
440 /* When we receive a HUP signal, print out our stats */
/*
 * SIGHUP handler: dump the per-queue RX packet counters, grouped by pool.
 * Installed via signal(SIGHUP, ...) in main().
 * NOTE(review): printf() is not async-signal-safe; tolerable in a sample
 * app, but a production handler should only set a flag.
 */
442 sighup_handler(int signum)
444 unsigned int q = vmdq_queue_base;
/* Walk only the VMDQ queue range; queues below vmdq_queue_base are PF
 * queues and carry no per-pool stats here. */
445 for (; q < num_queues; q++) {
/* First queue of each pool: print the pool heading. */
446 if ((q - vmdq_queue_base) % (num_vmdq_queues / num_pools) == 0)
447 printf("\nPool %u: ", (q - vmdq_queue_base) /
448 (num_vmdq_queues / num_pools));
449 printf("%lu ", rxPackets[q]);
451 printf("\nFinished handling signal %d\n", signum);
455 * Main thread that does the work, reading from INPUT_PORT
456 * and writing to OUTPUT_PORT
/*
 * Per-lcore worker: each core services a contiguous slice of the VMDQ RX
 * queues, bouncing packets between port pairs (0<->1, 2<->3, ...) after
 * rewriting the Ethernet header via update_mac_address().
 */
459 lcore_main(__rte_unused void *dummy)
461 const uint16_t lcore_id = (uint16_t)rte_lcore_id();
462 const uint16_t num_cores = (uint16_t)rte_lcore_count();
463 uint16_t core_id = 0;
464 uint16_t startQueue, endQueue;
/* Queues that don't divide evenly get spread over the first cores. */
466 const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);
/* Find this lcore's dense index (core_id) in the enabled-lcore table. */
468 for (i = 0; i < num_cores; i++)
469 if (lcore_ids[i] == lcore_id) {
/* Split num_vmdq_queues over num_cores: the first 'remainder' cores take
 * one extra queue each. */
474 if (remainder != 0) {
475 if (core_id < remainder) {
476 startQueue = (uint16_t)(core_id *
477 (num_vmdq_queues / num_cores + 1));
478 endQueue = (uint16_t)(startQueue +
479 (num_vmdq_queues / num_cores) + 1);
481 startQueue = (uint16_t)(core_id *
482 (num_vmdq_queues / num_cores) +
484 endQueue = (uint16_t)(startQueue +
485 (num_vmdq_queues / num_cores));
488 startQueue = (uint16_t)(core_id *
489 (num_vmdq_queues / num_cores));
490 endQueue = (uint16_t)(startQueue +
491 (num_vmdq_queues / num_cores));
494 /* vmdq queue idx doesn't always start from zero.*/
495 startQueue += vmdq_queue_base;
496 endQueue += vmdq_queue_base;
497 printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id,
498 (unsigned)lcore_id, startQueue, endQueue - 1);
/* More cores than queues: surplus cores park here. */
500 if (startQueue == endQueue) {
501 printf("lcore %u has nothing to do\n", lcore_id);
506 struct rte_mbuf *buf[MAX_PKT_BURST];
507 const uint16_t buf_size = RTE_DIM(buf);
509 for (p = 0; p < num_ports; p++) {
/* NOTE(review): ports[] is uint16_t; narrowing sport/dport to uint8_t
 * truncates ids >= 256 and overlaps INVALID_PORT_ID (0xFF) — should
 * presumably be uint16_t. Confirm against RTE_MAX_ETHPORTS. */
510 const uint8_t sport = ports[p];
511 /* 0 <-> 1, 2 <-> 3 etc */
512 const uint8_t dport = ports[p ^ 1];
/* Slots invalidated by check_ports_num() are skipped. */
513 if ((sport == INVALID_PORT_ID) || (dport == INVALID_PORT_ID))
516 for (q = startQueue; q < endQueue; q++) {
517 const uint16_t rxCount = rte_eth_rx_burst(sport,
520 if (unlikely(rxCount == 0))
/* Stats consumed by sighup_handler(). */
523 rxPackets[q] += rxCount;
525 for (i = 0; i < rxCount; i++)
526 update_mac_address(buf[i], dport);
/* TX on a per-core queue offset into the VMDQ range. */
528 const uint16_t txCount = rte_eth_tx_burst(dport,
529 vmdq_queue_base + core_id,
/* Free any packets the TX ring could not accept. */
533 if (txCount != rxCount) {
534 for (i = txCount; i < rxCount; i++)
535 rte_pktmbuf_free(buf[i]);
543 * Update the global var NUM_PORTS and array PORTS according to system ports number
544 * and return valid ports number
546 static unsigned check_ports_num(unsigned nb_ports)
548 unsigned valid_num_ports = num_ports;
551 if (num_ports > nb_ports) {
552 printf("\nSpecified port number(%u) exceeds total system port number(%u)\n",
553 num_ports, nb_ports);
554 num_ports = nb_ports;
557 for (portid = 0; portid < num_ports; portid++) {
558 if (!rte_eth_dev_is_valid_port(ports[portid])) {
559 printf("\nSpecified port ID(%u) is not valid\n",
561 ports[portid] = INVALID_PORT_ID;
565 return valid_num_ports;
568 /* Main function, does initialisation and calls the per-lcore functions */
570 main(int argc, char *argv[])
572 struct rte_mempool *mbuf_pool;
573 unsigned lcore_id, core_id = 0;
575 unsigned nb_ports, valid_num_ports;
578 signal(SIGHUP, sighup_handler);
581 ret = rte_eal_init(argc, argv);
583 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
587 /* parse app arguments */
588 ret = vmdq_parse_args(argc, argv);
590 rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n");
592 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
593 if (rte_lcore_is_enabled(lcore_id))
594 lcore_ids[core_id++] = lcore_id;
596 if (rte_lcore_count() > RTE_MAX_LCORE)
597 rte_exit(EXIT_FAILURE, "Not enough cores\n");
599 nb_ports = rte_eth_dev_count_avail();
602 * Update the global var NUM_PORTS and global array PORTS
603 * and get value of var VALID_NUM_PORTS according to system ports number
605 valid_num_ports = check_ports_num(nb_ports);
607 if (valid_num_ports < 2 || valid_num_ports % 2) {
608 printf("Current valid ports number is %u\n", valid_num_ports);
609 rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n");
612 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
613 NUM_MBUFS_PER_PORT * nb_ports, MBUF_CACHE_SIZE,
614 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
615 if (mbuf_pool == NULL)
616 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
618 /* initialize all ports */
619 RTE_ETH_FOREACH_DEV(portid) {
620 /* skip ports that are not enabled */
621 if ((enabled_port_mask & (1 << portid)) == 0) {
622 printf("\nSkipping disabled port %d\n", portid);
625 if (port_init(portid, mbuf_pool) != 0)
626 rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
629 /* call lcore_main() on every lcore */
630 rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
631 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
632 if (rte_eal_wait_lcore(lcore_id) < 0)