/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>

#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_string_fns.h>

#include "crypto.h"
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF   (32 * 1024)
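
/*
 * Sizing note: each mbuf occupies 2048 bytes of data room plus
 * RTE_PKTMBUF_HEADROOM (128 bytes in a default build) and the rte_mbuf
 * header itself, roughly 2.25 KB in total, so a pool of NB_MBUF = 32768
 * mbufs needs on the order of 72 MiB of hugepage memory per socket.
 */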
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF
#define TSC_COUNT_LIMIT 1000

#define ACTION_ENCRYPT 1
#define ACTION_DECRYPT 2
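
/*
 * Illustrative sketch, not part of the original sample: main_loop() picks
 * the crypto action and algorithms out of the IPv4 source address. With
 * the raw shifts used there on a little-endian host, an address a.b.c.d
 * carries the hash algorithm in octet b, the cipher algorithm in octet c,
 * and the ACTION_* flags in octet d. A traffic generator could compose a
 * matching in-memory src_addr value with a helper such as this
 * (make_crypto_src_addr is hypothetical, not part of the application):
 */
static inline uint32_t
make_crypto_src_addr(uint8_t net, uint8_t hash, uint8_t cipher, uint8_t action)
{
	/* same in-memory layout that main_loop() decodes with raw shifts */
	return ((uint32_t)action << 24) | ((uint32_t)cipher << 16) |
		((uint32_t)hash << 8) | net;
}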
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static unsigned enabled_port_mask = 0;
static int promiscuous_on = 1; /**< Ports set in promiscuous mode on by default. */

/* destination port for each enabled port (ports are paired in main()) */
static uint32_t dst_ports[RTE_MAX_ETHPORTS];
struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};
struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;
#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	/* the original default (port, queue, lcore) table is elided here;
	 * a single placeholder mapping keeps the array well-formed */
	{0, 0, 2},
};

static struct lcore_params * lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC not stripped by hardware */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static struct rte_mempool * pktmbuf_pool[RTE_MAX_NUMA_NODES];

struct lcore_conf {
	uint64_t tsc;
	uint64_t tsc_count;
	uint32_t tx_mask;
	uint16_t n_rx_queue;
	uint16_t rx_queue_list_pos;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct mbuf_table rx_mbuf;
	uint32_t rx_mbuf_pos;
	uint32_t rx_curr_queue;
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static inline struct rte_mbuf *
nic_rx_get_packet(struct lcore_conf *qconf)
{
	struct rte_mbuf *pkt;
	uint32_t i;

	if (unlikely(qconf->n_rx_queue == 0))
		return NULL;

	/* Look for the next queue with packets; return if none */
	if (unlikely(qconf->rx_mbuf_pos == qconf->rx_mbuf.len)) {
		qconf->rx_mbuf_pos = 0;
		for (i = 0; i < qconf->n_rx_queue; i++) {
			qconf->rx_mbuf.len = rte_eth_rx_burst(
				qconf->rx_queue_list[qconf->rx_curr_queue].port_id,
				qconf->rx_queue_list[qconf->rx_curr_queue].queue_id,
				qconf->rx_mbuf.m_table, MAX_PKT_BURST);

			qconf->rx_curr_queue++;
			if (unlikely(qconf->rx_curr_queue == qconf->n_rx_queue))
				qconf->rx_curr_queue = 0;
			if (likely(qconf->rx_mbuf.len > 0))
				break;
		}
		if (unlikely(i == qconf->n_rx_queue))
			return NULL;
	}

	/* Get the next packet from the current queue; if last packet, go to next queue */
	pkt = qconf->rx_mbuf.m_table[qconf->rx_mbuf_pos];
	qconf->rx_mbuf_pos++;

	return pkt;
}
static void
nic_tx_flush_queues(struct lcore_conf *qconf)
{
	uint8_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		struct rte_mbuf **m_table = NULL;
		uint16_t queueid, len;
		unsigned i, n;

		if (likely((qconf->tx_mask & (1 << portid)) == 0))
			continue;

		len = qconf->tx_mbufs[portid].len;
		if (likely(len == 0))
			continue;

		queueid = qconf->tx_queue_id[portid];
		m_table = qconf->tx_mbufs[portid].m_table;

		n = rte_eth_tx_burst(portid, queueid, m_table, len);
		for (i = n; i < len; i++) {
			rte_pktmbuf_free(m_table[i]);
		}

		qconf->tx_mbufs[portid].len = 0;
	}

	qconf->tx_mask = TX_QUEUE_FLUSH_MASK;
}
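
/*
 * Note on tx_mask: this appears to be a throughput optimisation rather than
 * a correctness requirement. nic_tx_send_packet() below clears a port's bit
 * right after pushing a full burst, so the periodic flush can skip ports
 * that were drained moments ago; the flush then re-arms every bit by
 * restoring TX_QUEUE_FLUSH_MASK.
 */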
static void
nic_tx_send_packet(struct rte_mbuf *pkt, uint8_t port)
{
	struct lcore_conf *qconf;
	uint16_t len;
	unsigned lcoreid;

	if (unlikely(pkt == NULL)) {
		return;
	}

	lcoreid = rte_lcore_id();
	qconf = &lcore_conf[lcoreid];

	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = pkt;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		uint16_t queueid;
		unsigned i, n;

		queueid = qconf->tx_queue_id[port];
		n = rte_eth_tx_burst(port, queueid, qconf->tx_mbufs[port].m_table, MAX_PKT_BURST);
		for (i = n; i < MAX_PKT_BURST; i++) {
			rte_pktmbuf_free(qconf->tx_mbufs[port].m_table[i]);
		}

		qconf->tx_mask &= ~(1 << port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
}
/* main processing loop */
static __attribute__((noreturn)) int
main_loop(__attribute__((unused)) void *dummy)
{
	uint32_t lcoreid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
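
	/*
	 * Worked example: with a 2 GHz TSC, drain_tsc works out to
	 * ceil(2e9 / US_PER_S) * BURST_TX_DRAIN_US = 2000 * 100 = 200000
	 * cycles, i.e. queued TX packets wait at most ~100 us before a flush.
	 */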
	lcoreid = rte_lcore_id();
	qconf = &lcore_conf[lcoreid];

	printf("Thread %u starting...\n", lcoreid);

	for (;;) {
		struct rte_mbuf *pkt;
		uint32_t pkt_from_nic_rx = 0;
		uint32_t port;

		/* Flush TX queues */
		qconf->tsc_count++;
		if (unlikely(qconf->tsc_count == TSC_COUNT_LIMIT)) {
			uint64_t tsc, diff_tsc;

			tsc = rte_rdtsc();
			diff_tsc = tsc - qconf->tsc;
			if (unlikely(diff_tsc > drain_tsc)) {
				nic_tx_flush_queues(qconf);
				crypto_flush_tx_queue(lcoreid);
				qconf->tsc = tsc;
			}
			qconf->tsc_count = 0;
		}

		/*
		 * Check the Intel QuickAssist queues first;
		 * poll the NIC only if no crypto response is pending.
		 */
		pkt = (struct rte_mbuf *) crypto_get_next_response();
		if (pkt == NULL) {
			pkt = nic_rx_get_packet(qconf);
			pkt_from_nic_rx = 1;
		}
		if (pkt == NULL)
			continue;

		/* Send packet to either QAT encrypt, QAT decrypt or NIC TX */
		if (pkt_from_nic_rx) {
			struct ipv4_hdr *ip = (struct ipv4_hdr *) (rte_pktmbuf_mtod(pkt, unsigned char *) +
					sizeof(struct ether_hdr));
			if (ip->src_addr & rte_cpu_to_be_32(ACTION_ENCRYPT)) {
				if (CRYPTO_RESULT_FAIL == crypto_encrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}

			if (ip->src_addr & rte_cpu_to_be_32(ACTION_DECRYPT)) {
				if (CRYPTO_RESULT_FAIL == crypto_decrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}
		}

		port = dst_ports[pkt->port];

		/* Transmit the packet */
		nic_tx_send_packet(pkt, (uint8_t)port);
	}
}
static inline unsigned
get_port_max_rx_queues(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.max_rx_queues;
}

static inline unsigned
get_port_max_tx_queues(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.max_tx_queues;
}
static int
check_lcore_params(void)
{
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].queue_id >= get_port_max_rx_queues(lcore_params[i].port_id)) {
			printf("invalid queue number: %hhu\n", lcore_params[i].queue_id);
			return -1;
		}
		if (!rte_lcore_is_enabled(lcore_params[i].lcore_id)) {
			printf("error: lcore %hhu is not enabled in lcore mask\n",
				lcore_params[i].lcore_id);
			return -1;
		}
	}
	return 0;
}
static int
check_port_config(const unsigned nb_ports)
{
	unsigned portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
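
/*
 * Example: with --config '(0,0,2),(0,1,3)', the highest queue_id used on
 * port 0 is 1, so get_port_n_rx_queues(0) returns 2, the number of RX
 * queues that main() must configure on that port.
 */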
static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].n_rx_queue++;
	}
	return 0;
}
/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [--no-promisc]"
		" [--config '(port,queue,lcore)[,(port,queue,lcore)]']\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  --no-promisc: disable promiscuous mode (default is ON)\n"
		"  --config '(port,queue,lcore)': rx queues configuration\n",
		prgname);
}
static unsigned
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
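
/*
 * Example: "-p 0x3" yields portmask 0x3, enabling ports 0 and 1. An empty
 * or non-hexadecimal argument yields 0, which parse_args() rejects.
 */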
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p_end = q_arg;
	char *end;
	enum fieldnames { FLD_PORT = 0, FLD_QUEUE, FLD_LCORE, _NUM_FLD };
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	unsigned size;
	int i;

	nb_lcore_params = 0;

	while ((p = strchr(p_end, '(')) != NULL) {
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		++p;
		if ((p_end = strchr(p, ')')) == NULL)
			return -1;
		size = p_end - p;
		if (size >= sizeof(s))
			return -1;
		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
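
/*
 * Example: --config '(0,0,2),(0,1,3)' fills two lcore_params entries:
 * port 0 / queue 0 is polled by lcore 2, and port 0 / queue 1 by lcore 3.
 */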
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt = argv;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"config", 1, 0, 0},
		{"no-promisc", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	while ((opt = getopt_long(argc, argvopt, "p:",
			lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		/* long options */
		case 0:
			if (strcmp(lgopts[option_index].name, "config") == 0) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}
			if (strcmp(lgopts[option_index].name, "no-promisc") == 0) {
				printf("Promiscuous mode disabled\n");
				promiscuous_on = 0;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (enabled_port_mask == 0) {
		printf("portmask not specified\n");
		print_usage(prgname);
		return -1;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;
	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
static int
init_mem(void)
{
	const unsigned flags = 0;
	int socketid;
	unsigned lcoreid;
	char s[64];

	RTE_LCORE_FOREACH(lcoreid) {
		socketid = rte_lcore_to_socket_id(lcoreid);
		if (socketid >= RTE_MAX_NUMA_NODES) {
			printf("Socket %d of lcore %u is out of range %d\n",
				socketid, lcoreid, RTE_MAX_NUMA_NODES);
			return -1;
		}
		if (pktmbuf_pool[socketid] == NULL) {
			snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_mempool_create(s, NB_MBUF, MBUF_SIZE, 32,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socketid, flags);
			if (pktmbuf_pool[socketid] == NULL) {
				printf("Cannot init mbuf pool on socket %d\n", socketid);
				return -1;
			}
			printf("Allocated mbuf pool on socket %d\n", socketid);
		}
	}
	return 0;
}
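
/*
 * Note: init_mem() creates one mbuf pool per NUMA socket rather than a
 * single global pool, so each lcore allocates packet buffers from memory
 * local to its own socket; main() below passes pktmbuf_pool[socketid] to
 * rte_eth_rx_queue_setup() for the same reason.
 */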
int
main(int argc, char **argv)
{
	struct lcore_conf *qconf;
	struct rte_eth_link link;
	int ret;
	unsigned nb_ports;
	uint16_t queueid;
	unsigned lcoreid;
	uint32_t nb_tx_queue;
	uint8_t portid, nb_rx_queue, queue, socketid, last_port;
	unsigned nb_ports_in_mask = 0;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;

	if (check_lcore_params() < 0)
		rte_panic("check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		return -1;

	ret = init_mem();
	if (ret < 0)
		return -1;

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	if (check_port_config(nb_ports) < 0)
		rte_panic("check_port_config failed\n");

	/* reset dst_ports */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		dst_ports[portid] = 0;
	last_port = 0;

	/*
	 * Each logical core is assigned a dedicated TX queue on each port.
	 * Enabled ports are paired so that each forwards to the other.
	 */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		if (nb_ports_in_mask % 2) {
			dst_ports[portid] = last_port;
			dst_ports[last_port] = portid;
		}
		else
			last_port = portid;

		nb_ports_in_mask++;
	}
	if (nb_ports_in_mask % 2) {
		printf("Notice: odd number of ports in portmask.\n");
		dst_ports[last_port] = last_port;
	}

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		if (nb_rx_queue > get_port_max_rx_queues(portid))
			rte_panic("Number of rx queues %d exceeds max number of rx queues %u"
				" for port %d\n", nb_rx_queue, get_port_max_rx_queues(portid),
				portid);
		nb_tx_queue = rte_lcore_count();
		if (nb_tx_queue > get_port_max_tx_queues(portid))
			rte_panic("Number of lcores %u exceeds max number of tx queues %u"
				" for port %d\n", nb_tx_queue, get_port_max_tx_queues(portid),
				portid);
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)nb_tx_queue);
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)nb_tx_queue, &port_conf);
		if (ret < 0)
			rte_panic("Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		RTE_LCORE_FOREACH(lcoreid) {
			socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
			printf("txq=%u,%d,%d ", lcoreid, queueid, socketid);
			fflush(stdout);
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						socketid, NULL);
			if (ret < 0)
				rte_panic("rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_conf[lcoreid];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}
		printf("\n");
	}

	RTE_LCORE_FOREACH(lcoreid) {
		qconf = &lcore_conf[lcoreid];
		printf("\nInitializing rx queues on lcore %u ... ", lcoreid);
		fflush(stdout);
		/* init RX queues */
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;
			socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
						socketid, NULL,
						pktmbuf_pool[socketid]);
			if (ret < 0)
				rte_panic("rte_eth_rx_queue_setup: err=%d,"
					"port=%d\n", ret, portid);
		}
	}
	printf("\n");

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_panic("rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		printf("done: Port %d ", portid);

		/* get link status */
		rte_eth_link_get(portid, &link);
		if (link.link_status)
			printf(" Link Up - speed %u Mbps - %s\n",
				(unsigned) link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				("full-duplex") : ("half-duplex"));
		else
			printf(" Link Down\n");

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	printf("Crypto: Initializing Crypto...\n");
	if (crypto_init() != 0)
		return -1;

	RTE_LCORE_FOREACH(lcoreid) {
		if (per_core_crypto_init(lcoreid) != 0) {
			printf("Crypto: Cannot init lcore crypto on lcore %u\n",
				(unsigned)lcoreid);
			return -1;
		}
	}
	printf("Crypto: Initialization complete\n");

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcoreid) {
		if (rte_eal_wait_lcore(lcoreid) < 0)
			return -1;
	}

	return 0;
}