/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>

#include "main.h"
#include "crypto.h"
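
/*
 * Each mbuf carries a 2KB data room plus the rte_mbuf metadata and the
 * packet headroom; 32K mbufs are allocated per socket (see init_mem()).
 */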
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF (32 * 1024)
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
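
/*
 * TX queues are drained at most once per BURST_TX_DRAIN_US. To keep
 * rte_rdtsc() off the fast path, the timestamp is sampled only once per
 * TSC_COUNT_LIMIT iterations of the main loop. nic_tx_flush_queues()
 * flushes every port whose bit is set in tx_mask, then re-arms all bits;
 * a port's bit is cleared whenever a full burst has just been sent.
 */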
#define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF
#define TSC_COUNT_LIMIT 1000
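
/*
 * The action for each packet is encoded in its IPv4 source address
 * a.b.c.d, as decoded in main_loop() (assuming a little-endian host):
 * bit 0 of octet d requests encryption and bit 1 decryption, while
 * octet c selects the cipher algorithm and octet b the hash algorithm.
 */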
#define ACTION_ENCRYPT 1
#define ACTION_DECRYPT 2
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static unsigned enabled_port_mask = 0;
static int promiscuous_on = 1; /**< Ports set in promiscuous mode on by default. */

/* destination port for each enabled port (ports are paired off in MAIN) */
static uint32_t dst_ports[RTE_MAX_ETHPORTS];
struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};
struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;
#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

static struct lcore_params *lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
static struct rte_mempool *pktmbuf_pool[RTE_MAX_NUMA_NODES];
/* per-lcore run-time state */
struct lcore_conf {
	uint64_t tsc;
	uint64_t tsc_count;
	uint32_t tx_mask;
	uint16_t n_rx_queue;
	uint16_t rx_queue_list_pos;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct mbuf_table rx_mbuf;
	uint32_t rx_mbuf_pos;
	uint32_t rx_curr_queue;
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
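
/*
 * Each lcore runs the main loop over its own lcore_conf entry, so the
 * RX burst buffer and the per-port TX staging tables need no locking.
 */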
static inline struct rte_mbuf *
nic_rx_get_packet(struct lcore_conf *qconf)
{
	struct rte_mbuf *pkt;

	if (unlikely(qconf->n_rx_queue == 0))
		return NULL;

	/* Look for the next queue with packets; return if none */
	if (unlikely(qconf->rx_mbuf_pos == qconf->rx_mbuf.len)) {
		uint16_t i;

		qconf->rx_mbuf_pos = 0;
		for (i = 0; i < qconf->n_rx_queue; i++) {
			qconf->rx_mbuf.len = rte_eth_rx_burst(
				qconf->rx_queue_list[qconf->rx_curr_queue].port_id,
				qconf->rx_queue_list[qconf->rx_curr_queue].queue_id,
				qconf->rx_mbuf.m_table, MAX_PKT_BURST);

			qconf->rx_curr_queue++;
			if (unlikely(qconf->rx_curr_queue == qconf->n_rx_queue))
				qconf->rx_curr_queue = 0;
			if (likely(qconf->rx_mbuf.len > 0))
				break;
		}
		if (unlikely(i == qconf->n_rx_queue))
			return NULL;
	}

	/* Get the next packet from the current queue; if last packet, go to next queue */
	pkt = qconf->rx_mbuf.m_table[qconf->rx_mbuf_pos];
	qconf->rx_mbuf_pos++;

	return pkt;
}
/* Flush staged packets on every armed TX queue, then re-arm the mask */
static void
nic_tx_flush_queues(struct lcore_conf *qconf)
{
	uint8_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		struct rte_mbuf **m_table = NULL;
		uint16_t queueid, len;
		uint32_t n, i;

		if (likely((qconf->tx_mask & (1 << portid)) == 0))
			continue;

		len = qconf->tx_mbufs[portid].len;
		if (likely(len == 0))
			continue;

		queueid = qconf->tx_queue_id[portid];
		m_table = qconf->tx_mbufs[portid].m_table;

		n = rte_eth_tx_burst(portid, queueid, m_table, len);
		/* free any packets the NIC could not accept */
		for (i = n; i < len; i++)
			rte_pktmbuf_free(m_table[i]);

		qconf->tx_mbufs[portid].len = 0;
	}

	qconf->tx_mask = TX_QUEUE_FLUSH_MASK;
}
static inline void
nic_tx_send_packet(struct rte_mbuf *pkt, uint8_t port)
{
	struct lcore_conf *qconf;
	uint16_t queueid, len;
	unsigned lcoreid;

	if (unlikely(pkt == NULL))
		return;

	lcoreid = rte_lcore_id();
	qconf = &lcore_conf[lcoreid];

	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = pkt;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		uint32_t n, i;

		queueid = qconf->tx_queue_id[port];
		n = rte_eth_tx_burst(port, queueid,
				qconf->tx_mbufs[port].m_table, MAX_PKT_BURST);
		/* free any packets the NIC could not accept */
		for (i = n; i < MAX_PKT_BURST; i++)
			rte_pktmbuf_free(qconf->tx_mbufs[port].m_table[i]);

		/* a full burst just went out; skip this port at the next flush */
		qconf->tx_mask &= ~(1 << port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
}
/* main processing loop */
static __attribute__((noreturn)) int
main_loop(__attribute__((unused)) void *dummy)
{
	uint32_t lcoreid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
			US_PER_S * BURST_TX_DRAIN_US;

	lcoreid = rte_lcore_id();
	qconf = &lcore_conf[lcoreid];

	printf("Thread %u starting...\n", lcoreid);

	for (;;) {
		struct rte_mbuf *pkt;
		uint32_t pkt_from_nic_rx = 0;
		uint8_t port;

		/* Flush TX queues */
		qconf->tsc_count++;
		if (unlikely(qconf->tsc_count == TSC_COUNT_LIMIT)) {
			uint64_t tsc, diff_tsc;

			tsc = rte_rdtsc();
			diff_tsc = tsc - qconf->tsc;
			if (unlikely(diff_tsc > drain_tsc)) {
				nic_tx_flush_queues(qconf);
				crypto_flush_tx_queue(lcoreid);
				qconf->tsc = tsc;
			}
			qconf->tsc_count = 0;
		}

		/*
		 * Check the Intel QuickAssist queues first; if no crypto
		 * response is ready, poll the NIC RX queues.
		 */
		pkt = (struct rte_mbuf *) crypto_get_next_response();
		if (pkt == NULL) {
			pkt = nic_rx_get_packet(qconf);
			pkt_from_nic_rx = 1;
		}
		if (pkt == NULL)
			continue;

		/* Send packet to either QAT encrypt, QAT decrypt or NIC TX */
		if (pkt_from_nic_rx) {
			struct ipv4_hdr *ip = (struct ipv4_hdr *)
				(rte_pktmbuf_mtod(pkt, unsigned char *) +
				sizeof(struct ether_hdr));
			if (ip->src_addr & rte_cpu_to_be_32(ACTION_ENCRYPT)) {
				if (CRYPTO_RESULT_FAIL == crypto_encrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}

			if (ip->src_addr & rte_cpu_to_be_32(ACTION_DECRYPT)) {
				if (CRYPTO_RESULT_FAIL == crypto_decrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}
		}

		/* crypto responses and pass-through packets go to the peer port */
		port = dst_ports[pkt->port];

		/* Transmit the packet */
		nic_tx_send_packet(pkt, (uint8_t)port);
	}
}
static inline unsigned
get_port_max_rx_queues(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.max_rx_queues;
}

static inline unsigned
get_port_max_tx_queues(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.max_tx_queues;
}
static int
check_lcore_params(void)
{
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].queue_id >= get_port_max_rx_queues(lcore_params[i].port_id)) {
			printf("invalid queue number: %hhu\n", lcore_params[i].queue_id);
			return -1;
		}
		if (!rte_lcore_is_enabled(lcore_params[i].lcore_id)) {
			printf("error: lcore %hhu is not enabled in lcore mask\n",
				lcore_params[i].lcore_id);
			return -1;
		}
	}
	return 0;
}
static int
check_port_config(const unsigned nb_ports)
{
	unsigned portid;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].n_rx_queue++;
	}
	return 0;
}
/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [--no-promisc]"
		" [--config '(port,queue,lcore)[,(port,queue,lcore)]']\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  --no-promisc: disable promiscuous mode (default is ON)\n"
		"  --config '(port,queue,lcore)': rx queues configuration\n",
		prgname);
}
static unsigned
parse_portmask(const char *portmask)
{
	unsigned long pm;
	char *end = NULL;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p_end = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	nb_lcore_params = 0;

	while ((p = strchr(p_end, '(')) != NULL) {
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		++p;
		if ((p_end = strchr(p, ')')) == NULL)
			return -1;

		size = p_end - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"config", 1, 0, 0},
		{"no-promisc", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			if (strcmp(lgopts[option_index].name, "config") == 0) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}
			if (strcmp(lgopts[option_index].name, "no-promisc") == 0) {
				printf("Promiscuous mode disabled\n");
				promiscuous_on = 0;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (enabled_port_mask == 0) {
		printf("portmask not specified\n");
		print_usage(prgname);
		return -1;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */

	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
		eth_addr->addr_bytes[0],
		eth_addr->addr_bytes[1],
		eth_addr->addr_bytes[2],
		eth_addr->addr_bytes[3],
		eth_addr->addr_bytes[4],
		eth_addr->addr_bytes[5]);
}
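
/* Create one mbuf pool per NUMA socket so lcores use socket-local buffers */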
static int
init_mem(void)
{
	const unsigned flags = 0;
	int socketid;
	unsigned lcoreid;
	char s[64];

	RTE_LCORE_FOREACH(lcoreid) {
		socketid = rte_lcore_to_socket_id(lcoreid);
		if (socketid >= RTE_MAX_NUMA_NODES) {
			printf("Socket %d of lcore %u is out of range %d\n",
				socketid, lcoreid, RTE_MAX_NUMA_NODES);
			return -1;
		}
		if (pktmbuf_pool[socketid] == NULL) {
			snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_mempool_create(s, NB_MBUF, MBUF_SIZE, 32,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socketid, flags);
			if (pktmbuf_pool[socketid] == NULL) {
				printf("Cannot init mbuf pool on socket %d\n", socketid);
				return -1;
			}
			printf("Allocated mbuf pool on socket %d\n", socketid);
		}
	}
	return 0;
}
int
MAIN(int argc, char **argv)
{
	struct lcore_conf *qconf;
	struct rte_eth_link link;
	int ret;
	unsigned nb_ports;
	uint16_t queueid;
	unsigned lcoreid;
	uint32_t nb_tx_queue;
	uint8_t portid, nb_rx_queue, queue, socketid, last_port;
	unsigned nb_ports_in_mask = 0;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		return -1;

	if (check_lcore_params() < 0)
		rte_panic("check_lcore_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		return -1;

	ret = init_mem();
	if (ret < 0)
		return -1;

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	if (check_port_config(nb_ports) < 0)
		rte_panic("check_port_config failed\n");

	/* reset dst_ports */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		dst_ports[portid] = 0;
	last_port = 0;
	/*
	 * Pair off the enabled ports in the order they appear in the port
	 * mask: each port forwards to its peer (e.g. with -p 0xf, ports 0
	 * and 1 forward to each other, as do ports 2 and 3).
	 */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		if (nb_ports_in_mask % 2) {
			dst_ports[portid] = last_port;
			dst_ports[last_port] = portid;
		}
		else
			last_port = portid;

		nb_ports_in_mask++;
	}
	if (nb_ports_in_mask % 2) {
		printf("Notice: odd number of ports in portmask.\n");
		dst_ports[last_port] = last_port;
	}
	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("\nSkipping disabled port %d\n", portid);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", portid);
		fflush(stdout);

		nb_rx_queue = get_port_n_rx_queues(portid);
		if (nb_rx_queue > get_port_max_rx_queues(portid))
			rte_panic("Number of rx queues %d exceeds max number of rx queues %u"
				" for port %d\n", nb_rx_queue, get_port_max_rx_queues(portid),
				portid);
		nb_tx_queue = rte_lcore_count();
		if (nb_tx_queue > get_port_max_tx_queues(portid))
			rte_panic("Number of lcores %u exceeds max number of tx queues %u"
				" for port %d\n", nb_tx_queue, get_port_max_tx_queues(portid),
				portid);
		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
			nb_rx_queue, (unsigned)nb_tx_queue);
		ret = rte_eth_dev_configure(portid, nb_rx_queue,
					(uint16_t)nb_tx_queue, &port_conf);
		if (ret < 0)
			rte_panic("Cannot configure device: err=%d, port=%d\n",
				ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		/* init one TX queue per couple (lcore,port) */
		queueid = 0;
		RTE_LCORE_FOREACH(lcoreid) {
			socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
			printf("txq=%u,%d,%d ", lcoreid, queueid, socketid);
			fflush(stdout);
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
						socketid,
						NULL);
			if (ret < 0)
				rte_panic("rte_eth_tx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);

			qconf = &lcore_conf[lcoreid];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}
		printf("\n");
	}
	RTE_LCORE_FOREACH(lcoreid) {
		qconf = &lcore_conf[lcoreid];
		printf("\nInitializing rx queues on lcore %u ... ", lcoreid);
		fflush(stdout);
		/* init RX queues */
		for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
			portid = qconf->rx_queue_list[queue].port_id;
			queueid = qconf->rx_queue_list[queue].queue_id;
			socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
			printf("rxq=%d,%d,%d ", portid, queueid, socketid);
			fflush(stdout);

			ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
					socketid,
					NULL,
					pktmbuf_pool[socketid]);
			if (ret < 0)
				rte_panic("rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}

	printf("\n");
	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_panic("rte_eth_dev_start: err=%d, port=%d\n",
				ret, portid);

		printf("done: Port %d ", portid);

		/* get link status */
		rte_eth_link_get(portid, &link);
		if (link.link_status)
			printf(" Link Up - speed %u Mbps - %s\n",
				(unsigned) link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				("full-duplex") : ("half-duplex"));
		else
			printf(" Link Down\n");
		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}
815 printf("Crypto: Initializing Crypto...\n");
816 if (crypto_init() != 0)
819 RTE_LCORE_FOREACH(lcoreid) {
820 if (per_core_crypto_init(lcoreid) != 0) {
821 printf("Crypto: Cannot init lcore crypto on lcore %u\n", (unsigned)lcoreid);
825 printf("Crypto: Initialization complete\n");
826 /* launch per-lcore init on every lcore */
827 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
828 RTE_LCORE_FOREACH_SLAVE(lcoreid) {
829 if (rte_eal_wait_lcore(lcoreid) < 0)