 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN 9600

#define MEMPOOL_CACHE_SIZE 256

#define NB_MBUF (32000)

#define CDEV_MAP_ENTRIES 1024
#define CDEV_MP_NB_OBJS 2048
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1

#define OPTION_CONFIG "config"
#define OPTION_SINGLE_SA "single-sa"
#define OPTION_EP0 "ep0"
#define OPTION_EP1 "ep1"

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch when reading packets */
#define PREFETCH_OFFSET 3

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024
#define UNPROTECTED_PORT(portid) (unprotected_port_mask & (1 << (portid)))
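/*
 * Illustrative note (not part of the original sources): with "-u 0x1" on the
 * command line, unprotected_port_mask is 0x1, so UNPROTECTED_PORT(0) is
 * non-zero and port 0 is treated as the unprotected side (IPsec traffic is
 * expected inbound there), while UNPROTECTED_PORT(1) evaluates to 0 and
 * port 1 is treated as the protected side.
 */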
/*
 * Configurable number of RX/TX ring descriptors
 */
#define IPSEC_SECGW_RX_DESC_DEFAULT 128
#define IPSEC_SECGW_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
    (((uint64_t)((a) & 0xff) << 56) | \
    ((uint64_t)((b) & 0xff) << 48) | \
    ((uint64_t)((c) & 0xff) << 40) | \
    ((uint64_t)((d) & 0xff) << 32) | \
    ((uint64_t)((e) & 0xff) << 24) | \
    ((uint64_t)((f) & 0xff) << 16) | \
    ((uint64_t)((g) & 0xff) << 8) | \
    ((uint64_t)(h) & 0xff))
#else
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
    (((uint64_t)((h) & 0xff) << 56) | \
    ((uint64_t)((g) & 0xff) << 48) | \
    ((uint64_t)((f) & 0xff) << 40) | \
    ((uint64_t)((e) & 0xff) << 32) | \
    ((uint64_t)((d) & 0xff) << 24) | \
    ((uint64_t)((c) & 0xff) << 16) | \
    ((uint64_t)((b) & 0xff) << 8) | \
    ((uint64_t)(a) & 0xff))
#endif

#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
    addr.addr_bytes[0], addr.addr_bytes[1], \
    addr.addr_bytes[2], addr.addr_bytes[3], \
    addr.addr_bytes[4], addr.addr_bytes[5], \
    0, 0)
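/*
 * Worked example (added for clarity, assuming a little-endian host):
 * ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) expands to
 * __BYTES_TO_UINT64(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a, 0, 0), which places
 * the first MAC byte in the least significant byte of the uint64_t. Laid out
 * in memory that is 00:16:3e:7e:94:9a followed by two zero bytes, which is
 * why prepare_tx_pkt() can memcpy() the first six bytes of
 * ethaddr_tbl[port].src/dst straight into the Ethernet header.
 */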
/* Per-port source and destination Ethernet addresses */
struct ethaddr_info {
    uint64_t src, dst;
};

struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
    { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
    { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
    { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
    { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static int32_t ep = -1; /**< Endpoint configuration (0 or 1) */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
struct lcore_rx_queue {
    uint8_t port_id;
    uint8_t queue_id;
} __rte_cache_aligned;

struct lcore_params {
    uint8_t port_id;
    uint8_t queue_id;
    uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];

static struct lcore_params *lcore_params;
static uint16_t nb_lcore_params;

static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
struct buffer {
    uint16_t len;
    struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
};

struct lcore_conf {
    uint16_t nb_rx_queue;
    struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
    uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
    struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
    struct ipsec_ctx inbound;
    struct ipsec_ctx outbound;
    struct rt_ctx *rt_ctx;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .max_rx_pkt_len = ETHER_MAX_LEN,
        .header_split = 0,   /**< Header Split disabled */
        .hw_ip_checksum = 1, /**< IP checksum offload enabled */
        .hw_vlan_filter = 0, /**< VLAN filtering disabled */
        .jumbo_frame = 0,    /**< Jumbo Frame Support disabled */
        .hw_strip_crc = 0,   /**< CRC stripping by hardware disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
                ETH_RSS_TCP | ETH_RSS_SCTP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static struct socket_ctx socket_ctx[NB_SOCKETS];
struct traffic_type {
    const uint8_t *data[MAX_PKT_BURST * 2];
    struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
    uint32_t res[MAX_PKT_BURST * 2];
    uint32_t num;
};

struct ipsec_traffic {
    struct traffic_type ipsec4;
    struct traffic_type ipv4;
};
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)

    if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
        rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
        nlp = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
                offsetof(struct ip, ip_p));
        if (*nlp == IPPROTO_ESP)
            t->ipsec4.pkts[(t->ipsec4.num)++] = pkt;
        else {
            t->ipv4.data[t->ipv4.num] = nlp;
            t->ipv4.pkts[(t->ipv4.num)++] = pkt;
        }
    } else {
        /* Unknown/Unsupported type, drop the packet */
        rte_pktmbuf_free(pkt);
    }
prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
        uint16_t nb_pkts)

    for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
        rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
                void *));
        prepare_one_packet(pkts[i], t);
    }

    /* Process remaining packets */
    for (; i < nb_pkts; i++)
        prepare_one_packet(pkts[i], t);
prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)

    pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
    pkt->l3_len = sizeof(struct ip);
    pkt->l2_len = ETHER_HDR_LEN;

    struct ether_hdr *ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt,
            ETHER_HDR_LEN);

    ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
    memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
            sizeof(struct ether_addr));
    memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
            sizeof(struct ether_addr));
prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)

    const int32_t prefetch_offset = 2;

    for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
        rte_prefetch0(pkts[i + prefetch_offset]->cacheline1);
        prepare_tx_pkt(pkts[i], port);
    }

    /* Process remaining packets */
    for (; i < nb_pkts; i++)
        prepare_tx_pkt(pkts[i], port);
/* Send burst of packets on an output interface */
static inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)

    struct rte_mbuf **m_table;

    queueid = qconf->tx_queue_id[port];
    m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

    prepare_tx_burst(m_table, n, port);

    ret = rte_eth_tx_burst(port, queueid, m_table, n);
    if (unlikely(ret < n)) {
        do {
            rte_pktmbuf_free(m_table[ret]);
        } while (++ret < n);
    }
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
send_single_packet(struct rte_mbuf *m, uint8_t port)

    struct lcore_conf *qconf;

    lcore_id = rte_lcore_id();

    qconf = &lcore_conf[lcore_id];
    len = qconf->tx_mbufs[port].len;
    qconf->tx_mbufs[port].m_table[len] = m;
    len++;

    /* enough pkts to be sent */
    if (unlikely(len == MAX_PKT_BURST)) {
        send_burst(qconf, MAX_PKT_BURST, port);
        len = 0;
    }

    qconf->tx_mbufs[port].len = len;
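    /*
     * Note (added for clarity): packets are buffered per (lcore, port). The
     * buffer is flushed either here, once MAX_PKT_BURST packets have
     * accumulated, or by drain_buffers() in the main loop roughly every
     * BURST_TX_DRAIN_US microseconds, so short bursts are not left stranded.
     */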
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
        struct ipsec_traffic *traffic)

    uint16_t idx, nb_pkts_in, i, j;
    uint32_t sa_idx, res;

    nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
            traffic->ipsec4.num, MAX_PKT_BURST);

    /* SP/ACL inbound check for both decrypted and plain ipv4 traffic */
    for (i = 0; i < nb_pkts_in; i++) {
        idx = traffic->ipv4.num++;
        m = traffic->ipsec4.pkts[i];
        traffic->ipv4.pkts[idx] = m;
        traffic->ipv4.data[idx] = rte_pktmbuf_mtod_offset(m,
                uint8_t *, offsetof(struct ip, ip_p));
    }

    rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
            traffic->ipv4.data, traffic->ipv4.res,
            traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);

    j = 0;
    for (i = 0; i < traffic->ipv4.num - nb_pkts_in; i++) {
        m = traffic->ipv4.pkts[i];
        res = traffic->ipv4.res[i];
        if ((res == 0) || (res & DISCARD)) {
            rte_pktmbuf_free(m);
            continue;
        }
        traffic->ipv4.pkts[j++] = m;
    }

    /* Check that the returned SA SPI matches the packet SPI */
    for ( ; i < traffic->ipv4.num; i++) {
        m = traffic->ipv4.pkts[i];
        sa_idx = traffic->ipv4.res[i] & PROTECT_MASK;
        if (sa_idx == 0 || !inbound_sa_check(ipsec_ctx->sa_ctx,
                m, sa_idx)) {
            rte_pktmbuf_free(m);
            continue;
        }
        traffic->ipv4.pkts[j++] = m;
    }

    traffic->ipv4.num = j;
process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
        struct ipsec_traffic *traffic)

    uint16_t idx, nb_pkts_out, i, j;
    uint32_t sa_idx, res;

    rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
            traffic->ipv4.data, traffic->ipv4.res,
            traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);

    /* Drop any IPsec traffic from protected ports */
    for (i = 0; i < traffic->ipsec4.num; i++)
        rte_pktmbuf_free(traffic->ipsec4.pkts[i]);

    traffic->ipsec4.num = 0;

    j = 0;
    for (i = 0; i < traffic->ipv4.num; i++) {
        m = traffic->ipv4.pkts[i];
        res = traffic->ipv4.res[i];
        sa_idx = res & PROTECT_MASK;
        if ((res == 0) || (res & DISCARD))
            rte_pktmbuf_free(m);
        else if (sa_idx != 0) {
            traffic->ipsec4.res[traffic->ipsec4.num] = sa_idx;
            traffic->ipsec4.pkts[traffic->ipsec4.num++] = m;
        } else
            traffic->ipv4.pkts[j++] = m;
    }
    traffic->ipv4.num = j;

    nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec4.pkts,
            traffic->ipsec4.res, traffic->ipsec4.num,
            MAX_PKT_BURST);

    for (i = 0; i < nb_pkts_out; i++) {
        idx = traffic->ipv4.num++;
        m = traffic->ipsec4.pkts[i];
        traffic->ipv4.pkts[idx] = m;
    }
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
        struct ipsec_traffic *traffic)

    uint16_t nb_pkts_in, i;

    /* Drop any IPv4 traffic from unprotected ports */
    for (i = 0; i < traffic->ipv4.num; i++)
        rte_pktmbuf_free(traffic->ipv4.pkts[i]);

    traffic->ipv4.num = 0;

    nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
            traffic->ipsec4.num, MAX_PKT_BURST);

    for (i = 0; i < nb_pkts_in; i++)
        traffic->ipv4.pkts[i] = traffic->ipsec4.pkts[i];

    traffic->ipv4.num = nb_pkts_in;
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
        struct ipsec_traffic *traffic)

    uint16_t nb_pkts_out, i;

    /* Drop any IPsec traffic from protected ports */
    for (i = 0; i < traffic->ipsec4.num; i++)
        rte_pktmbuf_free(traffic->ipsec4.pkts[i]);

    traffic->ipsec4.num = 0;

    for (i = 0; i < traffic->ipv4.num; i++)
        traffic->ipv4.res[i] = single_sa_idx;

    nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipv4.pkts,
            traffic->ipv4.res, traffic->ipv4.num,
            MAX_PKT_BURST);

    traffic->ipv4.num = nb_pkts_out;
route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)

    uint32_t hop[MAX_PKT_BURST * 2];
    uint32_t dst_ip[MAX_PKT_BURST * 2];

    for (i = 0; i < nb_pkts; i++) {
        offset = offsetof(struct ip, ip_dst);
        dst_ip[i] = *rte_pktmbuf_mtod_offset(pkts[i],
                uint32_t *, offset);
        dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]);
    }

    rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_pkts);

    for (i = 0; i < nb_pkts; i++) {
        if ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) {
            rte_pktmbuf_free(pkts[i]);
            continue;
        }
        send_single_packet(pkts[i], hop[i] & 0xff);
    }
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
        uint8_t nb_pkts, uint8_t portid)

    struct ipsec_traffic traffic;

    prepare_traffic(pkts, &traffic, nb_pkts);

    if (single_sa) {
        if (UNPROTECTED_PORT(portid))
            process_pkts_inbound_nosp(&qconf->inbound, &traffic);
        else
            process_pkts_outbound_nosp(&qconf->outbound, &traffic);
    } else {
        if (UNPROTECTED_PORT(portid))
            process_pkts_inbound(&qconf->inbound, &traffic);
        else
            process_pkts_outbound(&qconf->outbound, &traffic);
    }

    route_pkts(qconf->rt_ctx, traffic.ipv4.pkts, traffic.ipv4.num);
drain_buffers(struct lcore_conf *qconf)

    for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
        buf = &qconf->tx_mbufs[portid];
        if (buf->len == 0)
            continue;
        send_burst(qconf, buf->len, portid);
        buf->len = 0;
    }
/* main processing loop */
main_loop(__attribute__((unused)) void *dummy)

    struct rte_mbuf *pkts[MAX_PKT_BURST];
    uint64_t prev_tsc, diff_tsc, cur_tsc;
    uint8_t portid, queueid;
    struct lcore_conf *qconf;

    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
            / US_PER_S * BURST_TX_DRAIN_US;
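    /*
     * Worked example (added for clarity): with a 2 GHz TSC,
     * (2,000,000,000 + 1,000,000 - 1) / 1,000,000 = 2000 cycles per us,
     * so drain_tsc = 2000 * BURST_TX_DRAIN_US = 200,000 cycles, i.e. the
     * per-port TX buffers are force-flushed roughly every 100 us.
     */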
    struct lcore_rx_queue *rxql;

    lcore_id = rte_lcore_id();
    qconf = &lcore_conf[lcore_id];
    rxql = qconf->rx_queue_list;
    socket_id = rte_lcore_to_socket_id(lcore_id);

    qconf->rt_ctx = socket_ctx[socket_id].rt_ipv4;
    qconf->inbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_in;
    qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_in;
    qconf->inbound.cdev_map = cdev_map_in;
    qconf->outbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_out;
    qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_out;
    qconf->outbound.cdev_map = cdev_map_out;

    if (qconf->nb_rx_queue == 0) {
        RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
        return 0;
    }

    RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

    for (i = 0; i < qconf->nb_rx_queue; i++) {
        portid = rxql[i].port_id;
        queueid = rxql[i].queue_id;
        RTE_LOG(INFO, IPSEC,
            " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
            lcore_id, portid, queueid);
    }
    while (1) {
        cur_tsc = rte_rdtsc();

        /* TX queue buffer drain */
        diff_tsc = cur_tsc - prev_tsc;

        if (unlikely(diff_tsc > drain_tsc)) {
            drain_buffers(qconf);
            prev_tsc = cur_tsc;
        }

        /* Read packets from RX queues */
        for (i = 0; i < qconf->nb_rx_queue; ++i) {
            portid = rxql[i].port_id;
            queueid = rxql[i].queue_id;
            nb_rx = rte_eth_rx_burst(portid, queueid,
                    pkts, MAX_PKT_BURST);

            if (nb_rx > 0)
                process_pkts(qconf, pkts, nb_rx, portid);
        }
    }
    uint8_t lcore, portid, nb_ports;

    if (lcore_params == NULL) {
        printf("Error: No port/queue/core mappings\n");
        return -1;
    }

    nb_ports = rte_eth_dev_count();
    if (nb_ports > RTE_MAX_ETHPORTS)
        nb_ports = RTE_MAX_ETHPORTS;

    for (i = 0; i < nb_lcore_params; ++i) {
        lcore = lcore_params[i].lcore_id;
        if (!rte_lcore_is_enabled(lcore)) {
            printf("error: lcore %hhu is not enabled in "
                "lcore mask\n", lcore);
            return -1;
        }
        socket_id = rte_lcore_to_socket_id(lcore);
        if (socket_id != 0 && numa_on == 0) {
            printf("warning: lcore %hhu is on socket %d "
        portid = lcore_params[i].port_id;
        if ((enabled_port_mask & (1 << portid)) == 0) {
            printf("port %u is not enabled in port mask\n", portid);
            return -1;
        }
        if (portid >= nb_ports) {
            printf("port %u is not present on the board\n", portid);
            return -1;
        }
    }
get_port_nb_rx_queues(const uint8_t port)

    for (i = 0; i < nb_lcore_params; ++i) {
        if (lcore_params[i].port_id == port &&
                lcore_params[i].queue_id > queue)
            queue = lcore_params[i].queue_id;
    }
    return (uint8_t)(++queue);
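/*
 * Example (added for clarity): with --config "(0,0,1),(0,1,2)" the highest
 * queue id configured for port 0 is 1, so this returns 2 and port 0 is set
 * up with two RX queues.
 */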
init_lcore_rx_queues(void)

    uint16_t i, nb_rx_queue;

    for (i = 0; i < nb_lcore_params; ++i) {
        lcore = lcore_params[i].lcore_id;
        nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
        if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
            printf("error: too many queues (%u) for lcore: %u\n",
                nb_rx_queue + 1, lcore);
            return -1;
        }
        lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
            lcore_params[i].port_id;
        lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
            lcore_params[i].queue_id;
        lcore_conf[lcore].nb_rx_queue++;
    }
    return 0;
print_usage(const char *prgname)

    printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK"
        " --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore)]"
        " --single-sa SAIDX --ep0|--ep1\n"
        " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
        " -P : enable promiscuous mode\n"
        " -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
        " --"OPTION_CONFIG": (port,queue,lcore): "
        "rx queues configuration\n"
        " --single-sa SAIDX: use single SA index for outbound, "
        " --ep0: Configure as Endpoint 0\n"
        " --ep1: Configure as Endpoint 1\n", prgname);
parse_portmask(const char *portmask)

    /* parse hexadecimal string */
    pm = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
        return -1;

    if ((pm == 0) && errno)
        return -1;

parse_decimal(const char *str)

    num = strtoul(str, &end, 10);
    if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
        return -1;
parse_config(const char *q_arg)

    const char *p, *p0 = q_arg;
    long int_fld[_NUM_FLD];
    char *str_fld[_NUM_FLD];

    while ((p = strchr(p0, '(')) != NULL) {
        if (size >= sizeof(s))
            return -1;

        snprintf(s, sizeof(s), "%.*s", size, p);
        if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
                _NUM_FLD)
            return -1;
        for (i = 0; i < _NUM_FLD; i++) {
            errno = 0;
            int_fld[i] = strtoul(str_fld[i], &end, 0);
            if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
                return -1;
        }
        if (nb_lcore_params >= MAX_LCORE_PARAMS) {
            printf("exceeded max number of lcore params: %hu\n",
                nb_lcore_params);
            return -1;
        }
        lcore_params_array[nb_lcore_params].port_id =
            (uint8_t)int_fld[FLD_PORT];
        lcore_params_array[nb_lcore_params].queue_id =
            (uint8_t)int_fld[FLD_QUEUE];
        lcore_params_array[nb_lcore_params].lcore_id =
            (uint8_t)int_fld[FLD_LCORE];
        ++nb_lcore_params;
    }
    lcore_params = lcore_params_array;
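/*
 * Example (added for clarity): --config "(0,0,1),(1,0,2)" produces two
 * lcore_params entries, mapping port 0 / RX queue 0 to lcore 1 and
 * port 1 / RX queue 0 to lcore 2.
 */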
#define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt)))

parse_args_long_options(struct option *lgopts, int32_t option_index)

    const char *optname = lgopts[option_index].name;

    if (__STRNCMP(optname, OPTION_CONFIG)) {
        ret = parse_config(optarg);
        if (ret)
            printf("invalid config\n");
    }

    if (__STRNCMP(optname, OPTION_SINGLE_SA)) {
        ret = parse_decimal(optarg);
        single_sa = 1;
        single_sa_idx = ret;
        printf("Configured with single SA index %u\n",
            single_sa_idx);
    }

    if (__STRNCMP(optname, OPTION_EP0)) {
        printf("endpoint 0\n");
        ep = 0;
    }

    if (__STRNCMP(optname, OPTION_EP1)) {
        printf("endpoint 1\n");
        ep = 1;
    }
parse_args(int32_t argc, char **argv)

    int32_t option_index;
    char *prgname = argv[0];
    static struct option lgopts[] = {
        {OPTION_CONFIG, 1, 0, 0},
        {OPTION_SINGLE_SA, 1, 0, 0},
        {OPTION_EP0, 0, 0, 0},
        {OPTION_EP1, 0, 0, 0},
    };

    while ((opt = getopt_long(argc, argvopt, "p:Pu:",
            lgopts, &option_index)) != EOF) {

        /* -p PORTMASK */
        enabled_port_mask = parse_portmask(optarg);
        if (enabled_port_mask == 0) {
            printf("invalid portmask\n");
            print_usage(prgname);
            return -1;
        }

        /* -P */
        printf("Promiscuous mode selected\n");

        /* -u PORTMASK */
        unprotected_port_mask = parse_portmask(optarg);
        if (unprotected_port_mask == 0) {
            printf("invalid unprotected portmask\n");
            print_usage(prgname);
            return -1;
        }

        /* long options */
        if (parse_args_long_options(lgopts, option_index)) {
            print_usage(prgname);
            return -1;
        }

        /* unknown option */
        print_usage(prgname);

    argv[optind-1] = prgname;

    optind = 0; /* reset getopt lib */
print_ethaddr(const char *name, const struct ether_addr *eth_addr)

    char buf[ETHER_ADDR_FMT_SIZE];
    ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
    printf("%s%s", name, buf);
/* Check link status of all ports; wait up to 9 seconds, then print it */
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)

#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
    uint8_t portid, count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    printf("\nChecking link status");
    for (count = 0; count <= MAX_CHECK_TIME; count++) {
        all_ports_up = 1;
        for (portid = 0; portid < port_num; portid++) {
            if ((port_mask & (1 << portid)) == 0)
                continue;
            memset(&link, 0, sizeof(link));
            rte_eth_link_get_nowait(portid, &link);
            /* print link status if flag set */
            if (print_flag == 1) {
                if (link.link_status)
                    printf("Port %d Link Up - speed %u "
                        "Mbps - %s\n", (uint8_t)portid,
                        (uint32_t)link.link_speed,
                        (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
                else
                    printf("Port %d Link Down\n",
                        (uint8_t)portid);
                continue;
            }
            /* clear all_ports_up flag if any link down */
            if (link.link_status == 0) {
                all_ports_up = 0;
                break;
            }
        }
        /* after finally printing all link status, get out */
        if (print_flag == 1)
            break;

        if (all_ports_up == 0) {
            printf(".");
            fflush(stdout);
            rte_delay_ms(CHECK_INTERVAL);
        }

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
            print_flag = 1;
            printf("done\n");
        }
    }
add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
        uint16_t qp, struct lcore_params *params,
        struct ipsec_ctx *ipsec_ctx,
        const struct rte_cryptodev_capabilities *cipher,
        const struct rte_cryptodev_capabilities *auth)

    struct cdev_key key = { 0 };

    key.lcore_id = params->lcore_id;
    key.cipher_algo = cipher->sym.cipher.algo;
    key.auth_algo = auth->sym.auth.algo;

    ret = rte_hash_lookup(map, &key);

    for (i = 0; i < ipsec_ctx->nb_qps; i++)
        if (ipsec_ctx->tbl[i].id == cdev_id)
            break;

    if (i == ipsec_ctx->nb_qps) {
        if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
            printf("Maximum number of crypto devices assigned to "
                "a core, increase MAX_QP_PER_LCORE value\n");

        ipsec_ctx->tbl[i].id = cdev_id;
        ipsec_ctx->tbl[i].qp = qp;
        ipsec_ctx->nb_qps++;
        printf("%s cdev mapping: lcore %u using cdev %u qp %u "
            "(cdev_id_qp %lu)\n", str, key.lcore_id,

    ret = rte_hash_add_key_data(map, &key, (void *)i);
    if (ret < 0)
        printf("Failed to insert cdev mapping for (lcore %u, "
            "cdev %u, qp %u), errno %d\n",
            key.lcore_id, ipsec_ctx->tbl[i].id,
            ipsec_ctx->tbl[i].qp, ret);
add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
        uint16_t qp, struct lcore_params *params)

    const struct rte_cryptodev_capabilities *i, *j;
    struct rte_hash *map;
    struct lcore_conf *qconf;
    struct ipsec_ctx *ipsec_ctx;

    qconf = &lcore_conf[params->lcore_id];

    if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
        map = cdev_map_out;
        ipsec_ctx = &qconf->outbound;
    } else {
        map = cdev_map_in;
        ipsec_ctx = &qconf->inbound;
    }

    /* Require cryptodevs with operation chaining */
    if (!(dev_info->feature_flags &
            RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))

    for (i = dev_info->capabilities;
            i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
        if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
            continue;

        if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
            continue;

        for (j = dev_info->capabilities;
                j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
            if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
                continue;

            if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
                continue;

            ret |= add_mapping(map, str, cdev_id, qp, params,
                    ipsec_ctx, i, j);
        }
    }
cryptodevs_init(void)

    struct rte_cryptodev_config dev_conf;
    struct rte_cryptodev_qp_conf qp_conf;
    uint16_t idx, max_nb_qps, qp, i;

    struct rte_hash_parameters params = { 0 };

    params.entries = CDEV_MAP_ENTRIES;
    params.key_len = sizeof(struct cdev_key);
    params.hash_func = rte_jhash;
    params.hash_func_init_val = 0;
    params.socket_id = rte_socket_id();

    params.name = "cdev_map_in";
    cdev_map_in = rte_hash_create(&params);
    if (cdev_map_in == NULL)
        rte_panic("Failed to create cdev_map hash table, errno = %d\n",
                rte_errno);

    params.name = "cdev_map_out";
    cdev_map_out = rte_hash_create(&params);
    if (cdev_map_out == NULL)
        rte_panic("Failed to create cdev_map hash table, errno = %d\n",
                rte_errno);

    printf("lcore/cryptodev/qp mappings:\n");

    /* Start from the last cdev id to give HW devices priority */
    for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
        struct rte_cryptodev_info cdev_info;

        rte_cryptodev_info_get(cdev_id, &cdev_info);

        if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
            max_nb_qps = cdev_info.max_nb_queue_pairs;
        else
            max_nb_qps = nb_lcore_params;

        while (qp < max_nb_qps && i < nb_lcore_params) {
            if (add_cdev_mapping(&cdev_info, cdev_id, qp,
                    &lcore_params[idx]))
                qp++;
            idx++;
            idx = idx % nb_lcore_params;
            i++;
        }

        dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
        dev_conf.nb_queue_pairs = qp;
        dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;
        dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;

        if (rte_cryptodev_configure(cdev_id, &dev_conf))
            rte_panic("Failed to initialize cryptodev %u\n",
                    cdev_id);

        qp_conf.nb_descriptors = CDEV_MP_NB_OBJS;
        for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
            if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
                    &qp_conf, dev_conf.socket_id))
                rte_panic("Failed to setup queue %u for "
                    "cdev_id %u\n", 0, cdev_id);
port_init(uint8_t portid)

    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf *txconf;
    uint16_t nb_tx_queue, nb_rx_queue;
    uint16_t tx_queueid, rx_queueid, queue, lcore_id;
    int32_t ret, socket_id;
    struct lcore_conf *qconf;
    struct ether_addr ethaddr;

    rte_eth_dev_info_get(portid, &dev_info);

    printf("Configuring device port %u:\n", portid);

    rte_eth_macaddr_get(portid, &ethaddr);
    ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
    print_ethaddr("Address: ", &ethaddr);

    nb_rx_queue = get_port_nb_rx_queues(portid);
    nb_tx_queue = nb_lcores;

    if (nb_rx_queue > dev_info.max_rx_queues)
        rte_exit(EXIT_FAILURE, "Error: queue %u not available "
            "(max rx queue is %u)\n",
            nb_rx_queue, dev_info.max_rx_queues);

    if (nb_tx_queue > dev_info.max_tx_queues)
        rte_exit(EXIT_FAILURE, "Error: queue %u not available "
            "(max tx queue is %u)\n",
            nb_tx_queue, dev_info.max_tx_queues);

    printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
        nb_rx_queue, nb_tx_queue);

    ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
            &port_conf);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Cannot configure device: "
            "err=%d, port=%d\n", ret, portid);

    /* init one TX queue per lcore */
    tx_queueid = 0;
    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;

        socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);

        printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);

        txconf = &dev_info.default_txconf;
        txconf->txq_flags = 0;

        ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
                socket_id, txconf);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
                "err=%d, port=%d\n", ret, portid);

        qconf = &lcore_conf[lcore_id];
        qconf->tx_queue_id[portid] = tx_queueid;
        tx_queueid++;

        /* init RX queues */
        for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
            if (portid != qconf->rx_queue_list[queue].port_id)
                continue;

            rx_queueid = qconf->rx_queue_list[queue].queue_id;

            printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
                socket_id);

            ret = rte_eth_rx_queue_setup(portid, rx_queueid,
                    nb_rxd, socket_id, NULL,
                    socket_ctx[socket_id].mbuf_pool);
            if (ret < 0)
                rte_exit(EXIT_FAILURE,
                    "rte_eth_rx_queue_setup: err=%d, "
                    "port=%d\n", ret, portid);
        }
    }
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)

    snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
    ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
            MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
            RTE_MBUF_DEFAULT_BUF_SIZE,
            socket_id);
    if (ctx->mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
            socket_id);
    else
        printf("Allocated mbuf pool on socket %d\n", socket_id);
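/*
 * Note (added for clarity): one mbuf pool is created per NUMA socket and
 * shared by all ports and queues on that socket. Each pool holds NB_MBUF
 * (32000) mbufs with a MEMPOOL_CACHE_SIZE (256) per-lcore cache, and
 * reserves ipsec_metadata_size() bytes of private area in every mbuf for
 * the IPsec metadata.
 */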
main(int32_t argc, char **argv)

    uint32_t lcore_id, nb_ports;
    uint8_t portid, socket_id;

    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
    argc -= ret;
    argv += ret;

    /* parse application arguments (after the EAL ones) */
    ret = parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid parameters\n");

    if (ep < 0)
        rte_exit(EXIT_FAILURE, "need to choose either EP0 or EP1\n");

    if ((unprotected_port_mask & enabled_port_mask) !=
            unprotected_port_mask)
        rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
            unprotected_port_mask);

    nb_ports = rte_eth_dev_count();
    if (nb_ports > RTE_MAX_ETHPORTS)
        nb_ports = RTE_MAX_ETHPORTS;

    if (check_params() < 0)
        rte_exit(EXIT_FAILURE, "check_params failed\n");

    ret = init_lcore_rx_queues();
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

    nb_lcores = rte_lcore_count();

    /* Replicate each context per socket */
    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;

        socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);

        if (socket_ctx[socket_id].mbuf_pool)
            continue;

        sa_init(&socket_ctx[socket_id], socket_id, ep);
        sp_init(&socket_ctx[socket_id], socket_id, ep);
        rt_init(&socket_ctx[socket_id], socket_id, ep);
        pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
    }

    for (portid = 0; portid < nb_ports; portid++) {
        if ((enabled_port_mask & (1 << portid)) == 0)
            continue;

        port_init(portid);
    }

    for (portid = 0; portid < nb_ports; portid++) {
        if ((enabled_port_mask & (1 << portid)) == 0)
            continue;

        /* Start device */
        ret = rte_eth_dev_start(portid);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
                "err=%d, port=%d\n", ret, portid);

        /*
         * If enabled, put device in promiscuous mode.
         * This allows IO forwarding mode to forward packets
         * to itself through 2 cross-connected ports of the
         */
        if (promiscuous_on)
            rte_eth_promiscuous_enable(portid);
    }

    check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

    /* launch per-lcore init on every lcore */
    rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    return 0;