/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>

#include "ipsec.h" /* local SA/SP/routing and crypto datapath declarations */
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN 9600

#define MEMPOOL_CACHE_SIZE 256

#define NB_MBUF	(32000)

#define CDEV_MAP_ENTRIES 1024
#define CDEV_MP_NB_OBJS 2048
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1

#define OPTION_CONFIG		"config"
#define OPTION_SINGLE_SA	"single-sa"
#define OPTION_EP0		"ep0"
#define OPTION_EP1		"ep1"

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define NB_SOCKETS 4 /* one context set per NUMA socket */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET	3

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024

#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
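/*
 * Ports present in the -u mask are "unprotected": they carry ESP traffic
 * from the outside and are handled by the inbound path, while all other
 * enabled ports carry plaintext and are handled by the outbound path.
 * For example (illustrative values), "-p 0x3 -u 0x1" makes port 0 the
 * encrypted side and port 1 the plaintext side.
 */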
/*
 * Configurable number of RX/TX ring descriptors
 */
#define IPSEC_SECGW_RX_DESC_DEFAULT 128
#define IPSEC_SECGW_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8) | \
	((uint64_t)(h) & 0xff))
#else
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8) | \
	((uint64_t)(a) & 0xff))
#endif
#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))

#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
		addr.addr_bytes[0], addr.addr_bytes[1], \
		addr.addr_bytes[2], addr.addr_bytes[3], \
		addr.addr_bytes[4], addr.addr_bytes[5], \
		0, 0)
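/*
 * __BYTES_TO_UINT64() places byte (a) at the lowest memory address of the
 * resulting uint64_t on either byte order, so an entry built with
 * ETHADDR(a, b, c, d, e, f) can be memcpy'd straight into the 6-byte
 * ether_addr of the outgoing header (see prepare_tx_pkt()). For example,
 * ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) is laid out in memory as
 * 00:16:3e:7e:94:9a followed by two zero padding bytes.
 */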
/* port/source ethernet addr and destination ethernet addr */
struct ethaddr_info {
	uint64_t src, dst;
};

struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
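/*
 * The dst entries above are the hard-coded next-hop MAC addresses used
 * when rebuilding the Ethernet header on transmit; the src entries start
 * at zero and are filled in by port_init() with the MAC address reported
 * by the NIC for each port.
 */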
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static int32_t ep = -1; /**< Endpoint configuration (0 or 1) */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];

static struct lcore_params *lcore_params;
static uint16_t nb_lcore_params;
static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;

struct buffer {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
};
struct lcore_conf {
	uint16_t nb_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
	struct ipsec_ctx inbound;
	struct ipsec_ctx outbound;
	struct rt_ctx *rt_ctx;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
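/*
 * Everything a worker needs (its RX queue list, per-port TX buffers and
 * the inbound/outbound IPsec contexts) lives in this per-lcore,
 * cache-aligned slot, so the fast path never takes locks or shares
 * writable state between cores.
 */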
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
				ETH_RSS_TCP | ETH_RSS_SCTP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

static struct socket_ctx socket_ctx[NB_SOCKETS];
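/*
 * RSS spreads incoming IP/UDP/TCP/SCTP flows across the configured RX
 * queues, and socket_ctx[] keeps one set of SA, SP, routing and mbuf-pool
 * contexts per NUMA socket (replicated in main()), so each lcore works on
 * socket-local data.
 */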
struct traffic_type {
	const uint8_t *data[MAX_PKT_BURST * 2];
	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
	uint32_t res[MAX_PKT_BURST * 2];
	uint32_t num;
};

struct ipsec_traffic {
	struct traffic_type ipsec4;
	struct traffic_type ipv4;
};
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	uint8_t *nlp;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
		rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
		nlp = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
				offsetof(struct ip, ip_p));
		if (*nlp == IPPROTO_ESP)
			t->ipsec4.pkts[(t->ipsec4.num)++] = pkt;
		else {
			t->ipv4.data[t->ipv4.num] = nlp;
			t->ipv4.pkts[(t->ipv4.num)++] = pkt;
		}
	} else {
		/* Unknown/Unsupported type, drop the packet */
		rte_pktmbuf_free(pkt);
	}
}
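/*
 * Classification happens here: the Ethernet header is stripped and the
 * IPv4 protocol byte decides the bucket. ESP packets land in t->ipsec4
 * for decryption, all other IPv4 packets land in t->ipv4 with data[]
 * pointing at the protocol field, which is the lookup key later fed to
 * the SP (ACL) classifier. Non-IPv4 frames are simply dropped.
 */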
static inline void
prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
		uint16_t nb_pkts)
{
	int32_t i;

	t->ipsec4.num = 0;
	t->ipv4.num = 0;

	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
					void *));
		prepare_one_packet(pkts[i], t);
	}
	/* Process remaining packets */
	for (; i < nb_pkts; i++)
		prepare_one_packet(pkts[i], t);
}
static inline void
prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
{
	pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
	pkt->l3_len = sizeof(struct ip);
	pkt->l2_len = ETHER_HDR_LEN;

	struct ether_hdr *ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt,
			ETHER_HDR_LEN);

	ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
			sizeof(struct ether_addr));
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
			sizeof(struct ether_addr));
}
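/*
 * The Ethernet header removed on RX is rebuilt here with the port's own
 * MAC as source and the static next hop as destination, and the
 * PKT_TX_IP_CKSUM/PKT_TX_IPV4 flags plus l2_len/l3_len ask the NIC to
 * recompute the IPv4 header checksum on transmit.
 */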
static inline void
prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
{
	int32_t i;
	const int32_t prefetch_offset = 2;

	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
		rte_prefetch0(pkts[i + prefetch_offset]->cacheline1);
		prepare_tx_pkt(pkts[i], port);
	}
	/* Process remaining packets */
	for (; i < nb_pkts; i++)
		prepare_tx_pkt(pkts[i], port);
}
/* Send burst of packets on an output interface */
static inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	int32_t ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	prepare_tx_burst(m_table, n, port);

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
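/*
 * TX is buffered in software: packets for a port accumulate in
 * tx_mbufs[port] and are only pushed to the NIC once MAX_PKT_BURST of
 * them are queued. Partially filled buffers are not lost; main_loop()
 * calls drain_buffers() roughly every BURST_TX_DRAIN_US microseconds to
 * flush whatever is pending.
 */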
static inline void
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_in, i, j;
	uint32_t sa_idx, res;

	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
			traffic->ipsec4.num, MAX_PKT_BURST);

	/* SP/ACL Inbound check ipsec and ipv4 */
	for (i = 0; i < nb_pkts_in; i++) {
		idx = traffic->ipv4.num++;
		m = traffic->ipsec4.pkts[i];
		traffic->ipv4.pkts[idx] = m;
		traffic->ipv4.data[idx] = rte_pktmbuf_mtod_offset(m,
				uint8_t *, offsetof(struct ip, ip_p));
	}

	rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
			traffic->ipv4.data, traffic->ipv4.res,
			traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < traffic->ipv4.num - nb_pkts_in; i++) {
		m = traffic->ipv4.pkts[i];
		res = traffic->ipv4.res[i];
		if ((res == 0) || (res & DISCARD)) {
			rte_pktmbuf_free(m);
			continue;
		}
		traffic->ipv4.pkts[j++] = m;
	}
	/* Check return SA SPI matches pkt SPI */
	for ( ; i < traffic->ipv4.num; i++) {
		m = traffic->ipv4.pkts[i];
		sa_idx = traffic->ipv4.res[i] & PROTECT_MASK;
		if (sa_idx == 0 || !inbound_sa_check(ipsec_ctx->sa_ctx,
					m, sa_idx)) {
			rte_pktmbuf_free(m);
			continue;
		}
		traffic->ipv4.pkts[j++] = m;
	}
	traffic->ipv4.num = j;
}
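/*
 * Inbound order of operations: ipsec_inbound() hands ESP packets to the
 * crypto devices and returns the decrypted ones, which are appended to
 * the ipv4 list; rte_acl_classify() then applies the inbound SP to the
 * whole list, and packets that arrived through IPsec are additionally
 * checked against the SA they were supposed to use before being routed.
 */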
static inline void
process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_out, i, j;
	uint32_t sa_idx, res;

	rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
			traffic->ipv4.data, traffic->ipv4.res,
			traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec4.num; i++)
		rte_pktmbuf_free(traffic->ipsec4.pkts[i]);

	traffic->ipsec4.num = 0;

	j = 0;
	for (i = 0; i < traffic->ipv4.num; i++) {
		m = traffic->ipv4.pkts[i];
		res = traffic->ipv4.res[i];
		sa_idx = res & PROTECT_MASK;
		if ((res == 0) || (res & DISCARD))
			rte_pktmbuf_free(m);
		else if (sa_idx != 0) {
			traffic->ipsec4.res[traffic->ipsec4.num] = sa_idx;
			traffic->ipsec4.pkts[traffic->ipsec4.num++] = m;
		} else
			traffic->ipv4.pkts[j++] = m;
	}
	traffic->ipv4.num = j;

	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec4.pkts,
			traffic->ipsec4.res, traffic->ipsec4.num,
			MAX_PKT_BURST);

	for (i = 0; i < nb_pkts_out; i++) {
		idx = traffic->ipv4.num++;
		m = traffic->ipsec4.pkts[i];
		traffic->ipv4.pkts[idx] = m;
	}
}
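/*
 * Outbound order of operations: the SP (ACL) verdict in res selects
 * discard, bypass (plain forwarding) or protect, where a non-zero sa_idx
 * picks the SA to use; packets marked protect are encrypted by
 * ipsec_outbound() and then appended back to the ipv4 list so that both
 * bypassed and freshly encrypted packets go through the same routing step.
 */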
static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	uint16_t nb_pkts_in, i;

	/* Drop any IPv4 traffic from unprotected ports */
	for (i = 0; i < traffic->ipv4.num; i++)
		rte_pktmbuf_free(traffic->ipv4.pkts[i]);

	traffic->ipv4.num = 0;

	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
			traffic->ipsec4.num, MAX_PKT_BURST);

	for (i = 0; i < nb_pkts_in; i++)
		traffic->ipv4.pkts[i] = traffic->ipsec4.pkts[i];

	traffic->ipv4.num = nb_pkts_in;
}
static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	uint16_t nb_pkts_out, i;

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec4.num; i++)
		rte_pktmbuf_free(traffic->ipsec4.pkts[i]);

	traffic->ipsec4.num = 0;

	for (i = 0; i < traffic->ipv4.num; i++)
		traffic->ipv4.res[i] = single_sa_idx;

	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipv4.pkts,
			traffic->ipv4.res, traffic->ipv4.num,
			MAX_PKT_BURST);

	traffic->ipv4.num = nb_pkts_out;
}
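/*
 * With --single-sa, the SP lookup is skipped entirely: every plaintext
 * packet received on a protected port is sent to the one SA selected by
 * single_sa_idx, and unexpected traffic on the other side is dropped,
 * which keeps the fast path minimal when only one tunnel is needed.
 */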
static inline void
route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	uint32_t hop[MAX_PKT_BURST * 2];
	uint32_t dst_ip[MAX_PKT_BURST * 2];
	uint16_t i, offset;

	if (nb_pkts == 0)
		return;

	for (i = 0; i < nb_pkts; i++) {
		offset = offsetof(struct ip, ip_dst);
		dst_ip[i] = *rte_pktmbuf_mtod_offset(pkts[i],
				uint32_t *, offset);
		dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]);
	}

	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		if ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		send_single_packet(pkts[i], hop[i] & 0xff);
	}
}
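/*
 * Routing is a plain LPM lookup on the (host-order) destination address:
 * rt_init() populates the table for the chosen endpoint, the low byte of
 * the lookup result (hop & 0xff) is the output port, and packets whose
 * destination has no route are dropped.
 */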
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
		uint8_t nb_pkts, uint8_t portid)
{
	struct ipsec_traffic traffic;

	prepare_traffic(pkts, &traffic, nb_pkts);

	if (single_sa) {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
		else
			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
	} else {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound(&qconf->inbound, &traffic);
		else
			process_pkts_outbound(&qconf->outbound, &traffic);
	}

	route_pkts(qconf->rt_ctx, traffic.ipv4.pkts, traffic.ipv4.num);
}
static inline void
drain_buffers(struct lcore_conf *qconf)
{
	struct buffer *buf;
	uint32_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		buf = &qconf->tx_mbufs[portid];
		if (buf->len == 0)
			continue;
		send_burst(qconf, buf->len, portid);
		buf->len = 0;
	}
}
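/*
 * The drain interval used below is expressed in TSC cycles:
 * drain_tsc ~= tsc_hz / 1e6 * BURST_TX_DRAIN_US, so on a core with a
 * 2 GHz TSC, for example, 100 us corresponds to roughly 200,000 cycles.
 * That bounds how long a partially filled TX buffer can sit before
 * drain_buffers() flushes it.
 */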
/* main processing loop */
static int32_t
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint32_t lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int32_t i, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	int32_t socket_id;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct lcore_rx_queue *rxql;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	qconf->rt_ctx = socket_ctx[socket_id].rt_ipv4;
	qconf->inbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_in;
	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_in;
	qconf->inbound.cdev_map = cdev_map_in;
	qconf->outbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_out;
	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_out;
	qconf->outbound.cdev_map = cdev_map_out;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		/* Read packets from RX queues */
		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx > 0)
				process_pkts(qconf, pkts, nb_rx, portid);
		}
	}
}
static int32_t
check_params(void)
{
	uint8_t lcore, portid, nb_ports;
	uint16_t i;
	int32_t socket_id;

	if (lcore_params == NULL) {
		printf("Error: No port/queue/core mappings\n");
		return -1;
	}

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in "
				"lcore mask\n", lcore);
			return -1;
		}
		socket_id = rte_lcore_to_socket_id(lcore);
		if (socket_id != 0 && numa_on == 0) {
			printf("warning: lcore %hhu is on socket %d "
				"with numa off\n",
				lcore, socket_id);
		}
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_nb_rx_queues(const uint8_t port)
{
	int32_t queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
				lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
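/*
 * The number of RX queues for a port is simply the highest queue id
 * mentioned for it in --config, plus one. For example (illustrative
 * lcore numbers), --config="(0,0,1),(0,1,2)" gives port 0 two RX queues,
 * one polled by lcore 1 and one by lcore 2.
 */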
static int32_t
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
					nb_rx_queue + 1, lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].nb_rx_queue++;
	}
	return 0;
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK"
		" --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore)]"
		" --single-sa SAIDX --ep0|--ep1\n"
		" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		" -P : enable promiscuous mode\n"
		" -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
		" --"OPTION_CONFIG": (port,queue,lcore): "
		"rx queues configuration\n"
		" --single-sa SAIDX: use single SA index for outbound, "
		"bypassing the SP\n"
		" --ep0: Configure as Endpoint 0\n"
		" --ep1: Configure as Endpoint 1\n", prgname);
}
static int32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if ((pm == 0) && errno)
		return -1;

	return pm;
}

static int32_t
parse_decimal(const char *str)
{
	char *end = NULL;
	unsigned long num;

	num = strtoul(str, &end, 10);
	if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	return num;
}
static int32_t
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	int long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int32_t i;
	uint32_t size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		p0 = strchr(p, ')');
		if (p0 == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
				_NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
#define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt)))

static int32_t
parse_args_long_options(struct option *lgopts, int32_t option_index)
{
	int32_t ret = -1;
	const char *optname = lgopts[option_index].name;

	if (__STRNCMP(optname, OPTION_CONFIG)) {
		ret = parse_config(optarg);
		if (ret)
			printf("invalid config\n");
	}

	if (__STRNCMP(optname, OPTION_SINGLE_SA)) {
		ret = parse_decimal(optarg);
		if (ret != -1) {
			single_sa = 1;
			single_sa_idx = ret;
			printf("Configured with single SA index %u\n",
					single_sa_idx);
			ret = 0;
		}
	}

	if (__STRNCMP(optname, OPTION_EP0)) {
		printf("endpoint 0\n");
		ep = 0;
		ret = 0;
	}

	if (__STRNCMP(optname, OPTION_EP1)) {
		printf("endpoint 1\n");
		ep = 1;
		ret = 0;
	}

	return ret;
}
static int32_t
parse_args(int32_t argc, char **argv)
{
	int32_t opt, ret;
	char **argvopt;
	int32_t option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{OPTION_CONFIG, 1, 0, 0},
		{OPTION_SINGLE_SA, 1, 0, 0},
		{OPTION_EP0, 0, 0, 0},
		{OPTION_EP1, 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:Pu:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;
		case 'u':
			unprotected_port_mask = parse_portmask(optarg);
			if (unprotected_port_mask == 0) {
				printf("invalid unprotected portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (parse_args_long_options(lgopts, option_index)) {
				print_usage(prgname);
				return -1;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */

	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(uint32_t)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static int32_t
add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params,
		struct ipsec_ctx *ipsec_ctx,
		const struct rte_cryptodev_capabilities *cipher,
		const struct rte_cryptodev_capabilities *auth)
{
	int32_t ret = 0;
	unsigned long i;
	struct cdev_key key = { 0 };

	key.lcore_id = params->lcore_id;
	if (cipher)
		key.cipher_algo = cipher->sym.cipher.algo;
	if (auth)
		key.auth_algo = auth->sym.auth.algo;

	ret = rte_hash_lookup(map, &key);
	if (ret != -ENOENT)
		return 0;

	for (i = 0; i < ipsec_ctx->nb_qps; i++)
		if (ipsec_ctx->tbl[i].id == cdev_id)
			break;

	if (i == ipsec_ctx->nb_qps) {
		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
			printf("Maximum number of crypto devices assigned to "
				"a core, increase MAX_QP_PER_LCORE value\n");
			return 0;
		}
		ipsec_ctx->tbl[i].id = cdev_id;
		ipsec_ctx->tbl[i].qp = qp;
		ipsec_ctx->nb_qps++;
		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
				"(cdev_id_qp %lu)\n", str, key.lcore_id,
				cdev_id, qp, i);
	}

	ret = rte_hash_add_key_data(map, &key, (void *)i);
	if (ret < 0) {
		printf("Failed to insert cdev mapping for (lcore %u, "
				"cdev %u, qp %u), errno %d\n",
				key.lcore_id, ipsec_ctx->tbl[i].id,
				ipsec_ctx->tbl[i].qp, ret);
		return 0;
	}

	return 1;
}
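/*
 * The hash key is (lcore, cipher algo, auth algo) and the stored value is
 * the index of the (cryptodev, queue pair) entry that lcore should use
 * for sessions with that algorithm pair; the datapath later resolves the
 * key through ipsec_ctx->cdev_map. A return of 1 signals that a new
 * queue-pair binding was consumed, which is how cryptodevs_init() counts
 * the queue pairs it has to configure on each device.
 */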
static int32_t
add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params)
{
	int32_t ret = 0;
	const struct rte_cryptodev_capabilities *i, *j;
	struct rte_hash *map;
	struct lcore_conf *qconf;
	struct ipsec_ctx *ipsec_ctx;
	const char *str;

	qconf = &lcore_conf[params->lcore_id];

	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
		map = cdev_map_out;
		ipsec_ctx = &qconf->outbound;
		str = "Outbound";
	} else {
		map = cdev_map_in;
		ipsec_ctx = &qconf->inbound;
		str = "Inbound";
	}

	/* Require cryptodevs with operation chaining */
	if (!(dev_info->feature_flags &
				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
		return ret;

	for (i = dev_info->capabilities;
			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		for (j = dev_info->capabilities;
				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
				continue;

			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
				continue;

			ret |= add_mapping(map, str, cdev_id, qp, params,
					ipsec_ctx, i, j);
		}
	}

	return ret;
}
static int32_t
cryptodevs_init(void)
{
	struct rte_cryptodev_config dev_conf;
	struct rte_cryptodev_qp_conf qp_conf;
	uint16_t idx, max_nb_qps, qp, i;
	int16_t cdev_id;
	struct rte_hash_parameters params = { 0 };

	params.entries = CDEV_MAP_ENTRIES;
	params.key_len = sizeof(struct cdev_key);
	params.hash_func = rte_jhash;
	params.hash_func_init_val = 0;
	params.socket_id = rte_socket_id();

	params.name = "cdev_map_in";
	cdev_map_in = rte_hash_create(&params);
	if (cdev_map_in == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	params.name = "cdev_map_out";
	cdev_map_out = rte_hash_create(&params);
	if (cdev_map_out == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	printf("lcore/cryptodev/qp mappings:\n");

	idx = 0;
	/* Start from last cdev id to give HW priority */
	for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
		struct rte_cryptodev_info cdev_info;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
			max_nb_qps = cdev_info.max_nb_queue_pairs;
		else
			max_nb_qps = nb_lcore_params;

		qp = 0;
		i = 0;
		while (qp < max_nb_qps && i < nb_lcore_params) {
			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
						&lcore_params[idx]))
				qp++;
			idx++;
			idx = idx % nb_lcore_params;
			i++;
		}

		if (qp == 0)
			continue;

		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
		dev_conf.nb_queue_pairs = qp;
		dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;
		dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;

		if (rte_cryptodev_configure(cdev_id, &dev_conf))
			rte_panic("Failed to initialize cryptodev %u\n",
					cdev_id);

		qp_conf.nb_descriptors = CDEV_MP_NB_OBJS;
		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
						&qp_conf, dev_conf.socket_id))
				rte_panic("Failed to setup queue %u for "
						"cdev_id %u\n", qp, cdev_id);
	}

	printf("\n");
	return 0;
}
static void
port_init(uint8_t portid)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	uint16_t nb_tx_queue, nb_rx_queue;
	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
	int32_t ret, socket_id;
	struct lcore_conf *qconf;
	struct ether_addr ethaddr;

	rte_eth_dev_info_get(portid, &dev_info);

	printf("Configuring device port %u:\n", portid);

	rte_eth_macaddr_get(portid, &ethaddr);
	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
	print_ethaddr("Address: ", &ethaddr);
	printf("\n");

	nb_rx_queue = get_port_nb_rx_queues(portid);
	nb_tx_queue = nb_lcores;

	if (nb_rx_queue > dev_info.max_rx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max rx queue is %u)\n",
				nb_rx_queue, dev_info.max_rx_queues);

	if (nb_tx_queue > dev_info.max_tx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max tx queue is %u)\n",
				nb_tx_queue, dev_info.max_tx_queues);

	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
			nb_rx_queue, nb_tx_queue);

	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
			&port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n", ret, portid);

	/* init one TX queue per lcore */
	tx_queueid = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		/* init TX queue */
		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);

		txconf = &dev_info.default_txconf;
		txconf->txq_flags = 0;

		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
				socket_id, txconf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					"err=%d, port=%d\n", ret, portid);

		qconf = &lcore_conf[lcore_id];
		qconf->tx_queue_id[portid] = tx_queueid;
		tx_queueid++;

		/* init RX queues */
		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
			if (portid != qconf->rx_queue_list[queue].port_id)
				continue;

			rx_queueid = qconf->rx_queue_list[queue].queue_id;

			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
					socket_id);

			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
					nb_rxd, socket_id, NULL,
					socket_ctx[socket_id].mbuf_pool);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}
	printf("\n");
}
static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
	char s[64];

	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
			RTE_MBUF_DEFAULT_BUF_SIZE,
			socket_id);
	if (ctx->mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
				socket_id);
	else
		printf("Allocated mbuf pool on socket %d\n", socket_id);
}
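/*
 * One pool of NB_MBUF (32000) mbufs is created per NUMA socket, with a
 * MEMPOOL_CACHE_SIZE per-lcore cache and ipsec_metadata_size() bytes of
 * private area in every mbuf, so the IPsec code can keep its per-packet
 * crypto metadata alongside the packet data.
 */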
int32_t
main(int32_t argc, char **argv)
{
	int32_t ret;
	uint32_t lcore_id, nb_ports;
	uint8_t portid, socket_id;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");

	if (ep < 0)
		rte_exit(EXIT_FAILURE, "need to choose either EP0 or EP1\n");

	if ((unprotected_port_mask & enabled_port_mask) !=
			unprotected_port_mask)
		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
				unprotected_port_mask);

	nb_ports = rte_eth_dev_count();
	if (nb_ports > RTE_MAX_ETHPORTS)
		nb_ports = RTE_MAX_ETHPORTS;

	if (check_params() < 0)
		rte_exit(EXIT_FAILURE, "check_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_lcores = rte_lcore_count();

	/* Replicate each context per socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		if (socket_ctx[socket_id].mbuf_pool)
			continue;

		sa_init(&socket_ctx[socket_id], socket_id, ep);

		sp_init(&socket_ctx[socket_id], socket_id, ep);

		rt_init(&socket_ctx[socket_id], socket_id, ep);

		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
	}

	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		port_init(portid);
	}

	cryptodevs_init();

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
					"err=%d, port=%d\n", ret, portid);
		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}