/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>

#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
#include <rte_string_fns.h>

#include "ipsec.h"
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN  9600

#define MEMPOOL_CACHE_SIZE 256

#define NB_MBUF	(32000)

#define IP6_VERSION 6

#define CDEV_MAP_ENTRIES 1024
#define CDEV_MP_NB_OBJS 2048
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1

#define OPTION_CONFIG		"config"
#define OPTION_SINGLE_SA	"single-sa"
#define OPTION_EP0		"ep0"
#define OPTION_EP1		"ep1"
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch when reading packets */
#define PREFETCH_OFFSET	3

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024

#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
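/*
 * Ports flagged in unprotected_port_mask face the untrusted network:
 * bursts received there take the inbound (decrypt) path, while bursts
 * received on protected ports take the outbound (encrypt) path.
 */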
/*
 * Configurable number of RX/TX ring descriptors
 */
#define IPSEC_SECGW_RX_DESC_DEFAULT 128
#define IPSEC_SECGW_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8)  | \
	((uint64_t)(h) & 0xff))
#else
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8)  | \
	((uint64_t)(a) & 0xff))
#endif

#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))

#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
		addr.addr_bytes[0], addr.addr_bytes[1], \
		addr.addr_bytes[2], addr.addr_bytes[3], \
		addr.addr_bytes[4], addr.addr_bytes[5], \
		0, 0)
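/*
 * Worked example (little-endian host): ETHADDR(0x00, 0x16, 0x3e, 0x7e,
 * 0x94, 0x9a) places 0x00 in the least significant byte, so copying the
 * resulting uint64_t into a struct ether_addr reproduces the address in
 * wire order 00:16:3e:7e:94:9a; the two zero padding bytes land beyond
 * the six bytes that get copied.
 */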
/* port/source ethernet addr and destination ethernet addr */
struct ethaddr_info {
	uint64_t src, dst;
};

struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static int32_t ep = -1; /**< Endpoint configuration (0 or 1) */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];

static struct lcore_params *lcore_params;
static uint16_t nb_lcore_params;

static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
struct buffer {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
};

struct lcore_conf {
	uint16_t nb_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
	struct ipsec_ctx inbound;
	struct ipsec_ctx outbound;
	struct rt_ctx *rt4_ctx;
	struct rt_ctx *rt6_ctx;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
				ETH_RSS_TCP | ETH_RSS_SCTP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
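/*
 * RSS hashes each received packet over its IP (and L4) header, so all
 * packets of one flow land on the same RX queue, and therefore on the
 * same lcore; this keeps per-flow processing lock-free.
 */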
static struct socket_ctx socket_ctx[NB_SOCKETS];
struct traffic_type {
	const uint8_t *data[MAX_PKT_BURST * 2];
	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
	uint32_t res[MAX_PKT_BURST * 2];
	uint32_t num;
};

struct ipsec_traffic {
	struct traffic_type ipsec;
	struct traffic_type ip4;
	struct traffic_type ip6;
};
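/*
 * Each traffic_type array is sized MAX_PKT_BURST * 2 because inbound
 * processing can append up to one burst of freshly decrypted packets to
 * a burst of packets that arrived in the clear.
 */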
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	uint8_t *nlp;
	struct ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
		if (*nlp == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		else {
			t->ip4.data[t->ip4.num] = nlp;
			t->ip4.pkts[(t->ip4.num)++] = pkt;
		}
	} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
		if (*nlp == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		else {
			t->ip6.data[t->ip6.num] = nlp;
			t->ip6.pkts[(t->ip6.num)++] = pkt;
		}
	} else {
		/* Unknown/Unsupported type, drop the packet */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
		rte_pktmbuf_free(pkt);
	}
}
static inline void
prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
		uint16_t nb_pkts)
{
	int32_t i;

	t->ipsec.num = 0;
	t->ip4.num = 0;
	t->ip6.num = 0;

	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
					void *));
		prepare_one_packet(pkts[i], t);
	}
	/* Process the remaining packets */
	for (; i < nb_pkts; i++)
		prepare_one_packet(pkts[i], t);
}
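/*
 * The first loop in prepare_traffic() prefetches the header of packet
 * i + PREFETCH_OFFSET while classifying packet i, hiding memory latency
 * behind useful work; the tail loop handles the last PREFETCH_OFFSET
 * packets, for which there is nothing left to prefetch.
 */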
static inline void
prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
{
	struct ip *ip;
	struct ether_hdr *ethhdr;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);

	ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);

	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	} else {
		pkt->ol_flags |= PKT_TX_IPV6;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
	}

	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
			sizeof(struct ether_addr));
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
			sizeof(struct ether_addr));
}
static inline void
prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
{
	int32_t i;
	const int32_t prefetch_offset = 2;

	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
		rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
		prepare_tx_pkt(pkts[i], port);
	}
	/* Process the remaining packets */
	for (; i < nb_pkts; i++)
		prepare_tx_pkt(pkts[i], port);
}
/* Send burst of packets on an output interface */
static inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
{
	struct rte_mbuf **m_table;
	int32_t ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	prepare_tx_burst(m_table, n, port);

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}
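/*
 * Note the drop-on-overload policy in send_burst(): mbufs the NIC did
 * not accept are freed rather than retried, as in the other DPDK
 * forwarding examples.
 */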
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
send_single_packet(struct rte_mbuf *m, uint8_t port)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
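/*
 * Packets are buffered per (lcore, port) pair and flushed either here,
 * once MAX_PKT_BURST has accumulated, or by the periodic drain_buffers()
 * call in the main loop, so a trickle of traffic never gets stuck.
 */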
static inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
		uint16_t lim)
{
	struct rte_mbuf *m;
	uint32_t i, j, res, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (res & BYPASS) {
			ip->pkts[j++] = m;
			continue;
		}
		if (res & DISCARD || i < lim) {
			rte_pktmbuf_free(m);
			continue;
		}
		/* Only check SPI match for processed IPSec packets */
		sa_idx = ip->res[i] & PROTECT_MASK;
		if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
			rte_pktmbuf_free(m);
			continue;
		}
		ip->pkts[j++] = m;
	}
	ip->num = j;
}
static inline void
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;

	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
			traffic->ipsec.num, MAX_PKT_BURST);

	n_ip4 = traffic->ip4.num;
	n_ip6 = traffic->ip6.num;

	/* SP/ACL Inbound check ipsec and ip4 */
	for (i = 0; i < nb_pkts_in; i++) {
		m = traffic->ipsec.pkts[i];
		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			idx = traffic->ip4.num++;
			traffic->ip4.pkts[idx] = m;
			traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
					uint8_t *, offsetof(struct ip, ip_p));
		} else if (ip->ip_v == IP6_VERSION) {
			idx = traffic->ip6.num++;
			traffic->ip6.pkts[idx] = m;
			traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
					uint8_t *,
					offsetof(struct ip6_hdr, ip6_nxt));
		} else
			rte_pktmbuf_free(m);
	}

	inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
			n_ip4);

	inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
			n_ip6);
}
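/*
 * process_pkts_inbound() is a two-stage pipeline: ipsec_inbound()
 * decrypts the ESP packets, then inbound_sp_sa() runs the SP (ACL)
 * check over cleartext and decrypted packets together. Passing
 * n_ip4/n_ip6 as 'lim' marks the packets that arrived in the clear, so
 * any of them matching a PROTECT policy is dropped.
 */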
static inline void
outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
		struct traffic_type *ipsec)
{
	struct rte_mbuf *m;
	uint32_t i, j, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] & PROTECT_MASK;
		if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
			rte_pktmbuf_free(m);
		else if (sa_idx != 0) {
			ipsec->res[ipsec->num] = sa_idx;
			ipsec->pkts[ipsec->num++] = m;
		} else /* BYPASS */
			ip->pkts[j++] = m;
	}
	ip->num = j;
}
static inline void
process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_out, i;

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);

	traffic->ipsec.num = 0;

	outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);

	outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);

	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
			traffic->ipsec.res, traffic->ipsec.num,
			MAX_PKT_BURST);

	for (i = 0; i < nb_pkts_out; i++) {
		m = traffic->ipsec.pkts[i];
		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			idx = traffic->ip4.num++;
			traffic->ip4.pkts[idx] = m;
		} else {
			idx = traffic->ip6.num++;
			traffic->ip6.pkts[idx] = m;
		}
	}
}
static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_in, i, idx;

	/* Drop any IPv4 traffic from unprotected ports */
	for (i = 0; i < traffic->ip4.num; i++)
		rte_pktmbuf_free(traffic->ip4.pkts[i]);

	traffic->ip4.num = 0;

	/* Drop any IPv6 traffic from unprotected ports */
	for (i = 0; i < traffic->ip6.num; i++)
		rte_pktmbuf_free(traffic->ip6.pkts[i]);

	traffic->ip6.num = 0;

	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
			traffic->ipsec.num, MAX_PKT_BURST);

	for (i = 0; i < nb_pkts_in; i++) {
		m = traffic->ipsec.pkts[i];
		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			idx = traffic->ip4.num++;
			traffic->ip4.pkts[idx] = m;
		} else {
			idx = traffic->ip6.num++;
			traffic->ip6.pkts[idx] = m;
		}
	}
}
static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_out, i;
	struct ip *ip;

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);

	traffic->ipsec.num = 0;

	for (i = 0; i < traffic->ip4.num; i++)
		traffic->ip4.res[i] = single_sa_idx;

	for (i = 0; i < traffic->ip6.num; i++)
		traffic->ip6.res[i] = single_sa_idx;

	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
			traffic->ip4.res, traffic->ip4.num,
			MAX_PKT_BURST);

	/* They all use the same SA (ip4 or ip6 tunnel) */
	m = traffic->ip4.pkts[0];
	ip = rte_pktmbuf_mtod(m, struct ip *);
	if (ip->ip_v == IPVERSION)
		traffic->ip4.num = nb_pkts_out;
	else
		traffic->ip6.num = nb_pkts_out;
}
static inline void
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	uint32_t hop[MAX_PKT_BURST * 2];
	uint32_t dst_ip[MAX_PKT_BURST * 2];
	uint16_t i, offset;

	if (nb_pkts == 0)
		return;

	for (i = 0; i < nb_pkts; i++) {
		offset = offsetof(struct ip, ip_dst);
		dst_ip[i] = *rte_pktmbuf_mtod_offset(pkts[i],
				uint32_t *, offset);
		dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]);
	}

	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		if ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		send_single_packet(pkts[i], hop[i] & 0xff);
	}
}
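/*
 * The LPM next-hop value encodes the output port in its low byte;
 * RTE_LPM_LOOKUP_SUCCESS flags a hit, and packets with no matching
 * route are dropped.
 */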
static inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	int16_t hop[MAX_PKT_BURST * 2];
	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
	uint8_t *ip6_dst;
	uint16_t i, offset;

	if (nb_pkts == 0)
		return;

	for (i = 0; i < nb_pkts; i++) {
		offset = offsetof(struct ip6_hdr, ip6_dst);
		ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset);
		memcpy(&dst_ip[i][0], ip6_dst, 16);
	}

	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip,
			hop, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		if (hop[i] == -1) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		send_single_packet(pkts[i], hop[i] & 0xff);
	}
}
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
		uint8_t nb_pkts, uint8_t portid)
{
	struct ipsec_traffic traffic;

	prepare_traffic(pkts, &traffic, nb_pkts);

	if (unlikely(single_sa)) {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
		else
			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
	} else {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound(&qconf->inbound, &traffic);
		else
			process_pkts_outbound(&qconf->outbound, &traffic);
	}

	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
static inline void
drain_buffers(struct lcore_conf *qconf)
{
	struct buffer *buf;
	uint32_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		buf = &qconf->tx_mbufs[portid];
		if (buf->len == 0)
			continue;
		send_burst(qconf, buf->len, portid);
		buf->len = 0;
	}
}
/* main processing loop */
static int32_t
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint32_t lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int32_t i, nb_rx;
	uint8_t portid, queueid;
	struct lcore_conf *qconf;
	int32_t socket_id;
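	/*
	 * drain_tsc below converts BURST_TX_DRAIN_US into TSC cycles,
	 * e.g. with a 2 GHz TSC:
	 * (2e9 + 1e6 - 1) / 1e6 * 100 ~= 200,000 cycles per 100 us.
	 */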
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct lcore_rx_queue *rxql;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	qconf->inbound.cdev_map = cdev_map_in;
	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	qconf->outbound.cdev_map = cdev_map_out;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		/* Read packets from RX queues */
		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx > 0)
				process_pkts(qconf, pkts, nb_rx, portid);
		}
	}
}
static int32_t
check_params(void)
{
	uint8_t lcore, portid, nb_ports;
	uint16_t i;
	int32_t socket_id;

	if (lcore_params == NULL) {
		printf("Error: No port/queue/core mappings\n");
		return -1;
	}

	nb_ports = rte_eth_dev_count();

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in "
				"lcore mask\n", lcore);
			return -1;
		}
		socket_id = rte_lcore_to_socket_id(lcore);
		if (socket_id != 0 && numa_on == 0) {
			printf("warning: lcore %hhu is on socket %d "
				"with numa off\n",
				lcore, socket_id);
		}
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_nb_rx_queues(const uint8_t port)
{
	int32_t queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
				lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
static int32_t
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
					nb_rx_queue + 1, lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].nb_rx_queue++;
	}
	return 0;
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK"
		" --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore)]"
		" --single-sa SAIDX --ep0|--ep1\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  -P : enable promiscuous mode\n"
		"  -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
		"  --"OPTION_CONFIG": (port,queue,lcore): "
		"rx queues configuration\n"
		"  --single-sa SAIDX: use single SA index for outbound, "
		"bypassing the SP\n"
		"  --ep0: Configure as Endpoint 0\n"
		"  --ep1: Configure as Endpoint 1\n", prgname);
}
static int32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;
	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if ((pm == 0) && errno)
		return -1;

	return pm;
}
static int32_t
parse_decimal(const char *str)
{
	char *end = NULL;
	unsigned long num;

	num = strtoul(str, &end, 10);
	if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	return num;
}
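/*
 * Parse the --config option, e.g. "(0,0,1),(1,0,2)" binds port 0 /
 * queue 0 to lcore 1 and port 1 / queue 0 to lcore 2.
 */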
static int32_t
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int32_t i;
	uint32_t size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		p0 = strchr(p, ')');
		if (p0 == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
				_NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
#define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt)))
static int32_t
parse_args_long_options(struct option *lgopts, int32_t option_index)
{
	int32_t ret = -1;
	const char *optname = lgopts[option_index].name;

	if (__STRNCMP(optname, OPTION_CONFIG)) {
		ret = parse_config(optarg);
		if (ret)
			printf("invalid config\n");
	}

	if (__STRNCMP(optname, OPTION_SINGLE_SA)) {
		ret = parse_decimal(optarg);
		if (ret != -1) {
			single_sa = 1;
			single_sa_idx = ret;
			printf("Configured with single SA index %u\n",
					single_sa_idx);
			ret = 0;
		}
	}

	if (__STRNCMP(optname, OPTION_EP0)) {
		printf("endpoint 0\n");
		ep = 0;
		ret = 0;
	}

	if (__STRNCMP(optname, OPTION_EP1)) {
		printf("endpoint 1\n");
		ep = 1;
		ret = 0;
	}

	return ret;
}
static int32_t
parse_args(int32_t argc, char **argv)
{
	int32_t opt, ret;
	char **argvopt;
	int32_t option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{OPTION_CONFIG, 1, 0, 0},
		{OPTION_SINGLE_SA, 1, 0, 0},
		{OPTION_EP0, 0, 0, 0},
		{OPTION_EP1, 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:Pu:",
				lgopts, &option_index)) != EOF) {
		switch (opt) {
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;
		case 'u':
			unprotected_port_mask = parse_portmask(optarg);
			if (unprotected_port_mask == 0) {
				printf("invalid unprotected portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 0:
			if (parse_args_long_options(lgopts, option_index)) {
				print_usage(prgname);
				return -1;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */

	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/* Check the link status of all enabled ports for up to 9 s, then print it */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(uint32_t)link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static int32_t
add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params,
		struct ipsec_ctx *ipsec_ctx,
		const struct rte_cryptodev_capabilities *cipher,
		const struct rte_cryptodev_capabilities *auth)
{
	int32_t ret = 0;
	unsigned long i;
	struct cdev_key key = { 0 };

	key.lcore_id = params->lcore_id;
	if (cipher)
		key.cipher_algo = cipher->sym.cipher.algo;
	if (auth)
		key.auth_algo = auth->sym.auth.algo;

	ret = rte_hash_lookup(map, &key);
	if (ret != -ENOENT)
		return 0;

	for (i = 0; i < ipsec_ctx->nb_qps; i++)
		if (ipsec_ctx->tbl[i].id == cdev_id)
			break;

	if (i == ipsec_ctx->nb_qps) {
		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
			printf("Maximum number of crypto devices assigned to "
				"a core, increase MAX_QP_PER_LCORE value\n");
			return 0;
		}
		ipsec_ctx->tbl[i].id = cdev_id;
		ipsec_ctx->tbl[i].qp = qp;
		ipsec_ctx->nb_qps++;
		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
				"(cdev_id_qp %lu)\n", str, key.lcore_id,
				cdev_id, qp, i);
	}

	ret = rte_hash_add_key_data(map, &key, (void *)i);
	if (ret < 0) {
		printf("Failed to insert cdev mapping for (lcore %u, "
				"cdev %u, qp %u), errno %d\n",
				key.lcore_id, ipsec_ctx->tbl[i].id,
				ipsec_ctx->tbl[i].qp, ret);
		return 0;
	}

	return 1;
}
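/*
 * add_cdev_mapping() below enumerates every (cipher, auth) capability
 * pair of the device, so any SA whose chained transforms this cryptodev
 * supports can later find a queue pair through the map.
 */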
static int32_t
add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params)
{
	int32_t ret = 0;
	const struct rte_cryptodev_capabilities *i, *j;
	struct rte_hash *map;
	struct lcore_conf *qconf;
	struct ipsec_ctx *ipsec_ctx;
	const char *str;

	qconf = &lcore_conf[params->lcore_id];

	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
		map = cdev_map_out;
		ipsec_ctx = &qconf->outbound;
		str = "Outbound";
	} else {
		map = cdev_map_in;
		ipsec_ctx = &qconf->inbound;
		str = "Inbound";
	}

	/* Required cryptodevs with operation chaining */
	if (!(dev_info->feature_flags &
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
		return ret;

	for (i = dev_info->capabilities;
			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		for (j = dev_info->capabilities;
				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
				continue;

			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
				continue;

			ret |= add_mapping(map, str, cdev_id, qp, params,
					ipsec_ctx, i, j);
		}
	}

	return ret;
}
static int32_t
cryptodevs_init(void)
{
	struct rte_cryptodev_config dev_conf;
	struct rte_cryptodev_qp_conf qp_conf;
	uint16_t idx, max_nb_qps, qp, i;
	int16_t cdev_id;
	struct rte_hash_parameters params = { 0 };

	params.entries = CDEV_MAP_ENTRIES;
	params.key_len = sizeof(struct cdev_key);
	params.hash_func = rte_jhash;
	params.hash_func_init_val = 0;
	params.socket_id = rte_socket_id();

	params.name = "cdev_map_in";
	cdev_map_in = rte_hash_create(&params);
	if (cdev_map_in == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	params.name = "cdev_map_out";
	cdev_map_out = rte_hash_create(&params);
	if (cdev_map_out == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);
1233 printf("lcore/cryptodev/qp mappings:\n");
1236 /* Start from last cdev id to give HW priority */
1237 for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
1238 struct rte_cryptodev_info cdev_info;
1240 rte_cryptodev_info_get(cdev_id, &cdev_info);
1242 if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1243 max_nb_qps = cdev_info.max_nb_queue_pairs;
1245 max_nb_qps = nb_lcore_params;
1249 while (qp < max_nb_qps && i < nb_lcore_params) {
1250 if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1251 &lcore_params[idx]))
1254 idx = idx % nb_lcore_params;
1261 dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1262 dev_conf.nb_queue_pairs = qp;
1263 dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;
1264 dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;
1266 if (rte_cryptodev_configure(cdev_id, &dev_conf))
1267 rte_panic("Failed to initialize crypodev %u\n",
1270 qp_conf.nb_descriptors = CDEV_MP_NB_OBJS;
1271 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1272 if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1273 &qp_conf, dev_conf.socket_id))
1274 rte_panic("Failed to setup queue %u for "
1275 "cdev_id %u\n", 0, cdev_id);
1277 if (rte_cryptodev_start(cdev_id))
1278 rte_panic("Failed to start cryptodev %u\n",
static void
port_init(uint8_t portid)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	uint16_t nb_tx_queue, nb_rx_queue;
	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
	int32_t ret, socket_id;
	struct lcore_conf *qconf;
	struct ether_addr ethaddr;

	rte_eth_dev_info_get(portid, &dev_info);

	printf("Configuring device port %u:\n", portid);

	rte_eth_macaddr_get(portid, &ethaddr);
	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
	print_ethaddr("Address: ", &ethaddr);
	printf("\n");

	nb_rx_queue = get_port_nb_rx_queues(portid);
	nb_tx_queue = nb_lcores;

	if (nb_rx_queue > dev_info.max_rx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max rx queue is %u)\n",
				nb_rx_queue, dev_info.max_rx_queues);

	if (nb_tx_queue > dev_info.max_tx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max tx queue is %u)\n",
				nb_tx_queue, dev_info.max_tx_queues);

	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
			nb_rx_queue, nb_tx_queue);

	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
			&port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n", ret, portid);

	/* init one TX queue per lcore */
	tx_queueid = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		/* init TX queue */
		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);

		txconf = &dev_info.default_txconf;
		txconf->txq_flags = 0;

		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
				socket_id, txconf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					"err=%d, port=%d\n", ret, portid);

		qconf = &lcore_conf[lcore_id];
		qconf->tx_queue_id[portid] = tx_queueid;
		tx_queueid++;

		/* init RX queues */
		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
			if (portid != qconf->rx_queue_list[queue].port_id)
				continue;

			rx_queueid = qconf->rx_queue_list[queue].queue_id;

			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
					socket_id);

			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
					nb_rxd, socket_id, NULL,
					socket_ctx[socket_id].mbuf_pool);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
						"rte_eth_rx_queue_setup: err=%d, "
						"port=%d\n", ret, portid);
		}
	}
	printf("\n");
}
static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
	char s[64];

	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
			RTE_MBUF_DEFAULT_BUF_SIZE,
			socket_id);
	if (ctx->mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
				socket_id);
	else
		printf("Allocated mbuf pool on socket %d\n", socket_id);
}
int32_t
main(int32_t argc, char **argv)
{
	int32_t ret;
	uint32_t lcore_id, nb_ports;
	uint8_t portid, socket_id;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");

	if (ep < 0)
		rte_exit(EXIT_FAILURE, "need to choose either EP0 or EP1\n");

	if ((unprotected_port_mask & enabled_port_mask) !=
			unprotected_port_mask)
		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
				unprotected_port_mask);

	nb_ports = rte_eth_dev_count();

	if (check_params() < 0)
		rte_exit(EXIT_FAILURE, "check_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_lcores = rte_lcore_count();

	/* Replicate each context per socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		if (socket_ctx[socket_id].mbuf_pool)
			continue;

		sa_init(&socket_ctx[socket_id], socket_id, ep);

		sp4_init(&socket_ctx[socket_id], socket_id, ep);

		sp6_init(&socket_ctx[socket_id], socket_id, ep);

		rt_init(&socket_ctx[socket_id], socket_id, ep);

		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
	}

	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		port_init(portid);
	}

	cryptodevs_init();

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
					"err=%d, port=%d\n", ret, portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}