/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
#include <rte_security.h>

#include "ipsec.h"
#include "parser.h"
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN 9600

#define MEMPOOL_CACHE_SIZE 256

#define NB_MBUF	(32000)

#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 1024
#define CDEV_MP_NB_OBJS 2048
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1

#define OPTION_CONFIG		"config"
#define OPTION_SINGLE_SA	"single-sa"
#define OPTION_CRYPTODEV_MASK	"cryptodev_mask"

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET	3

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024
#define UNPROTECTED_PORT(portid) (unprotected_port_mask & (1 << (portid)))
/*
 * Configurable number of RX/TX ring descriptors
 */
#define IPSEC_SECGW_RX_DESC_DEFAULT 1024
#define IPSEC_SECGW_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8) | \
	((uint64_t)(h) & 0xff))
#else
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8) | \
	((uint64_t)(a) & 0xff))
#endif

#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
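
/*
 * Worked example (illustrative, not from the original source): on a
 * little-endian host, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a)
 * evaluates to 0x00009a947e3e1600, so the six MAC bytes sit in wire
 * order in the low six bytes of the stored uint64_t; the big-endian
 * variant mirrors the shifts to produce the same in-memory layout.
 */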
#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
		addr.addr_bytes[0], addr.addr_bytes[1], \
		addr.addr_bytes[2], addr.addr_bytes[3], \
		addr.addr_bytes[4], addr.addr_bytes[5], \
		0, 0)
/* port/source ethernet addr and destination ethernet addr */
struct ethaddr_info {
	uint64_t src, dst;
};

struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
static uint32_t frame_size;
struct lcore_rx_queue {
	uint16_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

struct lcore_params {
	uint16_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];

static struct lcore_params *lcore_params;
static uint16_t nb_lcore_params;

static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
struct buffer {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
};
struct lcore_conf {
	uint16_t nb_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
	struct ipsec_ctx inbound;
	struct ipsec_ctx outbound;
	struct rt_ctx *rt4_ctx;
	struct rt_ctx *rt6_ctx;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.offloads = DEV_RX_OFFLOAD_CHECKSUM |
			    DEV_RX_OFFLOAD_CRC_STRIP,
		.ignore_offload_bitfield = 1,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
				ETH_RSS_TCP | ETH_RSS_SCTP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_MULTI_SEGS),
	},
};
static struct socket_ctx socket_ctx[NB_SOCKETS];

struct traffic_type {
	const uint8_t *data[MAX_PKT_BURST * 2];
	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
	uint32_t res[MAX_PKT_BURST * 2];
	uint32_t num;
};

struct ipsec_traffic {
	struct traffic_type ipsec;
	struct traffic_type ip4;
	struct traffic_type ip6;
};
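
/*
 * prepare_one_packet() strips the Ethernet header and bins each mbuf
 * into the ipsec_traffic struct above: ESP packets go to t->ipsec,
 * plaintext IPv4/IPv6 to t->ip4/t->ip6, with data[] left pointing at
 * the next-protocol field for the later SP (ACL) classification.
 */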
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	uint8_t *nlp;
	struct ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
		if (*nlp == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		else {
			t->ip4.data[t->ip4.num] = nlp;
			t->ip4.pkts[(t->ip4.num)++] = pkt;
		}
	} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
		if (*nlp == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		else {
			t->ip6.data[t->ip6.num] = nlp;
			t->ip6.pkts[(t->ip6.num)++] = pkt;
		}
	} else {
		/* Unknown/Unsupported type, drop the packet */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
		rte_pktmbuf_free(pkt);
	}
	/* Check if the packet has been processed inline. For inline protocol
	 * processed packets, the metadata in the mbuf can be used to identify
	 * the security processing done on the packet. The metadata will be
	 * used to retrieve the application registered userdata associated
	 * with the security session.
	 */
	if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
		struct ipsec_sa *sa;
		struct ipsec_mbuf_metadata *priv;
		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
						rte_eth_dev_get_sec_ctx(
						pkt->port);

		/* Retrieve the userdata registered. Here, the userdata
		 * registered is the SA pointer.
		 */
		sa = (struct ipsec_sa *)
				rte_security_get_userdata(ctx, pkt->udata64);

		if (sa == NULL) {
			/* userdata could not be retrieved */
			return;
		}

		/* Save SA as priv member in mbuf. This will be used in the
		 * IPsec selector (SP-SA) check.
		 */
		priv = get_priv(pkt);
		priv->sa = sa;
	}
}
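
/*
 * The burst variant below software-pipelines the classification: the
 * header of packet i + PREFETCH_OFFSET is prefetched while packet i is
 * processed, so its cache line is resident by the time its turn comes.
 */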
static inline void
prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
		uint16_t nb_pkts)
{
	int32_t i;

	t->ipsec.num = 0;
	t->ip4.num = 0;
	t->ip6.num = 0;

	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
					void *));
		prepare_one_packet(pkts[i], t);
	}
	/* Process left packets */
	for (; i < nb_pkts; i++)
		prepare_one_packet(pkts[i], t);
}
static inline void
prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
{
	struct ip *ip;
	struct ether_hdr *ethhdr;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);

	ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);

	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	} else {
		pkt->ol_flags |= PKT_TX_IPV6;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
	}

	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
			sizeof(struct ether_addr));
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
			sizeof(struct ether_addr));
}
static inline void
prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
{
	int32_t i;
	const int32_t prefetch_offset = 2;

	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
		rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
		prepare_tx_pkt(pkts[i], port);
	}
	/* Process left packets */
	for (; i < nb_pkts; i++)
		prepare_tx_pkt(pkts[i], port);
}
/* Send burst of packets on an output interface */
static inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
	struct rte_mbuf **m_table;
	int32_t ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	prepare_tx_burst(m_table, n, port);

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}
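
/*
 * TX is buffered per port: send_single_packet() appends to the
 * per-lcore table and flushes once MAX_PKT_BURST packets accumulate;
 * partially filled tables are flushed by drain_buffers() from the main
 * loop after roughly BURST_TX_DRAIN_US microseconds.
 */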
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
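
/*
 * Inbound SP check: one rte_acl_classify() call grades the whole burst,
 * then each verdict is applied -- BYPASS keeps the packet, DISCARD
 * frees it, and PROTECT additionally requires that the SA index encoded
 * in the ACL result matches the SA that actually processed the packet
 * (inbound_sa_check()).
 */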
static inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
		uint16_t lim)
{
	struct rte_mbuf *m;
	uint32_t i, j, res, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (res & BYPASS) {
			ip->pkts[j++] = m;
			continue;
		}
		if (res & DISCARD) {
			rte_pktmbuf_free(m);
			continue;
		}

		/* Only check SPI match for processed IPsec packets */
		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
			rte_pktmbuf_free(m);
			continue;
		}

		sa_idx = ip->res[i] & PROTECT_MASK;
		if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
				!inbound_sa_check(sa, m, sa_idx)) {
			rte_pktmbuf_free(m);
			continue;
		}
		ip->pkts[j++] = m;
	}
	ip->num = j;
}
static inline void
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;

	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
			traffic->ipsec.num, MAX_PKT_BURST);

	n_ip4 = traffic->ip4.num;
	n_ip6 = traffic->ip6.num;

	/* SP/ACL Inbound check ipsec and ip4 */
	for (i = 0; i < nb_pkts_in; i++) {
		m = traffic->ipsec.pkts[i];
		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			idx = traffic->ip4.num++;
			traffic->ip4.pkts[idx] = m;
			traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
					uint8_t *, offsetof(struct ip, ip_p));
		} else if (ip->ip_v == IP6_VERSION) {
			idx = traffic->ip6.num++;
			traffic->ip6.pkts[idx] = m;
			traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
					uint8_t *,
					offsetof(struct ip6_hdr, ip6_nxt));
		} else
			rte_pktmbuf_free(m);
	}

	inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
			n_ip4);

	inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
			n_ip6);
}
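
/*
 * Outbound SP lookup: the ACL result encodes the verdict -- DISCARD
 * drops the packet, a PROTECT result carries the target SA index in its
 * low bits (PROTECT_MASK), and anything else is treated as BYPASS and
 * stays on the plaintext path.
 */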
static inline void
outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
		struct traffic_type *ipsec)
{
	struct rte_mbuf *m;
	uint32_t i, j, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] & PROTECT_MASK;
		if (ip->res[i] & DISCARD)
			rte_pktmbuf_free(m);
		else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
			ipsec->res[ipsec->num] = sa_idx;
			ipsec->pkts[ipsec->num++] = m;
		} else /* BYPASS */
			ip->pkts[j++] = m;
	}
	ip->num = j;
}
static inline void
process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_out, i;

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);

	traffic->ipsec.num = 0;

	outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);

	outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);

	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
			traffic->ipsec.res, traffic->ipsec.num,
			MAX_PKT_BURST);

	for (i = 0; i < nb_pkts_out; i++) {
		m = traffic->ipsec.pkts[i];
		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			idx = traffic->ip4.num++;
			traffic->ip4.pkts[idx] = m;
		} else {
			idx = traffic->ip6.num++;
			traffic->ip6.pkts[idx] = m;
		}
	}
}
static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_in, i, idx;

	/* Drop any IPv4 traffic from unprotected ports */
	for (i = 0; i < traffic->ip4.num; i++)
		rte_pktmbuf_free(traffic->ip4.pkts[i]);

	traffic->ip4.num = 0;

	/* Drop any IPv6 traffic from unprotected ports */
	for (i = 0; i < traffic->ip6.num; i++)
		rte_pktmbuf_free(traffic->ip6.pkts[i]);

	traffic->ip6.num = 0;

	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
			traffic->ipsec.num, MAX_PKT_BURST);

	for (i = 0; i < nb_pkts_in; i++) {
		m = traffic->ipsec.pkts[i];
		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			idx = traffic->ip4.num++;
			traffic->ip4.pkts[idx] = m;
		} else {
			idx = traffic->ip6.num++;
			traffic->ip6.pkts[idx] = m;
		}
	}
}
static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_out, i;
	struct ip *ip;

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);

	traffic->ipsec.num = 0;

	for (i = 0; i < traffic->ip4.num; i++)
		traffic->ip4.res[i] = single_sa_idx;

	for (i = 0; i < traffic->ip6.num; i++)
		traffic->ip6.res[i] = single_sa_idx;

	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
			traffic->ip4.res, traffic->ip4.num,
			MAX_PKT_BURST);

	/* They all use the same SA (ip4 or ip6 tunnel) */
	m = traffic->ipsec.pkts[i];
	ip = rte_pktmbuf_mtod(m, struct ip *);
	if (ip->ip_v == IPVERSION)
		traffic->ip4.num = nb_pkts_out;
	else
		traffic->ip6.num = nb_pkts_out;
}
static inline int32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(pkt);

	sa = priv->sa;
	if (unlikely(sa == NULL)) {
		RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
		goto fail;
	}

	if (is_ipv6)
		return sa->portid;

	/* else */
	return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);

fail:
	if (is_ipv6)
		return -1;

	/* else */
	return 0;
}
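
/*
 * IPv4 routing resolves the egress port in two passes: destinations of
 * non-inline packets are collected for a single bulk LPM lookup, while
 * inline-offloaded packets take their port from the SA via
 * get_hop_for_offload_pkt(); packets without a successful lookup are
 * dropped.
 */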
static inline void
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	uint32_t hop[MAX_PKT_BURST * 2];
	uint32_t dst_ip[MAX_PKT_BURST * 2];
	int32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip, ip_dst);
			dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
					uint32_t *, offset);
			dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
			lpm_pkts++;
		}
	}

	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
		}

		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		send_single_packet(pkts[i], pkt_hop & 0xff);
	}
}
static inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	int32_t hop[MAX_PKT_BURST * 2];
	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
	uint8_t *ip6_dst;
	int32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip6_hdr, ip6_dst);
			ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
					offset);
			memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
			lpm_pkts++;
		}
	}

	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
			lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
		}

		if (pkt_hop == -1) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		send_single_packet(pkts[i], pkt_hop & 0xff);
	}
}
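
/*
 * Per-burst pipeline: classify the burst, run the inbound or outbound
 * IPsec path depending on whether the RX port is unprotected, then
 * route whatever survived out through the LPM tables.
 */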
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
		uint8_t nb_pkts, uint16_t portid)
{
	struct ipsec_traffic traffic;

	prepare_traffic(pkts, &traffic, nb_pkts);

	if (unlikely(single_sa)) {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
		else
			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
	} else {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound(&qconf->inbound, &traffic);
		else
			process_pkts_outbound(&qconf->outbound, &traffic);
	}

	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
static inline void
drain_buffers(struct lcore_conf *qconf)
{
	struct buffer *buf;
	uint32_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		buf = &qconf->tx_mbufs[portid];
		if (buf->len == 0)
			continue;
		send_burst(qconf, buf->len, portid);
		buf->len = 0;
	}
}
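
/*
 * Example of the drain period computation below (illustrative numbers):
 * with a 2 GHz TSC, drain_tsc = (2e9 + 1e6 - 1) / 1e6 * 100 = 200000
 * cycles, i.e. TX buffers are drained about every 100 us.
 */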
/* main processing loop */
static int32_t
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint32_t lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int32_t i, nb_rx;
	uint16_t portid;
	uint8_t queueid;
	struct lcore_conf *qconf;
	int32_t socket_id;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct lcore_rx_queue *rxql;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	qconf->inbound.cdev_map = cdev_map_in;
	qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	qconf->outbound.cdev_map = cdev_map_out;
	qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		/* Read packet from RX queues */
		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx > 0)
				process_pkts(qconf, pkts, nb_rx, portid);
		}
	}
}
static int32_t
check_params(void)
{
	uint8_t lcore;
	uint16_t portid, nb_ports;
	uint16_t i;
	int32_t socket_id;

	if (lcore_params == NULL) {
		printf("Error: No port/queue/core mappings\n");
		return -1;
	}

	nb_ports = rte_eth_dev_count();

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in "
				"lcore mask\n", lcore);
			return -1;
		}
		socket_id = rte_lcore_to_socket_id(lcore);
		if (socket_id != 0 && numa_on == 0) {
			printf("warning: lcore %hhu is on socket %d "
				"with numa off\n",
				lcore, socket_id);
		}
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (portid >= nb_ports) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}
static uint8_t
get_port_nb_rx_queues(const uint16_t port)
{
	int32_t queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
				lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
static int32_t
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
					nb_rx_queue + 1, lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].nb_rx_queue++;
	}

	return 0;
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK"
		" --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore)]"
		" --single-sa SAIDX -f CONFIG_FILE\n"
		" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		" -P : enable promiscuous mode\n"
		" -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
		" -j FRAMESIZE: jumbo frame maximum size\n"
		" --"OPTION_CONFIG": (port,queue,lcore): "
		"rx queues configuration\n"
		" --single-sa SAIDX: use single SA index for outbound, "
		"bypassing the SP\n"
		" --cryptodev_mask MASK: hexadecimal bitmask of the "
		"crypto devices to configure\n"
		" -f CONFIG_FILE: Configuration file path\n",
		prgname);
}
static int32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if ((pm == 0) && errno)
		return -1;

	return pm;
}
static int32_t
parse_decimal(const char *str)
{
	char *end = NULL;
	unsigned long num;

	num = strtoul(str, &end, 10);
	if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	return num;
}
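
/*
 * Example (illustrative): --config="(0,0,1),(1,0,2)" assigns port 0 /
 * RX queue 0 to lcore 1 and port 1 / RX queue 0 to lcore 2; each
 * (port,queue,lcore) tuple is parsed below into lcore_params_array.
 */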
static int32_t
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int32_t i;
	uint32_t size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		p0 = strchr(p, ')');
		if (p0 == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
				_NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
#define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt)))

static int32_t
parse_args_long_options(struct option *lgopts, int32_t option_index)
{
	int32_t ret = -1;
	const char *optname = lgopts[option_index].name;

	if (__STRNCMP(optname, OPTION_CONFIG)) {
		ret = parse_config(optarg);
		if (ret)
			printf("invalid config\n");
	}

	if (__STRNCMP(optname, OPTION_SINGLE_SA)) {
		ret = parse_decimal(optarg);
		if (ret != -1) {
			single_sa = 1;
			single_sa_idx = ret;
			printf("Configured with single SA index %u\n",
					single_sa_idx);
			ret = 0;
		}
	}

	if (__STRNCMP(optname, OPTION_CRYPTODEV_MASK)) {
		ret = parse_portmask(optarg);
		if (ret != -1) {
			enabled_cryptodev_mask = ret;
			ret = 0;
		}
	}

	return ret;
}
static int32_t
parse_args(int32_t argc, char **argv)
{
	int32_t opt, ret;
	char **argvopt;
	int32_t option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{OPTION_CONFIG, 1, 0, 0},
		{OPTION_SINGLE_SA, 1, 0, 0},
		{OPTION_CRYPTODEV_MASK, 1, 0, 0},
		{NULL, 0, 0, 0}
	};
	int32_t f_present = 0;

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;
		case 'u':
			unprotected_port_mask = parse_portmask(optarg);
			if (unprotected_port_mask == 0) {
				printf("invalid unprotected portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'f':
			if (f_present == 1) {
				printf("\"-f\" option present more than "
					"once!\n");
				print_usage(prgname);
				return -1;
			}
			if (parse_cfg_file(optarg) < 0) {
				printf("parsing file \"%s\" failed\n",
					optarg);
				print_usage(prgname);
				return -1;
			}
			f_present = 1;
			break;
		case 'j':
			{
				int32_t size = parse_decimal(optarg);
				if (size <= 1518) {
					printf("Invalid jumbo frame size\n");
					if (size < 0) {
						print_usage(prgname);
						return -1;
					}
					printf("Using default value 9000\n");
					frame_size = 9000;
				} else {
					frame_size = size;
				}
			}
			printf("Enabled jumbo frames size %u\n", frame_size);
			break;
		case 0:
			if (parse_args_long_options(lgopts, option_index)) {
				print_usage(prgname);
				return -1;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (f_present == 0) {
		printf("Mandatory option \"-f\" not present\n");
		return -1;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up - speed %u Mbps -%s\n",
						portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
static int32_t
add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params,
		struct ipsec_ctx *ipsec_ctx,
		const struct rte_cryptodev_capabilities *cipher,
		const struct rte_cryptodev_capabilities *auth,
		const struct rte_cryptodev_capabilities *aead)
{
	int32_t ret = 0;
	unsigned long i;
	struct cdev_key key = { 0 };

	key.lcore_id = params->lcore_id;
	if (cipher)
		key.cipher_algo = cipher->sym.cipher.algo;
	if (auth)
		key.auth_algo = auth->sym.auth.algo;
	if (aead)
		key.aead_algo = aead->sym.aead.algo;

	ret = rte_hash_lookup(map, &key);
	if (ret != -ENOENT)
		return 0;

	for (i = 0; i < ipsec_ctx->nb_qps; i++)
		if (ipsec_ctx->tbl[i].id == cdev_id)
			break;

	if (i == ipsec_ctx->nb_qps) {
		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
			printf("Maximum number of crypto devices assigned to "
				"a core, increase MAX_QP_PER_LCORE value\n");
			return 0;
		}
		ipsec_ctx->tbl[i].id = cdev_id;
		ipsec_ctx->tbl[i].qp = qp;
		ipsec_ctx->nb_qps++;
		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
				"(cdev_id_qp %lu)\n", str, key.lcore_id,
				cdev_id, qp, i);
	}

	ret = rte_hash_add_key_data(map, &key, (void *)i);
	if (ret < 0) {
		printf("Failed to insert cdev mapping for (lcore %u, "
				"cdev %u, qp %u), errno %d\n",
				key.lcore_id, ipsec_ctx->tbl[i].id,
				ipsec_ctx->tbl[i].qp, ret);
		return 0;
	}

	return 1;
}
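
/*
 * add_cdev_mapping() keys queue pairs by (lcore, algorithms): AEAD
 * capabilities are mapped on their own, while each cipher capability is
 * paired with every auth capability, matching the cipher+auth chaining
 * the SAs use.
 */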
static int32_t
add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params)
{
	int32_t ret = 0;
	const struct rte_cryptodev_capabilities *i, *j;
	struct rte_hash *map;
	struct lcore_conf *qconf;
	struct ipsec_ctx *ipsec_ctx;
	const char *str;

	qconf = &lcore_conf[params->lcore_id];

	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
		map = cdev_map_out;
		ipsec_ctx = &qconf->outbound;
		str = "Outbound";
	} else {
		map = cdev_map_in;
		ipsec_ctx = &qconf->inbound;
		str = "Inbound";
	}

	/* Require cryptodevs with operation chaining */
	if (!(dev_info->feature_flags &
				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
		return ret;

	for (i = dev_info->capabilities;
			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret |= add_mapping(map, str, cdev_id, qp, params,
					ipsec_ctx, NULL, NULL, i);
			continue;
		}

		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		for (j = dev_info->capabilities;
				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
				continue;

			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
				continue;

			ret |= add_mapping(map, str, cdev_id, qp, params,
						ipsec_ctx, i, j, NULL);
		}
	}

	return ret;
}
/* Check if the device is enabled by cryptodev_mask */
static int32_t
check_cryptodev_mask(uint8_t cdev_id)
{
	if (enabled_cryptodev_mask & (1ULL << cdev_id))
		return 0;

	return -1;
}
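
/*
 * Session pools are sized below for the largest private session size
 * reported by any cryptodev or security-capable ethdev, so a single
 * per-socket pool can serve sessions for every device on that socket.
 */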
static int32_t
cryptodevs_init(void)
{
	struct rte_cryptodev_config dev_conf;
	struct rte_cryptodev_qp_conf qp_conf;
	uint16_t idx, max_nb_qps, qp, i;
	int16_t cdev_id, port_id;
	struct rte_hash_parameters params = { 0 };

	params.entries = CDEV_MAP_ENTRIES;
	params.key_len = sizeof(struct cdev_key);
	params.hash_func = rte_jhash;
	params.hash_func_init_val = 0;
	params.socket_id = rte_socket_id();

	params.name = "cdev_map_in";
	cdev_map_in = rte_hash_create(&params);
	if (cdev_map_in == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	params.name = "cdev_map_out";
	cdev_map_out = rte_hash_create(&params);
	if (cdev_map_out == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	printf("lcore/cryptodev/qp mappings:\n");

	uint32_t max_sess_sz = 0, sess_sz;
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_sz = rte_cryptodev_get_private_session_size(cdev_id);
		if (sess_sz > max_sess_sz)
			max_sess_sz = sess_sz;
	}
	for (port_id = 0; port_id < rte_eth_dev_count(); port_id++) {
		void *sec_ctx;

		if ((enabled_port_mask & (1 << port_id)) == 0)
			continue;

		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
		if (sec_ctx == NULL)
			continue;

		sess_sz = rte_security_session_get_size(sec_ctx);
		if (sess_sz > max_sess_sz)
			max_sess_sz = sess_sz;
	}

	idx = 0;
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		struct rte_cryptodev_info cdev_info;

		if (check_cryptodev_mask((uint8_t)cdev_id))
			continue;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
			max_nb_qps = cdev_info.max_nb_queue_pairs;
		else
			max_nb_qps = nb_lcore_params;

		qp = 0;
		i = 0;
		while (qp < max_nb_qps && i < nb_lcore_params) {
			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
						&lcore_params[idx]))
				qp++;
			idx++;
			idx = idx % nb_lcore_params;
			i++;
		}

		if (qp == 0)
			continue;

		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
		dev_conf.nb_queue_pairs = qp;

		if (!socket_ctx[dev_conf.socket_id].session_pool) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
					"sess_mp_%u", dev_conf.socket_id);
			sess_mp = rte_mempool_create(mp_name,
					CDEV_MP_NB_OBJS,
					max_sess_sz,
					CDEV_MP_CACHE_SZ,
					0, NULL, NULL, NULL,
					NULL, dev_conf.socket_id,
					0);
			if (sess_mp == NULL)
				rte_exit(EXIT_FAILURE,
					"Cannot create session pool on socket %d\n",
					dev_conf.socket_id);
			else
				printf("Allocated session pool on socket %d\n",
					dev_conf.socket_id);
			socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
		}

		if (rte_cryptodev_configure(cdev_id, &dev_conf))
			rte_panic("Failed to initialize cryptodev %u\n",
					cdev_id);

		qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
					&qp_conf, dev_conf.socket_id,
					socket_ctx[dev_conf.socket_id].session_pool))
				rte_panic("Failed to setup queue %u for "
						"cdev_id %u\n", 0, cdev_id);

		if (rte_cryptodev_start(cdev_id))
			rte_panic("Failed to start cryptodev %u\n",
					cdev_id);
	}
	/* create session pools for eth devices that implement security */
	for (port_id = 0; port_id < rte_eth_dev_count(); port_id++) {
		if ((enabled_port_mask & (1 << port_id)) &&
				rte_eth_dev_get_sec_ctx(port_id)) {
			int socket_id = rte_eth_dev_socket_id(port_id);

			if (!socket_ctx[socket_id].session_pool) {
				char mp_name[RTE_MEMPOOL_NAMESIZE];
				struct rte_mempool *sess_mp;

				snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
						"sess_mp_%u", socket_id);
				sess_mp = rte_mempool_create(mp_name,
						CDEV_MP_NB_OBJS,
						max_sess_sz,
						CDEV_MP_CACHE_SZ,
						0, NULL, NULL, NULL,
						NULL, socket_id,
						0);
				if (sess_mp == NULL)
					rte_exit(EXIT_FAILURE,
						"Cannot create session pool "
						"on socket %d\n", socket_id);
				else
					printf("Allocated session pool "
						"on socket %d\n", socket_id);
				socket_ctx[socket_id].session_pool = sess_mp;
			}
		}
	}

	return 0;
}
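
/*
 * Per-port setup: one TX queue per enabled lcore, RX queues according
 * to the --config mapping, and security/fast-free TX offloads enabled
 * only when dev_info advertises them.
 */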
static void
port_init(uint16_t portid)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	uint16_t nb_tx_queue, nb_rx_queue;
	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
	int32_t ret, socket_id;
	struct lcore_conf *qconf;
	struct ether_addr ethaddr;
	struct rte_eth_conf local_port_conf = port_conf;

	rte_eth_dev_info_get(portid, &dev_info);

	printf("Configuring device port %u:\n", portid);

	rte_eth_macaddr_get(portid, &ethaddr);
	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
	print_ethaddr("Address: ", &ethaddr);
	printf("\n");

	nb_rx_queue = get_port_nb_rx_queues(portid);
	nb_tx_queue = nb_lcores;

	if (nb_rx_queue > dev_info.max_rx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max rx queue is %u)\n",
				nb_rx_queue, dev_info.max_rx_queues);

	if (nb_tx_queue > dev_info.max_tx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max tx queue is %u)\n",
				nb_tx_queue, dev_info.max_tx_queues);

	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
			nb_rx_queue, nb_tx_queue);

	if (frame_size) {
		local_port_conf.rxmode.max_rx_pkt_len = frame_size;
		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
			&local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n", ret, portid);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
				"err=%d, port=%d\n", ret, portid);

	/* init one TX queue per lcore */
	tx_queueid = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		/* init TX queue */
		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);

		txconf = &dev_info.default_txconf;
		txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
		txconf->offloads = local_port_conf.txmode.offloads;

		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
				socket_id, txconf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					"err=%d, port=%d\n", ret, portid);

		qconf = &lcore_conf[lcore_id];
		qconf->tx_queue_id[portid] = tx_queueid;
		tx_queueid++;

		/* init RX queues */
		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
			struct rte_eth_rxconf rxq_conf;

			if (portid != qconf->rx_queue_list[queue].port_id)
				continue;

			rx_queueid = qconf->rx_queue_list[queue].queue_id;

			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
					socket_id);

			rxq_conf = dev_info.default_rxconf;
			rxq_conf.offloads = local_port_conf.rxmode.offloads;
			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
					nb_rxd, socket_id, &rxq_conf,
					socket_ctx[socket_id].mbuf_pool);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}
	printf("\n");
}
static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
	char s[64];
	uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
			RTE_MBUF_DEFAULT_BUF_SIZE;

	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
			buff_size,
			socket_id);
	if (ctx->mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
				socket_id);
	else
		printf("Allocated mbuf pool on socket %d\n", socket_id);
}
int32_t
main(int32_t argc, char **argv)
{
	int32_t ret;
	uint32_t lcore_id;
	uint8_t socket_id;
	uint16_t portid, nb_ports;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");

	if ((unprotected_port_mask & enabled_port_mask) !=
			unprotected_port_mask)
		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
				unprotected_port_mask);

	nb_ports = rte_eth_dev_count();

	if (check_params() < 0)
		rte_exit(EXIT_FAILURE, "check_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_lcores = rte_lcore_count();

	/* Replicate each context per socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		if (socket_ctx[socket_id].mbuf_pool)
			continue;

		sa_init(&socket_ctx[socket_id], socket_id);

		sp4_init(&socket_ctx[socket_id], socket_id);

		sp6_init(&socket_ctx[socket_id], socket_id);

		rt_init(&socket_ctx[socket_id], socket_id);

		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
	}

	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		port_init(portid);
	}

	cryptodevs_init();

	/* start ports */
	for (portid = 0; portid < nb_ports; portid++) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
					"err=%d, port=%d\n", ret, portid);

		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);
	}

	check_all_ports_link_status(nb_ports, enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}