examples/ipsec-secgw: support IPv6 options
dpdk.git: examples/ipsec-secgw/ipsec-secgw.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <stdint.h>
8 #include <inttypes.h>
9 #include <sys/types.h>
10 #include <netinet/in.h>
11 #include <netinet/ip.h>
12 #include <netinet/ip6.h>
13 #include <string.h>
14 #include <sys/queue.h>
15 #include <stdarg.h>
16 #include <errno.h>
17 #include <getopt.h>
18
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_log.h>
22 #include <rte_eal.h>
23 #include <rte_launch.h>
24 #include <rte_atomic.h>
25 #include <rte_cycles.h>
26 #include <rte_prefetch.h>
27 #include <rte_lcore.h>
28 #include <rte_per_lcore.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_interrupts.h>
31 #include <rte_random.h>
32 #include <rte_debug.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev.h>
35 #include <rte_mempool.h>
36 #include <rte_mbuf.h>
37 #include <rte_acl.h>
38 #include <rte_lpm.h>
39 #include <rte_lpm6.h>
40 #include <rte_hash.h>
41 #include <rte_jhash.h>
42 #include <rte_cryptodev.h>
43 #include <rte_security.h>
44 #include <rte_ip.h>
45
46 #include "ipsec.h"
47 #include "parser.h"
48
49 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
50
51 #define MAX_JUMBO_PKT_LEN  9600
52
53 #define MEMPOOL_CACHE_SIZE 256
54
55 #define NB_MBUF (32000)
56
57 #define CDEV_QUEUE_DESC 2048
58 #define CDEV_MAP_ENTRIES 16384
59 #define CDEV_MP_NB_OBJS 1024
60 #define CDEV_MP_CACHE_SZ 64
61 #define MAX_QUEUE_PAIRS 1
62
63 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
64
65 #define NB_SOCKETS 4
66
67 /* Configure how many packets ahead to prefetch when reading packets */
68 #define PREFETCH_OFFSET 3
69
70 #define MAX_RX_QUEUE_PER_LCORE 16
71
72 #define MAX_LCORE_PARAMS 1024
73
74 #define UNPROTECTED_PORT(portid) (unprotected_port_mask & (1 << portid))
75
76 /*
77  * Configurable number of RX/TX ring descriptors
78  */
79 #define IPSEC_SECGW_RX_DESC_DEFAULT 1024
80 #define IPSEC_SECGW_TX_DESC_DEFAULT 1024
81 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
82 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
83
84 #if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
85 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
86         (((uint64_t)((a) & 0xff) << 56) | \
87         ((uint64_t)((b) & 0xff) << 48) | \
88         ((uint64_t)((c) & 0xff) << 40) | \
89         ((uint64_t)((d) & 0xff) << 32) | \
90         ((uint64_t)((e) & 0xff) << 24) | \
91         ((uint64_t)((f) & 0xff) << 16) | \
92         ((uint64_t)((g) & 0xff) << 8)  | \
93         ((uint64_t)(h) & 0xff))
94 #else
95 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
96         (((uint64_t)((h) & 0xff) << 56) | \
97         ((uint64_t)((g) & 0xff) << 48) | \
98         ((uint64_t)((f) & 0xff) << 40) | \
99         ((uint64_t)((e) & 0xff) << 32) | \
100         ((uint64_t)((d) & 0xff) << 24) | \
101         ((uint64_t)((c) & 0xff) << 16) | \
102         ((uint64_t)((b) & 0xff) << 8) | \
103         ((uint64_t)(a) & 0xff))
104 #endif
105 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
106
107 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
108                 (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
109                 (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
110                 (addr)->addr_bytes[4], (addr)->addr_bytes[5], \
111                 0, 0)
112
113 /* port/source ethernet addr and destination ethernet addr */
114 struct ethaddr_info {
115         uint64_t src, dst;
116 };
117
118 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
119         { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
120         { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
121         { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
122         { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
123 };
124
125 #define CMD_LINE_OPT_CONFIG             "config"
126 #define CMD_LINE_OPT_SINGLE_SA          "single-sa"
127 #define CMD_LINE_OPT_CRYPTODEV_MASK     "cryptodev_mask"
128 #define CMD_LINE_OPT_RX_OFFLOAD         "rxoffload"
129 #define CMD_LINE_OPT_TX_OFFLOAD         "txoffload"
130
131 enum {
132         /* long options mapped to a short option */
133
134         /* The first long-only option value must be >= 256, so that it
135          * does not conflict with short options.
136          */
137         CMD_LINE_OPT_MIN_NUM = 256,
138         CMD_LINE_OPT_CONFIG_NUM,
139         CMD_LINE_OPT_SINGLE_SA_NUM,
140         CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
141         CMD_LINE_OPT_RX_OFFLOAD_NUM,
142         CMD_LINE_OPT_TX_OFFLOAD_NUM,
143 };
144
145 static const struct option lgopts[] = {
146         {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
147         {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
148         {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
149         {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
150         {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
151         {NULL, 0, 0, 0}
152 };
153
154 /* mask of enabled ports */
155 static uint32_t enabled_port_mask;
156 static uint64_t enabled_cryptodev_mask = UINT64_MAX;
157 static uint32_t unprotected_port_mask;
158 static int32_t promiscuous_on = 1;
159 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
160 static uint32_t nb_lcores;
161 static uint32_t single_sa;
162 static uint32_t single_sa_idx;
163 static uint32_t frame_size;
164
165 /*
166  * RX/TX HW offload capabilities to enable/use on ethernet ports.
167  * By default all capabilities are enabled.
168  */
169 static uint64_t dev_rx_offload = UINT64_MAX;
170 static uint64_t dev_tx_offload = UINT64_MAX;
171
172 /* application wide librte_ipsec/SA parameters */
173 struct app_sa_prm app_sa_prm = {.enable = 0};
174
175 struct lcore_rx_queue {
176         uint16_t port_id;
177         uint8_t queue_id;
178 } __rte_cache_aligned;
179
180 struct lcore_params {
181         uint16_t port_id;
182         uint8_t queue_id;
183         uint8_t lcore_id;
184 } __rte_cache_aligned;
185
186 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
187
188 static struct lcore_params *lcore_params;
189 static uint16_t nb_lcore_params;
190
191 static struct rte_hash *cdev_map_in;
192 static struct rte_hash *cdev_map_out;
193
194 struct buffer {
195         uint16_t len;
196         struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
197 };
198
199 struct lcore_conf {
200         uint16_t nb_rx_queue;
201         struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
202         uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
203         struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
204         struct ipsec_ctx inbound;
205         struct ipsec_ctx outbound;
206         struct rt_ctx *rt4_ctx;
207         struct rt_ctx *rt6_ctx;
208 } __rte_cache_aligned;
209
210 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
211
212 static struct rte_eth_conf port_conf = {
213         .rxmode = {
214                 .mq_mode        = ETH_MQ_RX_RSS,
215                 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
216                 .split_hdr_size = 0,
217                 .offloads = DEV_RX_OFFLOAD_CHECKSUM,
218         },
219         .rx_adv_conf = {
220                 .rss_conf = {
221                         .rss_key = NULL,
222                         .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
223                                 ETH_RSS_TCP | ETH_RSS_SCTP,
224                 },
225         },
226         .txmode = {
227                 .mq_mode = ETH_MQ_TX_NONE,
228         },
229 };
230
231 static struct socket_ctx socket_ctx[NB_SOCKETS];
232
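/*
 * Classify one received packet: strip the Ethernet header, sort the mbuf
 * into the IPv4, IPv6 or ESP (ipsec) group of the traffic structure and,
 * for inline-offloaded packets, recover the SA pointer that was registered
 * as session userdata so the later SP/SA checks can use it.
 */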
233 static inline void
234 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
235 {
236         uint8_t *nlp;
237         struct rte_ether_hdr *eth;
238
239         eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
240         if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
241                 nlp = (uint8_t *)rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
242                 nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
243                 if (*nlp == IPPROTO_ESP)
244                         t->ipsec.pkts[(t->ipsec.num)++] = pkt;
245                 else {
246                         t->ip4.data[t->ip4.num] = nlp;
247                         t->ip4.pkts[(t->ip4.num)++] = pkt;
248                 }
249                 pkt->l2_len = 0;
250                 pkt->l3_len = sizeof(struct ip);
251         } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
252                 int next_proto;
253                 size_t l3len, ext_len;
254                 struct rte_ipv6_hdr *v6h;
255                 uint8_t *p;
256
257                 /* get protocol type */
258                 v6h = (struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
259                         RTE_ETHER_HDR_LEN);
260                 next_proto = v6h->proto;
261
262                 /* determine l3 header size up to ESP extension */
263                 l3len = sizeof(struct ip6_hdr);
264                 p = rte_pktmbuf_mtod(pkt, uint8_t *);
265                 while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
266                         (next_proto = rte_ipv6_get_next_ext(p + l3len,
267                                                 next_proto, &ext_len)) >= 0)
268                         l3len += ext_len;
269
270                 /* drop packet when IPv6 header exceeds first segment length */
271                 if (unlikely(l3len > pkt->data_len)) {
272                         rte_pktmbuf_free(pkt);
273                         return;
274                 }
275
276                 if (next_proto == IPPROTO_ESP)
277                         t->ipsec.pkts[(t->ipsec.num)++] = pkt;
278                 else {
279                         t->ip6.data[t->ip6.num] = rte_pktmbuf_mtod_offset(pkt,
280                                 uint8_t *,
281                                 offsetof(struct rte_ipv6_hdr, proto));
282                         t->ip6.pkts[(t->ip6.num)++] = pkt;
283                 }
284                 pkt->l2_len = 0;
285                 pkt->l3_len = l3len;
286         } else {
287                 /* Unknown/Unsupported type, drop the packet */
288                 RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
289                         rte_be_to_cpu_16(eth->ether_type));
290                 rte_pktmbuf_free(pkt);
                    /* do not touch the freed mbuf below */
                    return;
291         }
292
293         /* Check if the packet has been processed inline. For inline protocol
294          * processed packets, the metadata in the mbuf can be used to identify
295          * the security processing done on the packet. The metadata will be
296          * used to retrieve the application registered userdata associated
297          * with the security session.
298          */
299
300         if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
301                 struct ipsec_sa *sa;
302                 struct ipsec_mbuf_metadata *priv;
303                 struct rte_security_ctx *ctx = (struct rte_security_ctx *)
304                                                 rte_eth_dev_get_sec_ctx(
305                                                 pkt->port);
306
307                 /* Retrieve the userdata registered. Here, the userdata
308                  * registered is the SA pointer.
309                  */
310
311                 sa = (struct ipsec_sa *)
312                                 rte_security_get_userdata(ctx, pkt->udata64);
313
314                 if (sa == NULL) {
315                         /* userdata could not be retrieved */
316                         return;
317                 }
318
319                 /* Save SA as priv member in mbuf. This will be used in the
320                  * IPsec selector(SP-SA) check.
321                  */
322
323                 priv = get_priv(pkt);
324                 priv->sa = sa;
325         }
326 }
327
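/*
 * Classify a burst of packets, prefetching PREFETCH_OFFSET packets ahead
 * to hide memory latency while the current packet is parsed.
 */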
328 static inline void
329 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
330                 uint16_t nb_pkts)
331 {
332         int32_t i;
333
334         t->ipsec.num = 0;
335         t->ip4.num = 0;
336         t->ip6.num = 0;
337
338         for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
339                 rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
340                                         void *));
341                 prepare_one_packet(pkts[i], t);
342         }
343         /* Process remaining packets */
344         for (; i < nb_pkts; i++)
345                 prepare_one_packet(pkts[i], t);
346 }
347
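/*
 * Prepend the Ethernet header for transmission, set the l2/l3 lengths and
 * either request HW IPv4 checksum offload or compute the checksum in SW,
 * depending on the TX offloads enabled for the outbound context.
 */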
348 static inline void
349 prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
350                 const struct lcore_conf *qconf)
351 {
352         struct ip *ip;
353         struct rte_ether_hdr *ethhdr;
354
355         ip = rte_pktmbuf_mtod(pkt, struct ip *);
356
357         ethhdr = (struct rte_ether_hdr *)
358                 rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
359
360         if (ip->ip_v == IPVERSION) {
361                 pkt->ol_flags |= qconf->outbound.ipv4_offloads;
362                 pkt->l3_len = sizeof(struct ip);
363                 pkt->l2_len = RTE_ETHER_HDR_LEN;
364
365                 ip->ip_sum = 0;
366
367                 /* calculate IPv4 cksum in SW */
368                 if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
369                         ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
370
371                 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
372         } else {
373                 pkt->ol_flags |= qconf->outbound.ipv6_offloads;
374                 pkt->l3_len = sizeof(struct ip6_hdr);
375                 pkt->l2_len = RTE_ETHER_HDR_LEN;
376
377                 ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
378         }
379
380         memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
381                         sizeof(struct rte_ether_addr));
382         memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
383                         sizeof(struct rte_ether_addr));
384 }
385
386 static inline void
387 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
388                 const struct lcore_conf *qconf)
389 {
390         int32_t i;
391         const int32_t prefetch_offset = 2;
392
393         for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
394                 rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
395                 prepare_tx_pkt(pkts[i], port, qconf);
396         }
397         /* Process remaining packets */
398         for (; i < nb_pkts; i++)
399                 prepare_tx_pkt(pkts[i], port, qconf);
400 }
401
402 /* Send burst of packets on an output interface */
403 static inline int32_t
404 send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
405 {
406         struct rte_mbuf **m_table;
407         int32_t ret;
408         uint16_t queueid;
409
410         queueid = qconf->tx_queue_id[port];
411         m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
412
413         prepare_tx_burst(m_table, n, port, qconf);
414
415         ret = rte_eth_tx_burst(port, queueid, m_table, n);
416         if (unlikely(ret < n)) {
417                 do {
418                         rte_pktmbuf_free(m_table[ret]);
419                 } while (++ret < n);
420         }
421
422         return 0;
423 }
424
425 /* Enqueue a single packet, and send burst if queue is filled */
426 static inline int32_t
427 send_single_packet(struct rte_mbuf *m, uint16_t port)
428 {
429         uint32_t lcore_id;
430         uint16_t len;
431         struct lcore_conf *qconf;
432
433         lcore_id = rte_lcore_id();
434
435         qconf = &lcore_conf[lcore_id];
436         len = qconf->tx_mbufs[port].len;
437         qconf->tx_mbufs[port].m_table[len] = m;
438         len++;
439
440         /* enough pkts to be sent */
441         if (unlikely(len == MAX_PKT_BURST)) {
442                 send_burst(qconf, MAX_PKT_BURST, port);
443                 len = 0;
444         }
445
446         qconf->tx_mbufs[port].len = len;
447         return 0;
448 }
449
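/*
 * Inbound security policy check: classify the packets against the inbound
 * SP (ACL).  BYPASS packets are kept, DISCARD packets are dropped, and
 * packets that the SP maps to an SA are kept only if they actually went
 * through IPsec processing and match the expected SA.
 */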
450 static inline void
451 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
452                 uint16_t lim)
453 {
454         struct rte_mbuf *m;
455         uint32_t i, j, res, sa_idx;
456
457         if (ip->num == 0 || sp == NULL)
458                 return;
459
460         rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
461                         ip->num, DEFAULT_MAX_CATEGORIES);
462
463         j = 0;
464         for (i = 0; i < ip->num; i++) {
465                 m = ip->pkts[i];
466                 res = ip->res[i];
467                 if (res == BYPASS) {
468                         ip->pkts[j++] = m;
469                         continue;
470                 }
471                 if (res == DISCARD) {
472                         rte_pktmbuf_free(m);
473                         continue;
474                 }
475
476                 /* Only check SPI match for processed IPSec packets */
477                 if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
478                         rte_pktmbuf_free(m);
479                         continue;
480                 }
481
482                 sa_idx = SPI2IDX(res);
483                 if (!inbound_sa_check(sa, m, sa_idx)) {
484                         rte_pktmbuf_free(m);
485                         continue;
486                 }
487                 ip->pkts[j++] = m;
488         }
489         ip->num = j;
490 }
491
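/*
 * Append packets to the IPv4/IPv6 traffic groups according to the IP
 * version found in the first header byte; packets with an unknown version
 * are dropped.
 */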
492 static void
493 split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
494 {
495         uint32_t i, n4, n6;
496         struct ip *ip;
497         struct rte_mbuf *m;
498
499         n4 = trf->ip4.num;
500         n6 = trf->ip6.num;
501
502         for (i = 0; i < num; i++) {
503
504                 m = mb[i];
505                 ip = rte_pktmbuf_mtod(m, struct ip *);
506
507                 if (ip->ip_v == IPVERSION) {
508                         trf->ip4.pkts[n4] = m;
509                         trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
510                                         uint8_t *, offsetof(struct ip, ip_p));
511                         n4++;
512                 } else if (ip->ip_v == IP6_VERSION) {
513                         trf->ip6.pkts[n6] = m;
514                         trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
515                                         uint8_t *,
516                                         offsetof(struct ip6_hdr, ip6_nxt));
517                         n6++;
518                 } else
519                         rte_pktmbuf_free(m);
520         }
521
522         trf->ip4.num = n4;
523         trf->ip6.num = n6;
524 }
525
526
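/*
 * Inbound path (traffic received on unprotected ports): decrypt and verify
 * the ESP packets, either through the legacy code path or through
 * librte_ipsec when '-l' is used, then run both plaintext and decrypted
 * packets through the inbound SP check.
 */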
527 static inline void
528 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
529                 struct ipsec_traffic *traffic)
530 {
531         uint16_t nb_pkts_in, n_ip4, n_ip6;
532
533         n_ip4 = traffic->ip4.num;
534         n_ip6 = traffic->ip6.num;
535
536         if (app_sa_prm.enable == 0) {
537                 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
538                                 traffic->ipsec.num, MAX_PKT_BURST);
539                 split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
540         } else {
541                 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
542                         traffic->ipsec.saptr, traffic->ipsec.num);
543                 ipsec_process(ipsec_ctx, traffic);
544         }
545
546         inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
547                         n_ip4);
548
549         inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
550                         n_ip6);
551 }
552
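/*
 * Outbound security policy check: BYPASS packets stay in the plain IP
 * group, DISCARD packets are dropped and everything else is moved to the
 * ipsec group together with the SA index derived from the SP result.
 */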
553 static inline void
554 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
555                 struct traffic_type *ipsec)
556 {
557         struct rte_mbuf *m;
558         uint32_t i, j, sa_idx;
559
560         if (ip->num == 0 || sp == NULL)
561                 return;
562
563         rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
564                         ip->num, DEFAULT_MAX_CATEGORIES);
565
566         j = 0;
567         for (i = 0; i < ip->num; i++) {
568                 m = ip->pkts[i];
569                 sa_idx = SPI2IDX(ip->res[i]);
570                 if (ip->res[i] == DISCARD)
571                         rte_pktmbuf_free(m);
572                 else if (ip->res[i] == BYPASS)
573                         ip->pkts[j++] = m;
574                 else {
575                         ipsec->res[ipsec->num] = sa_idx;
576                         ipsec->pkts[ipsec->num++] = m;
577                 }
578         }
579         ip->num = j;
580 }
581
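/*
 * Outbound path (traffic received on protected ports): drop any ESP
 * packets arriving here, run the plaintext packets through the outbound
 * SP, encrypt the ones selected for protection and put the resulting
 * tunnel packets back into the IPv4/IPv6 groups for routing.
 */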
582 static inline void
583 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
584                 struct ipsec_traffic *traffic)
585 {
586         struct rte_mbuf *m;
587         uint16_t idx, nb_pkts_out, i;
588
589         /* Drop any IPsec traffic from protected ports */
590         for (i = 0; i < traffic->ipsec.num; i++)
591                 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
592
593         traffic->ipsec.num = 0;
594
595         outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
596
597         outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
598
599         if (app_sa_prm.enable == 0) {
600
601                 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
602                                 traffic->ipsec.res, traffic->ipsec.num,
603                                 MAX_PKT_BURST);
604
605                 for (i = 0; i < nb_pkts_out; i++) {
606                         m = traffic->ipsec.pkts[i];
607                         struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
608                         if (ip->ip_v == IPVERSION) {
609                                 idx = traffic->ip4.num++;
610                                 traffic->ip4.pkts[idx] = m;
611                         } else {
612                                 idx = traffic->ip6.num++;
613                                 traffic->ip6.pkts[idx] = m;
614                         }
615                 }
616         } else {
617                 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
618                         traffic->ipsec.saptr, traffic->ipsec.num);
619                 ipsec_process(ipsec_ctx, traffic);
620         }
621 }
622
623 static inline void
624 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
625                 struct ipsec_traffic *traffic)
626 {
627         struct rte_mbuf *m;
628         uint32_t nb_pkts_in, i, idx;
629
630         /* Drop any IPv4 traffic from unprotected ports */
631         for (i = 0; i < traffic->ip4.num; i++)
632                 rte_pktmbuf_free(traffic->ip4.pkts[i]);
633
634         traffic->ip4.num = 0;
635
636         /* Drop any IPv6 traffic from unprotected ports */
637         for (i = 0; i < traffic->ip6.num; i++)
638                 rte_pktmbuf_free(traffic->ip6.pkts[i]);
639
640         traffic->ip6.num = 0;
641
642         if (app_sa_prm.enable == 0) {
643
644                 nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
645                                 traffic->ipsec.num, MAX_PKT_BURST);
646
647                 for (i = 0; i < nb_pkts_in; i++) {
648                         m = traffic->ipsec.pkts[i];
649                         struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
650                         if (ip->ip_v == IPVERSION) {
651                                 idx = traffic->ip4.num++;
652                                 traffic->ip4.pkts[idx] = m;
653                         } else {
654                                 idx = traffic->ip6.num++;
655                                 traffic->ip6.pkts[idx] = m;
656                         }
657                 }
658         } else {
659                 inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
660                         traffic->ipsec.saptr, traffic->ipsec.num);
661                 ipsec_process(ipsec_ctx, traffic);
662         }
663 }
664
665 static inline void
666 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
667                 struct ipsec_traffic *traffic)
668 {
669         struct rte_mbuf *m;
670         uint32_t nb_pkts_out, i, n;
671         struct ip *ip;
672
673         /* Drop any IPsec traffic from protected ports */
674         for (i = 0; i < traffic->ipsec.num; i++)
675                 rte_pktmbuf_free(traffic->ipsec.pkts[i]);
676
677         n = 0;
678
679         for (i = 0; i < traffic->ip4.num; i++) {
680                 traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
681                 traffic->ipsec.res[n++] = single_sa_idx;
682         }
683
684         for (i = 0; i < traffic->ip6.num; i++) {
685                 traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
686                 traffic->ipsec.res[n++] = single_sa_idx;
687         }
688
689         traffic->ip4.num = 0;
690         traffic->ip6.num = 0;
691         traffic->ipsec.num = n;
692
693         if (app_sa_prm.enable == 0) {
694
695                 nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
696                                 traffic->ipsec.res, traffic->ipsec.num,
697                                 MAX_PKT_BURST);
698
699                 /* They all use the same SA (ip4 or ip6 tunnel) */
700                 m = traffic->ipsec.pkts[0];
701                 ip = rte_pktmbuf_mtod(m, struct ip *);
702                 if (ip->ip_v == IPVERSION) {
703                         traffic->ip4.num = nb_pkts_out;
704                         for (i = 0; i < nb_pkts_out; i++)
705                                 traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
706                 } else {
707                         traffic->ip6.num = nb_pkts_out;
708                         for (i = 0; i < nb_pkts_out; i++)
709                                 traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
710                 }
711         } else {
712                 outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
713                         traffic->ipsec.saptr, traffic->ipsec.num);
714                 ipsec_process(ipsec_ctx, traffic);
715         }
716 }
717
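/*
 * For inline-offloaded packets the output port comes from the SA saved in
 * the mbuf private area instead of an LPM lookup.  Returns the port id
 * (with RTE_LPM_LOOKUP_SUCCESS set for IPv4); on failure returns -1 for
 * IPv6 or 0 for IPv4, which makes the caller drop the packet.
 */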
718 static inline int32_t
719 get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
720 {
721         struct ipsec_mbuf_metadata *priv;
722         struct ipsec_sa *sa;
723
724         priv = get_priv(pkt);
725
726         sa = priv->sa;
727         if (unlikely(sa == NULL)) {
728                 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
729                 goto fail;
730         }
731
732         if (is_ipv6)
733                 return sa->portid;
734
735         /* else */
736         return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
737
738 fail:
739         if (is_ipv6)
740                 return -1;
741
742         /* else */
743         return 0;
744 }
745
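/*
 * Route IPv4 packets: inline-offloaded packets use the port stored in the
 * SA, the rest are resolved with a bulk LPM lookup on the destination
 * address; packets without a route are dropped.
 */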
746 static inline void
747 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
748 {
749         uint32_t hop[MAX_PKT_BURST * 2];
750         uint32_t dst_ip[MAX_PKT_BURST * 2];
751         int32_t pkt_hop = 0;
752         uint16_t i, offset;
753         uint16_t lpm_pkts = 0;
754
755         if (nb_pkts == 0)
756                 return;
757
758         /* Need to do an LPM lookup for non-inline packets. Inline packets will
759          * have port ID in the SA
760          */
761
762         for (i = 0; i < nb_pkts; i++) {
763                 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
764                         /* Security offload not enabled. So an LPM lookup is
765                          * required to get the hop
766                          */
767                         offset = offsetof(struct ip, ip_dst);
768                         dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
769                                         uint32_t *, offset);
770                         dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
771                         lpm_pkts++;
772                 }
773         }
774
775         rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
776
777         lpm_pkts = 0;
778
779         for (i = 0; i < nb_pkts; i++) {
780                 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
781                         /* Read hop from the SA */
782                         pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
783                 } else {
784                         /* Need to use hop returned by lookup */
785                         pkt_hop = hop[lpm_pkts++];
786                 }
787
788                 if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
789                         rte_pktmbuf_free(pkts[i]);
790                         continue;
791                 }
792                 send_single_packet(pkts[i], pkt_hop & 0xff);
793         }
794 }
795
796 static inline void
797 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
798 {
799         int32_t hop[MAX_PKT_BURST * 2];
800         uint8_t dst_ip[MAX_PKT_BURST * 2][16];
801         uint8_t *ip6_dst;
802         int32_t pkt_hop = 0;
803         uint16_t i, offset;
804         uint16_t lpm_pkts = 0;
805
806         if (nb_pkts == 0)
807                 return;
808
809         /* Need to do an LPM lookup for non-inline packets. Inline packets will
810          * have port ID in the SA
811          */
812
813         for (i = 0; i < nb_pkts; i++) {
814                 if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
815                         /* Security offload not enabled. So an LPM lookup is
816                          * required to get the hop
817                          */
818                         offset = offsetof(struct ip6_hdr, ip6_dst);
819                         ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
820                                         offset);
821                         memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
822                         lpm_pkts++;
823                 }
824         }
825
826         rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
827                         lpm_pkts);
828
829         lpm_pkts = 0;
830
831         for (i = 0; i < nb_pkts; i++) {
832                 if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
833                         /* Read hop from the SA */
834                         pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
835                 } else {
836                         /* Need to use hop returned by lookup */
837                         pkt_hop = hop[lpm_pkts++];
838                 }
839
840                 if (pkt_hop == -1) {
841                         rte_pktmbuf_free(pkts[i]);
842                         continue;
843                 }
844                 send_single_packet(pkts[i], pkt_hop & 0xff);
845         }
846 }
847
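/*
 * Per-burst entry point: classify the packets, run them through the
 * inbound or outbound IPsec path depending on the receiving port, then
 * route whatever survived.
 */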
848 static inline void
849 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
850                 uint8_t nb_pkts, uint16_t portid)
851 {
852         struct ipsec_traffic traffic;
853
854         prepare_traffic(pkts, &traffic, nb_pkts);
855
856         if (unlikely(single_sa)) {
857                 if (UNPROTECTED_PORT(portid))
858                         process_pkts_inbound_nosp(&qconf->inbound, &traffic);
859                 else
860                         process_pkts_outbound_nosp(&qconf->outbound, &traffic);
861         } else {
862                 if (UNPROTECTED_PORT(portid))
863                         process_pkts_inbound(&qconf->inbound, &traffic);
864                 else
865                         process_pkts_outbound(&qconf->outbound, &traffic);
866         }
867
868         route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
869         route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
870 }
871
872 static inline void
873 drain_tx_buffers(struct lcore_conf *qconf)
874 {
875         struct buffer *buf;
876         uint32_t portid;
877
878         for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
879                 buf = &qconf->tx_mbufs[portid];
880                 if (buf->len == 0)
881                         continue;
882                 send_burst(qconf, buf->len, portid);
883                 buf->len = 0;
884         }
885 }
886
887 static inline void
888 drain_crypto_buffers(struct lcore_conf *qconf)
889 {
890         uint32_t i;
891         struct ipsec_ctx *ctx;
892
893         /* drain inbound buffers */
894         ctx = &qconf->inbound;
895         for (i = 0; i != ctx->nb_qps; i++) {
896                 if (ctx->tbl[i].len != 0)
897                         enqueue_cop_burst(ctx->tbl + i);
898         }
899
900         /* drain outbound buffers */
901         ctx = &qconf->outbound;
902         for (i = 0; i != ctx->nb_qps; i++) {
903                 if (ctx->tbl[i].len != 0)
904                         enqueue_cop_burst(ctx->tbl + i);
905         }
906 }
907
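/*
 * Dequeue completed inbound crypto operations, split the decrypted packets
 * by IP version, apply the inbound SP check and route them.
 */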
908 static void
909 drain_inbound_crypto_queues(const struct lcore_conf *qconf,
910                 struct ipsec_ctx *ctx)
911 {
912         uint32_t n;
913         struct ipsec_traffic trf;
914
915         if (app_sa_prm.enable == 0) {
916
917                 /* dequeue packets from crypto-queue */
918                 n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
919                         RTE_DIM(trf.ipsec.pkts));
920
921                 trf.ip4.num = 0;
922                 trf.ip6.num = 0;
923
924                 /* split traffic by ipv4-ipv6 */
925                 split46_traffic(&trf, trf.ipsec.pkts, n);
926         } else
927                 ipsec_cqp_process(ctx, &trf);
928
929         /* process ipv4 packets */
930         if (trf.ip4.num != 0) {
931                 inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
932                 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
933         }
934
935         /* process ipv6 packets */
936         if (trf.ip6.num != 0) {
937                 inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
938                 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
939         }
940 }
941
942 static void
943 drain_outbound_crypto_queues(const struct lcore_conf *qconf,
944                 struct ipsec_ctx *ctx)
945 {
946         uint32_t n;
947         struct ipsec_traffic trf;
948
949         if (app_sa_prm.enable == 0) {
950
951                 /* dequeue packets from crypto-queue */
952                 n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
953                         RTE_DIM(trf.ipsec.pkts));
954
955                 trf.ip4.num = 0;
956                 trf.ip6.num = 0;
957
958                 /* split traffic by ipv4-ipv6 */
959                 split46_traffic(&trf, trf.ipsec.pkts, n);
960         } else
961                 ipsec_cqp_process(ctx, &trf);
962
963         /* process ipv4 packets */
964         if (trf.ip4.num != 0)
965                 route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
966
967         /* process ipv6 packets */
968         if (trf.ip6.num != 0)
969                 route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
970 }
971
972 /* main processing loop */
973 static int32_t
974 main_loop(__attribute__((unused)) void *dummy)
975 {
976         struct rte_mbuf *pkts[MAX_PKT_BURST];
977         uint32_t lcore_id;
978         uint64_t prev_tsc, diff_tsc, cur_tsc;
979         int32_t i, nb_rx;
980         uint16_t portid;
981         uint8_t queueid;
982         struct lcore_conf *qconf;
983         int32_t socket_id;
984         const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
985                         / US_PER_S * BURST_TX_DRAIN_US;
986         struct lcore_rx_queue *rxql;
987
988         prev_tsc = 0;
989         lcore_id = rte_lcore_id();
990         qconf = &lcore_conf[lcore_id];
991         rxql = qconf->rx_queue_list;
992         socket_id = rte_lcore_to_socket_id(lcore_id);
993
994         qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
995         qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
996         qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
997         qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
998         qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
999         qconf->inbound.cdev_map = cdev_map_in;
1000         qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
1001         qconf->inbound.session_priv_pool =
1002                         socket_ctx[socket_id].session_priv_pool;
1003         qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
1004         qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
1005         qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
1006         qconf->outbound.cdev_map = cdev_map_out;
1007         qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
1008         qconf->outbound.session_priv_pool =
1009                         socket_ctx[socket_id].session_priv_pool;
1010
1011         if (qconf->nb_rx_queue == 0) {
1012                 RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1013                         lcore_id);
1014                 return 0;
1015         }
1016
1017         RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1018
1019         for (i = 0; i < qconf->nb_rx_queue; i++) {
1020                 portid = rxql[i].port_id;
1021                 queueid = rxql[i].queue_id;
1022                 RTE_LOG(INFO, IPSEC,
1023                         " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1024                         lcore_id, portid, queueid);
1025         }
1026
1027         while (1) {
1028                 cur_tsc = rte_rdtsc();
1029
1030                 /* TX queue buffer drain */
1031                 diff_tsc = cur_tsc - prev_tsc;
1032
1033                 if (unlikely(diff_tsc > drain_tsc)) {
1034                         drain_tx_buffers(qconf);
1035                         drain_crypto_buffers(qconf);
1036                         prev_tsc = cur_tsc;
1037                 }
1038
1039                 for (i = 0; i < qconf->nb_rx_queue; ++i) {
1040
1041                         /* Read packets from RX queues */
1042                         portid = rxql[i].port_id;
1043                         queueid = rxql[i].queue_id;
1044                         nb_rx = rte_eth_rx_burst(portid, queueid,
1045                                         pkts, MAX_PKT_BURST);
1046
1047                         if (nb_rx > 0)
1048                                 process_pkts(qconf, pkts, nb_rx, portid);
1049
1050                         /* dequeue and process completed crypto-ops */
1051                         if (UNPROTECTED_PORT(portid))
1052                                 drain_inbound_crypto_queues(qconf,
1053                                         &qconf->inbound);
1054                         else
1055                                 drain_outbound_crypto_queues(qconf,
1056                                         &qconf->outbound);
1057                 }
1058         }
1059 }
1060
1061 static int32_t
1062 check_params(void)
1063 {
1064         uint8_t lcore;
1065         uint16_t portid;
1066         uint16_t i;
1067         int32_t socket_id;
1068
1069         if (lcore_params == NULL) {
1070                 printf("Error: No port/queue/core mappings\n");
1071                 return -1;
1072         }
1073
1074         for (i = 0; i < nb_lcore_params; ++i) {
1075                 lcore = lcore_params[i].lcore_id;
1076                 if (!rte_lcore_is_enabled(lcore)) {
1077                         printf("error: lcore %hhu is not enabled in "
1078                                 "lcore mask\n", lcore);
1079                         return -1;
1080                 }
1081                 socket_id = rte_lcore_to_socket_id(lcore);
1082                 if (socket_id != 0 && numa_on == 0) {
1083                         printf("warning: lcore %hhu is on socket %d "
1084                                 "with numa off\n",
1085                                 lcore, socket_id);
1086                 }
1087                 portid = lcore_params[i].port_id;
1088                 if ((enabled_port_mask & (1 << portid)) == 0) {
1089                         printf("port %u is not enabled in port mask\n", portid);
1090                         return -1;
1091                 }
1092                 if (!rte_eth_dev_is_valid_port(portid)) {
1093                         printf("port %u is not present on the board\n", portid);
1094                         return -1;
1095                 }
1096         }
1097         return 0;
1098 }
1099
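/* Number of RX queues used on a port: the highest queue id configured for
 * it in --config, plus one.
 */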
1100 static uint8_t
1101 get_port_nb_rx_queues(const uint16_t port)
1102 {
1103         int32_t queue = -1;
1104         uint16_t i;
1105
1106         for (i = 0; i < nb_lcore_params; ++i) {
1107                 if (lcore_params[i].port_id == port &&
1108                                 lcore_params[i].queue_id > queue)
1109                         queue = lcore_params[i].queue_id;
1110         }
1111         return (uint8_t)(++queue);
1112 }
1113
1114 static int32_t
1115 init_lcore_rx_queues(void)
1116 {
1117         uint16_t i, nb_rx_queue;
1118         uint8_t lcore;
1119
1120         for (i = 0; i < nb_lcore_params; ++i) {
1121                 lcore = lcore_params[i].lcore_id;
1122                 nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
1123                 if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
1124                         printf("error: too many queues (%u) for lcore: %u\n",
1125                                         nb_rx_queue + 1, lcore);
1126                         return -1;
1127                 }
1128                 lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
1129                         lcore_params[i].port_id;
1130                 lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
1131                         lcore_params[i].queue_id;
1132                 lcore_conf[lcore].nb_rx_queue++;
1133         }
1134         return 0;
1135 }
1136
1137 /* display usage */
1138 static void
1139 print_usage(const char *prgname)
1140 {
1141         fprintf(stderr, "%s [EAL options] --"
1142                 " -p PORTMASK"
1143                 " [-P]"
1144                 " [-u PORTMASK]"
1145                 " [-j FRAMESIZE]"
1146                 " [-l]"
1147                 " [-w REPLAY_WINDOW_SIZE]"
1148                 " [-e]"
1149                 " [-a]"
1150                 " -f CONFIG_FILE"
1151                 " --config (port,queue,lcore)[,(port,queue,lcore)]"
1152                 " [--single-sa SAIDX]"
1153                 " [--cryptodev_mask MASK]"
1154                 " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
1155                 " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
1156                 "\n\n"
1157                 "  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
1158                 "  -P : Enable promiscuous mode\n"
1159                 "  -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
1160                 "  -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n"
1161                 "                packet size\n"
1162                 "  -l enables code-path that uses librte_ipsec\n"
1163                 "  -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
1164                 "     size for each SA\n"
1165                 "  -e enables ESN\n"
1166                 "  -a enables SA SQN atomic behaviour\n"
1167                 "  -f CONFIG_FILE: Configuration file\n"
1168                 "  --config (port,queue,lcore): Rx queue configuration\n"
1169                 "  --single-sa SAIDX: Use single SA index for outbound traffic,\n"
1170                 "                     bypassing the SP\n"
1171                 "  --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
1172                 "                         devices to configure\n"
1173                 "  --" CMD_LINE_OPT_RX_OFFLOAD
1174                 ": bitmask of the RX HW offload capabilities to enable/use\n"
1175                 "                         (DEV_RX_OFFLOAD_*)\n"
1176                 "  --" CMD_LINE_OPT_TX_OFFLOAD
1177                 ": bitmask of the TX HW offload capabilities to enable/use\n"
1178                 "                         (DEV_TX_OFFLOAD_*)\n"
1179                 "\n",
1180                 prgname);
1181 }
1182
1183 static int
1184 parse_mask(const char *str, uint64_t *val)
1185 {
1186         char *end;
1187         unsigned long t;
1188
1189         errno = 0;
1190         t = strtoul(str, &end, 0);
1191         if (errno != 0 || end[0] != 0)
1192                 return -EINVAL;
1193
1194         *val = t;
1195         return 0;
1196 }
1197
1198 static int32_t
1199 parse_portmask(const char *portmask)
1200 {
1201         char *end = NULL;
1202         unsigned long pm;
1203
1204         /* parse hexadecimal string */
1205         pm = strtoul(portmask, &end, 16);
1206         if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
1207                 return -1;
1208
1209         if ((pm == 0) && errno)
1210                 return -1;
1211
1212         return pm;
1213 }
1214
1215 static int32_t
1216 parse_decimal(const char *str)
1217 {
1218         char *end = NULL;
1219         unsigned long num;
1220
1221         num = strtoul(str, &end, 10);
1222         if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
1223                 return -1;
1224
1225         return num;
1226 }
1227
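/* Parse the --config option: a list of (port,queue,lcore) tuples that
 * assigns each RX queue to the lcore polling it.
 */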
1228 static int32_t
1229 parse_config(const char *q_arg)
1230 {
1231         char s[256];
1232         const char *p, *p0 = q_arg;
1233         char *end;
1234         enum fieldnames {
1235                 FLD_PORT = 0,
1236                 FLD_QUEUE,
1237                 FLD_LCORE,
1238                 _NUM_FLD
1239         };
1240         unsigned long int_fld[_NUM_FLD];
1241         char *str_fld[_NUM_FLD];
1242         int32_t i;
1243         uint32_t size;
1244
1245         nb_lcore_params = 0;
1246
1247         while ((p = strchr(p0, '(')) != NULL) {
1248                 ++p;
1249                 p0 = strchr(p, ')');
1250                 if (p0 == NULL)
1251                         return -1;
1252
1253                 size = p0 - p;
1254                 if (size >= sizeof(s))
1255                         return -1;
1256
1257                 snprintf(s, sizeof(s), "%.*s", size, p);
1258                 if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
1259                                 _NUM_FLD)
1260                         return -1;
1261                 for (i = 0; i < _NUM_FLD; i++) {
1262                         errno = 0;
1263                         int_fld[i] = strtoul(str_fld[i], &end, 0);
1264                         if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
1265                                 return -1;
1266                 }
1267                 if (nb_lcore_params >= MAX_LCORE_PARAMS) {
1268                         printf("exceeded max number of lcore params: %hu\n",
1269                                 nb_lcore_params);
1270                         return -1;
1271                 }
1272                 lcore_params_array[nb_lcore_params].port_id =
1273                         (uint8_t)int_fld[FLD_PORT];
1274                 lcore_params_array[nb_lcore_params].queue_id =
1275                         (uint8_t)int_fld[FLD_QUEUE];
1276                 lcore_params_array[nb_lcore_params].lcore_id =
1277                         (uint8_t)int_fld[FLD_LCORE];
1278                 ++nb_lcore_params;
1279         }
1280         lcore_params = lcore_params_array;
1281         return 0;
1282 }
1283
1284 static void
1285 print_app_sa_prm(const struct app_sa_prm *prm)
1286 {
1287         printf("librte_ipsec usage: %s\n",
1288                 (prm->enable == 0) ? "disabled" : "enabled");
1289
1290         if (prm->enable == 0)
1291                 return;
1292
1293         printf("replay window size: %u\n", prm->window_size);
1294         printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
1295         printf("SA flags: %#" PRIx64 "\n", prm->flags);
1296 }
1297
1298 static int32_t
1299 parse_args(int32_t argc, char **argv)
1300 {
1301         int32_t opt, ret;
1302         char **argvopt;
1303         int32_t option_index;
1304         char *prgname = argv[0];
1305         int32_t f_present = 0;
1306
1307         argvopt = argv;
1308
1309         while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:",
1310                                 lgopts, &option_index)) != EOF) {
1311
1312                 switch (opt) {
1313                 case 'p':
1314                         enabled_port_mask = parse_portmask(optarg);
1315                         if (enabled_port_mask == 0) {
1316                                 printf("invalid portmask\n");
1317                                 print_usage(prgname);
1318                                 return -1;
1319                         }
1320                         break;
1321                 case 'P':
1322                         printf("Promiscuous mode selected\n");
1323                         promiscuous_on = 1;
1324                         break;
1325                 case 'u':
1326                         unprotected_port_mask = parse_portmask(optarg);
1327                         if (unprotected_port_mask == 0) {
1328                                 printf("invalid unprotected portmask\n");
1329                                 print_usage(prgname);
1330                                 return -1;
1331                         }
1332                         break;
1333                 case 'f':
1334                         if (f_present == 1) {
1335                                 printf("\"-f\" option present more than "
1336                                         "once!\n");
1337                                 print_usage(prgname);
1338                                 return -1;
1339                         }
1340                         if (parse_cfg_file(optarg) < 0) {
1341                                 printf("parsing file \"%s\" failed\n",
1342                                         optarg);
1343                                 print_usage(prgname);
1344                                 return -1;
1345                         }
1346                         f_present = 1;
1347                         break;
1348                 case 'j':
1349                         {
1350                                 int32_t size = parse_decimal(optarg);
1351                                 if (size <= 1518) {
1352                                         printf("Invalid jumbo frame size\n");
1353                                         if (size < 0) {
1354                                                 print_usage(prgname);
1355                                                 return -1;
1356                                         }
1357                                         printf("Using default value 9000\n");
1358                                         frame_size = 9000;
1359                                 } else {
1360                                         frame_size = size;
1361                                 }
1362                         }
1363                         printf("Enabled jumbo frames size %u\n", frame_size);
1364                         break;
1365                 case 'l':
1366                         app_sa_prm.enable = 1;
1367                         break;
1368                 case 'w':
1369                         app_sa_prm.enable = 1;
1370                         app_sa_prm.window_size = parse_decimal(optarg);
1371                         break;
1372                 case 'e':
1373                         app_sa_prm.enable = 1;
1374                         app_sa_prm.enable_esn = 1;
1375                         break;
1376                 case 'a':
1377                         app_sa_prm.enable = 1;
1378                         app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
1379                         break;
1380                 case CMD_LINE_OPT_CONFIG_NUM:
1381                         ret = parse_config(optarg);
1382                         if (ret) {
1383                                 printf("Invalid config\n");
1384                                 print_usage(prgname);
1385                                 return -1;
1386                         }
1387                         break;
1388                 case CMD_LINE_OPT_SINGLE_SA_NUM:
1389                         ret = parse_decimal(optarg);
1390                         if (ret == -1) {
1391                                 printf("Invalid argument[sa_idx]\n");
1392                                 print_usage(prgname);
1393                                 return -1;
1394                         }
1395
1396                         /* else */
1397                         single_sa = 1;
1398                         single_sa_idx = ret;
1399                         printf("Configured with single SA index %u\n",
1400                                         single_sa_idx);
1401                         break;
1402                 case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
1403                         ret = parse_portmask(optarg);
1404                         if (ret == -1) {
1405                                 printf("Invalid argument[portmask]\n");
1406                                 print_usage(prgname);
1407                                 return -1;
1408                         }
1409
1410                         /* else */
1411                         enabled_cryptodev_mask = ret;
1412                         break;
1413                 case CMD_LINE_OPT_RX_OFFLOAD_NUM:
1414                         ret = parse_mask(optarg, &dev_rx_offload);
1415                         if (ret != 0) {
1416                                 printf("Invalid argument for \'%s\': %s\n",
1417                                         CMD_LINE_OPT_RX_OFFLOAD, optarg);
1418                                 print_usage(prgname);
1419                                 return -1;
1420                         }
1421                         break;
1422                 case CMD_LINE_OPT_TX_OFFLOAD_NUM:
1423                         ret = parse_mask(optarg, &dev_tx_offload);
1424                         if (ret != 0) {
1425                                 printf("Invalid argument for \'%s\': %s\n",
1426                                         CMD_LINE_OPT_TX_OFFLOAD, optarg);
1427                                 print_usage(prgname);
1428                                 return -1;
1429                         }
1430                         break;
1431                 default:
1432                         print_usage(prgname);
1433                         return -1;
1434                 }
1435         }
1436
1437         if (f_present == 0) {
1438                 printf("Mandatory option \"-f\" not present\n");
1439                 return -1;
1440         }
1441
1442         print_app_sa_prm(&app_sa_prm);
1443
1444         if (optind >= 0)
1445                 argv[optind-1] = prgname;
1446
1447         ret = optind-1;
1448         optind = 1; /* reset getopt lib */
1449         return ret;
1450 }
1451
1452 static void
1453 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
1454 {
1455         char buf[RTE_ETHER_ADDR_FMT_SIZE];
1456         rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
1457         printf("%s%s", name, buf);
1458 }
1459
1460 /*
1461  * Update destination ethaddr for the port.
1462  */
1463 int
1464 add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
1465 {
1466         if (port >= RTE_DIM(ethaddr_tbl))
1467                 return -EINVAL;
1468
1469         ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
1470         return 0;
1471 }
1472
1473 /* Check the link status of all ports in up to 9s, and print the final status */
1474 static void
1475 check_all_ports_link_status(uint32_t port_mask)
1476 {
1477 #define CHECK_INTERVAL 100 /* 100ms */
1478 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1479         uint16_t portid;
1480         uint8_t count, all_ports_up, print_flag = 0;
1481         struct rte_eth_link link;
1482
1483         printf("\nChecking link status");
1484         fflush(stdout);
1485         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1486                 all_ports_up = 1;
1487                 RTE_ETH_FOREACH_DEV(portid) {
1488                         if ((port_mask & (1 << portid)) == 0)
1489                                 continue;
1490                         memset(&link, 0, sizeof(link));
1491                         rte_eth_link_get_nowait(portid, &link);
1492                         /* print link status if flag set */
1493                         if (print_flag == 1) {
1494                                 if (link.link_status)
1495                                         printf(
1496                                         "Port %d Link Up - speed %u Mbps -%s\n",
1497                                                 portid, link.link_speed,
1498                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1499                                         ("full-duplex") : ("half-duplex"));
1500                                 else
1501                                         printf("Port %d Link Down\n", portid);
1502                                 continue;
1503                         }
1504                         /* clear all_ports_up flag if any link down */
1505                         if (link.link_status == ETH_LINK_DOWN) {
1506                                 all_ports_up = 0;
1507                                 break;
1508                         }
1509                 }
1510                 /* after finally printing all link status, get out */
1511                 if (print_flag == 1)
1512                         break;
1513
1514                 if (all_ports_up == 0) {
1515                         printf(".");
1516                         fflush(stdout);
1517                         rte_delay_ms(CHECK_INTERVAL);
1518                 }
1519
1520                 /* set the print_flag if all ports up or timeout */
1521                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1522                         print_flag = 1;
1523                         printf("done\n");
1524                 }
1525         }
1526 }
1527
1528 static int32_t
1529 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1530                 uint16_t qp, struct lcore_params *params,
1531                 struct ipsec_ctx *ipsec_ctx,
1532                 const struct rte_cryptodev_capabilities *cipher,
1533                 const struct rte_cryptodev_capabilities *auth,
1534                 const struct rte_cryptodev_capabilities *aead)
1535 {
1536         int32_t ret = 0;
1537         unsigned long i;
1538         struct cdev_key key = { 0 };
1539
1540         key.lcore_id = params->lcore_id;
1541         if (cipher)
1542                 key.cipher_algo = cipher->sym.cipher.algo;
1543         if (auth)
1544                 key.auth_algo = auth->sym.auth.algo;
1545         if (aead)
1546                 key.aead_algo = aead->sym.aead.algo;
1547
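        /* Nothing to do if this lcore/algorithm combination is already mapped */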
1548         ret = rte_hash_lookup(map, &key);
1549         if (ret != -ENOENT)
1550                 return 0;
1551
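        /* Find the lcore's existing qp slot for this cdev; a new one is added below if absent */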
1552         for (i = 0; i < ipsec_ctx->nb_qps; i++)
1553                 if (ipsec_ctx->tbl[i].id == cdev_id)
1554                         break;
1555
1556         if (i == ipsec_ctx->nb_qps) {
1557                 if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1558                         printf("Maximum number of crypto devices assigned to "
1559                                 "a core, increase MAX_QP_PER_LCORE value\n");
1560                         return 0;
1561                 }
1562                 ipsec_ctx->tbl[i].id = cdev_id;
1563                 ipsec_ctx->tbl[i].qp = qp;
1564                 ipsec_ctx->nb_qps++;
1565                 printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1566                                 "(cdev_id_qp %lu)\n", str, key.lcore_id,
1567                                 cdev_id, qp, i);
1568         }
1569
1570         ret = rte_hash_add_key_data(map, &key, (void *)i);
1571         if (ret < 0) {
1572                 printf("Failed to insert cdev mapping for (lcore %u, "
1573                                 "cdev %u, qp %u), errno %d\n",
1574                                 key.lcore_id, ipsec_ctx->tbl[i].id,
1575                                 ipsec_ctx->tbl[i].qp, ret);
1576                 return 0;
1577         }
1578
1579         return 1;
1580 }
1581
1582 static int32_t
1583 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1584                 uint16_t qp, struct lcore_params *params)
1585 {
1586         int32_t ret = 0;
1587         const struct rte_cryptodev_capabilities *i, *j;
1588         struct rte_hash *map;
1589         struct lcore_conf *qconf;
1590         struct ipsec_ctx *ipsec_ctx;
1591         const char *str;
1592
1593         qconf = &lcore_conf[params->lcore_id];
1594
1595         if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1596                 map = cdev_map_out;
1597                 ipsec_ctx = &qconf->outbound;
1598                 str = "Outbound";
1599         } else {
1600                 map = cdev_map_in;
1601                 ipsec_ctx = &qconf->inbound;
1602                 str = "Inbound";
1603         }
1604
1605         /* Only consider cryptodevs that support symmetric operation chaining */
1606         if (!(dev_info->feature_flags &
1607                                 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1608                 return ret;
1609
1610         for (i = dev_info->capabilities;
1611                         i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1612                 if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1613                         continue;
1614
1615                 if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1616                         ret |= add_mapping(map, str, cdev_id, qp, params,
1617                                         ipsec_ctx, NULL, NULL, i);
1618                         continue;
1619                 }
1620
1621                 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1622                         continue;
1623
1624                 for (j = dev_info->capabilities;
1625                                 j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1626                         if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1627                                 continue;
1628
1629                         if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1630                                 continue;
1631
1632                         ret |= add_mapping(map, str, cdev_id, qp, params,
1633                                                 ipsec_ctx, i, j, NULL);
1634                 }
1635         }
1636
1637         return ret;
1638 }
1639
1640 /* Check if the device is enabled by cryptodev_mask */
1641 static int
1642 check_cryptodev_mask(uint8_t cdev_id)
1643 {
1644         if (enabled_cryptodev_mask & (1 << cdev_id))
1645                 return 0;
1646
1647         return -1;
1648 }
1649
1650 static int32_t
1651 cryptodevs_init(void)
1652 {
1653         struct rte_cryptodev_config dev_conf;
1654         struct rte_cryptodev_qp_conf qp_conf;
1655         uint16_t idx, max_nb_qps, qp, i;
1656         int16_t cdev_id, port_id;
1657         struct rte_hash_parameters params = { 0 };
1658
1659         params.entries = CDEV_MAP_ENTRIES;
1660         params.key_len = sizeof(struct cdev_key);
1661         params.hash_func = rte_jhash;
1662         params.hash_func_init_val = 0;
1663         params.socket_id = rte_socket_id();
1664
1665         params.name = "cdev_map_in";
1666         cdev_map_in = rte_hash_create(&params);
1667         if (cdev_map_in == NULL)
1668                 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1669                                 rte_errno);
1670
1671         params.name = "cdev_map_out";
1672         cdev_map_out = rte_hash_create(&params);
1673         if (cdev_map_out == NULL)
1674                 rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1675                                 rte_errno);
1676
1677         printf("lcore/cryptodev/qp mappings:\n");
1678
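        /*
         * Track the largest session private data size across all crypto
         * devices and security-capable ethdevs, so that the per-socket
         * session pools created below can hold any of them.
         */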
1679         uint32_t max_sess_sz = 0, sess_sz;
1680         for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1681                 void *sec_ctx;
1682
1683                 /* Get crypto priv session size */
1684                 sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
1685                 if (sess_sz > max_sess_sz)
1686                         max_sess_sz = sess_sz;
1687
1688                 /*
1689                  * If crypto device is security capable, need to check the
1690                  * size of security session as well.
1691                  */
1692
1693                 /* Get security context of the crypto device */
1694                 sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
1695                 if (sec_ctx == NULL)
1696                         continue;
1697
1698                 /* Get size of security session */
1699                 sess_sz = rte_security_session_get_size(sec_ctx);
1700                 if (sess_sz > max_sess_sz)
1701                         max_sess_sz = sess_sz;
1702         }
1703         RTE_ETH_FOREACH_DEV(port_id) {
1704                 void *sec_ctx;
1705
1706                 if ((enabled_port_mask & (1 << port_id)) == 0)
1707                         continue;
1708
1709                 sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
1710                 if (sec_ctx == NULL)
1711                         continue;
1712
1713                 sess_sz = rte_security_session_get_size(sec_ctx);
1714                 if (sess_sz > max_sess_sz)
1715                         max_sess_sz = sess_sz;
1716         }
1717
1718         idx = 0;
1719         for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
1720                 struct rte_cryptodev_info cdev_info;
1721
1722                 if (check_cryptodev_mask((uint8_t)cdev_id))
1723                         continue;
1724
1725                 rte_cryptodev_info_get(cdev_id, &cdev_info);
1726
1727                 if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1728                         max_nb_qps = cdev_info.max_nb_queue_pairs;
1729                 else
1730                         max_nb_qps = nb_lcore_params;
1731
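                /*
                 * Walk lcore_params round-robin (idx wraps modulo nb_lcore_params);
                 * a queue pair is consumed only when add_cdev_mapping() installs
                 * a new mapping for it.
                 */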
1732                 qp = 0;
1733                 i = 0;
1734                 while (qp < max_nb_qps && i < nb_lcore_params) {
1735                         if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1736                                                 &lcore_params[idx]))
1737                                 qp++;
1738                         idx++;
1739                         idx = idx % nb_lcore_params;
1740                         i++;
1741                 }
1742
1743                 if (qp == 0)
1744                         continue;
1745
1746                 dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1747                 dev_conf.nb_queue_pairs = qp;
1748                 dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
1749
1750                 uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
1751                 if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
1752                         rte_exit(EXIT_FAILURE,
1753                                 "Device does not support at least %u "
1754                                 "sessions", CDEV_MP_NB_OBJS);
1755
1756                 if (!socket_ctx[dev_conf.socket_id].session_pool) {
1757                         char mp_name[RTE_MEMPOOL_NAMESIZE];
1758                         struct rte_mempool *sess_mp;
1759
1760                         snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1761                                         "sess_mp_%u", dev_conf.socket_id);
1762                         sess_mp = rte_cryptodev_sym_session_pool_create(
1763                                         mp_name, CDEV_MP_NB_OBJS,
1764                                         0, CDEV_MP_CACHE_SZ, 0,
1765                                         dev_conf.socket_id);
1766                         socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
1767                 }
1768
1769                 if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
1770                         char mp_name[RTE_MEMPOOL_NAMESIZE];
1771                         struct rte_mempool *sess_mp;
1772
1773                         snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1774                                         "sess_mp_priv_%u", dev_conf.socket_id);
1775                         sess_mp = rte_mempool_create(mp_name,
1776                                         CDEV_MP_NB_OBJS,
1777                                         max_sess_sz,
1778                                         CDEV_MP_CACHE_SZ,
1779                                         0, NULL, NULL, NULL,
1780                                         NULL, dev_conf.socket_id,
1781                                         0);
1782                         socket_ctx[dev_conf.socket_id].session_priv_pool =
1783                                         sess_mp;
1784                 }
1785
1786                 if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
1787                                 !socket_ctx[dev_conf.socket_id].session_pool)
1788                         rte_exit(EXIT_FAILURE,
1789                                 "Cannot create session pool on socket %d\n",
1790                                 dev_conf.socket_id);
1791                 else
1792                         printf("Allocated session pool on socket %d\n",
1793                                         dev_conf.socket_id);
1794
1795                 if (rte_cryptodev_configure(cdev_id, &dev_conf))
1796                         rte_panic("Failed to initialize cryptodev %u\n",
1797                                         cdev_id);
1798
1799                 qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
1800                 qp_conf.mp_session =
1801                         socket_ctx[dev_conf.socket_id].session_pool;
1802                 qp_conf.mp_session_private =
1803                         socket_ctx[dev_conf.socket_id].session_priv_pool;
1804                 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1805                         if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1806                                         &qp_conf, dev_conf.socket_id))
1807                                 rte_panic("Failed to setup queue %u for "
1808                                                 "cdev_id %u\n", qp, cdev_id);
1809
1810                 if (rte_cryptodev_start(cdev_id))
1811                         rte_panic("Failed to start cryptodev %u\n",
1812                                         cdev_id);
1813         }
1814
1815         /* create session pools for eth devices that implement security */
1816         RTE_ETH_FOREACH_DEV(port_id) {
1817                 if ((enabled_port_mask & (1 << port_id)) &&
1818                                 rte_eth_dev_get_sec_ctx(port_id)) {
1819                         int socket_id = rte_eth_dev_socket_id(port_id);
1820
1821                         if (!socket_ctx[socket_id].session_priv_pool) {
1822                                 char mp_name[RTE_MEMPOOL_NAMESIZE];
1823                                 struct rte_mempool *sess_mp;
1824
1825                                 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
1826                                                 "sess_mp_%u", socket_id);
1827                                 sess_mp = rte_mempool_create(mp_name,
1828                                                 (CDEV_MP_NB_OBJS * 2),
1829                                                 max_sess_sz,
1830                                                 CDEV_MP_CACHE_SZ,
1831                                                 0, NULL, NULL, NULL,
1832                                                 NULL, socket_id,
1833                                                 0);
1834                                 if (sess_mp == NULL)
1835                                         rte_exit(EXIT_FAILURE,
1836                                                 "Cannot create session pool "
1837                                                 "on socket %d\n", socket_id);
1838                                 else
1839                                         printf("Allocated session pool "
1840                                                 "on socket %d\n", socket_id);
1841                                 socket_ctx[socket_id].session_priv_pool =
1842                                                 sess_mp;
1843                         }
1844                 }
1845         }
1846
1847
1848         printf("\n");
1849
1850         return 0;
1851 }
1852
1853 static void
1854 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
1855 {
1856         struct rte_eth_dev_info dev_info;
1857         struct rte_eth_txconf *txconf;
1858         uint16_t nb_tx_queue, nb_rx_queue;
1859         uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1860         int32_t ret, socket_id;
1861         struct lcore_conf *qconf;
1862         struct rte_ether_addr ethaddr;
1863         struct rte_eth_conf local_port_conf = port_conf;
1864
1865         rte_eth_dev_info_get(portid, &dev_info);
1866
1867         /* limit allowed HW offloads, as requested by the user */
1868         dev_info.rx_offload_capa &= dev_rx_offload;
1869         dev_info.tx_offload_capa &= dev_tx_offload;
1870
1871         printf("Configuring device port %u:\n", portid);
1872
1873         rte_eth_macaddr_get(portid, &ethaddr);
1874         ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(&ethaddr);
1875         print_ethaddr("Address: ", &ethaddr);
1876         printf("\n");
1877
1878         nb_rx_queue = get_port_nb_rx_queues(portid);
1879         nb_tx_queue = nb_lcores;
1880
1881         if (nb_rx_queue > dev_info.max_rx_queues)
1882                 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1883                                 "(max rx queue is %u)\n",
1884                                 nb_rx_queue, dev_info.max_rx_queues);
1885
1886         if (nb_tx_queue > dev_info.max_tx_queues)
1887                 rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1888                                 "(max tx queue is %u)\n",
1889                                 nb_tx_queue, dev_info.max_tx_queues);
1890
1891         printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1892                         nb_rx_queue, nb_tx_queue);
1893
1894         if (frame_size) {
1895                 local_port_conf.rxmode.max_rx_pkt_len = frame_size;
1896                 local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1897         }
1898
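        /* Enable the RX/TX offloads required by the SA configuration */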
1899         local_port_conf.rxmode.offloads |= req_rx_offloads;
1900         local_port_conf.txmode.offloads |= req_tx_offloads;
1901
1902         /* Check that all required capabilities are supported */
1903         if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
1904                         local_port_conf.rxmode.offloads)
1905                 rte_exit(EXIT_FAILURE,
1906                         "Error: port %u required RX offloads: 0x%" PRIx64
1907                         ", available RX offloads: 0x%" PRIx64 "\n",
1908                         portid, local_port_conf.rxmode.offloads,
1909                         dev_info.rx_offload_capa);
1910
1911         if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
1912                         local_port_conf.txmode.offloads)
1913                 rte_exit(EXIT_FAILURE,
1914                         "Error: port %u required TX offloads: 0x%" PRIx64
1915                         ", available TX offloads: 0x%" PRIx64 "\n",
1916                         portid, local_port_conf.txmode.offloads,
1917                         dev_info.tx_offload_capa);
1918
1919         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1920                 local_port_conf.txmode.offloads |=
1921                         DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1922
1923         if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
1924                 local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
1925
1926         printf("port %u configuring rx_offloads=0x%" PRIx64
1927                 ", tx_offloads=0x%" PRIx64 "\n",
1928                 portid, local_port_conf.rxmode.offloads,
1929                 local_port_conf.txmode.offloads);
1930
1931         local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
1932                 dev_info.flow_type_rss_offloads;
1933         if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
1934                         port_conf.rx_adv_conf.rss_conf.rss_hf) {
1935                 printf("Port %u modified RSS hash function based on hardware support, "
1936                         "requested:%#"PRIx64" configured:%#"PRIx64"\n",
1937                         portid,
1938                         port_conf.rx_adv_conf.rss_conf.rss_hf,
1939                         local_port_conf.rx_adv_conf.rss_conf.rss_hf);
1940         }
1941
1942         ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
1943                         &local_port_conf);
1944         if (ret < 0)
1945                 rte_exit(EXIT_FAILURE, "Cannot configure device: "
1946                                 "err=%d, port=%d\n", ret, portid);
1947
1948         ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
1949         if (ret < 0)
1950                 rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
1951                                 "err=%d, port=%d\n", ret, portid);
1952
1953         /* init one TX queue per lcore */
1954         tx_queueid = 0;
1955         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1956                 if (rte_lcore_is_enabled(lcore_id) == 0)
1957                         continue;
1958
1959                 if (numa_on)
1960                         socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1961                 else
1962                         socket_id = 0;
1963
1964                 /* init TX queue */
1965                 printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
1966
1967                 txconf = &dev_info.default_txconf;
1968                 txconf->offloads = local_port_conf.txmode.offloads;
1969
1970                 ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
1971                                 socket_id, txconf);
1972                 if (ret < 0)
1973                         rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1974                                         "err=%d, port=%d\n", ret, portid);
1975
1976                 qconf = &lcore_conf[lcore_id];
1977                 qconf->tx_queue_id[portid] = tx_queueid;
1978
1979                 /* Pre-populate pkt offloads based on capabilities */
1980                 qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
1981                 qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
1982                 if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
1983                         qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
1984
1985                 tx_queueid++;
1986
1987                 /* init RX queues */
1988                 for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
1989                         struct rte_eth_rxconf rxq_conf;
1990
1991                         if (portid != qconf->rx_queue_list[queue].port_id)
1992                                 continue;
1993
1994                         rx_queueid = qconf->rx_queue_list[queue].queue_id;
1995
1996                         printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
1997                                         socket_id);
1998
1999                         rxq_conf = dev_info.default_rxconf;
2000                         rxq_conf.offloads = local_port_conf.rxmode.offloads;
2001                         ret = rte_eth_rx_queue_setup(portid, rx_queueid,
2002                                         nb_rxd, socket_id, &rxq_conf,
2003                                         socket_ctx[socket_id].mbuf_pool);
2004                         if (ret < 0)
2005                                 rte_exit(EXIT_FAILURE,
2006                                         "rte_eth_rx_queue_setup: err=%d, "
2007                                         "port=%d\n", ret, portid);
2008                 }
2009         }
2010         printf("\n");
2011 }
2012
2013 static void
2014 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
2015 {
2016         char s[64];
2017         uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
2018                         RTE_MBUF_DEFAULT_BUF_SIZE;
2019
2020
2021         snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
2022         ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
2023                         MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
2024                         buff_size,
2025                         socket_id);
2026         if (ctx->mbuf_pool == NULL)
2027                 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
2028                                 socket_id);
2029         else
2030                 printf("Allocated mbuf pool on socket %d\n", socket_id);
2031 }
2032
2033 static inline int
2034 inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
2035 {
2036         struct ipsec_sa *sa;
2037
2038         /* For inline protocol processing, the metadata in the event will
2039          * uniquely identify the security session which raised the event.
2040          * Application would then need the userdata it had registered with the
2041          * security session to process the event.
2042          */
2043
2044         sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
2045
2046         if (sa == NULL) {
2047                 /* userdata could not be retrieved */
2048                 return -1;
2049         }
2050
2051         /* Sequence number overflow. The SA needs to be re-established */
2052         RTE_SET_USED(sa);
2053         return 0;
2054 }
2055
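/*
 * Ethdev callback for RTE_ETH_EVENT_IPSEC events: only the ESN overflow
 * subtype is handled here, any other subtype is rejected.
 */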
2056 static int
2057 inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2058                  void *param, void *ret_param)
2059 {
2060         uint64_t md;
2061         struct rte_eth_event_ipsec_desc *event_desc = NULL;
2062         struct rte_security_ctx *ctx = (struct rte_security_ctx *)
2063                                         rte_eth_dev_get_sec_ctx(port_id);
2064
2065         RTE_SET_USED(param);
2066
2067         if (type != RTE_ETH_EVENT_IPSEC)
2068                 return -1;
2069
2070         event_desc = ret_param;
2071         if (event_desc == NULL) {
2072                 printf("Event descriptor not set\n");
2073                 return -1;
2074         }
2075
2076         md = event_desc->metadata;
2077
2078         if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
2079                 return inline_ipsec_event_esn_overflow(ctx, md);
2080         else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
2081                 printf("Invalid IPsec event reported\n");
2082                 return -1;
2083         }
2084
2085         return -1;
2086 }
2087
2088 int32_t
2089 main(int32_t argc, char **argv)
2090 {
2091         int32_t ret;
2092         uint32_t lcore_id;
2093         uint8_t socket_id;
2094         uint16_t portid;
2095         uint64_t req_rx_offloads, req_tx_offloads;
2096
2097         /* init EAL */
2098         ret = rte_eal_init(argc, argv);
2099         if (ret < 0)
2100                 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
2101         argc -= ret;
2102         argv += ret;
2103
2104         /* parse application arguments (after the EAL ones) */
2105         ret = parse_args(argc, argv);
2106         if (ret < 0)
2107                 rte_exit(EXIT_FAILURE, "Invalid parameters\n");
2108
2109         if ((unprotected_port_mask & enabled_port_mask) !=
2110                         unprotected_port_mask)
2111                 rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
2112                                 unprotected_port_mask);
2113
2114         if (check_params() < 0)
2115                 rte_exit(EXIT_FAILURE, "check_params failed\n");
2116
2117         ret = init_lcore_rx_queues();
2118         if (ret < 0)
2119                 rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
2120
2121         nb_lcores = rte_lcore_count();
2122
2123         /* Replicate each context per socket */
2124         for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
2125                 if (rte_lcore_is_enabled(lcore_id) == 0)
2126                         continue;
2127
2128                 if (numa_on)
2129                         socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
2130                 else
2131                         socket_id = 0;
2132
2133                 if (socket_ctx[socket_id].mbuf_pool)
2134                         continue;
2135
2136                 /* initialize SPD */
2137                 sp4_init(&socket_ctx[socket_id], socket_id);
2138
2139                 sp6_init(&socket_ctx[socket_id], socket_id);
2140
2141                 /* initialize SAD */
2142                 sa_init(&socket_ctx[socket_id], socket_id);
2143
2144                 rt_init(&socket_ctx[socket_id], socket_id);
2145
2146                 pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
2147         }
2148
2149         RTE_ETH_FOREACH_DEV(portid) {
2150                 if ((enabled_port_mask & (1 << portid)) == 0)
2151                         continue;
2152
2153                 sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);
2154                 port_init(portid, req_rx_offloads, req_tx_offloads);
2155         }
2156
2157         cryptodevs_init();
2158
2159         /* start ports */
2160         RTE_ETH_FOREACH_DEV(portid) {
2161                 if ((enabled_port_mask & (1 << portid)) == 0)
2162                         continue;
2163
2164                 /* Start device */
2165                 ret = rte_eth_dev_start(portid);
2166                 if (ret < 0)
2167                         rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
2168                                         "err=%d, port=%d\n", ret, portid);
2169                 /*
2170                  * If enabled, put device in promiscuous mode.
2171                  * This allows IO forwarding mode to forward packets
2172                  * to itself through 2 cross-connected ports of the
2173                  * target machine.
2174                  */
2175                 if (promiscuous_on)
2176                         rte_eth_promiscuous_enable(portid);
2177
2178                 rte_eth_dev_callback_register(portid,
2179                         RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
2180         }
2181
2182         check_all_ports_link_status(enabled_port_mask);
2183
2184         /* launch per-lcore init on every lcore */
2185         rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
2186         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
2187                 if (rte_eal_wait_lcore(lcore_id) < 0)
2188                         return -1;
2189         }
2190
2191         return 0;
2192 }