[dpdk.git] / examples/ipsec-secgw/sa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 /*
6  * Security Associations
7  */
8 #include <sys/types.h>
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
12
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
19 #include <rte_ip.h>
20 #include <rte_udp.h>
21 #include <rte_random.h>
22 #include <rte_ethdev.h>
23 #include <rte_malloc.h>
24
25 #include "ipsec.h"
26 #include "esp.h"
27 #include "parser.h"
28 #include "sad.h"
29
30 #define IPDEFTTL 64
31
32 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
33
34 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
35
36 #define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
37
38 struct supported_cipher_algo {
39         const char *keyword;
40         enum rte_crypto_cipher_algorithm algo;
41         uint16_t iv_len;
42         uint16_t block_size;
43         uint16_t key_len;
44 };
45
46 struct supported_auth_algo {
47         const char *keyword;
48         enum rte_crypto_auth_algorithm algo;
49         uint16_t iv_len;
50         uint16_t digest_len;
51         uint16_t key_len;
52         uint8_t key_not_req;
53 };
54
55 struct supported_aead_algo {
56         const char *keyword;
57         enum rte_crypto_aead_algorithm algo;
58         uint16_t iv_len;
59         uint16_t block_size;
60         uint16_t digest_len;
61         uint16_t key_len;
62         uint8_t aad_len;
63 };
64
65
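/*
 * The tables below map the algorithm keywords accepted in the SA config
 * file to the crypto parameters used when building a rule; they are
 * consumed by the cipher_algo/auth_algo/aead_algo handlers in
 * parse_sa_tokens() below.
 */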
66 const struct supported_cipher_algo cipher_algos[] = {
67         {
68                 .keyword = "null",
69                 .algo = RTE_CRYPTO_CIPHER_NULL,
70                 .iv_len = 0,
71                 .block_size = 4,
72                 .key_len = 0
73         },
74         {
75                 .keyword = "aes-128-cbc",
76                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
77                 .iv_len = 16,
78                 .block_size = 16,
79                 .key_len = 16
80         },
81         {
82                 .keyword = "aes-192-cbc",
83                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
84                 .iv_len = 16,
85                 .block_size = 16,
86                 .key_len = 24
87         },
88         {
89                 .keyword = "aes-256-cbc",
90                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
91                 .iv_len = 16,
92                 .block_size = 16,
93                 .key_len = 32
94         },
95         {
96                 .keyword = "aes-128-ctr",
97                 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
98                 .iv_len = 8,
99                 .block_size = 4,
100                 .key_len = 20
101         },
102         {
103                 .keyword = "aes-192-ctr",
104                 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
105                 .iv_len = 16,
106                 .block_size = 16,
107                 .key_len = 28
108         },
109         {
110                 .keyword = "aes-256-ctr",
111                 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
112                 .iv_len = 16,
113                 .block_size = 16,
114                 .key_len = 36
115         },
116         {
117                 .keyword = "3des-cbc",
118                 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
119                 .iv_len = 8,
120                 .block_size = 8,
121                 .key_len = 24
122         },
123         {
124                 .keyword = "des-cbc",
125                 .algo = RTE_CRYPTO_CIPHER_DES_CBC,
126                 .iv_len = 8,
127                 .block_size = 8,
128                 .key_len = 8
129         }
130 };
131
132 const struct supported_auth_algo auth_algos[] = {
133         {
134                 .keyword = "null",
135                 .algo = RTE_CRYPTO_AUTH_NULL,
136                 .digest_len = 0,
137                 .key_len = 0,
138                 .key_not_req = 1
139         },
140         {
141                 .keyword = "sha1-hmac",
142                 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
143                 .digest_len = 12,
144                 .key_len = 20
145         },
146         {
147                 .keyword = "sha256-hmac",
148                 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
149                 .digest_len = 16,
150                 .key_len = 32
151         },
152         {
153                 .keyword = "sha384-hmac",
154                 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
155                 .digest_len = 24,
156                 .key_len = 48
157         },
158         {
159                 .keyword = "sha512-hmac",
160                 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
161                 .digest_len = 32,
162                 .key_len = 64
163         },
164         {
165                 .keyword = "aes-gmac",
166                 .algo = RTE_CRYPTO_AUTH_AES_GMAC,
167                 .iv_len = 8,
168                 .digest_len = 16,
169                 .key_len = 20
170         },
171         {
172                 .keyword = "aes-xcbc-mac-96",
173                 .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
174                 .digest_len = 12,
175                 .key_len = 16
176         }
177 };
178
179 const struct supported_aead_algo aead_algos[] = {
180         {
181                 .keyword = "aes-128-gcm",
182                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
183                 .iv_len = 8,
184                 .block_size = 4,
185                 .key_len = 20,
186                 .digest_len = 16,
187                 .aad_len = 8,
188         },
189         {
190                 .keyword = "aes-192-gcm",
191                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
192                 .iv_len = 8,
193                 .block_size = 4,
194                 .key_len = 28,
195                 .digest_len = 16,
196                 .aad_len = 8,
197         },
198         {
199                 .keyword = "aes-256-gcm",
200                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
201                 .iv_len = 8,
202                 .block_size = 4,
203                 .key_len = 36,
204                 .digest_len = 16,
205                 .aad_len = 8,
206         },
207         {
208                 .keyword = "aes-128-ccm",
209                 .algo = RTE_CRYPTO_AEAD_AES_CCM,
210                 .iv_len = 8,
211                 .block_size = 4,
212                 .key_len = 20,
213                 .digest_len = 16,
214                 .aad_len = 8,
215         },
216         {
217                 .keyword = "aes-192-ccm",
218                 .algo = RTE_CRYPTO_AEAD_AES_CCM,
219                 .iv_len = 8,
220                 .block_size = 4,
221                 .key_len = 28,
222                 .digest_len = 16,
223                 .aad_len = 8,
224         },
225         {
226                 .keyword = "aes-256-ccm",
227                 .algo = RTE_CRYPTO_AEAD_AES_CCM,
228                 .iv_len = 8,
229                 .block_size = 4,
230                 .key_len = 36,
231                 .digest_len = 16,
232                 .aad_len = 8,
233         },
234         {
235                 .keyword = "chacha20-poly1305",
236                 .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
237                 .iv_len = 12,
238                 .block_size = 64,
239                 .key_len = 36,
240                 .digest_len = 16,
241                 .aad_len = 8,
242         }
243 };
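/*
 * Note: for the CTR, GMAC and AEAD (GCM/CCM/chacha20-poly1305) entries
 * above, key_len includes the 4-byte salt/nonce that is configured together
 * with the key; parse_sa_tokens() strips those trailing 4 bytes into
 * rule->salt and shortens the key length accordingly.
 */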
244
245 #define SA_INIT_NB      128
246
247 static uint32_t nb_crypto_sessions;
248 struct ipsec_sa *sa_out;
249 uint32_t nb_sa_out;
250 static uint32_t sa_out_sz;
251 static struct ipsec_sa_cnt sa_out_cnt;
252
253 struct ipsec_sa *sa_in;
254 uint32_t nb_sa_in;
255 static uint32_t sa_in_sz;
256 static struct ipsec_sa_cnt sa_in_cnt;
257
258 static const struct supported_cipher_algo *
259 find_match_cipher_algo(const char *cipher_keyword)
260 {
261         size_t i;
262
263         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
264                 const struct supported_cipher_algo *algo =
265                         &cipher_algos[i];
266
267                 if (strcmp(cipher_keyword, algo->keyword) == 0)
268                         return algo;
269         }
270
271         return NULL;
272 }
273
274 static const struct supported_auth_algo *
275 find_match_auth_algo(const char *auth_keyword)
276 {
277         size_t i;
278
279         for (i = 0; i < RTE_DIM(auth_algos); i++) {
280                 const struct supported_auth_algo *algo =
281                         &auth_algos[i];
282
283                 if (strcmp(auth_keyword, algo->keyword) == 0)
284                         return algo;
285         }
286
287         return NULL;
288 }
289
290 static const struct supported_aead_algo *
291 find_match_aead_algo(const char *aead_keyword)
292 {
293         size_t i;
294
295         for (i = 0; i < RTE_DIM(aead_algos); i++) {
296                 const struct supported_aead_algo *algo =
297                         &aead_algos[i];
298
299                 if (strcmp(aead_keyword, algo->keyword) == 0)
300                         return algo;
301         }
302
303         return NULL;
304 }
305
306 /** parse_key_string
307  *  Parse a colon-separated hex byte string ("xx:xx:xx:...") into uint8_t *key.
308  *  return:
309  *  > 0: number of bytes parsed
310  *  0:   parsing failed
311  */
312 static uint32_t
313 parse_key_string(const char *key_str, uint8_t *key)
314 {
315         const char *pt_start = key_str, *pt_end = key_str;
316         uint32_t nb_bytes = 0;
317
318         while (pt_end != NULL) {
319                 char sub_str[3] = {0};
320
321                 pt_end = strchr(pt_start, ':');
322
323                 if (pt_end == NULL) {
324                         if (strlen(pt_start) > 2)
325                                 return 0;
326                         strncpy(sub_str, pt_start, 2);
327                 } else {
328                         if (pt_end - pt_start > 2)
329                                 return 0;
330
331                         strncpy(sub_str, pt_start, pt_end - pt_start);
332                         pt_start = pt_end + 1;
333                 }
334
335                 key[nb_bytes++] = strtol(sub_str, NULL, 16);
336         }
337
338         return nb_bytes;
339 }
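/*
 * Minimal usage sketch for parse_key_string() (illustrative only; the
 * buffer size below is arbitrary, real callers pass rule->cipher_key or
 * rule->auth_key and must make sure the destination can hold every parsed
 * byte, since the function itself does no bounds checking):
 *
 *	uint8_t key[4];
 *	uint32_t len = parse_key_string("de:ad:be:ef", key);
 *	// len == 4, key[] == { 0xde, 0xad, 0xbe, 0xef }
 *	// a group longer than two hex digits (e.g. "dead:beef") returns 0
 */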
340
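/*
 * Grow the SA rule table on demand: allocate SA_INIT_NB entries on first
 * use, then double the capacity (zeroing the new half) whenever cur_cnt
 * reaches cur_sz. Returns 0 on success, -1 on allocation failure.
 */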
341 static int
342 extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
343 {
344         if (*sa_tbl == NULL) {
345                 *sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
346                 if (*sa_tbl == NULL)
347                         return -1;
348                 *cur_sz = SA_INIT_NB;
349                 return 0;
350         }
351
352         if (cur_cnt >= *cur_sz) {
353                 struct ipsec_sa *tmp = realloc(*sa_tbl,
354                         *cur_sz * sizeof(struct ipsec_sa) * 2);
355                 if (tmp == NULL)
356                         return -1;
357                 /* clean reallocated extra space, then publish the new table */
358                 memset(&tmp[*cur_sz], 0, *cur_sz * sizeof(struct ipsec_sa));
359                 *cur_sz *= 2;
360                 *sa_tbl = tmp;
361         }
362
363         return 0;
364 }
365
366 void
367 parse_sa_tokens(char **tokens, uint32_t n_tokens,
368         struct parse_status *status)
369 {
370         struct ipsec_sa *rule = NULL;
371         struct rte_ipsec_session *ips;
372         uint32_t ti; /* token index */
373         uint32_t *ri; /* rule index */
374         struct ipsec_sa_cnt *sa_cnt;
375         uint32_t cipher_algo_p = 0;
376         uint32_t auth_algo_p = 0;
377         uint32_t aead_algo_p = 0;
378         uint32_t src_p = 0;
379         uint32_t dst_p = 0;
380         uint32_t mode_p = 0;
381         uint32_t type_p = 0;
382         uint32_t portid_p = 0;
383         uint32_t fallback_p = 0;
384         int16_t status_p = 0;
385         uint16_t udp_encap_p = 0;
386
387         if (strcmp(tokens[0], "in") == 0) {
388                 ri = &nb_sa_in;
389                 sa_cnt = &sa_in_cnt;
390                 if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
391                         return;
392                 rule = &sa_in[*ri];
393                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
394         } else {
395                 ri = &nb_sa_out;
396                 sa_cnt = &sa_out_cnt;
397                 if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
398                         return;
399                 rule = &sa_out[*ri];
400                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
401         }
402
403         /* spi number */
404         APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
405         if (status->status < 0)
406                 return;
407         if (atoi(tokens[1]) == INVALID_SPI)
408                 return;
409         rule->flags = 0;
410         rule->spi = atoi(tokens[1]);
411         rule->portid = UINT16_MAX;
412         ips = ipsec_get_primary_session(rule);
413
414         for (ti = 2; ti < n_tokens; ti++) {
415                 if (strcmp(tokens[ti], "mode") == 0) {
416                         APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
417                         if (status->status < 0)
418                                 return;
419
420                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
421                         if (status->status < 0)
422                                 return;
423
424                         if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
425                                 sa_cnt->nb_v4++;
426                                 rule->flags |= IP4_TUNNEL;
427                         } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
428                                 sa_cnt->nb_v6++;
429                                 rule->flags |= IP6_TUNNEL;
430                         } else if (strcmp(tokens[ti], "transport") == 0) {
431                                 sa_cnt->nb_v4++;
432                                 sa_cnt->nb_v6++;
433                                 rule->flags |= TRANSPORT;
434                         } else {
435                                 APP_CHECK(0, status, "unrecognized "
436                                         "input \"%s\"", tokens[ti]);
437                                 return;
438                         }
439
440                         mode_p = 1;
441                         continue;
442                 }
443
444                 if (strcmp(tokens[ti], "telemetry") == 0) {
445                         rule->flags |= SA_TELEMETRY_ENABLE;
446                         continue;
447                 }
448
449                 if (strcmp(tokens[ti], "cipher_algo") == 0) {
450                         const struct supported_cipher_algo *algo;
451                         uint32_t key_len;
452
453                         APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
454                                 status);
455                         if (status->status < 0)
456                                 return;
457
458                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
459                         if (status->status < 0)
460                                 return;
461
462                         algo = find_match_cipher_algo(tokens[ti]);
463
464                         APP_CHECK(algo != NULL, status, "unrecognized "
465                                 "input \"%s\"", tokens[ti]);
466
467                         if (status->status < 0)
468                                 return;
469
470                         rule->cipher_algo = algo->algo;
471                         rule->block_size = algo->block_size;
472                         rule->iv_len = algo->iv_len;
473                         rule->cipher_key_len = algo->key_len;
474
475                         /* for NULL algorithm, no cipher key required */
476                         if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
477                                 cipher_algo_p = 1;
478                                 continue;
479                         }
480
481                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
482                         if (status->status < 0)
483                                 return;
484
485                         APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
486                                 status, "unrecognized input \"%s\", "
487                                 "expect \"cipher_key\"", tokens[ti]);
488                         if (status->status < 0)
489                                 return;
490
491                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
492                         if (status->status < 0)
493                                 return;
494
495                         key_len = parse_key_string(tokens[ti],
496                                 rule->cipher_key);
497                         APP_CHECK(key_len == rule->cipher_key_len, status,
498                                 "unrecognized input \"%s\"", tokens[ti]);
499                         if (status->status < 0)
500                                 return;
501
502                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
503                                 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
504                                 rule->salt = (uint32_t)rte_rand();
505
506                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
507                                 key_len -= 4;
508                                 rule->cipher_key_len = key_len;
509                                 memcpy(&rule->salt,
510                                         &rule->cipher_key[key_len], 4);
511                         }
512
513                         cipher_algo_p = 1;
514                         continue;
515                 }
516
517                 if (strcmp(tokens[ti], "auth_algo") == 0) {
518                         const struct supported_auth_algo *algo;
519                         uint32_t key_len;
520
521                         APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
522                                 status);
523                         if (status->status < 0)
524                                 return;
525
526                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
527                         if (status->status < 0)
528                                 return;
529
530                         algo = find_match_auth_algo(tokens[ti]);
531                         APP_CHECK(algo != NULL, status, "unrecognized "
532                                 "input \"%s\"", tokens[ti]);
533
534                         if (status->status < 0)
535                                 return;
536
537                         rule->auth_algo = algo->algo;
538                         rule->auth_key_len = algo->key_len;
539                         rule->digest_len = algo->digest_len;
540
541                         /* NULL algorithm and combined algos do not
542                          * require auth key
543                          */
544                         if (algo->key_not_req) {
545                                 auth_algo_p = 1;
546                                 continue;
547                         }
548
549                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
550                         if (status->status < 0)
551                                 return;
552
553                         APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
554                                 status, "unrecognized input \"%s\", "
555                                 "expect \"auth_key\"", tokens[ti]);
556                         if (status->status < 0)
557                                 return;
558
559                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
560                         if (status->status < 0)
561                                 return;
562
563                         key_len = parse_key_string(tokens[ti],
564                                 rule->auth_key);
565                         APP_CHECK(key_len == rule->auth_key_len, status,
566                                 "unrecognized input \"%s\"", tokens[ti]);
567                         if (status->status < 0)
568                                 return;
569
570                         if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
571                                 key_len -= 4;
572                                 rule->auth_key_len = key_len;
573                                 rule->iv_len = algo->iv_len;
574                                 memcpy(&rule->salt,
575                                         &rule->auth_key[key_len], 4);
576                         }
577
578                         auth_algo_p = 1;
579                         continue;
580                 }
581
582                 if (strcmp(tokens[ti], "aead_algo") == 0) {
583                         const struct supported_aead_algo *algo;
584                         uint32_t key_len;
585
586                         APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
587                                 status);
588                         if (status->status < 0)
589                                 return;
590
591                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
592                         if (status->status < 0)
593                                 return;
594
595                         algo = find_match_aead_algo(tokens[ti]);
596
597                         APP_CHECK(algo != NULL, status, "unrecognized "
598                                 "input \"%s\"", tokens[ti]);
599
600                         if (status->status < 0)
601                                 return;
602
603                         rule->aead_algo = algo->algo;
604                         rule->cipher_key_len = algo->key_len;
605                         rule->digest_len = algo->digest_len;
606                         rule->aad_len = algo->aad_len;
607                         rule->block_size = algo->block_size;
608                         rule->iv_len = algo->iv_len;
609
610                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
611                         if (status->status < 0)
612                                 return;
613
614                         APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
615                                 status, "unrecognized input \"%s\", "
616                                 "expect \"aead_key\"", tokens[ti]);
617                         if (status->status < 0)
618                                 return;
619
620                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
621                         if (status->status < 0)
622                                 return;
623
624                         key_len = parse_key_string(tokens[ti],
625                                 rule->cipher_key);
626                         APP_CHECK(key_len == rule->cipher_key_len, status,
627                                 "unrecognized input \"%s\"", tokens[ti]);
628                         if (status->status < 0)
629                                 return;
630
631                         key_len -= 4;
632                         rule->cipher_key_len = key_len;
633                         memcpy(&rule->salt,
634                                 &rule->cipher_key[key_len], 4);
635
636                         aead_algo_p = 1;
637                         continue;
638                 }
639
640                 if (strcmp(tokens[ti], "src") == 0) {
641                         APP_CHECK_PRESENCE(src_p, tokens[ti], status);
642                         if (status->status < 0)
643                                 return;
644
645                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
646                         if (status->status < 0)
647                                 return;
648
649                         if (IS_IP4_TUNNEL(rule->flags)) {
650                                 struct in_addr ip;
651
652                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
653                                         &ip, NULL) == 0, status,
654                                         "unrecognized input \"%s\", "
655                                         "expect valid ipv4 addr",
656                                         tokens[ti]);
657                                 if (status->status < 0)
658                                         return;
659                                 rule->src.ip.ip4 = rte_bswap32(
660                                         (uint32_t)ip.s_addr);
661                         } else if (IS_IP6_TUNNEL(rule->flags)) {
662                                 struct in6_addr ip;
663
664                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
665                                         NULL) == 0, status,
666                                         "unrecognized input \"%s\", "
667                                         "expect valid ipv6 addr",
668                                         tokens[ti]);
669                                 if (status->status < 0)
670                                         return;
671                                 memcpy(rule->src.ip.ip6.ip6_b,
672                                         ip.s6_addr, 16);
673                         } else if (IS_TRANSPORT(rule->flags)) {
674                                 APP_CHECK(0, status, "unrecognized input "
675                                         "\"%s\"", tokens[ti]);
676                                 return;
677                         }
678
679                         src_p = 1;
680                         continue;
681                 }
682
683                 if (strcmp(tokens[ti], "dst") == 0) {
684                         APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
685                         if (status->status < 0)
686                                 return;
687
688                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
689                         if (status->status < 0)
690                                 return;
691
692                         if (IS_IP4_TUNNEL(rule->flags)) {
693                                 struct in_addr ip;
694
695                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
696                                         &ip, NULL) == 0, status,
697                                         "unrecognized input \"%s\", "
698                                         "expect valid ipv4 addr",
699                                         tokens[ti]);
700                                 if (status->status < 0)
701                                         return;
702                                 rule->dst.ip.ip4 = rte_bswap32(
703                                         (uint32_t)ip.s_addr);
704                         } else if (IS_IP6_TUNNEL(rule->flags)) {
705                                 struct in6_addr ip;
706
707                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
708                                         NULL) == 0, status,
709                                         "unrecognized input \"%s\", "
710                                         "expect valid ipv6 addr",
711                                         tokens[ti]);
712                                 if (status->status < 0)
713                                         return;
714                                 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
715                         } else if (IS_TRANSPORT(rule->flags)) {
716                                 APP_CHECK(0, status, "unrecognized "
717                                         "input \"%s\"", tokens[ti]);
718                                 return;
719                         }
720
721                         dst_p = 1;
722                         continue;
723                 }
724
725                 if (strcmp(tokens[ti], "type") == 0) {
726                         APP_CHECK_PRESENCE(type_p, tokens[ti], status);
727                         if (status->status < 0)
728                                 return;
729
730                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
731                         if (status->status < 0)
732                                 return;
733
734                         if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
735                                 ips->type =
736                                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
737                         else if (strcmp(tokens[ti],
738                                         "inline-protocol-offload") == 0)
739                                 ips->type =
740                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
741                         else if (strcmp(tokens[ti],
742                                         "lookaside-protocol-offload") == 0)
743                                 ips->type =
744                                 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
745                         else if (strcmp(tokens[ti], "no-offload") == 0)
746                                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
747                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
748                                 ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
749                         else {
750                                 APP_CHECK(0, status, "Invalid input \"%s\"",
751                                                 tokens[ti]);
752                                 return;
753                         }
754
755                         type_p = 1;
756                         continue;
757                 }
758
759                 if (strcmp(tokens[ti], "port_id") == 0) {
760                         APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
761                         if (status->status < 0)
762                                 return;
763                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
764                         if (status->status < 0)
765                                 return;
766                         if (rule->portid == UINT16_MAX)
767                                 rule->portid = atoi(tokens[ti]);
768                         else if (rule->portid != atoi(tokens[ti])) {
769                                 APP_CHECK(0, status,
770                                         "portid %s not matching with already assigned portid %u",
771                                         tokens[ti], rule->portid);
772                                 return;
773                         }
774                         portid_p = 1;
775                         continue;
776                 }
777
778                 if (strcmp(tokens[ti], "mss") == 0) {
779                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
780                         if (status->status < 0)
781                                 return;
782                         rule->mss = atoi(tokens[ti]);
783                         if (status->status < 0)
784                                 return;
785                         continue;
786                 }
787
788                 if (strcmp(tokens[ti], "esn") == 0) {
789                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
790                         if (status->status < 0)
791                                 return;
792                         rule->esn = atoll(tokens[ti]);
793                         if (status->status < 0)
794                                 return;
795                         continue;
796                 }
797
798                 if (strcmp(tokens[ti], "fallback") == 0) {
799                         struct rte_ipsec_session *fb;
800
801                         APP_CHECK(app_sa_prm.enable, status, "Fallback session "
802                                 "not allowed for legacy mode.");
803                         if (status->status < 0)
804                                 return;
805                         APP_CHECK(ips->type ==
806                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
807                                 "Fallback session allowed if primary session "
808                                 "is of type inline-crypto-offload only.");
809                         if (status->status < 0)
810                                 return;
811                         APP_CHECK(rule->direction ==
812                                 RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
813                                 "Fallback session not allowed for egress "
814                                 "rule");
815                         if (status->status < 0)
816                                 return;
817                         APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
818                         if (status->status < 0)
819                                 return;
820                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
821                         if (status->status < 0)
822                                 return;
823                         fb = ipsec_get_fallback_session(rule);
824                         if (strcmp(tokens[ti], "lookaside-none") == 0)
825                                 fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
826                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
827                                 fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
828                         else {
829                                 APP_CHECK(0, status, "unrecognized fallback "
830                                         "type %s.", tokens[ti]);
831                                 return;
832                         }
833
834                         rule->fallback_sessions = 1;
835                         nb_crypto_sessions++;
836                         fallback_p = 1;
837                         continue;
838                 }
839                 if (strcmp(tokens[ti], "flow-direction") == 0) {
840                         switch (ips->type) {
841                         case RTE_SECURITY_ACTION_TYPE_NONE:
842                         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
843                                 rule->fdir_flag = 1;
844                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
845                                 if (status->status < 0)
846                                         return;
847                                 if (rule->portid == UINT16_MAX)
848                                         rule->portid = atoi(tokens[ti]);
849                                 else if (rule->portid != atoi(tokens[ti])) {
850                                         APP_CHECK(0, status,
851                                                 "portid %s not matching with already assigned portid %u",
852                                                 tokens[ti], rule->portid);
853                                         return;
854                                 }
855                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
856                                 if (status->status < 0)
857                                         return;
858                                 rule->fdir_qid = atoi(tokens[ti]);
859                                 /* validating portid and queueid */
860                                 status_p = check_flow_params(rule->portid,
861                                                 rule->fdir_qid);
862                                 if (status_p < 0) {
863                                         printf("port id %u / queue id %u is "
864                                                 "not valid\n", rule->portid,
865                                                  rule->fdir_qid);
866                                 }
867                                 break;
868                         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
869                         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
870                         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
871                         default:
872                                 APP_CHECK(0, status,
873                                         "flow director not supported for security session type %d",
874                                         ips->type);
875                                 return;
876                         }
877                         continue;
878                 }
879                 if (strcmp(tokens[ti], "udp-encap") == 0) {
880                         switch (ips->type) {
881                         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
882                         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
883                                 APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
884                                                    status);
885                                 if (status->status < 0)
886                                         return;
887
888                                 rule->udp_encap = 1;
889                                 app_sa_prm.udp_encap = 1;
890                                 udp_encap_p = 1;
891                                 break;
892                         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
893                                 rule->udp_encap = 1;
894                                 rule->udp.sport = 0;
895                                 rule->udp.dport = 4500;
896                                 break;
897                         default:
898                                 APP_CHECK(0, status,
899                                         "UDP encapsulation not supported for "
900                                         "security session type %d",
901                                         ips->type);
902                                 return;
903                         }
904                         continue;
905                 }
906
907                 /* unrecognizable input */
908                 APP_CHECK(0, status, "unrecognized input \"%s\"",
909                         tokens[ti]);
910                 return;
911         }
912
913         if (aead_algo_p) {
914                 APP_CHECK(cipher_algo_p == 0, status,
915                                 "AEAD used, no need for cipher options");
916                 if (status->status < 0)
917                         return;
918
919                 APP_CHECK(auth_algo_p == 0, status,
920                                 "AEAD used, no need for auth options");
921                 if (status->status < 0)
922                         return;
923         } else {
924                 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
925                 if (status->status < 0)
926                         return;
927
928                 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
929                 if (status->status < 0)
930                         return;
931         }
932
933         APP_CHECK(mode_p == 1, status, "missing mode option");
934         if (status->status < 0)
935                 return;
936
937         if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
938                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
939                 printf("Missing portid option, falling back to non-offload\n");
940
941         if (!type_p || (!portid_p && ips->type !=
942                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
943                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
944         }
945
946         if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
947                 wrkr_flags |= INL_CR_F;
948         else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
949                 wrkr_flags |= INL_PR_F;
950         else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
951                 wrkr_flags |= LA_PR_F;
952         else
953                 wrkr_flags |= LA_ANY_F;
954
955         nb_crypto_sessions++;
956         *ri = *ri + 1;
957 }
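/*
 * Example of a token stream accepted by parse_sa_tokens() (illustrative
 * only; tokens[0] is the direction keyword, i.e. the leading "sa" keyword
 * of the config-file line has already been consumed by the caller, and the
 * full file syntax is described in the ipsec-secgw documentation):
 *
 *	out 5 aead_algo aes-128-gcm
 *	aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef
 *	mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 type no-offload
 *
 * The 20-byte aead_key is the 16-byte AES key plus the trailing 4-byte salt
 * that is copied into rule->salt above.
 */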
958
959 static void
960 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
961 {
962         uint32_t i;
963         uint8_t a, b, c, d;
964         const struct rte_ipsec_session *ips;
965         const struct rte_ipsec_session *fallback_ips;
966
967         printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
968
969         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
970                 if (cipher_algos[i].algo == sa->cipher_algo &&
971                                 cipher_algos[i].key_len == sa->cipher_key_len) {
972                         printf("%s ", cipher_algos[i].keyword);
973                         break;
974                 }
975         }
976
977         for (i = 0; i < RTE_DIM(auth_algos); i++) {
978                 if (auth_algos[i].algo == sa->auth_algo) {
979                         printf("%s ", auth_algos[i].keyword);
980                         break;
981                 }
982         }
983
984         for (i = 0; i < RTE_DIM(aead_algos); i++) {
985                 if (aead_algos[i].algo == sa->aead_algo &&
986                                 aead_algos[i].key_len-4 == sa->cipher_key_len) {
987                         printf("%s ", aead_algos[i].keyword);
988                         break;
989                 }
990         }
991
992         printf("mode:");
993         if (sa->udp_encap)
994                 printf("UDP encapsulated ");
995
996         switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
997         case IP4_TUNNEL:
998                 printf("IP4Tunnel ");
999                 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
1000                 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
1001                 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
1002                 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
1003                 break;
1004         case IP6_TUNNEL:
1005                 printf("IP6Tunnel ");
1006                 for (i = 0; i < 16; i++) {
1007                         if (i % 2 && i != 15)
1008                                 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
1009                         else
1010                                 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
1011                 }
1012                 printf(" ");
1013                 for (i = 0; i < 16; i++) {
1014                         if (i % 2 && i != 15)
1015                                 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
1016                         else
1017                                 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
1018                 }
1019                 break;
1020         case TRANSPORT:
1021                 printf("Transport ");
1022                 break;
1023         }
1024
1025         ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
1026         printf(" type:");
1027         switch (ips->type) {
1028         case RTE_SECURITY_ACTION_TYPE_NONE:
1029                 printf("no-offload ");
1030                 break;
1031         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
1032                 printf("inline-crypto-offload ");
1033                 break;
1034         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1035                 printf("inline-protocol-offload ");
1036                 break;
1037         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
1038                 printf("lookaside-protocol-offload ");
1039                 break;
1040         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
1041                 printf("cpu-crypto-accelerated ");
1042                 break;
1043         }
1044
1045         fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
1046         if (fallback_ips != NULL && sa->fallback_sessions > 0) {
1047                 printf("inline fallback: ");
1048                 switch (fallback_ips->type) {
1049                 case RTE_SECURITY_ACTION_TYPE_NONE:
1050                         printf("lookaside-none");
1051                         break;
1052                 case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
1053                         printf("cpu-crypto-accelerated");
1054                         break;
1055                 default:
1056                         printf("invalid");
1057                         break;
1058                 }
1059         }
1060         if (sa->fdir_flag == 1)
1061                 printf("flow-direction port %d queue %d", sa->portid,
1062                                 sa->fdir_qid);
1063
1064         printf("\n");
1065 }
1066
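/*
 * Allocate an SA context for up to nb_sa entries on the given socket: the
 * crypto transforms live in a dedicated memzone, while the context itself
 * (including its ipsec_sa array) is placed in rte_zmalloc()'ed memory.
 */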
1067 static struct sa_ctx *
1068 sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
1069 {
1070         char s[PATH_MAX];
1071         struct sa_ctx *sa_ctx;
1072         uint32_t mz_size;
1073         const struct rte_memzone *mz;
1074
1075         snprintf(s, sizeof(s), "%s_%u", name, socket_id);
1076
1077         /* Create SA context */
1078         printf("Creating SA context with %u maximum entries on socket %d\n",
1079                         nb_sa, socket_id);
1080
1081         mz_size = sizeof(struct ipsec_xf) * nb_sa;
1082         mz = rte_memzone_reserve(s, mz_size, socket_id,
1083                         RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
1084         if (mz == NULL) {
1085                 printf("Failed to allocate SA XFORM memory\n");
1086                 rte_errno = ENOMEM;
1087                 return NULL;
1088         }
1089
1090         sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
1091                 sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
1092
1093         if (sa_ctx == NULL) {
1094                 printf("Failed to allocate SA CTX memory\n");
1095                 rte_errno = ENOMEM;
1096                 rte_memzone_free(mz);
1097                 return NULL;
1098         }
1099
1100         sa_ctx->xf = (struct ipsec_xf *)mz->addr;
1101         sa_ctx->nb_sa = nb_sa;
1102
1103         return sa_ctx;
1104 }
1105
1106 static int
1107 check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
1108 {
1109         struct rte_eth_dev_info dev_info;
1110         int retval;
1111
1112         retval = rte_eth_dev_info_get(portid, &dev_info);
1113         if (retval != 0) {
1114                 RTE_LOG(ERR, IPSEC,
1115                         "Error during getting device (port %u) info: %s\n",
1116                         portid, strerror(-retval));
1117
1118                 return retval;
1119         }
1120
1121         if (inbound) {
1122                 if ((dev_info.rx_offload_capa &
1123                                 RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
1124                         RTE_LOG(WARNING, PORT,
1125                                 "hardware RX IPSec offload is not supported\n");
1126                         return -EINVAL;
1127                 }
1128
1129         } else { /* outbound */
1130                 if ((dev_info.tx_offload_capa &
1131                                 RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
1132                         RTE_LOG(WARNING, PORT,
1133                                 "hardware TX IPSec offload is not supported\n");
1134                         return -EINVAL;
1135                 }
1136                 if (tso && (dev_info.tx_offload_capa &
1137                                 RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
1138                         RTE_LOG(WARNING, PORT,
1139                                 "hardware TCP TSO offload is not supported\n");
1140                         return -EINVAL;
1141                 }
1142         }
1143         return 0;
1144 }
1145
1146 /*
1147  * Helper function that tries to determine the next_proto for an SPI
1148  * by searching through the SP rules.
1149  */
1150 static int
1151 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
1152                 struct ip_addr ip_addr[2], uint32_t mask[2])
1153 {
1154         int32_t rc4, rc6;
1155
1156         rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1157                                 ip_addr, mask);
1158         rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1159                                 ip_addr, mask);
1160
1161         if (rc4 >= 0) {
1162                 if (rc6 >= 0) {
1163                         RTE_LOG(ERR, IPSEC,
1164                                 "%s: SPI %u used simultaneously by "
1165                                 "IPv4(%d) and IPv6 (%d) SP rules\n",
1166                                 __func__, spi, rc4, rc6);
1167                         return -EINVAL;
1168                 } else
1169                         return IPPROTO_IPIP;
1170         } else if (rc6 < 0) {
1171                 RTE_LOG(ERR, IPSEC,
1172                         "%s: SPI %u is not used by any SP rule\n",
1173                         __func__, spi);
1174                 return -EINVAL;
1175         } else
1176                 return IPPROTO_IPV6;
1177 }
1178
1179 /*
1180  * Helper function for getting the source and destination IP addresses
1181  * from the SP. Needed for inline crypto transport mode, as addresses are
1182  * not provided in the config file for that mode. It checks whether an SP
1183  * entry exists for the current SA and, based on the protocol returned,
1184  * stores the appropriate addresses from the SP in the SA.
1185  */
1186 static int
1187 sa_add_address_inline_crypto(struct ipsec_sa *sa)
1188 {
1189         int protocol;
1190         struct ip_addr ip_addr[2];
1191         uint32_t mask[2];
1192
1193         protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
1194         if (protocol < 0)
1195                 return protocol;
1196         else if (protocol == IPPROTO_IPIP) {
1197                 sa->flags |= IP4_TRANSPORT;
1198                 if (mask[0] == IP4_FULL_MASK &&
1199                                 mask[1] == IP4_FULL_MASK &&
1200                                 ip_addr[0].ip.ip4 != 0 &&
1201                                 ip_addr[1].ip.ip4 != 0) {
1202
1203                         sa->src.ip.ip4 = ip_addr[0].ip.ip4;
1204                         sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
1205                 } else {
1206                         RTE_LOG(ERR, IPSEC,
1207                         "%s: No valid address or mask entry in"
1208                         " IPv4 SP rule for SPI %u\n",
1209                         __func__, sa->spi);
1210                         return -EINVAL;
1211                 }
1212         } else if (protocol == IPPROTO_IPV6) {
1213                 sa->flags |= IP6_TRANSPORT;
1214                 if (mask[0] == IP6_FULL_MASK &&
1215                                 mask[1] == IP6_FULL_MASK &&
1216                                 (ip_addr[0].ip.ip6.ip6[0] != 0 ||
1217                                 ip_addr[0].ip.ip6.ip6[1] != 0) &&
1218                                 (ip_addr[1].ip.ip6.ip6[0] != 0 ||
1219                                 ip_addr[1].ip.ip6.ip6[1] != 0)) {
1220
1221                         sa->src.ip.ip6 = ip_addr[0].ip.ip6;
1222                         sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
1223                 } else {
1224                         RTE_LOG(ERR, IPSEC,
1225                         "%s: No valid address or mask entry in"
1226                         " IPv6 SP rule for SPI %u\n",
1227                         __func__, sa->spi);
1228                         return -EINVAL;
1229                 }
1230         }
1231         return 0;
1232 }
1233
1234 static int
1235 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1236                 uint32_t nb_entries, uint32_t inbound,
1237                 struct socket_ctx *skt_ctx,
1238                 struct ipsec_ctx *ips_ctx[])
1239 {
1240         struct ipsec_sa *sa;
1241         uint32_t i, idx;
1242         uint16_t iv_length, aad_length;
1243         int inline_status;
1244         int32_t rc;
1245         struct rte_ipsec_session *ips;
1246
1247         /* for ESN, the upper 32 bits of the SQN also need to be part of the AAD */
1248         aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
1249
1250         for (i = 0; i < nb_entries; i++) {
1251                 idx = i;
1252                 sa = &sa_ctx->sa[idx];
1253                 if (sa->spi != 0) {
1254                         printf("Index %u already in use by SPI %u\n",
1255                                         idx, sa->spi);
1256                         return -EINVAL;
1257                 }
1258                 *sa = entries[i];
1259
1260                 if (inbound) {
1261                         rc = ipsec_sad_add(&sa_ctx->sad, sa);
1262                         if (rc != 0)
1263                                 return rc;
1264                 }
1265
1266                 sa->seq = 0;
1267                 ips = ipsec_get_primary_session(sa);
1268
1269                 if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1270                         ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1271                         if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
1272                                 return -EINVAL;
1273                 }
1274
1275                 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1276                 case IP4_TUNNEL:
1277                         sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
1278                         sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
1279                         break;
1280                 case TRANSPORT:
1281                         if (ips->type ==
1282                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1283                                 inline_status =
1284                                         sa_add_address_inline_crypto(sa);
1285                                 if (inline_status < 0)
1286                                         return inline_status;
1287                         }
1288                         break;
1289                 }
1290
1291
1292                 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM ||
1293                         sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
1294                         sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
1295
1296                         if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
1297                                 iv_length = 11;
1298                         else
1299                                 iv_length = 12;
1300
1301                         sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1302                         sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
1303                         sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
1304                         sa_ctx->xf[idx].a.aead.key.length =
1305                                 sa->cipher_key_len;
1306                         sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
1307                                 RTE_CRYPTO_AEAD_OP_DECRYPT :
1308                                 RTE_CRYPTO_AEAD_OP_ENCRYPT;
1309                         sa_ctx->xf[idx].a.next = NULL;
1310                         sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
1311                         sa_ctx->xf[idx].a.aead.iv.length = iv_length;
1312                         sa_ctx->xf[idx].a.aead.aad_length =
1313                                 sa->aad_len + aad_length;
1314                         sa_ctx->xf[idx].a.aead.digest_length =
1315                                 sa->digest_len;
1316
1317                         sa->xforms = &sa_ctx->xf[idx].a;
1318                 } else {
1319                         switch (sa->cipher_algo) {
1320                         case RTE_CRYPTO_CIPHER_NULL:
1321                         case RTE_CRYPTO_CIPHER_DES_CBC:
1322                         case RTE_CRYPTO_CIPHER_3DES_CBC:
1323                         case RTE_CRYPTO_CIPHER_AES_CBC:
1324                         case RTE_CRYPTO_CIPHER_AES_CTR:
1325                                 iv_length = sa->iv_len;
1326                                 break;
1327                         default:
1328                                 RTE_LOG(ERR, IPSEC_ESP,
1329                                                 "unsupported cipher algorithm %u\n",
1330                                                 sa->cipher_algo);
1331                                 return -EINVAL;
1332                         }
1333
1334                         /* AES_GMAC uses salt like AEAD algorithms */
1335                         if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
1336                                 iv_length = 12;
1337
1338                         if (inbound) {
1339                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1340                                 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
1341                                 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
1342                                 sa_ctx->xf[idx].b.cipher.key.length =
1343                                         sa->cipher_key_len;
1344                                 sa_ctx->xf[idx].b.cipher.op =
1345                                         RTE_CRYPTO_CIPHER_OP_DECRYPT;
1346                                 sa_ctx->xf[idx].b.next = NULL;
1347                                 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
1348                                 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
1349
1350                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1351                                 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
1352                                 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
1353                                 sa_ctx->xf[idx].a.auth.key.length =
1354                                         sa->auth_key_len;
1355                                 sa_ctx->xf[idx].a.auth.digest_length =
1356                                         sa->digest_len;
1357                                 sa_ctx->xf[idx].a.auth.op =
1358                                         RTE_CRYPTO_AUTH_OP_VERIFY;
1359                                 sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
1360                                 sa_ctx->xf[idx].a.auth.iv.length = iv_length;
1361
1362                         } else { /* outbound */
1363                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1364                                 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
1365                                 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
1366                                 sa_ctx->xf[idx].a.cipher.key.length =
1367                                         sa->cipher_key_len;
1368                                 sa_ctx->xf[idx].a.cipher.op =
1369                                         RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1370                                 sa_ctx->xf[idx].a.next = NULL;
1371                                 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
1372                                 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
1373
1374                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1375                                 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
1376                                 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
1377                                 sa_ctx->xf[idx].b.auth.key.length =
1378                                         sa->auth_key_len;
1379                                 sa_ctx->xf[idx].b.auth.digest_length =
1380                                         sa->digest_len;
1381                                 sa_ctx->xf[idx].b.auth.op =
1382                                         RTE_CRYPTO_AUTH_OP_GENERATE;
1383                                 sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
1384                                 sa_ctx->xf[idx].b.auth.iv.length = iv_length;
1385
1386                         }
1387
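			/*
			 * AES-GMAC is authentication-only, so use just the single
			 * auth transform instead of a cipher/auth chain.
			 */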
1388                         if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
1389                                 sa->xforms = inbound ?
1390                                         &sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
1391                                 sa->xforms->next = NULL;
1392
1393                         } else {
1394                                 sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1395                                 sa_ctx->xf[idx].b.next = NULL;
1396                                 sa->xforms = &sa_ctx->xf[idx].a;
1397                         }
1398                 }
1399
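		/*
		 * Inline action types need an rte_security session bound to the
		 * port; all other action types use a lookaside session.
		 */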
1400                 if (ips->type ==
1401                         RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1402                         ips->type ==
1403                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1404                         rc = create_inline_session(skt_ctx, sa, ips);
1405                         if (rc != 0) {
1406                                 RTE_LOG(ERR, IPSEC_ESP,
1407                                         "create_inline_session() failed\n");
1408                                 return -EINVAL;
1409                         }
1410                 } else {
1411                         rc = create_lookaside_session(ips_ctx, skt_ctx, sa, ips);
1412                         if (rc != 0) {
1413                                 RTE_LOG(ERR, IPSEC_ESP,
1414                                         "create_lookaside_session() failed\n");
1415                                 return -EINVAL;
1416                         }
1417                 }
1418
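		/* Optionally install a flow (fdir) rule for this inbound SA. */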
1419                 if (sa->fdir_flag && inbound) {
1420                         rc = create_ipsec_esp_flow(sa);
1421                         if (rc != 0)
1422                                 RTE_LOG(ERR, IPSEC_ESP,
1423                                         "create_ipsec_esp_flow() failed\n");
1424                 }
1425                 print_one_sa_rule(sa, inbound);
1426         }
1427
1428         return 0;
1429 }
1430
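/*
 * Thin wrappers around sa_add_rules(): the inbound flag is 0 for outbound
 * rules and 1 for inbound rules.
 */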
1431 static inline int
1432 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1433                 uint32_t nb_entries, struct socket_ctx *skt_ctx,
1434                 struct ipsec_ctx *ips_ctx[])
1435 {
1436         return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx);
1437 }
1438
1439 static inline int
1440 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1441                 uint32_t nb_entries, struct socket_ctx *skt_ctx,
1442                 struct ipsec_ctx *ips_ctx[])
1443 {
1444         return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx);
1445 }
1446
1447 /*
1448  * Helper function: fills the parameters that are identical for all SAs.
1449  */
1450 static void
1451 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1452         const struct app_sa_prm *app_prm)
1453 {
1454         memset(prm, 0, sizeof(*prm));
1455
1456         prm->flags = app_prm->flags;
1457         prm->ipsec_xform.options.esn = app_prm->enable_esn;
1458         prm->ipsec_xform.replay_win_sz = app_prm->window_size;
1459 }
1460
1461 static int
1462 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1463         const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1464 {
1465         int32_t rc;
1466
1467         /*
1468          * Try to get the next proto for this SPI by searching for the SPI
1469          * in the SPD. Probably not the optimal way, but there seems to be
1470          * nothing better right now.
1471          */
1472         rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1473         if (rc < 0)
1474                 return rc;
1475
1476         fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1477         prm->userdata = (uintptr_t)ss;
1478
1479         /* setup ipsec xform */
1480         prm->ipsec_xform.spi = ss->spi;
1481         prm->ipsec_xform.salt = ss->salt;
1482         prm->ipsec_xform.direction = ss->direction;
1483         prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1484         prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1485                 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1486                 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
1487         prm->ipsec_xform.options.udp_encap = ss->udp_encap;
1488         prm->ipsec_xform.options.ecn = 1;
1489         prm->ipsec_xform.options.copy_dscp = 1;
1490
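	/*
	 * Tunnel mode needs an outer header template; transport mode only
	 * needs the next protocol value.
	 */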
1491         if (IS_IP4_TUNNEL(ss->flags)) {
1492                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1493                 prm->tun.hdr_len = sizeof(*v4);
1494                 prm->tun.next_proto = rc;
1495                 prm->tun.hdr = v4;
1496         } else if (IS_IP6_TUNNEL(ss->flags)) {
1497                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1498                 prm->tun.hdr_len = sizeof(*v6);
1499                 prm->tun.next_proto = rc;
1500                 prm->tun.hdr = v6;
1501         } else {
1502                 /* transport mode */
1503                 prm->trs.proto = rc;
1504         }
1505
1506         /* setup crypto section */
1507         prm->crypto_xform = ss->xforms;
1508         return 0;
1509 }
1510
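/*
 * Attach the rte_ipsec_sa to the session and prepare it for use;
 * on failure the session is cleared.
 */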
1511 static int
1512 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
1513 {
1514         int32_t rc = 0;
1515
1516         ss->sa = sa;
1517
1518         rc = rte_ipsec_session_prepare(ss);
1519         if (rc != 0)
1520                 memset(ss, 0, sizeof(*ss));
1521
1522         return rc;
1523 }
1524
1525 /*
1526  * Initialise related rte_ipsec_sa object.
1527  */
1528 static int
1529 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
1530 {
1531         int rc;
1532         struct rte_ipsec_sa_prm prm;
1533         struct rte_ipsec_session *ips;
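	/* Outer header templates used for tunnel mode encapsulation. */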
1534         struct rte_ipv4_hdr v4  = {
1535                 .version_ihl = IPVERSION << 4 |
1536                         sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1537                 .time_to_live = IPDEFTTL,
1538                 .next_proto_id = IPPROTO_ESP,
1539                 .src_addr = lsa->src.ip.ip4,
1540                 .dst_addr = lsa->dst.ip.ip4,
1541         };
1542         struct rte_ipv6_hdr v6 = {
1543                 .vtc_flow = htonl(IP6_VERSION << 28),
1544                 .proto = IPPROTO_ESP,
1545         };
1546
1547         if (IS_IP6_TUNNEL(lsa->flags)) {
1548                 memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1549                 memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1550         }
1551
1552         rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1553         if (rc == 0)
1554                 rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1555         if (rc < 0)
1556                 return rc;
1557
1558         if (lsa->flags & SA_TELEMETRY_ENABLE)
1559                 rte_ipsec_telemetry_sa_add(sa);
1560
1561         /* init primary processing session */
1562         ips = ipsec_get_primary_session(lsa);
1563         rc = fill_ipsec_session(ips, sa);
1564         if (rc != 0)
1565                 return rc;
1566
1567         /* init inline fallback processing session */
1568         if (lsa->fallback_sessions == 1)
1569                 rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
1570
1571         return rc;
1572 }
1573
1574 /*
1575  * Allocate space and init rte_ipsec_sa structures,
1576  * one per session.
1577  */
1578 static int
1579 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
1580 {
1581         int32_t rc, sz;
1582         uint32_t i, idx;
1583         size_t tsz;
1584         struct rte_ipsec_sa *sa;
1585         struct ipsec_sa *lsa;
1586         struct rte_ipsec_sa_prm prm;
1587
1588         /* determine SA size */
1589         idx = 0;
1590         fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1591         sz = rte_ipsec_sa_size(&prm);
1592         if (sz < 0) {
1593                 RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1594                         "failed to determine SA size, error code: %d\n",
1595                         __func__, ctx, nb_ent, socket, sz);
1596                 return sz;
1597         }
1598
1599         tsz = sz * nb_ent;
1600
1601         ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1602         if (ctx->satbl == NULL) {
1603                 RTE_LOG(ERR, IPSEC,
1604                         "%s(%p, %u, %d): failed to allocate %zu bytes\n",
1605                         __func__,  ctx, nb_ent, socket, tsz);
1606                 return -ENOMEM;
1607         }
1608
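	/*
	 * Entries are laid out back to back in the flat table, each sz bytes
	 * apart; initialise one rte_ipsec_sa per configured SA.
	 */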
1609         rc = 0;
1610         for (i = 0; i != nb_ent && rc == 0; i++) {
1611
1612                 idx = i;
1613
1614                 sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1615                 lsa = ctx->sa + idx;
1616
1617                 rc = ipsec_sa_init(lsa, sa, sz);
1618         }
1619
1620         return rc;
1621 }
1622
1623 static int
1624 sa_cmp(const void *p, const void *q)
1625 {
1626         uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
1627         uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
1628
1629         return (spi1 < spi2) ? -1 : (spi1 > spi2);
1630 }
1631
1632 /*
1633  * Search the sorted SA rules for an SA with the given SPI
1634  */
1635 int
1636 sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
1637 {
1638         uint32_t num;
1639         struct ipsec_sa *sa;
1640         struct ipsec_sa tmpl;
1641         const struct ipsec_sa *sar;
1642
1643         sar = sa_ctx->sa;
1644         if (inbound != 0)
1645                 num = nb_sa_in;
1646         else
1647                 num = nb_sa_out;
1648
1649         tmpl.spi = spi;
1650
1651         sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
1652         if (sa != NULL)
1653                 return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
1654
1655         return -ENOENT;
1656 }
1657
1658 void
1659 sa_init(struct socket_ctx *ctx, int32_t socket_id,
1660                 struct lcore_conf *lcore_conf)
1661 {
1662         int32_t rc;
1663         const char *name;
1664         uint32_t lcore_id;
1665         struct ipsec_ctx *ipsec_ctx[RTE_MAX_LCORE];
1666
1667         if (ctx == NULL)
1668                 rte_exit(EXIT_FAILURE, "NULL context.\n");
1669
1670         if (ctx->sa_in != NULL)
1671                 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
1672                                 "initialized\n", socket_id);
1673
1674         if (ctx->sa_out != NULL)
1675                 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
1676                                 "initialized\n", socket_id);
1677
1678         if (nb_sa_in > 0) {
1679                 name = "sa_in";
1680                 ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
1681                 if (ctx->sa_in == NULL)
1682                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1683                                 "context %s in socket %d\n", rte_errno,
1684                                 name, socket_id);
1685
1686                 rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
1687                                 &sa_in_cnt);
1688                 if (rc != 0)
1689                         rte_exit(EXIT_FAILURE, "failed to init SAD\n");
1690                 RTE_LCORE_FOREACH(lcore_id)
1691                         ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
1692                 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx);
1693
1694                 if (app_sa_prm.enable != 0) {
1695                         rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
1696                                 socket_id);
1697                         if (rc != 0)
1698                                 rte_exit(EXIT_FAILURE,
1699                                         "failed to init inbound SAs\n");
1700                 }
1701         } else
1702                 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1703
1704         if (nb_sa_out > 0) {
1705                 name = "sa_out";
1706                 ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
1707                 if (ctx->sa_out == NULL)
1708                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1709                                 "context %s in socket %d\n", rte_errno,
1710                                 name, socket_id);
1711
1712                 RTE_LCORE_FOREACH(lcore_id)
1713                         ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound;
1714                 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx);
1715
1716                 if (app_sa_prm.enable != 0) {
1717                         rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
1718                                 socket_id);
1719                         if (rc != 0)
1720                                 rte_exit(EXIT_FAILURE,
1721                                         "failed to init outbound SAs\n");
1722                 }
1723         } else
1724                 RTE_LOG(WARNING, IPSEC,
1725                         "No SA Outbound rule specified\n");
1726 }
1727
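/*
 * Verify that the SA saved in the packet's private data matches the SA at
 * the given index (compared by SPI).
 */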
1728 int
1729 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1730 {
1731         struct ipsec_mbuf_metadata *priv;
1732         struct ipsec_sa *sa;
1733
1734         priv = get_priv(m);
1735         sa = priv->sa;
1736         if (sa != NULL)
1737                 return (sa_ctx->sa[sa_idx].spi == sa->spi);
1738
1739         RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1740         return 0;
1741 }
1742
1743 void
1744 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1745                 void *sa_arr[], uint16_t nb_pkts)
1746 {
1747         uint32_t i;
1748         void *result_sa;
1749         struct ipsec_sa *sa;
1750
1751         sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
1752
1753         /*
1754          * Mark the need for inline offload fallback in the LSB of the SA pointer.
1755          * Thanks to the packet grouping mechanism used by ipsec_process(),
1756          * packets marked for fallback processing will form a separate group.
1757          *
1758          * Because the tagged pointer is no longer safe to dereference, it is
1759          * cast to a generic pointer to prevent unintentional use. Use
1760          * ipsec_mask_saptr() to recover a valid struct pointer.
1761          */
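	/*
	 * A minimal sketch of the inverse, assuming ipsec_mask_saptr() simply
	 * clears this flag bit (see the helper in ipsec.h):
	 *   sa = (struct ipsec_sa *)((uintptr_t)tagged &
	 *                            ~IPSEC_SA_OFFLOAD_FALLBACK_FLAG);
	 */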
1762         for (i = 0; i < nb_pkts; i++) {
1763                 if (sa_arr[i] == NULL)
1764                         continue;
1765
1766                 result_sa = sa = sa_arr[i];
1767                 if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
1768                         sa->fallback_sessions > 0) {
1769                         uintptr_t intsa = (uintptr_t)sa;
1770                         intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
1771                         result_sa = (void *)intsa;
1772                 }
1773                 sa_arr[i] = result_sa;
1774         }
1775 }
1776
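/*
 * Outbound lookup is a direct index into the SA array; sa_idx[] already
 * holds the SA index for each packet.
 */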
1777 void
1778 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1779                 void *sa[], uint16_t nb_pkts)
1780 {
1781         uint32_t i;
1782
1783         for (i = 0; i < nb_pkts; i++)
1784                 sa[i] = &sa_ctx->sa[sa_idx[i]];
1785 }
1786
1787 /*
1788  * Select HW offloads to be used.
1789  */
1790 int
1791 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1792                 uint64_t *tx_offloads)
1793 {
1794         struct ipsec_sa *rule;
1795         uint32_t idx_sa;
1796         enum rte_security_session_action_type rule_type;
1797         struct rte_eth_dev_info dev_info;
1798         int ret;
1799
1800         *rx_offloads = 0;
1801         *tx_offloads = 0;
1802
1803         ret = rte_eth_dev_info_get(port_id, &dev_info);
1804         if (ret != 0)
1805                 rte_exit(EXIT_FAILURE,
1806                         "Error getting device (port %u) info: %s\n",
1807                         port_id, strerror(-ret));
1808
1809         /* Check for inbound rules that use offloads and use this port */
1810         for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1811                 rule = &sa_in[idx_sa];
1812                 rule_type = ipsec_get_action_type(rule);
1813                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1814                                 rule_type ==
1815                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1816                                 && rule->portid == port_id)
1817                         *rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
1818         }
1819
1820         /* Check for outbound rules that use offloads and use this port */
1821         for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1822                 rule = &sa_out[idx_sa];
1823                 rule_type = ipsec_get_action_type(rule);
1824                 switch (rule_type) {
1825                 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
1826                         /* Checksum offload is not needed for inline protocol, as
1827                          * all processing for outbound IPsec packets is handled
1828                          * implicitly, and non-IPsec packets do not need IPv4
1829                          * checksum offload.
1830                          */
1831                         if (rule->portid == port_id) {
1832                                 *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
1833                                 if (rule->mss)
1834                                         *tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
1835                                                          RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
1836                         }
1837                         break;
1838                 case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
1839                         if (rule->portid == port_id) {
1840                                 *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
1841                                 if (rule->mss)
1842                                         *tx_offloads |=
1843                                                 RTE_ETH_TX_OFFLOAD_TCP_TSO;
1844                                 *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
1845                         }
1846                         break;
1847                 default:
1848                         /* Enable IPv4 checksum offload even when only lookaside
1849                          * SAs are present.
1850                          */
1851                         if (dev_info.tx_offload_capa &
1852                             RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
1853                                 *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
1854                         break;
1855                 }
1856         }
1857         return 0;
1858 }
1859
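/* Sort both SA arrays by SPI so that sa_spi_present() can use binary search. */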
1860 void
1861 sa_sort_arr(void)
1862 {
1863         qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
1864         qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
1865 }
1866
1867 uint32_t
1868 get_nb_crypto_sessions(void)
1869 {
1870         return nb_crypto_sessions;
1871 }