1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 /*
6  * Security Associations
7  */
8 #include <sys/types.h>
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
12
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
19 #include <rte_ip.h>
20 #include <rte_random.h>
21 #include <rte_ethdev.h>
22 #include <rte_malloc.h>
23
24 #include "ipsec.h"
25 #include "esp.h"
26 #include "parser.h"
27 #include "sad.h"
28
29 #define IPDEFTTL 64
30
31 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
32
33 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
34
35 #define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
36
37 struct supported_cipher_algo {
38         const char *keyword;
39         enum rte_crypto_cipher_algorithm algo;
40         uint16_t iv_len;
41         uint16_t block_size;
42         uint16_t key_len;
43 };
44
45 struct supported_auth_algo {
46         const char *keyword;
47         enum rte_crypto_auth_algorithm algo;
48         uint16_t digest_len;
49         uint16_t key_len;
50         uint8_t key_not_req;
51 };
52
53 struct supported_aead_algo {
54         const char *keyword;
55         enum rte_crypto_aead_algorithm algo;
56         uint16_t iv_len;
57         uint16_t block_size;
58         uint16_t digest_len;
59         uint16_t key_len;
60         uint8_t aad_len;
61 };
62
63
64 const struct supported_cipher_algo cipher_algos[] = {
65         {
66                 .keyword = "null",
67                 .algo = RTE_CRYPTO_CIPHER_NULL,
68                 .iv_len = 0,
69                 .block_size = 4,
70                 .key_len = 0
71         },
72         {
73                 .keyword = "aes-128-cbc",
74                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
75                 .iv_len = 16,
76                 .block_size = 16,
77                 .key_len = 16
78         },
79         {
80                 .keyword = "aes-192-cbc",
81                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
82                 .iv_len = 16,
83                 .block_size = 16,
84                 .key_len = 24
85         },
86         {
87                 .keyword = "aes-256-cbc",
88                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
89                 .iv_len = 16,
90                 .block_size = 16,
91                 .key_len = 32
92         },
93         {
94                 .keyword = "aes-128-ctr",
95                 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
96                 .iv_len = 8,
97                 .block_size = 4,
98                 .key_len = 20
99         },
100         {
101                 .keyword = "3des-cbc",
102                 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
103                 .iv_len = 8,
104                 .block_size = 8,
105                 .key_len = 24
106         }
107 };
108
109 const struct supported_auth_algo auth_algos[] = {
110         {
111                 .keyword = "null",
112                 .algo = RTE_CRYPTO_AUTH_NULL,
113                 .digest_len = 0,
114                 .key_len = 0,
115                 .key_not_req = 1
116         },
117         {
118                 .keyword = "sha1-hmac",
119                 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
120                 .digest_len = 12,
121                 .key_len = 20
122         },
123         {
124                 .keyword = "sha256-hmac",
125                 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
126                 .digest_len = 16,
127                 .key_len = 32
128         }
129 };
130
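/*
 * Note: key_len for the AEAD entries below includes a trailing 4-byte salt;
 * parse_sa_tokens() strips those bytes from the session key and stores them
 * in rule->salt.
 */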
131 const struct supported_aead_algo aead_algos[] = {
132         {
133                 .keyword = "aes-128-gcm",
134                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
135                 .iv_len = 8,
136                 .block_size = 4,
137                 .key_len = 20,
138                 .digest_len = 16,
139                 .aad_len = 8,
140         },
141         {
142                 .keyword = "aes-192-gcm",
143                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
144                 .iv_len = 8,
145                 .block_size = 4,
146                 .key_len = 28,
147                 .digest_len = 16,
148                 .aad_len = 8,
149         },
150         {
151                 .keyword = "aes-256-gcm",
152                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
153                 .iv_len = 8,
154                 .block_size = 4,
155                 .key_len = 36,
156                 .digest_len = 16,
157                 .aad_len = 8,
158         }
159 };
160
161 #define SA_INIT_NB      128
162
163 static uint32_t nb_crypto_sessions;
164 struct ipsec_sa *sa_out;
165 uint32_t nb_sa_out;
166 static uint32_t sa_out_sz;
167 static struct ipsec_sa_cnt sa_out_cnt;
168
169 struct ipsec_sa *sa_in;
170 uint32_t nb_sa_in;
171 static uint32_t sa_in_sz;
172 static struct ipsec_sa_cnt sa_in_cnt;
173
174 static const struct supported_cipher_algo *
175 find_match_cipher_algo(const char *cipher_keyword)
176 {
177         size_t i;
178
179         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
180                 const struct supported_cipher_algo *algo =
181                         &cipher_algos[i];
182
183                 if (strcmp(cipher_keyword, algo->keyword) == 0)
184                         return algo;
185         }
186
187         return NULL;
188 }
189
190 static const struct supported_auth_algo *
191 find_match_auth_algo(const char *auth_keyword)
192 {
193         size_t i;
194
195         for (i = 0; i < RTE_DIM(auth_algos); i++) {
196                 const struct supported_auth_algo *algo =
197                         &auth_algos[i];
198
199                 if (strcmp(auth_keyword, algo->keyword) == 0)
200                         return algo;
201         }
202
203         return NULL;
204 }
205
206 static const struct supported_aead_algo *
207 find_match_aead_algo(const char *aead_keyword)
208 {
209         size_t i;
210
211         for (i = 0; i < RTE_DIM(aead_algos); i++) {
212                 const struct supported_aead_algo *algo =
213                         &aead_algos[i];
214
215                 if (strcmp(aead_keyword, algo->keyword) == 0)
216                         return algo;
217         }
218
219         return NULL;
220 }
221
222 /** parse_key_string
223  *  parse a colon-separated hex key string (x:x:x:...) into uint8_t *key
224  *  return:
225  *  > 0: number of bytes parsed
226  *  0:   failed
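 *  e.g. "de:ad:be:ef" fills key with {0xde, 0xad, 0xbe, 0xef} and returns 4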
227  */
228 static uint32_t
229 parse_key_string(const char *key_str, uint8_t *key)
230 {
231         const char *pt_start = key_str, *pt_end = key_str;
232         uint32_t nb_bytes = 0;
233
234         while (pt_end != NULL) {
235                 char sub_str[3] = {0};
236
237                 pt_end = strchr(pt_start, ':');
238
239                 if (pt_end == NULL) {
240                         if (strlen(pt_start) > 2)
241                                 return 0;
242                         strncpy(sub_str, pt_start, 2);
243                 } else {
244                         if (pt_end - pt_start > 2)
245                                 return 0;
246
247                         strncpy(sub_str, pt_start, pt_end - pt_start);
248                         pt_start = pt_end + 1;
249                 }
250
251                 key[nb_bytes++] = strtol(sub_str, NULL, 16);
252         }
253
254         return nb_bytes;
255 }
256
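/*
 * Make sure the SA table can hold one more entry: allocate SA_INIT_NB slots
 * on first use, then double the capacity whenever the table fills up.
 */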
257 static int
258 extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
259 {
260         if (*sa_tbl == NULL) {
261                 *sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
262                 if (*sa_tbl == NULL)
263                         return -1;
264                 *cur_sz = SA_INIT_NB;
265                 return 0;
266         }
267
268         if (cur_cnt >= *cur_sz) {
269                 struct ipsec_sa *tmp = realloc(*sa_tbl,
270                         *cur_sz * sizeof(struct ipsec_sa) * 2);
271                 if (tmp == NULL)
272                         return -1;
273                 *sa_tbl = tmp;
274                 /* clean reallocated extra space */
275                 memset(&(*sa_tbl)[*cur_sz], 0, *cur_sz * sizeof(struct ipsec_sa));
276                 *cur_sz *= 2;
277         }
278
279         return 0;
280 }
281
282 void
283 parse_sa_tokens(char **tokens, uint32_t n_tokens,
284         struct parse_status *status)
285 {
286         struct ipsec_sa *rule = NULL;
287         struct rte_ipsec_session *ips;
288         uint32_t ti;  /* token index */
289         uint32_t *ri; /* rule index */
290         struct ipsec_sa_cnt *sa_cnt;
291         uint32_t cipher_algo_p = 0;
292         uint32_t auth_algo_p = 0;
293         uint32_t aead_algo_p = 0;
294         uint32_t src_p = 0;
295         uint32_t dst_p = 0;
296         uint32_t mode_p = 0;
297         uint32_t type_p = 0;
298         uint32_t portid_p = 0;
299         uint32_t fallback_p = 0;
300         int16_t status_p = 0;
301         uint16_t udp_encap_p = 0;
302
303         if (strcmp(tokens[0], "in") == 0) {
304                 ri = &nb_sa_in;
305                 sa_cnt = &sa_in_cnt;
306                 if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
307                         return;
308                 rule = &sa_in[*ri];
309                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
310         } else {
311                 ri = &nb_sa_out;
312                 sa_cnt = &sa_out_cnt;
313                 if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
314                         return;
315                 rule = &sa_out[*ri];
316                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
317         }
318
319         /* spi number */
320         APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
321         if (status->status < 0)
322                 return;
323         if (atoi(tokens[1]) == INVALID_SPI)
324                 return;
325         rule->spi = atoi(tokens[1]);
326         rule->portid = UINT16_MAX;
327         ips = ipsec_get_primary_session(rule);
328
329         for (ti = 2; ti < n_tokens; ti++) {
330                 if (strcmp(tokens[ti], "mode") == 0) {
331                         APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
332                         if (status->status < 0)
333                                 return;
334
335                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
336                         if (status->status < 0)
337                                 return;
338
339                         if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
340                                 sa_cnt->nb_v4++;
341                                 rule->flags = IP4_TUNNEL;
342                         } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
343                                 sa_cnt->nb_v6++;
344                                 rule->flags = IP6_TUNNEL;
345                         } else if (strcmp(tokens[ti], "transport") == 0) {
346                                 sa_cnt->nb_v4++;
347                                 sa_cnt->nb_v6++;
348                                 rule->flags = TRANSPORT;
349                         } else {
350                                 APP_CHECK(0, status, "unrecognized "
351                                         "input \"%s\"", tokens[ti]);
352                                 return;
353                         }
354
355                         mode_p = 1;
356                         continue;
357                 }
358
359                 if (strcmp(tokens[ti], "cipher_algo") == 0) {
360                         const struct supported_cipher_algo *algo;
361                         uint32_t key_len;
362
363                         APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
364                                 status);
365                         if (status->status < 0)
366                                 return;
367
368                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
369                         if (status->status < 0)
370                                 return;
371
372                         algo = find_match_cipher_algo(tokens[ti]);
373
374                         APP_CHECK(algo != NULL, status, "unrecognized "
375                                 "input \"%s\"", tokens[ti]);
376
377                         if (status->status < 0)
378                                 return;
379
380                         rule->cipher_algo = algo->algo;
381                         rule->block_size = algo->block_size;
382                         rule->iv_len = algo->iv_len;
383                         rule->cipher_key_len = algo->key_len;
384
385                         /* for NULL algorithm, no cipher key required */
386                         if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
387                                 cipher_algo_p = 1;
388                                 continue;
389                         }
390
391                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
392                         if (status->status < 0)
393                                 return;
394
395                         APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
396                                 status, "unrecognized input \"%s\", "
397                                 "expect \"cipher_key\"", tokens[ti]);
398                         if (status->status < 0)
399                                 return;
400
401                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
402                         if (status->status < 0)
403                                 return;
404
405                         key_len = parse_key_string(tokens[ti],
406                                 rule->cipher_key);
407                         APP_CHECK(key_len == rule->cipher_key_len, status,
408                                 "unrecognized input \"%s\"", tokens[ti]);
409                         if (status->status < 0)
410                                 return;
411
412                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
413                                 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
414                                 rule->salt = (uint32_t)rte_rand();
415
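                        /*
                         * For AES-CTR the configured key carries a trailing
                         * 4-byte nonce/salt (RFC 3686 style): strip it from
                         * the session key and keep it in rule->salt.
                         */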
416                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
417                                 key_len -= 4;
418                                 rule->cipher_key_len = key_len;
419                                 memcpy(&rule->salt,
420                                         &rule->cipher_key[key_len], 4);
421                         }
422
423                         cipher_algo_p = 1;
424                         continue;
425                 }
426
427                 if (strcmp(tokens[ti], "auth_algo") == 0) {
428                         const struct supported_auth_algo *algo;
429                         uint32_t key_len;
430
431                         APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
432                                 status);
433                         if (status->status < 0)
434                                 return;
435
436                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
437                         if (status->status < 0)
438                                 return;
439
440                         algo = find_match_auth_algo(tokens[ti]);
441                         APP_CHECK(algo != NULL, status, "unrecognized "
442                                 "input \"%s\"", tokens[ti]);
443
444                         if (status->status < 0)
445                                 return;
446
447                         rule->auth_algo = algo->algo;
448                         rule->auth_key_len = algo->key_len;
449                         rule->digest_len = algo->digest_len;
450
451                         /* NULL algorithm and combined algos do not
452                          * require auth key
453                          */
454                         if (algo->key_not_req) {
455                                 auth_algo_p = 1;
456                                 continue;
457                         }
458
459                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
460                         if (status->status < 0)
461                                 return;
462
463                         APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
464                                 status, "unrecognized input \"%s\", "
465                                 "expect \"auth_key\"", tokens[ti]);
466                         if (status->status < 0)
467                                 return;
468
469                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
470                         if (status->status < 0)
471                                 return;
472
473                         key_len = parse_key_string(tokens[ti],
474                                 rule->auth_key);
475                         APP_CHECK(key_len == rule->auth_key_len, status,
476                                 "unrecognized input \"%s\"", tokens[ti]);
477                         if (status->status < 0)
478                                 return;
479
480                         auth_algo_p = 1;
481                         continue;
482                 }
483
484                 if (strcmp(tokens[ti], "aead_algo") == 0) {
485                         const struct supported_aead_algo *algo;
486                         uint32_t key_len;
487
488                         APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
489                                 status);
490                         if (status->status < 0)
491                                 return;
492
493                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
494                         if (status->status < 0)
495                                 return;
496
497                         algo = find_match_aead_algo(tokens[ti]);
498
499                         APP_CHECK(algo != NULL, status, "unrecognized "
500                                 "input \"%s\"", tokens[ti]);
501
502                         if (status->status < 0)
503                                 return;
504
505                         rule->aead_algo = algo->algo;
506                         rule->cipher_key_len = algo->key_len;
507                         rule->digest_len = algo->digest_len;
508                         rule->aad_len = algo->aad_len;
509                         rule->block_size = algo->block_size;
510                         rule->iv_len = algo->iv_len;
511
512                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
513                         if (status->status < 0)
514                                 return;
515
516                         APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
517                                 status, "unrecognized input \"%s\", "
518                                 "expect \"aead_key\"", tokens[ti]);
519                         if (status->status < 0)
520                                 return;
521
522                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
523                         if (status->status < 0)
524                                 return;
525
526                         key_len = parse_key_string(tokens[ti],
527                                 rule->cipher_key);
528                         APP_CHECK(key_len == rule->cipher_key_len, status,
529                                 "unrecognized input \"%s\"", tokens[ti]);
530                         if (status->status < 0)
531                                 return;
532
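                        /* the last 4 bytes of the configured AEAD key are the
                         * salt (RFC 4106 style for AES-GCM), not session key
                         * material */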
533                         key_len -= 4;
534                         rule->cipher_key_len = key_len;
535                         memcpy(&rule->salt,
536                                 &rule->cipher_key[key_len], 4);
537
538                         aead_algo_p = 1;
539                         continue;
540                 }
541
542                 if (strcmp(tokens[ti], "src") == 0) {
543                         APP_CHECK_PRESENCE(src_p, tokens[ti], status);
544                         if (status->status < 0)
545                                 return;
546
547                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
548                         if (status->status < 0)
549                                 return;
550
551                         if (IS_IP4_TUNNEL(rule->flags)) {
552                                 struct in_addr ip;
553
554                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
555                                         &ip, NULL) == 0, status,
556                                         "unrecognized input \"%s\", "
557                                         "expect valid ipv4 addr",
558                                         tokens[ti]);
559                                 if (status->status < 0)
560                                         return;
561                                 rule->src.ip.ip4 = rte_bswap32(
562                                         (uint32_t)ip.s_addr);
563                         } else if (IS_IP6_TUNNEL(rule->flags)) {
564                                 struct in6_addr ip;
565
566                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
567                                         NULL) == 0, status,
568                                         "unrecognized input \"%s\", "
569                                         "expect valid ipv6 addr",
570                                         tokens[ti]);
571                                 if (status->status < 0)
572                                         return;
573                                 memcpy(rule->src.ip.ip6.ip6_b,
574                                         ip.s6_addr, 16);
575                         } else if (IS_TRANSPORT(rule->flags)) {
576                                 APP_CHECK(0, status, "unrecognized input "
577                                         "\"%s\"", tokens[ti]);
578                                 return;
579                         }
580
581                         src_p = 1;
582                         continue;
583                 }
584
585                 if (strcmp(tokens[ti], "dst") == 0) {
586                         APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
587                         if (status->status < 0)
588                                 return;
589
590                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
591                         if (status->status < 0)
592                                 return;
593
594                         if (IS_IP4_TUNNEL(rule->flags)) {
595                                 struct in_addr ip;
596
597                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
598                                         &ip, NULL) == 0, status,
599                                         "unrecognized input \"%s\", "
600                                         "expect valid ipv4 addr",
601                                         tokens[ti]);
602                                 if (status->status < 0)
603                                         return;
604                                 rule->dst.ip.ip4 = rte_bswap32(
605                                         (uint32_t)ip.s_addr);
606                         } else if (IS_IP6_TUNNEL(rule->flags)) {
607                                 struct in6_addr ip;
608
609                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
610                                         NULL) == 0, status,
611                                         "unrecognized input \"%s\", "
612                                         "expect valid ipv6 addr",
613                                         tokens[ti]);
614                                 if (status->status < 0)
615                                         return;
616                                 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
617                         } else if (IS_TRANSPORT(rule->flags)) {
618                                 APP_CHECK(0, status, "unrecognized "
619                                         "input \"%s\"", tokens[ti]);
620                                 return;
621                         }
622
623                         dst_p = 1;
624                         continue;
625                 }
626
627                 if (strcmp(tokens[ti], "type") == 0) {
628                         APP_CHECK_PRESENCE(type_p, tokens[ti], status);
629                         if (status->status < 0)
630                                 return;
631
632                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
633                         if (status->status < 0)
634                                 return;
635
636                         if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
637                                 ips->type =
638                                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
639                         else if (strcmp(tokens[ti],
640                                         "inline-protocol-offload") == 0)
641                                 ips->type =
642                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
643                         else if (strcmp(tokens[ti],
644                                         "lookaside-protocol-offload") == 0)
645                                 ips->type =
646                                 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
647                         else if (strcmp(tokens[ti], "no-offload") == 0)
648                                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
649                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
650                                 ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
651                         else {
652                                 APP_CHECK(0, status, "Invalid input \"%s\"",
653                                                 tokens[ti]);
654                                 return;
655                         }
656
657                         type_p = 1;
658                         continue;
659                 }
660
661                 if (strcmp(tokens[ti], "port_id") == 0) {
662                         APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
663                         if (status->status < 0)
664                                 return;
665                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
666                         if (status->status < 0)
667                                 return;
668                         if (rule->portid == UINT16_MAX)
669                                 rule->portid = atoi(tokens[ti]);
670                         else if (rule->portid != atoi(tokens[ti])) {
671                                 APP_CHECK(0, status,
672                                         "portid %s does not match already assigned portid %u",
673                                         tokens[ti], rule->portid);
674                                 return;
675                         }
676                         portid_p = 1;
677                         continue;
678                 }
679
680                 if (strcmp(tokens[ti], "fallback") == 0) {
681                         struct rte_ipsec_session *fb;
682
683                         APP_CHECK(app_sa_prm.enable, status, "Fallback session "
684                                 "not allowed for legacy mode.");
685                         if (status->status < 0)
686                                 return;
687                         APP_CHECK(ips->type ==
688                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
689                                 "Fallback session is allowed only if the primary "
690                                 "session is of type inline-crypto-offload.");
691                         if (status->status < 0)
692                                 return;
693                         APP_CHECK(rule->direction ==
694                                 RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
695                                 "Fallback session not allowed for egress "
696                                 "rule");
697                         if (status->status < 0)
698                                 return;
699                         APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
700                         if (status->status < 0)
701                                 return;
702                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
703                         if (status->status < 0)
704                                 return;
705                         fb = ipsec_get_fallback_session(rule);
706                         if (strcmp(tokens[ti], "lookaside-none") == 0)
707                                 fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
708                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
709                                 fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
710                         else {
711                                 APP_CHECK(0, status, "unrecognized fallback "
712                                         "type %s.", tokens[ti]);
713                                 return;
714                         }
715
716                         rule->fallback_sessions = 1;
717                         nb_crypto_sessions++;
718                         fallback_p = 1;
719                         continue;
720                 }
721                 if (strcmp(tokens[ti], "flow-direction") == 0) {
722                         switch (ips->type) {
723                         case RTE_SECURITY_ACTION_TYPE_NONE:
724                         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
725                                 rule->fdir_flag = 1;
726                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
727                                 if (status->status < 0)
728                                         return;
729                                 if (rule->portid == UINT16_MAX)
730                                         rule->portid = atoi(tokens[ti]);
731                                 else if (rule->portid != atoi(tokens[ti])) {
732                                         APP_CHECK(0, status,
733                                                 "portid %s does not match already assigned portid %u",
734                                                 tokens[ti], rule->portid);
735                                         return;
736                                 }
737                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
738                                 if (status->status < 0)
739                                         return;
740                                 rule->fdir_qid = atoi(tokens[ti]);
741                                 /* validating portid and queueid */
742                                 status_p = check_flow_params(rule->portid,
743                                                 rule->fdir_qid);
744                                 if (status_p < 0) {
745                                         printf("port id %u / queue id %u is "
746                                                 "not valid\n", rule->portid,
747                                                  rule->fdir_qid);
748                                 }
749                                 break;
750                         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
751                         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
752                         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
753                         default:
754                                 APP_CHECK(0, status,
755                                         "flow director not supported for security session type %d",
756                                         ips->type);
757                                 return;
758                         }
759                         continue;
760                 }
761                 if (strcmp(tokens[ti], "udp-encap") == 0) {
762                         switch (ips->type) {
763                         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
764                         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
765                                 APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
766                                                    status);
767                                 if (status->status < 0)
768                                         return;
769
770                                 rule->udp_encap = 1;
771                                 app_sa_prm.udp_encap = 1;
772                                 udp_encap_p = 1;
773                                 break;
774                         default:
775                                 APP_CHECK(0, status,
776                                         "UDP encapsulation not supported for "
777                                         "security session type %d",
778                                         ips->type);
779                                 return;
780                         }
781                         continue;
782                 }
783
784                 /* unrecognized input */
785                 APP_CHECK(0, status, "unrecognized input \"%s\"",
786                         tokens[ti]);
787                 return;
788         }
789
790         if (aead_algo_p) {
791                 APP_CHECK(cipher_algo_p == 0, status,
792                                 "AEAD used, no need for cipher options");
793                 if (status->status < 0)
794                         return;
795
796                 APP_CHECK(auth_algo_p == 0, status,
797                                 "AEAD used, no need for auth options");
798                 if (status->status < 0)
799                         return;
800         } else {
801                 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
802                 if (status->status < 0)
803                         return;
804
805                 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
806                 if (status->status < 0)
807                         return;
808         }
809
810         APP_CHECK(mode_p == 1, status, "missing mode option");
811         if (status->status < 0)
812                 return;
813
814         if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
815                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
816                 printf("Missing portid option, falling back to non-offload\n");
817
818         if (!type_p || (!portid_p && ips->type !=
819                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
820                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
821         }
822
823         nb_crypto_sessions++;
824         *ri = *ri + 1;
825 }
826
827 static void
828 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
829 {
830         uint32_t i;
831         uint8_t a, b, c, d;
832         const struct rte_ipsec_session *ips;
833         const struct rte_ipsec_session *fallback_ips;
834
835         printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
836
837         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
838                 if (cipher_algos[i].algo == sa->cipher_algo &&
839                                 cipher_algos[i].key_len == sa->cipher_key_len) {
840                         printf("%s ", cipher_algos[i].keyword);
841                         break;
842                 }
843         }
844
845         for (i = 0; i < RTE_DIM(auth_algos); i++) {
846                 if (auth_algos[i].algo == sa->auth_algo) {
847                         printf("%s ", auth_algos[i].keyword);
848                         break;
849                 }
850         }
851
852         for (i = 0; i < RTE_DIM(aead_algos); i++) {
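                /* stored cipher_key_len excludes the 4-byte salt, hence the
                 * key_len - 4 comparison */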
853                 if (aead_algos[i].algo == sa->aead_algo &&
854                                 aead_algos[i].key_len-4 == sa->cipher_key_len) {
855                         printf("%s ", aead_algos[i].keyword);
856                         break;
857                 }
858         }
859
860         printf("mode:");
861
862         switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
863         case IP4_TUNNEL:
864                 printf("IP4Tunnel ");
865                 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
866                 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
867                 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
868                 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
869                 break;
870         case IP6_TUNNEL:
871                 printf("IP6Tunnel ");
872                 for (i = 0; i < 16; i++) {
873                         if (i % 2 && i != 15)
874                                 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
875                         else
876                                 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
877                 }
878                 printf(" ");
879                 for (i = 0; i < 16; i++) {
880                         if (i % 2 && i != 15)
881                                 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
882                         else
883                                 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
884                 }
885                 break;
886         case TRANSPORT:
887                 printf("Transport ");
888                 break;
889         }
890
891         ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
892         printf(" type:");
893         switch (ips->type) {
894         case RTE_SECURITY_ACTION_TYPE_NONE:
895                 printf("no-offload ");
896                 break;
897         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
898                 printf("inline-crypto-offload ");
899                 break;
900         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
901                 printf("inline-protocol-offload ");
902                 break;
903         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
904                 printf("lookaside-protocol-offload ");
905                 break;
906         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
907                 printf("cpu-crypto-accelerated ");
908                 break;
909         }
910
911         fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
912         if (fallback_ips != NULL && sa->fallback_sessions > 0) {
913                 printf("inline fallback: ");
914                 switch (fallback_ips->type) {
915                 case RTE_SECURITY_ACTION_TYPE_NONE:
916                         printf("lookaside-none");
917                         break;
918                 case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
919                         printf("cpu-crypto-accelerated");
920                         break;
921                 default:
922                         printf("invalid");
923                         break;
924                 }
925         }
926         if (sa->fdir_flag == 1)
927                 printf("flow-direction port %d queue %d", sa->portid,
928                                 sa->fdir_qid);
929
930         printf("\n");
931 }
932
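/*
 * Allocate an SA context: the crypto transform array is placed in its own
 * memzone, while the SA array is carved out right after the context struct.
 */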
933 static struct sa_ctx *
934 sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
935 {
936         char s[PATH_MAX];
937         struct sa_ctx *sa_ctx;
938         uint32_t mz_size;
939         const struct rte_memzone *mz;
940
941         snprintf(s, sizeof(s), "%s_%u", name, socket_id);
942
943         /* Create SA context */
944         printf("Creating SA context with %u maximum entries on socket %d\n",
945                         nb_sa, socket_id);
946
947         mz_size = sizeof(struct ipsec_xf) * nb_sa;
948         mz = rte_memzone_reserve(s, mz_size, socket_id,
949                         RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
950         if (mz == NULL) {
951                 printf("Failed to allocate SA XFORM memory\n");
952                 rte_errno = ENOMEM;
953                 return NULL;
954         }
955
956         sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
957                 sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
958
959         if (sa_ctx == NULL) {
960                 printf("Failed to allocate SA CTX memory\n");
961                 rte_errno = ENOMEM;
962                 rte_memzone_free(mz);
963                 return NULL;
964         }
965
966         sa_ctx->xf = (struct ipsec_xf *)mz->addr;
967         sa_ctx->nb_sa = nb_sa;
968
969         return sa_ctx;
970 }
971
972 static int
973 check_eth_dev_caps(uint16_t portid, uint32_t inbound)
974 {
975         struct rte_eth_dev_info dev_info;
976         int retval;
977
978         retval = rte_eth_dev_info_get(portid, &dev_info);
979         if (retval != 0) {
980                 RTE_LOG(ERR, IPSEC,
981                         "Error during getting device (port %u) info: %s\n",
982                         portid, strerror(-retval));
983
984                 return retval;
985         }
986
987         if (inbound) {
988                 if ((dev_info.rx_offload_capa &
989                                 RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
990                         RTE_LOG(WARNING, PORT,
991                                 "hardware RX IPSec offload is not supported\n");
992                         return -EINVAL;
993                 }
994
995         } else { /* outbound */
996                 if ((dev_info.tx_offload_capa &
997                                 RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
998                         RTE_LOG(WARNING, PORT,
999                                 "hardware TX IPSec offload is not supported\n");
1000                         return -EINVAL;
1001                 }
1002         }
1003         return 0;
1004 }
1005
1006 /*
1007  * Helper function that tries to determine the next_proto for an SPI
1008  * by searching through the SP rules.
1009  */
1010 static int
1011 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
1012                 struct ip_addr ip_addr[2], uint32_t mask[2])
1013 {
1014         int32_t rc4, rc6;
1015
1016         rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1017                                 ip_addr, mask);
1018         rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1019                                 ip_addr, mask);
1020
1021         if (rc4 >= 0) {
1022                 if (rc6 >= 0) {
1023                         RTE_LOG(ERR, IPSEC,
1024                                 "%s: SPI %u used simultaneously by "
1025                                 "IPv4 (%d) and IPv6 (%d) SP rules\n",
1026                                 __func__, spi, rc4, rc6);
1027                         return -EINVAL;
1028                 } else
1029                         return IPPROTO_IPIP;
1030         } else if (rc6 < 0) {
1031                 RTE_LOG(ERR, IPSEC,
1032                         "%s: SPI %u is not used by any SP rule\n",
1033                         __func__, spi);
1034                 return -EINVAL;
1035         } else
1036                 return IPPROTO_IPV6;
1037 }
1038
1039 /*
1040  * Helper function for getting source and destination IP addresses
1041  * from SP. Needed for inline crypto transport mode, as addresses are not
1042  * provided in the config file for that mode. It checks whether an SP for the
1043  * current SA exists and, based on the protocol type returned, stores the
1044  * appropriate addresses obtained from the SP into the SA.
1045  */
1046 static int
1047 sa_add_address_inline_crypto(struct ipsec_sa *sa)
1048 {
1049         int protocol;
1050         struct ip_addr ip_addr[2];
1051         uint32_t mask[2];
1052
1053         protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
1054         if (protocol < 0)
1055                 return protocol;
1056         else if (protocol == IPPROTO_IPIP) {
1057                 sa->flags |= IP4_TRANSPORT;
1058                 if (mask[0] == IP4_FULL_MASK &&
1059                                 mask[1] == IP4_FULL_MASK &&
1060                                 ip_addr[0].ip.ip4 != 0 &&
1061                                 ip_addr[1].ip.ip4 != 0) {
1062
1063                         sa->src.ip.ip4 = ip_addr[0].ip.ip4;
1064                         sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
1065                 } else {
1066                         RTE_LOG(ERR, IPSEC,
1067                         "%s: No valid address or mask entry in"
1068                         " IPv4 SP rule for SPI %u\n",
1069                         __func__, sa->spi);
1070                         return -EINVAL;
1071                 }
1072         } else if (protocol == IPPROTO_IPV6) {
1073                 sa->flags |= IP6_TRANSPORT;
1074                 if (mask[0] == IP6_FULL_MASK &&
1075                                 mask[1] == IP6_FULL_MASK &&
1076                                 (ip_addr[0].ip.ip6.ip6[0] != 0 ||
1077                                 ip_addr[0].ip.ip6.ip6[1] != 0) &&
1078                                 (ip_addr[1].ip.ip6.ip6[0] != 0 ||
1079                                 ip_addr[1].ip.ip6.ip6[1] != 0)) {
1080
1081                         sa->src.ip.ip6 = ip_addr[0].ip.ip6;
1082                         sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
1083                 } else {
1084                         RTE_LOG(ERR, IPSEC,
1085                         "%s: No valid address or mask entry in"
1086                         " IPv6 SP rule for SPI %u\n",
1087                         __func__, sa->spi);
1088                         return -EINVAL;
1089                 }
1090         }
1091         return 0;
1092 }
1093
1094 static int
1095 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1096                 uint32_t nb_entries, uint32_t inbound,
1097                 struct socket_ctx *skt_ctx)
1098 {
1099         struct ipsec_sa *sa;
1100         uint32_t i, idx;
1101         uint16_t iv_length, aad_length;
1102         int inline_status;
1103         int32_t rc;
1104         struct rte_ipsec_session *ips;
1105
1106         /* for ESN upper 32 bits of SQN also need to be part of AAD */
1107         aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
1108
1109         for (i = 0; i < nb_entries; i++) {
1110                 idx = i;
1111                 sa = &sa_ctx->sa[idx];
1112                 if (sa->spi != 0) {
1113                         printf("Index %u already in use by SPI %u\n",
1114                                         idx, sa->spi);
1115                         return -EINVAL;
1116                 }
1117                 *sa = entries[i];
1118
1119                 if (inbound) {
1120                         rc = ipsec_sad_add(&sa_ctx->sad, sa);
1121                         if (rc != 0)
1122                                 return rc;
1123                 }
1124
1125                 sa->seq = 0;
1126                 ips = ipsec_get_primary_session(sa);
1127
1128                 if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1129                         ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1130                         if (check_eth_dev_caps(sa->portid, inbound))
1131                                 return -EINVAL;
1132                 }
1133
1134                 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1135                 case IP4_TUNNEL:
1136                         sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
1137                         sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
1138                         break;
1139                 case TRANSPORT:
1140                         if (ips->type ==
1141                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1142                                 inline_status =
1143                                         sa_add_address_inline_crypto(sa);
1144                                 if (inline_status < 0)
1145                                         return inline_status;
1146                         }
1147                         break;
1148                 }
1149
1150                 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
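                        /* GCM nonce = 4-byte salt + 8-byte per-packet IV */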
1151                         iv_length = 12;
1152
1153                         sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1154                         sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
1155                         sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
1156                         sa_ctx->xf[idx].a.aead.key.length =
1157                                 sa->cipher_key_len;
1158                         sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
1159                                 RTE_CRYPTO_AEAD_OP_DECRYPT :
1160                                 RTE_CRYPTO_AEAD_OP_ENCRYPT;
1161                         sa_ctx->xf[idx].a.next = NULL;
1162                         sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
1163                         sa_ctx->xf[idx].a.aead.iv.length = iv_length;
1164                         sa_ctx->xf[idx].a.aead.aad_length =
1165                                 sa->aad_len + aad_length;
1166                         sa_ctx->xf[idx].a.aead.digest_length =
1167                                 sa->digest_len;
1168
1169                         sa->xforms = &sa_ctx->xf[idx].a;
1170                 } else {
1171                         switch (sa->cipher_algo) {
1172                         case RTE_CRYPTO_CIPHER_NULL:
1173                         case RTE_CRYPTO_CIPHER_3DES_CBC:
1174                         case RTE_CRYPTO_CIPHER_AES_CBC:
1175                                 iv_length = sa->iv_len;
1176                                 break;
1177                         case RTE_CRYPTO_CIPHER_AES_CTR:
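                                /* full 16-byte counter block:
                                 * salt + per-packet IV + block counter */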
1178                                 iv_length = 16;
1179                                 break;
1180                         default:
1181                                 RTE_LOG(ERR, IPSEC_ESP,
1182                                                 "unsupported cipher algorithm %u\n",
1183                                                 sa->cipher_algo);
1184                                 return -EINVAL;
1185                         }
1186
1187                         if (inbound) {
1188                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1189                                 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
1190                                 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
1191                                 sa_ctx->xf[idx].b.cipher.key.length =
1192                                         sa->cipher_key_len;
1193                                 sa_ctx->xf[idx].b.cipher.op =
1194                                         RTE_CRYPTO_CIPHER_OP_DECRYPT;
1195                                 sa_ctx->xf[idx].b.next = NULL;
1196                                 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
1197                                 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
1198
1199                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1200                                 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
1201                                 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
1202                                 sa_ctx->xf[idx].a.auth.key.length =
1203                                         sa->auth_key_len;
1204                                 sa_ctx->xf[idx].a.auth.digest_length =
1205                                         sa->digest_len;
1206                                 sa_ctx->xf[idx].a.auth.op =
1207                                         RTE_CRYPTO_AUTH_OP_VERIFY;
1208                         } else { /* outbound */
1209                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1210                                 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
1211                                 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
1212                                 sa_ctx->xf[idx].a.cipher.key.length =
1213                                         sa->cipher_key_len;
1214                                 sa_ctx->xf[idx].a.cipher.op =
1215                                         RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1216                                 sa_ctx->xf[idx].a.next = NULL;
1217                                 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
1218                                 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
1219
1220                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1221                                 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
1222                                 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
1223                                 sa_ctx->xf[idx].b.auth.key.length =
1224                                         sa->auth_key_len;
1225                                 sa_ctx->xf[idx].b.auth.digest_length =
1226                                         sa->digest_len;
1227                                 sa_ctx->xf[idx].b.auth.op =
1228                                         RTE_CRYPTO_AUTH_OP_GENERATE;
1229                         }
1230
1231                         sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1232                         sa_ctx->xf[idx].b.next = NULL;
1233                         sa->xforms = &sa_ctx->xf[idx].a;
1234                 }
1235
1236                 if (ips->type ==
1237                         RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1238                         ips->type ==
1239                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1240                         rc = create_inline_session(skt_ctx, sa, ips);
1241                         if (rc != 0) {
1242                                 RTE_LOG(ERR, IPSEC_ESP,
1243                                         "create_inline_session() failed\n");
1244                                 return -EINVAL;
1245                         }
1246                 }
1247
1248                 if (sa->fdir_flag && inbound) {
1249                         rc = create_ipsec_esp_flow(sa);
1250                         if (rc != 0)
1251                                 RTE_LOG(ERR, IPSEC_ESP,
1252                                         "create_ipsec_esp_flow() failed\n");
1253                 }
1254                 print_one_sa_rule(sa, inbound);
1255         }
1256
1257         return 0;
1258 }
1259
1260 static inline int
1261 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1262                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1263 {
1264         return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
1265 }
1266
1267 static inline int
1268 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1269                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1270 {
1271         return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
1272 }
1273
1274 /*
1275  * helper function, fills parameters that are identical for all SAs
1276  */
1277 static void
1278 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1279         const struct app_sa_prm *app_prm)
1280 {
1281         memset(prm, 0, sizeof(*prm));
1282
1283         prm->flags = app_prm->flags;
1284         prm->ipsec_xform.options.esn = app_prm->enable_esn;
1285         prm->ipsec_xform.replay_win_sz = app_prm->window_size;
1286 }
1287
1288 static int
1289 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1290         const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1291 {
1292         int32_t rc;
1293
1294         /*
1295          * Try to get the next proto for this SPI by searching for it in the SPD.
1296          * Probably not the optimal way, but nothing better is available
1297          * right now.
1298          */
1299         rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1300         if (rc < 0)
1301                 return rc;
1302
1303         fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1304         prm->userdata = (uintptr_t)ss;
1305
1306         /* setup ipsec xform */
1307         prm->ipsec_xform.spi = ss->spi;
1308         prm->ipsec_xform.salt = ss->salt;
1309         prm->ipsec_xform.direction = ss->direction;
1310         prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1311         prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1312                 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1313                 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
1314         prm->ipsec_xform.options.ecn = 1;
1315         prm->ipsec_xform.options.copy_dscp = 1;
1316
1317         if (IS_IP4_TUNNEL(ss->flags)) {
1318                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1319                 prm->tun.hdr_len = sizeof(*v4);
1320                 prm->tun.next_proto = rc;
1321                 prm->tun.hdr = v4;
1322         } else if (IS_IP6_TUNNEL(ss->flags)) {
1323                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1324                 prm->tun.hdr_len = sizeof(*v6);
1325                 prm->tun.next_proto = rc;
1326                 prm->tun.hdr = v6;
1327         } else {
1328                 /* transport mode */
1329                 prm->trs.proto = rc;
1330         }
1331
1332         /* setup crypto section */
1333         prm->crypto_xform = ss->xforms;
1334         return 0;
1335 }
1336
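/*
 * Bind the rte_ipsec_sa to the session. For inline sessions that already
 * carry a security session, rte_ipsec_session_prepare() is invoked so the
 * library can finalise the session for its packet-processing routines;
 * on failure the session is cleared so it will not be used later.
 */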
1337 static int
1338 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
1339 {
1340         int32_t rc = 0;
1341
1342         ss->sa = sa;
1343
1344         if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1345                 ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
1346                 if (ss->security.ses != NULL) {
1347                         rc = rte_ipsec_session_prepare(ss);
1348                         if (rc != 0)
1349                                 memset(ss, 0, sizeof(*ss));
1350                 }
1351         }
1352
1353         return rc;
1354 }
1355
1356 /*
1357  * Initialise the related rte_ipsec_sa object.
1358  */
1359 static int
1360 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
1361 {
1362         int rc;
1363         struct rte_ipsec_sa_prm prm;
1364         struct rte_ipsec_session *ips;
1365         struct rte_ipv4_hdr v4  = {
1366                 .version_ihl = IPVERSION << 4 |
1367                         sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1368                 .time_to_live = IPDEFTTL,
1369                 .next_proto_id = IPPROTO_ESP,
1370                 .src_addr = lsa->src.ip.ip4,
1371                 .dst_addr = lsa->dst.ip.ip4,
1372         };
1373         struct rte_ipv6_hdr v6 = {
1374                 .vtc_flow = htonl(IP6_VERSION << 28),
1375                 .proto = IPPROTO_ESP,
1376         };
1377
1378         if (IS_IP6_TUNNEL(lsa->flags)) {
1379                 memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1380                 memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1381         }
1382
1383         rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1384         if (rc == 0)
1385                 rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1386         if (rc < 0)
1387                 return rc;
1388
1389         /* init primary processing session */
1390         ips = ipsec_get_primary_session(lsa);
1391         rc = fill_ipsec_session(ips, sa);
1392         if (rc != 0)
1393                 return rc;
1394
1395         /* init inline fallback processing session */
1396         if (lsa->fallback_sessions == 1)
1397                 rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
1398
1399         return rc;
1400 }
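
/*
 * Note: the primary session and the optional inline fallback session above
 * are both bound to the same rte_ipsec_sa; only the underlying
 * rte_security/cryptodev session differs between them.
 */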
1401
1402 /*
1403  * Allocate space and init rte_ipsec_sa structures,
1404  * one per session.
1405  */
1406 static int
1407 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
1408 {
1409         int32_t rc, sz;
1410         uint32_t i, idx;
1411         size_t tsz;
1412         struct rte_ipsec_sa *sa;
1413         struct ipsec_sa *lsa;
1414         struct rte_ipsec_sa_prm prm;
1415
1416         /* determine SA size */
1417         idx = 0;
1418         fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1419         sz = rte_ipsec_sa_size(&prm);
1420         if (sz < 0) {
1421                 RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1422                         "failed to determine SA size, error code: %d\n",
1423                         __func__, ctx, nb_ent, socket, sz);
1424                 return sz;
1425         }
1426
1427         tsz = sz * nb_ent;
1428
1429         ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1430         if (ctx->satbl == NULL) {
1431                 RTE_LOG(ERR, IPSEC,
1432                         "%s(%p, %u, %d): failed to allocate %zu bytes\n",
1433                         __func__,  ctx, nb_ent, socket, tsz);
1434                 return -ENOMEM;
1435         }
1436
1437         rc = 0;
1438         for (i = 0; i != nb_ent && rc == 0; i++) {
1439
1440                 idx = i;
1441
1442                 sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1443                 lsa = ctx->sa + idx;
1444
1445                 rc = ipsec_sa_init(lsa, sa, sz);
1446         }
1447
1448         return rc;
1449 }
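
/*
 * Layout note: ctx->satbl is a single flat allocation holding nb_ent SAs of
 * identical size sz, so entry i starts at byte offset i * sz (hence the
 * pointer arithmetic above).
 */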
1450
1451 static int
1452 sa_cmp(const void *p, const void *q)
1453 {
1454         uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
1455         uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
1456
1457         return (spi1 > spi2) - (spi1 < spi2);
1458 }
1459
1460 /*
1461  * Binary-search the SPI-sorted SA rules for an SA with the given SPI
1462  */
1463 int
1464 sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
1465 {
1466         uint32_t num;
1467         struct ipsec_sa *sa;
1468         struct ipsec_sa tmpl;
1469         const struct ipsec_sa *sar;
1470
1471         sar = sa_ctx->sa;
1472         if (inbound != 0)
1473                 num = nb_sa_in;
1474         else
1475                 num = nb_sa_out;
1476
1477         tmpl.spi = spi;
1478
1479         sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
1480         if (sa != NULL)
1481                 return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
1482
1483         return -ENOENT;
1484 }
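
/*
 * Usage sketch (illustrative only; "new_spi" is a hypothetical value, not a
 * variable defined in this file). The rule arrays must already be sorted by
 * SPI -- see sa_sort_arr() below -- for the bsearch() above to be valid:
 *
 *	if (sa_spi_present(ctx->sa_in, new_spi, 1) < 0)
 *		RTE_LOG(WARNING, IPSEC,
 *			"SPI %u not found in inbound SAs\n", new_spi);
 */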
1485
1486 void
1487 sa_init(struct socket_ctx *ctx, int32_t socket_id)
1488 {
1489         int32_t rc;
1490         const char *name;
1491
1492         if (ctx == NULL)
1493                 rte_exit(EXIT_FAILURE, "NULL context.\n");
1494
1495         if (ctx->sa_in != NULL)
1496                 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
1497                                 "initialized\n", socket_id);
1498
1499         if (ctx->sa_out != NULL)
1500                 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
1501                                 "initialized\n", socket_id);
1502
1503         if (nb_sa_in > 0) {
1504                 name = "sa_in";
1505                 ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
1506                 if (ctx->sa_in == NULL)
1507                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1508                                 "context %s in socket %d\n", rte_errno,
1509                                 name, socket_id);
1510
1511                 rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
1512                                 &sa_in_cnt);
1513                 if (rc != 0)
1514                         rte_exit(EXIT_FAILURE, "failed to init SAD\n");
1515
1516                 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
1517
1518                 if (app_sa_prm.enable != 0) {
1519                         rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
1520                                 socket_id);
1521                         if (rc != 0)
1522                                 rte_exit(EXIT_FAILURE,
1523                                         "failed to init inbound SAs\n");
1524                 }
1525         } else
1526                 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1527
1528         if (nb_sa_out > 0) {
1529                 name = "sa_out";
1530                 ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
1531                 if (ctx->sa_out == NULL)
1532                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1533                                 "context %s in socket %d\n", rte_errno,
1534                                 name, socket_id);
1535
1536                 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
1537
1538                 if (app_sa_prm.enable != 0) {
1539                         rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
1540                                 socket_id);
1541                         if (rc != 0)
1542                                 rte_exit(EXIT_FAILURE,
1543                                         "failed to init outbound SAs\n");
1544                 }
1545         } else
1546                 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
1547                         "specified\n");
1548 }
1549
1550 int
1551 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1552 {
1553         struct ipsec_mbuf_metadata *priv;
1554         struct ipsec_sa *sa;
1555
1556         priv = get_priv(m);
1557         sa = priv->sa;
1558         if (sa != NULL)
1559                 return (sa_ctx->sa[sa_idx].spi == sa->spi);
1560
1561         RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1562         return 0;
1563 }
1564
1565 void
1566 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1567                 void *sa_arr[], uint16_t nb_pkts)
1568 {
1569         uint32_t i;
1570         void *result_sa;
1571         struct ipsec_sa *sa;
1572
1573         sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
1574
1575         /*
1576          * Mark the need for inline offload fallback on the LSB of the SA
1577          * pointer. Thanks to the packet grouping mechanism used by
1578          * ipsec_process, packets marked for fallback form a separate group.
1579          *
1580          * Because such a marked pointer is not safe to dereference, it is
1581          * cast to a generic pointer to prevent unintentional use. Use
1582          * ipsec_mask_saptr to recover a valid struct pointer.
1583          */
1584         for (i = 0; i < nb_pkts; i++) {
1585                 if (sa_arr[i] == NULL)
1586                         continue;
1587
1588                 result_sa = sa = sa_arr[i];
1589                 if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
1590                         sa->fallback_sessions > 0) {
1591                         uintptr_t intsa = (uintptr_t)sa;
1592                         intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
1593                         result_sa = (void *)intsa;
1594                 }
1595                 sa_arr[i] = result_sa;
1596         }
1597 }
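
/*
 * Illustrative sketch of recovering the real SA pointer on the consumer side
 * (the actual helper is expected to be provided by ipsec.h; this sketch only
 * assumes that IPSEC_SA_OFFLOAD_FALLBACK_FLAG occupies the LSB):
 *
 *	static inline struct ipsec_sa *
 *	ipsec_mask_saptr(void *ptr)
 *	{
 *		uintptr_t i = (uintptr_t)ptr;
 *		const uintptr_t mask = IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
 *
 *		return (struct ipsec_sa *)(i & ~mask);
 *	}
 *
 * Stashing a flag in the LSB is safe here because struct ipsec_sa objects
 * come from cache-line-aligned allocations, so the low bit of a genuine
 * pointer is always zero.
 */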
1598
1599 void
1600 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1601                 void *sa[], uint16_t nb_pkts)
1602 {
1603         uint32_t i;
1604
1605         for (i = 0; i < nb_pkts; i++)
1606                 sa[i] = &sa_ctx->sa[sa_idx[i]];
1607 }
1608
1609 /*
1610  * Select HW offloads to be used.
1611  */
1612 int
1613 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1614                 uint64_t *tx_offloads)
1615 {
1616         struct ipsec_sa *rule;
1617         uint32_t idx_sa;
1618         enum rte_security_session_action_type rule_type;
1619
1620         *rx_offloads = 0;
1621         *tx_offloads = 0;
1622
1623         /* Check for inbound rules that use offloads and use this port */
1624         for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1625                 rule = &sa_in[idx_sa];
1626                 rule_type = ipsec_get_action_type(rule);
1627                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1628                                 rule_type ==
1629                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1630                                 && rule->portid == port_id)
1631                         *rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
1632         }
1633
1634         /* Check for outbound rules that use offloads and use this port */
1635         for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1636                 rule = &sa_out[idx_sa];
1637                 rule_type = ipsec_get_action_type(rule);
1638                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1639                                 rule_type ==
1640                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1641                                 && rule->portid == port_id)
1642                         *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
1643         }
1644         return 0;
1645 }
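
/*
 * Usage sketch (illustrative; "port_conf", "nb_rxq" and "nb_txq" are
 * hypothetical locals of the port initialisation code, not names used in
 * this file):
 *
 *	uint64_t rx_offloads, tx_offloads;
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	sa_check_offloads(port_id, &rx_offloads, &tx_offloads);
 *	port_conf.rxmode.offloads |= rx_offloads;
 *	port_conf.txmode.offloads |= tx_offloads;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *
 * Only ports referenced by inline-crypto or inline-protocol SAs end up with
 * the SECURITY offload bits set.
 */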
1646
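/*
 * Sort both SA rule arrays by SPI so that the bsearch() in sa_spi_present()
 * produces correct results.
 */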
1647 void
1648 sa_sort_arr(void)
1649 {
1650         qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
1651         qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
1652 }
1653
1654 uint32_t
1655 get_nb_crypto_sessions(void)
1656 {
1657         return nb_crypto_sessions;
1658 }