examples/ipsec-secgw: support 192/256 AES key sizes
[dpdk.git] / examples / ipsec-secgw / sa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 /*
6  * Security Associations
7  */
8 #include <sys/types.h>
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
12
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
19 #include <rte_ip.h>
20 #include <rte_random.h>
21 #include <rte_ethdev.h>
22 #include <rte_malloc.h>
23
24 #include "ipsec.h"
25 #include "esp.h"
26 #include "parser.h"
27 #include "sad.h"
28
29 #define IPDEFTTL 64
30
31 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
32
33 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
34
35 #define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
36
37 struct supported_cipher_algo {
38         const char *keyword;
39         enum rte_crypto_cipher_algorithm algo;
40         uint16_t iv_len;
41         uint16_t block_size;
42         uint16_t key_len;
43 };
44
45 struct supported_auth_algo {
46         const char *keyword;
47         enum rte_crypto_auth_algorithm algo;
48         uint16_t digest_len;
49         uint16_t key_len;
50         uint8_t key_not_req;
51 };
52
53 struct supported_aead_algo {
54         const char *keyword;
55         enum rte_crypto_aead_algorithm algo;
56         uint16_t iv_len;
57         uint16_t block_size;
58         uint16_t digest_len;
59         uint16_t key_len;
60         uint8_t aad_len;
61 };
62
63
64 const struct supported_cipher_algo cipher_algos[] = {
65         {
66                 .keyword = "null",
67                 .algo = RTE_CRYPTO_CIPHER_NULL,
68                 .iv_len = 0,
69                 .block_size = 4,
70                 .key_len = 0
71         },
72         {
73                 .keyword = "aes-128-cbc",
74                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
75                 .iv_len = 16,
76                 .block_size = 16,
77                 .key_len = 16
78         },
79         {
80                 .keyword = "aes-192-cbc",
81                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
82                 .iv_len = 16,
83                 .block_size = 16,
84                 .key_len = 24
85         },
86         {
87                 .keyword = "aes-256-cbc",
88                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
89                 .iv_len = 16,
90                 .block_size = 16,
91                 .key_len = 32
92         },
93         {
94                 .keyword = "aes-128-ctr",
95                 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
96                 .iv_len = 8,
97                 .block_size = 4,
98                 .key_len = 20
99         },
100         {
101                 .keyword = "3des-cbc",
102                 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
103                 .iv_len = 8,
104                 .block_size = 8,
105                 .key_len = 24
106         }
107 };
108
109 const struct supported_auth_algo auth_algos[] = {
110         {
111                 .keyword = "null",
112                 .algo = RTE_CRYPTO_AUTH_NULL,
113                 .digest_len = 0,
114                 .key_len = 0,
115                 .key_not_req = 1
116         },
117         {
118                 .keyword = "sha1-hmac",
119                 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
120                 .digest_len = 12,
121                 .key_len = 20
122         },
123         {
124                 .keyword = "sha256-hmac",
125                 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
126                 .digest_len = 16,
127                 .key_len = 32
128         }
129 };
130
131 const struct supported_aead_algo aead_algos[] = {
132         {
133                 .keyword = "aes-128-gcm",
134                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
135                 .iv_len = 8,
136                 .block_size = 4,
137                 .key_len = 20,
138                 .digest_len = 16,
139                 .aad_len = 8,
140         },
141         {
142                 .keyword = "aes-192-gcm",
143                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
144                 .iv_len = 8,
145                 .block_size = 4,
146                 .key_len = 28,
147                 .digest_len = 16,
148                 .aad_len = 8,
149         },
150         {
151                 .keyword = "aes-256-gcm",
152                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
153                 .iv_len = 8,
154                 .block_size = 4,
155                 .key_len = 36,
156                 .digest_len = 16,
157                 .aad_len = 8,
158         }
159 };
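/*
 * Note: for the aes-*-ctr and aes-*-gcm entries above, key_len includes a
 * trailing 4-byte nonce salt supplied as part of the configured key;
 * parse_sa_tokens() strips it into rule->salt. The aes-192/aes-256 CBC and
 * GCM entries provide the 192/256-bit AES key sizes referenced in the
 * commit subject.
 */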
160
161 #define SA_INIT_NB      128
162
163 struct ipsec_sa *sa_out;
164 uint32_t nb_sa_out;
165 static uint32_t sa_out_sz;
166 static struct ipsec_sa_cnt sa_out_cnt;
167
168 struct ipsec_sa *sa_in;
169 uint32_t nb_sa_in;
170 static uint32_t sa_in_sz;
171 static struct ipsec_sa_cnt sa_in_cnt;
172
173 static const struct supported_cipher_algo *
174 find_match_cipher_algo(const char *cipher_keyword)
175 {
176         size_t i;
177
178         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
179                 const struct supported_cipher_algo *algo =
180                         &cipher_algos[i];
181
182                 if (strcmp(cipher_keyword, algo->keyword) == 0)
183                         return algo;
184         }
185
186         return NULL;
187 }
188
189 static const struct supported_auth_algo *
190 find_match_auth_algo(const char *auth_keyword)
191 {
192         size_t i;
193
194         for (i = 0; i < RTE_DIM(auth_algos); i++) {
195                 const struct supported_auth_algo *algo =
196                         &auth_algos[i];
197
198                 if (strcmp(auth_keyword, algo->keyword) == 0)
199                         return algo;
200         }
201
202         return NULL;
203 }
204
205 static const struct supported_aead_algo *
206 find_match_aead_algo(const char *aead_keyword)
207 {
208         size_t i;
209
210         for (i = 0; i < RTE_DIM(aead_algos); i++) {
211                 const struct supported_aead_algo *algo =
212                         &aead_algos[i];
213
214                 if (strcmp(aead_keyword, algo->keyword) == 0)
215                         return algo;
216         }
217
218         return NULL;
219 }
220
221 /** parse_key_string
222  *  parse a colon-separated hex byte key string (x:x:x:x...) into uint8_t *key
223  *  return:
224  *  > 0: number of bytes parsed
225  *  0:   failed
226  */
227 static uint32_t
228 parse_key_string(const char *key_str, uint8_t *key)
229 {
230         const char *pt_start = key_str, *pt_end = key_str;
231         uint32_t nb_bytes = 0;
232
233         while (pt_end != NULL) {
234                 char sub_str[3] = {0};
235
236                 pt_end = strchr(pt_start, ':');
237
238                 if (pt_end == NULL) {
239                         if (strlen(pt_start) > 2)
240                                 return 0;
241                         strncpy(sub_str, pt_start, 2);
242                 } else {
243                         if (pt_end - pt_start > 2)
244                                 return 0;
245
246                         strncpy(sub_str, pt_start, pt_end - pt_start);
247                         pt_start = pt_end + 1;
248                 }
249
250                 key[nb_bytes++] = strtol(sub_str, NULL, 16);
251         }
252
253         return nb_bytes;
254 }
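/*
 * Example (illustrative): parse_key_string("de:ad:be:ef", key) returns 4 and
 * fills key[] with { 0xde, 0xad, 0xbe, 0xef }. Each ':'-separated token must
 * be at most two hex digits, otherwise 0 is returned.
 */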
255
256 static int
257 extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
258 {
259         if (*sa_tbl == NULL) {
260                 *sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
261                 if (*sa_tbl == NULL)
262                         return -1;
263                 *cur_sz = SA_INIT_NB;
264                 return 0;
265         }
266
267         if (cur_cnt >= *cur_sz) {
268                 *sa_tbl = realloc(*sa_tbl,
269                         *cur_sz * sizeof(struct ipsec_sa) * 2);
270                 if (*sa_tbl == NULL)
271                         return -1;
272                 /* clean reallocated extra space */
273                 memset(&(*sa_tbl)[*cur_sz], 0,
274                         *cur_sz * sizeof(struct ipsec_sa));
275                 *cur_sz *= 2;
276         }
277
278         return 0;
279 }
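/*
 * The SA tables start at SA_INIT_NB (128) entries and double in size whenever
 * full, zeroing the newly added half. Note that if realloc() fails, the old
 * pointer is overwritten with NULL (the previous table is leaked) and the
 * caller stops adding the current rule.
 */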
280
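/*
 * Parse one tokenized SA rule. Illustrative rule shape only (the exact
 * config-file syntax is defined by parser.c and the sample cfg files, with
 * the leading "sa" keyword handled by the caller):
 *
 *   sa out 5 aead_algo aes-256-gcm aead_key <32 key bytes + 4 salt bytes> \
 *      mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 *
 * Here tokens[0] is the direction ("in"/"out"), tokens[1] the SPI, and the
 * rest are keyword/value pairs handled in the loop below.
 */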
281 void
282 parse_sa_tokens(char **tokens, uint32_t n_tokens,
283         struct parse_status *status)
284 {
285         struct ipsec_sa *rule = NULL;
286         struct rte_ipsec_session *ips;
287         uint32_t ti; /*token index*/
288         uint32_t *ri /*rule index*/;
289         struct ipsec_sa_cnt *sa_cnt;
290         uint32_t cipher_algo_p = 0;
291         uint32_t auth_algo_p = 0;
292         uint32_t aead_algo_p = 0;
293         uint32_t src_p = 0;
294         uint32_t dst_p = 0;
295         uint32_t mode_p = 0;
296         uint32_t type_p = 0;
297         uint32_t portid_p = 0;
298         uint32_t fallback_p = 0;
299
300         if (strcmp(tokens[0], "in") == 0) {
301                 ri = &nb_sa_in;
302                 sa_cnt = &sa_in_cnt;
303                 if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
304                         return;
305                 rule = &sa_in[*ri];
306                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
307         } else {
308                 ri = &nb_sa_out;
309                 sa_cnt = &sa_out_cnt;
310                 if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
311                         return;
312                 rule = &sa_out[*ri];
313                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
314         }
315
316         /* spi number */
317         APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
318         if (status->status < 0)
319                 return;
320         if (atoi(tokens[1]) == INVALID_SPI)
321                 return;
322         rule->spi = atoi(tokens[1]);
323         ips = ipsec_get_primary_session(rule);
324
325         for (ti = 2; ti < n_tokens; ti++) {
326                 if (strcmp(tokens[ti], "mode") == 0) {
327                         APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
328                         if (status->status < 0)
329                                 return;
330
331                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
332                         if (status->status < 0)
333                                 return;
334
335                         if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
336                                 sa_cnt->nb_v4++;
337                                 rule->flags = IP4_TUNNEL;
338                         } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
339                                 sa_cnt->nb_v6++;
340                                 rule->flags = IP6_TUNNEL;
341                         } else if (strcmp(tokens[ti], "transport") == 0) {
342                                 sa_cnt->nb_v4++;
343                                 sa_cnt->nb_v6++;
344                                 rule->flags = TRANSPORT;
345                         } else {
346                                 APP_CHECK(0, status, "unrecognized "
347                                         "input \"%s\"", tokens[ti]);
348                                 return;
349                         }
350
351                         mode_p = 1;
352                         continue;
353                 }
354
355                 if (strcmp(tokens[ti], "cipher_algo") == 0) {
356                         const struct supported_cipher_algo *algo;
357                         uint32_t key_len;
358
359                         APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
360                                 status);
361                         if (status->status < 0)
362                                 return;
363
364                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
365                         if (status->status < 0)
366                                 return;
367
368                         algo = find_match_cipher_algo(tokens[ti]);
369
370                         APP_CHECK(algo != NULL, status, "unrecognized "
371                                 "input \"%s\"", tokens[ti]);
372
373                         if (status->status < 0)
374                                 return;
375
376                         rule->cipher_algo = algo->algo;
377                         rule->block_size = algo->block_size;
378                         rule->iv_len = algo->iv_len;
379                         rule->cipher_key_len = algo->key_len;
380
381                         /* for NULL algorithm, no cipher key required */
382                         if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
383                                 cipher_algo_p = 1;
384                                 continue;
385                         }
386
387                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
388                         if (status->status < 0)
389                                 return;
390
391                         APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
392                                 status, "unrecognized input \"%s\", "
393                                 "expect \"cipher_key\"", tokens[ti]);
394                         if (status->status < 0)
395                                 return;
396
397                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
398                         if (status->status < 0)
399                                 return;
400
401                         key_len = parse_key_string(tokens[ti],
402                                 rule->cipher_key);
403                         APP_CHECK(key_len == rule->cipher_key_len, status,
404                                 "unrecognized input \"%s\"", tokens[ti]);
405                         if (status->status < 0)
406                                 return;
407
408                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
409                                 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
410                                 rule->salt = (uint32_t)rte_rand();
411
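                        /*
                         * For AES-CTR the configured key carries 4 extra
                         * bytes of nonce salt at its end: split those off
                         * into rule->salt and shorten the key length
                         * accordingly.
                         */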
412                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
413                                 key_len -= 4;
414                                 rule->cipher_key_len = key_len;
415                                 memcpy(&rule->salt,
416                                         &rule->cipher_key[key_len], 4);
417                         }
418
419                         cipher_algo_p = 1;
420                         continue;
421                 }
422
423                 if (strcmp(tokens[ti], "auth_algo") == 0) {
424                         const struct supported_auth_algo *algo;
425                         uint32_t key_len;
426
427                         APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
428                                 status);
429                         if (status->status < 0)
430                                 return;
431
432                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
433                         if (status->status < 0)
434                                 return;
435
436                         algo = find_match_auth_algo(tokens[ti]);
437                         APP_CHECK(algo != NULL, status, "unrecognized "
438                                 "input \"%s\"", tokens[ti]);
439
440                         if (status->status < 0)
441                                 return;
442
443                         rule->auth_algo = algo->algo;
444                         rule->auth_key_len = algo->key_len;
445                         rule->digest_len = algo->digest_len;
446
447                         /* NULL algorithm and combined algos do not
448                          * require auth key
449                          */
450                         if (algo->key_not_req) {
451                                 auth_algo_p = 1;
452                                 continue;
453                         }
454
455                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
456                         if (status->status < 0)
457                                 return;
458
459                         APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
460                                 status, "unrecognized input \"%s\", "
461                                 "expect \"auth_key\"", tokens[ti]);
462                         if (status->status < 0)
463                                 return;
464
465                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
466                         if (status->status < 0)
467                                 return;
468
469                         key_len = parse_key_string(tokens[ti],
470                                 rule->auth_key);
471                         APP_CHECK(key_len == rule->auth_key_len, status,
472                                 "unrecognized input \"%s\"", tokens[ti]);
473                         if (status->status < 0)
474                                 return;
475
476                         auth_algo_p = 1;
477                         continue;
478                 }
479
480                 if (strcmp(tokens[ti], "aead_algo") == 0) {
481                         const struct supported_aead_algo *algo;
482                         uint32_t key_len;
483
484                         APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
485                                 status);
486                         if (status->status < 0)
487                                 return;
488
489                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
490                         if (status->status < 0)
491                                 return;
492
493                         algo = find_match_aead_algo(tokens[ti]);
494
495                         APP_CHECK(algo != NULL, status, "unrecognized "
496                                 "input \"%s\"", tokens[ti]);
497
498                         if (status->status < 0)
499                                 return;
500
501                         rule->aead_algo = algo->algo;
502                         rule->cipher_key_len = algo->key_len;
503                         rule->digest_len = algo->digest_len;
504                         rule->aad_len = algo->aad_len;
505                         rule->block_size = algo->block_size;
506                         rule->iv_len = algo->iv_len;
507
508                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
509                         if (status->status < 0)
510                                 return;
511
512                         APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
513                                 status, "unrecognized input \"%s\", "
514                                 "expect \"aead_key\"", tokens[ti]);
515                         if (status->status < 0)
516                                 return;
517
518                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
519                         if (status->status < 0)
520                                 return;
521
522                         key_len = parse_key_string(tokens[ti],
523                                 rule->cipher_key);
524                         APP_CHECK(key_len == rule->cipher_key_len, status,
525                                 "unrecognized input \"%s\"", tokens[ti]);
526                         if (status->status < 0)
527                                 return;
528
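                        /*
                         * As with AES-CTR above, the trailing 4 bytes of the
                         * configured AEAD key are the nonce salt, not key
                         * material: strip them into rule->salt.
                         */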
529                         key_len -= 4;
530                         rule->cipher_key_len = key_len;
531                         memcpy(&rule->salt,
532                                 &rule->cipher_key[key_len], 4);
533
534                         aead_algo_p = 1;
535                         continue;
536                 }
537
538                 if (strcmp(tokens[ti], "src") == 0) {
539                         APP_CHECK_PRESENCE(src_p, tokens[ti], status);
540                         if (status->status < 0)
541                                 return;
542
543                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
544                         if (status->status < 0)
545                                 return;
546
547                         if (IS_IP4_TUNNEL(rule->flags)) {
548                                 struct in_addr ip;
549
550                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
551                                         &ip, NULL) == 0, status,
552                                         "unrecognized input \"%s\", "
553                                         "expect valid ipv4 addr",
554                                         tokens[ti]);
555                                 if (status->status < 0)
556                                         return;
557                                 rule->src.ip.ip4 = rte_bswap32(
558                                         (uint32_t)ip.s_addr);
559                         } else if (IS_IP6_TUNNEL(rule->flags)) {
560                                 struct in6_addr ip;
561
562                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
563                                         NULL) == 0, status,
564                                         "unrecognized input \"%s\", "
565                                         "expect valid ipv6 addr",
566                                         tokens[ti]);
567                                 if (status->status < 0)
568                                         return;
569                                 memcpy(rule->src.ip.ip6.ip6_b,
570                                         ip.s6_addr, 16);
571                         } else if (IS_TRANSPORT(rule->flags)) {
572                                 APP_CHECK(0, status, "unrecognized input "
573                                         "\"%s\"", tokens[ti]);
574                                 return;
575                         }
576
577                         src_p = 1;
578                         continue;
579                 }
580
581                 if (strcmp(tokens[ti], "dst") == 0) {
582                         APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
583                         if (status->status < 0)
584                                 return;
585
586                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
587                         if (status->status < 0)
588                                 return;
589
590                         if (IS_IP4_TUNNEL(rule->flags)) {
591                                 struct in_addr ip;
592
593                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
594                                         &ip, NULL) == 0, status,
595                                         "unrecognized input \"%s\", "
596                                         "expect valid ipv4 addr",
597                                         tokens[ti]);
598                                 if (status->status < 0)
599                                         return;
600                                 rule->dst.ip.ip4 = rte_bswap32(
601                                         (uint32_t)ip.s_addr);
602                         } else if (IS_IP6_TUNNEL(rule->flags)) {
603                                 struct in6_addr ip;
604
605                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
606                                         NULL) == 0, status,
607                                         "unrecognized input \"%s\", "
608                                         "expect valid ipv6 addr",
609                                         tokens[ti]);
610                                 if (status->status < 0)
611                                         return;
612                                 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
613                         } else if (IS_TRANSPORT(rule->flags)) {
614                                 APP_CHECK(0, status, "unrecognized "
615                                         "input \"%s\"", tokens[ti]);
616                                 return;
617                         }
618
619                         dst_p = 1;
620                         continue;
621                 }
622
623                 if (strcmp(tokens[ti], "type") == 0) {
624                         APP_CHECK_PRESENCE(type_p, tokens[ti], status);
625                         if (status->status < 0)
626                                 return;
627
628                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
629                         if (status->status < 0)
630                                 return;
631
632                         if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
633                                 ips->type =
634                                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
635                         else if (strcmp(tokens[ti],
636                                         "inline-protocol-offload") == 0)
637                                 ips->type =
638                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
639                         else if (strcmp(tokens[ti],
640                                         "lookaside-protocol-offload") == 0)
641                                 ips->type =
642                                 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
643                         else if (strcmp(tokens[ti], "no-offload") == 0)
644                                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
645                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
646                                 ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
647                         else {
648                                 APP_CHECK(0, status, "Invalid input \"%s\"",
649                                                 tokens[ti]);
650                                 return;
651                         }
652
653                         type_p = 1;
654                         continue;
655                 }
656
657                 if (strcmp(tokens[ti], "port_id") == 0) {
658                         APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
659                         if (status->status < 0)
660                                 return;
661                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
662                         if (status->status < 0)
663                                 return;
664                         rule->portid = atoi(tokens[ti]);
665                         if (status->status < 0)
666                                 return;
667                         portid_p = 1;
668                         continue;
669                 }
670
671                 if (strcmp(tokens[ti], "fallback") == 0) {
672                         struct rte_ipsec_session *fb;
673
674                         APP_CHECK(app_sa_prm.enable, status, "Fallback session "
675                                 "not allowed for legacy mode.");
676                         if (status->status < 0)
677                                 return;
678                         APP_CHECK(ips->type ==
679                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
680                                 "Fallback session is allowed only if the "
681                                 "primary session is of type inline-crypto-offload.");
682                         if (status->status < 0)
683                                 return;
684                         APP_CHECK(rule->direction ==
685                                 RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
686                                 "Fallback session not allowed for egress "
687                                 "rule");
688                         if (status->status < 0)
689                                 return;
690                         APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
691                         if (status->status < 0)
692                                 return;
693                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
694                         if (status->status < 0)
695                                 return;
696                         fb = ipsec_get_fallback_session(rule);
697                         if (strcmp(tokens[ti], "lookaside-none") == 0)
698                                 fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
699                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
700                                 fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
701                         else {
702                                 APP_CHECK(0, status, "unrecognized fallback "
703                                         "type %s.", tokens[ti]);
704                                 return;
705                         }
706
707                         rule->fallback_sessions = 1;
708                         fallback_p = 1;
709                         continue;
710                 }
711
712                 /* unrecognizable input */
713                 APP_CHECK(0, status, "unrecognized input \"%s\"",
714                         tokens[ti]);
715                 return;
716         }
717
718         if (aead_algo_p) {
719                 APP_CHECK(cipher_algo_p == 0, status,
720                                 "AEAD used, no need for cipher options");
721                 if (status->status < 0)
722                         return;
723
724                 APP_CHECK(auth_algo_p == 0, status,
725                                 "AEAD used, no need for auth options");
726                 if (status->status < 0)
727                         return;
728         } else {
729                 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
730                 if (status->status < 0)
731                         return;
732
733                 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
734                 if (status->status < 0)
735                         return;
736         }
737
738         APP_CHECK(mode_p == 1, status, "missing mode option");
739         if (status->status < 0)
740                 return;
741
742         if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
743                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
744                 printf("Missing portid option, falling back to non-offload\n");
745
746         if (!type_p || (!portid_p && ips->type !=
747                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
748                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
749                 rule->portid = -1;
750         }
751
752         *ri = *ri + 1;
753 }
754
755 static void
756 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
757 {
758         uint32_t i;
759         uint8_t a, b, c, d;
760         const struct rte_ipsec_session *ips;
761         const struct rte_ipsec_session *fallback_ips;
762
763         printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
764
765         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
766                 if (cipher_algos[i].algo == sa->cipher_algo &&
767                                 cipher_algos[i].key_len == sa->cipher_key_len) {
768                         printf("%s ", cipher_algos[i].keyword);
769                         break;
770                 }
771         }
772
773         for (i = 0; i < RTE_DIM(auth_algos); i++) {
774                 if (auth_algos[i].algo == sa->auth_algo) {
775                         printf("%s ", auth_algos[i].keyword);
776                         break;
777                 }
778         }
779
780         for (i = 0; i < RTE_DIM(aead_algos); i++) {
781                 if (aead_algos[i].algo == sa->aead_algo &&
782                                 aead_algos[i].key_len-4 == sa->cipher_key_len) {
783                         printf("%s ", aead_algos[i].keyword);
784                         break;
785                 }
786         }
787
788         printf("mode:");
789
790         switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
791         case IP4_TUNNEL:
792                 printf("IP4Tunnel ");
793                 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
794                 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
795                 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
796                 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
797                 break;
798         case IP6_TUNNEL:
799                 printf("IP6Tunnel ");
800                 for (i = 0; i < 16; i++) {
801                         if (i % 2 && i != 15)
802                                 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
803                         else
804                                 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
805                 }
806                 printf(" ");
807                 for (i = 0; i < 16; i++) {
808                         if (i % 2 && i != 15)
809                                 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
810                         else
811                                 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
812                 }
813                 break;
814         case TRANSPORT:
815                 printf("Transport ");
816                 break;
817         }
818
819         ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
820         printf(" type:");
821         switch (ips->type) {
822         case RTE_SECURITY_ACTION_TYPE_NONE:
823                 printf("no-offload ");
824                 break;
825         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
826                 printf("inline-crypto-offload ");
827                 break;
828         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
829                 printf("inline-protocol-offload ");
830                 break;
831         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
832                 printf("lookaside-protocol-offload ");
833                 break;
834         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
835                 printf("cpu-crypto-accelerated");
836                 break;
837         }
838
839         fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
840         if (fallback_ips != NULL && sa->fallback_sessions > 0) {
841                 printf("inline fallback: ");
842                 switch (fallback_ips->type) {
843                 case RTE_SECURITY_ACTION_TYPE_NONE:
844                         printf("lookaside-none");
845                         break;
846                 case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
847                         printf("cpu-crypto-accelerated");
848                         break;
849                 default:
850                         printf("invalid");
851                         break;
852                 }
853         }
854         printf("\n");
855 }
856
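/*
 * Allocate one SA context: the crypto transforms (xf[]) are placed in a
 * memzone (preferring 1 GB hugepages, as a size hint only), while the SA
 * entries themselves live in the rte_zmalloc()'d area that follows
 * struct sa_ctx (the extra sizeof(struct ipsec_sa) * nb_sa bytes),
 * presumably a trailing sa[] array member declared in ipsec.h.
 */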
857 static struct sa_ctx *
858 sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
859 {
860         char s[PATH_MAX];
861         struct sa_ctx *sa_ctx;
862         uint32_t mz_size;
863         const struct rte_memzone *mz;
864
865         snprintf(s, sizeof(s), "%s_%u", name, socket_id);
866
867         /* Create SA context */
868         printf("Creating SA context with %u maximum entries on socket %d\n",
869                         nb_sa, socket_id);
870
871         mz_size = sizeof(struct ipsec_xf) * nb_sa;
872         mz = rte_memzone_reserve(s, mz_size, socket_id,
873                         RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
874         if (mz == NULL) {
875                 printf("Failed to allocate SA XFORM memory\n");
876                 rte_errno = ENOMEM;
877                 return NULL;
878         }
879
880         sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
881                 sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
882
883         if (sa_ctx == NULL) {
884                 printf("Failed to allocate SA CTX memory\n");
885                 rte_errno = ENOMEM;
886                 rte_memzone_free(mz);
887                 return NULL;
888         }
889
890         sa_ctx->xf = (struct ipsec_xf *)mz->addr;
891         sa_ctx->nb_sa = nb_sa;
892
893         return sa_ctx;
894 }
895
896 static int
897 check_eth_dev_caps(uint16_t portid, uint32_t inbound)
898 {
899         struct rte_eth_dev_info dev_info;
900         int retval;
901
902         retval = rte_eth_dev_info_get(portid, &dev_info);
903         if (retval != 0) {
904                 RTE_LOG(ERR, IPSEC,
905                         "Error during getting device (port %u) info: %s\n",
906                         portid, strerror(-retval));
907
908                 return retval;
909         }
910
911         if (inbound) {
912                 if ((dev_info.rx_offload_capa &
913                                 DEV_RX_OFFLOAD_SECURITY) == 0) {
914                         RTE_LOG(WARNING, PORT,
915                                 "hardware RX IPSec offload is not supported\n");
916                         return -EINVAL;
917                 }
918
919         } else { /* outbound */
920                 if ((dev_info.tx_offload_capa &
921                                 DEV_TX_OFFLOAD_SECURITY) == 0) {
922                         RTE_LOG(WARNING, PORT,
923                                 "hardware TX IPSec offload is not supported\n");
924                         return -EINVAL;
925                 }
926         }
927         return 0;
928 }
929
930 /*
931  * Helper function, tries to determine next_proto for SPI
932  * by searching through the SP rules.
933  */
934 static int
935 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
936                 struct ip_addr ip_addr[2], uint32_t mask[2])
937 {
938         int32_t rc4, rc6;
939
940         rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
941                                 ip_addr, mask);
942         rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
943                                 ip_addr, mask);
944
945         if (rc4 >= 0) {
946                 if (rc6 >= 0) {
947                         RTE_LOG(ERR, IPSEC,
948                                 "%s: SPI %u used simultaneously by "
949                                 "IPv4(%d) and IPv6 (%d) SP rules\n",
950                                 __func__, spi, rc4, rc6);
951                         return -EINVAL;
952                 } else
953                         return IPPROTO_IPIP;
954         } else if (rc6 < 0) {
955                 RTE_LOG(ERR, IPSEC,
956                         "%s: SPI %u is not used by any SP rule\n",
957                         __func__, spi);
958                 return -EINVAL;
959         } else
960                 return IPPROTO_IPV6;
961 }
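/*
 * get_spi_proto() therefore returns IPPROTO_IPIP when the SPI appears only in
 * the IPv4 SP rules, IPPROTO_IPV6 when it appears only in the IPv6 SP rules,
 * and a negative errno otherwise; fill_ipsec_sa_prm() below feeds this value
 * into prm->tun.next_proto / prm->trs.proto.
 */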
962
963 /*
964  * Helper function for getting source and destination IP addresses
965  * from SP. Needed for inline crypto transport mode, as addresses are not
966  * provided in the config file for that mode. It checks whether an SP for the
967  * current SA exists and, based on the protocol type returned, stores the
968  * appropriate addresses from the SP into the SA.
969  */
970 static int
971 sa_add_address_inline_crypto(struct ipsec_sa *sa)
972 {
973         int protocol;
974         struct ip_addr ip_addr[2];
975         uint32_t mask[2];
976
977         protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
978         if (protocol < 0)
979                 return protocol;
980         else if (protocol == IPPROTO_IPIP) {
981                 sa->flags |= IP4_TRANSPORT;
982                 if (mask[0] == IP4_FULL_MASK &&
983                                 mask[1] == IP4_FULL_MASK &&
984                                 ip_addr[0].ip.ip4 != 0 &&
985                                 ip_addr[1].ip.ip4 != 0) {
986
987                         sa->src.ip.ip4 = ip_addr[0].ip.ip4;
988                         sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
989                 } else {
990                         RTE_LOG(ERR, IPSEC,
991                         "%s: No valid address or mask entry in"
992                         " IPv4 SP rule for SPI %u\n",
993                         __func__, sa->spi);
994                         return -EINVAL;
995                 }
996         } else if (protocol == IPPROTO_IPV6) {
997                 sa->flags |= IP6_TRANSPORT;
998                 if (mask[0] == IP6_FULL_MASK &&
999                                 mask[1] == IP6_FULL_MASK &&
1000                                 (ip_addr[0].ip.ip6.ip6[0] != 0 ||
1001                                 ip_addr[0].ip.ip6.ip6[1] != 0) &&
1002                                 (ip_addr[1].ip.ip6.ip6[0] != 0 ||
1003                                 ip_addr[1].ip.ip6.ip6[1] != 0)) {
1004
1005                         sa->src.ip.ip6 = ip_addr[0].ip.ip6;
1006                         sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
1007                 } else {
1008                         RTE_LOG(ERR, IPSEC,
1009                         "%s: No valid address or mask entry in"
1010                         " IPv6 SP rule for SPI %u\n",
1011                         __func__, sa->spi);
1012                         return -EINVAL;
1013                 }
1014         }
1015         return 0;
1016 }
1017
1018 static int
1019 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1020                 uint32_t nb_entries, uint32_t inbound,
1021                 struct socket_ctx *skt_ctx)
1022 {
1023         struct ipsec_sa *sa;
1024         uint32_t i, idx;
1025         uint16_t iv_length, aad_length;
1026         int inline_status;
1027         int32_t rc;
1028         struct rte_ipsec_session *ips;
1029
1030         /* for ESN, the upper 32 bits of the SQN also need to be part of the AAD */
1031         aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
1032
1033         for (i = 0; i < nb_entries; i++) {
1034                 idx = i;
1035                 sa = &sa_ctx->sa[idx];
1036                 if (sa->spi != 0) {
1037                         printf("Index %u already in use by SPI %u\n",
1038                                         idx, sa->spi);
1039                         return -EINVAL;
1040                 }
1041                 *sa = entries[i];
1042
1043                 if (inbound) {
1044                         rc = ipsec_sad_add(&sa_ctx->sad, sa);
1045                         if (rc != 0)
1046                                 return rc;
1047                 }
1048
1049                 sa->seq = 0;
1050                 ips = ipsec_get_primary_session(sa);
1051
1052                 if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1053                         ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1054                         if (check_eth_dev_caps(sa->portid, inbound))
1055                                 return -EINVAL;
1056                 }
1057
1058                 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1059                 case IP4_TUNNEL:
1060                         sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
1061                         sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
1062                         break;
1063                 case TRANSPORT:
1064                         if (ips->type ==
1065                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1066                                 inline_status =
1067                                         sa_add_address_inline_crypto(sa);
1068                                 if (inline_status < 0)
1069                                         return inline_status;
1070                         }
1071                         break;
1072                 }
1073
1074                 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
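                        /*
                         * 12-byte nonce for AES-GCM in ESP: the 4-byte salt
                         * (kept in sa->salt) plus the 8-byte per-packet IV,
                         * as per RFC 4106.
                         */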
1075                         iv_length = 12;
1076
1077                         sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1078                         sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
1079                         sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
1080                         sa_ctx->xf[idx].a.aead.key.length =
1081                                 sa->cipher_key_len;
1082                         sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
1083                                 RTE_CRYPTO_AEAD_OP_DECRYPT :
1084                                 RTE_CRYPTO_AEAD_OP_ENCRYPT;
1085                         sa_ctx->xf[idx].a.next = NULL;
1086                         sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
1087                         sa_ctx->xf[idx].a.aead.iv.length = iv_length;
1088                         sa_ctx->xf[idx].a.aead.aad_length =
1089                                 sa->aad_len + aad_length;
1090                         sa_ctx->xf[idx].a.aead.digest_length =
1091                                 sa->digest_len;
1092
1093                         sa->xforms = &sa_ctx->xf[idx].a;
1094                 } else {
1095                         switch (sa->cipher_algo) {
1096                         case RTE_CRYPTO_CIPHER_NULL:
1097                         case RTE_CRYPTO_CIPHER_3DES_CBC:
1098                         case RTE_CRYPTO_CIPHER_AES_CBC:
1099                                 iv_length = sa->iv_len;
1100                                 break;
1101                         case RTE_CRYPTO_CIPHER_AES_CTR:
1102                                 iv_length = 16;
1103                                 break;
1104                         default:
1105                                 RTE_LOG(ERR, IPSEC_ESP,
1106                                                 "unsupported cipher algorithm %u\n",
1107                                                 sa->cipher_algo);
1108                                 return -EINVAL;
1109                         }
1110
1111                         if (inbound) {
1112                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1113                                 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
1114                                 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
1115                                 sa_ctx->xf[idx].b.cipher.key.length =
1116                                         sa->cipher_key_len;
1117                                 sa_ctx->xf[idx].b.cipher.op =
1118                                         RTE_CRYPTO_CIPHER_OP_DECRYPT;
1119                                 sa_ctx->xf[idx].b.next = NULL;
1120                                 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
1121                                 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
1122
1123                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1124                                 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
1125                                 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
1126                                 sa_ctx->xf[idx].a.auth.key.length =
1127                                         sa->auth_key_len;
1128                                 sa_ctx->xf[idx].a.auth.digest_length =
1129                                         sa->digest_len;
1130                                 sa_ctx->xf[idx].a.auth.op =
1131                                         RTE_CRYPTO_AUTH_OP_VERIFY;
1132                         } else { /* outbound */
1133                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1134                                 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
1135                                 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
1136                                 sa_ctx->xf[idx].a.cipher.key.length =
1137                                         sa->cipher_key_len;
1138                                 sa_ctx->xf[idx].a.cipher.op =
1139                                         RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1140                                 sa_ctx->xf[idx].a.next = NULL;
1141                                 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
1142                                 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
1143
1144                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1145                                 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
1146                                 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
1147                                 sa_ctx->xf[idx].b.auth.key.length =
1148                                         sa->auth_key_len;
1149                                 sa_ctx->xf[idx].b.auth.digest_length =
1150                                         sa->digest_len;
1151                                 sa_ctx->xf[idx].b.auth.op =
1152                                         RTE_CRYPTO_AUTH_OP_GENERATE;
1153                         }
1154
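                        /*
                         * Chain the two transforms: inbound runs
                         * auth (verify) -> cipher (decrypt), outbound runs
                         * cipher (encrypt) -> auth (generate), per the
                         * assignments above.
                         */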
1155                         sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1156                         sa_ctx->xf[idx].b.next = NULL;
1157                         sa->xforms = &sa_ctx->xf[idx].a;
1158                 }
1159
1160                 if (ips->type ==
1161                         RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1162                         ips->type ==
1163                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1164                         rc = create_inline_session(skt_ctx, sa, ips);
1165                         if (rc != 0) {
1166                                 RTE_LOG(ERR, IPSEC_ESP,
1167                                         "create_inline_session() failed\n");
1168                                 return -EINVAL;
1169                         }
1170                 }
1171
1172                 print_one_sa_rule(sa, inbound);
1173         }
1174
1175         return 0;
1176 }
1177
1178 static inline int
1179 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1180                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1181 {
1182         return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
1183 }
1184
1185 static inline int
1186 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1187                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1188 {
1189         return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
1190 }
1191
1192 /*
1193  * helper function, fills parameters that are identical for all SAs
1194  */
1195 static void
1196 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1197         const struct app_sa_prm *app_prm)
1198 {
1199         memset(prm, 0, sizeof(*prm));
1200
1201         prm->flags = app_prm->flags;
1202         prm->ipsec_xform.options.esn = app_prm->enable_esn;
1203         prm->ipsec_xform.replay_win_sz = app_prm->window_size;
1204 }
1205
1206 static int
1207 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1208         const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1209 {
1210         int32_t rc;
1211
1212         /*
1213          * Try to get the next proto for this SPI by searching for it in the SPD.
1214          * Probably not the optimal way, but there seems to be nothing
1215          * better right now.
1216          */
1217         rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1218         if (rc < 0)
1219                 return rc;
1220
1221         fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1222         prm->userdata = (uintptr_t)ss;
1223
1224         /* setup ipsec xform */
1225         prm->ipsec_xform.spi = ss->spi;
1226         prm->ipsec_xform.salt = ss->salt;
1227         prm->ipsec_xform.direction = ss->direction;
1228         prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1229         prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1230                 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1231                 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
1232         prm->ipsec_xform.options.ecn = 1;
1233         prm->ipsec_xform.options.copy_dscp = 1;
1234
1235         if (IS_IP4_TUNNEL(ss->flags)) {
1236                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1237                 prm->tun.hdr_len = sizeof(*v4);
1238                 prm->tun.next_proto = rc;
1239                 prm->tun.hdr = v4;
1240         } else if (IS_IP6_TUNNEL(ss->flags)) {
1241                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1242                 prm->tun.hdr_len = sizeof(*v6);
1243                 prm->tun.next_proto = rc;
1244                 prm->tun.hdr = v6;
1245         } else {
1246                 /* transport mode */
1247                 prm->trs.proto = rc;
1248         }
1249
1250         /* setup crypto section */
1251         prm->crypto_xform = ss->xforms;
1252         return 0;
1253 }
1254
1255 static int
1256 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
1257 {
1258         int32_t rc = 0;
1259
1260         ss->sa = sa;
1261
1262         if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1263                 ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
1264                 if (ss->security.ses != NULL) {
1265                         rc = rte_ipsec_session_prepare(ss);
1266                         if (rc != 0)
1267                                 memset(ss, 0, sizeof(*ss));
1268                 }
1269         }
1270
1271         return rc;
1272 }
1273
1274 /*
1275  * Initialise related rte_ipsec_sa object.
1276  */
1277 static int
1278 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
1279 {
1280         int rc;
1281         struct rte_ipsec_sa_prm prm;
1282         struct rte_ipsec_session *ips;
1283         struct rte_ipv4_hdr v4  = {
1284                 .version_ihl = IPVERSION << 4 |
1285                         sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1286                 .time_to_live = IPDEFTTL,
1287                 .next_proto_id = IPPROTO_ESP,
1288                 .src_addr = lsa->src.ip.ip4,
1289                 .dst_addr = lsa->dst.ip.ip4,
1290         };
1291         struct rte_ipv6_hdr v6 = {
1292                 .vtc_flow = htonl(IP6_VERSION << 28),
1293                 .proto = IPPROTO_ESP,
1294         };
1295
1296         if (IS_IP6_TUNNEL(lsa->flags)) {
1297                 memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1298                 memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1299         }
1300
1301         rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1302         if (rc == 0)
1303                 rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1304         if (rc < 0)
1305                 return rc;
1306
1307         /* init primary processing session */
1308         ips = ipsec_get_primary_session(lsa);
1309         rc = fill_ipsec_session(ips, sa);
1310         if (rc != 0)
1311                 return rc;
1312
1313         /* init inline fallback processing session */
1314         if (lsa->fallback_sessions == 1)
1315                 rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
1316
1317         return rc;
1318 }
1319
1320 /*
1321  * Allocate space and init rte_ipsec_sa structures,
1322  * one per session.
1323  */
1324 static int
1325 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
1326 {
1327         int32_t rc, sz;
1328         uint32_t i, idx;
1329         size_t tsz;
1330         struct rte_ipsec_sa *sa;
1331         struct ipsec_sa *lsa;
1332         struct rte_ipsec_sa_prm prm;
1333
1334         /* determine SA size */
1335         idx = 0;
1336         fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1337         sz = rte_ipsec_sa_size(&prm);
1338         if (sz < 0) {
1339                 RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1340                         "failed to determine SA size, error code: %d\n",
1341                         __func__, ctx, nb_ent, socket, sz);
1342                 return sz;
1343         }
1344
1345         tsz = sz * nb_ent;
1346
1347         ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1348         if (ctx->satbl == NULL) {
1349                 RTE_LOG(ERR, IPSEC,
1350                         "%s(%p, %u, %d): failed to allocate %zu bytes\n",
1351                         __func__,  ctx, nb_ent, socket, tsz);
1352                 return -ENOMEM;
1353         }
1354
1355         rc = 0;
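        /*
         * ctx->satbl holds nb_ent rte_ipsec_sa objects of sz bytes each,
         * packed back to back; entry i is located at satbl + i * sz.
         */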
1356         for (i = 0; i != nb_ent && rc == 0; i++) {
1357
1358                 idx = i;
1359
1360                 sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1361                 lsa = ctx->sa + idx;
1362
1363                 rc = ipsec_sa_init(lsa, sa, sz);
1364         }
1365
1366         return rc;
1367 }
1368
1369 static int
1370 sa_cmp(const void *p, const void *q)
1371 {
1372         uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
1373         uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
1374
1375         return (spi1 > spi2) - (spi1 < spi2);
1376 }
1377
1378 /*
1379  * Search the SA rules (sorted by SPI) for an SA with the given SPI
1380  */
1381 int
1382 sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
1383 {
1384         uint32_t num;
1385         struct ipsec_sa *sa;
1386         struct ipsec_sa tmpl;
1387         const struct ipsec_sa *sar;
1388
1389         sar = sa_ctx->sa;
1390         if (inbound != 0)
1391                 num = nb_sa_in;
1392         else
1393                 num = nb_sa_out;
1394
1395         tmpl.spi = spi;
1396
1397         sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
1398         if (sa != NULL)
1399                 return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
1400
1401         return -ENOENT;
1402 }
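
/*
 * Illustrative usage sketch (assumption: sa_sort_arr() was called after the
 * rules were parsed, so the rule arrays feeding the SA DB are sorted by SPI
 * and the bsearch() above sees sorted input). sa_spi_present() returns the
 * rule index on success or -ENOENT when no SA uses the given SPI:
 */
static inline int
spi_to_inbound_idx_sketch(struct sa_ctx *in_ctx, uint32_t spi)
{
        /* 1 selects the inbound rule set (nb_sa_in entries) */
        return sa_spi_present(in_ctx, spi, 1);
}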
1403
1404 void
1405 sa_init(struct socket_ctx *ctx, int32_t socket_id)
1406 {
1407         int32_t rc;
1408         const char *name;
1409
1410         if (ctx == NULL)
1411                 rte_exit(EXIT_FAILURE, "NULL context.\n");
1412
1413         if (ctx->sa_in != NULL)
1414                 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %d already "
1415                                 "initialized\n", socket_id);
1416
1417         if (ctx->sa_out != NULL)
1418                 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %d already "
1419                                 "initialized\n", socket_id);
1420
1421         if (nb_sa_in > 0) {
1422                 name = "sa_in";
1423                 ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
1424                 if (ctx->sa_in == NULL)
1425                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1426                                 "context %s in socket %d\n", rte_errno,
1427                                 name, socket_id);
1428
1429                 rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
1430                                 &sa_in_cnt);
1431                 if (rc != 0)
1432                         rte_exit(EXIT_FAILURE, "failed to init SAD\n");
1433
1434                 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
1435
1436                 if (app_sa_prm.enable != 0) {
1437                         rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
1438                                 socket_id);
1439                         if (rc != 0)
1440                                 rte_exit(EXIT_FAILURE,
1441                                         "failed to init inbound SAs\n");
1442                 }
1443         } else
1444                 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1445
1446         if (nb_sa_out > 0) {
1447                 name = "sa_out";
1448                 ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
1449                 if (ctx->sa_out == NULL)
1450                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1451                                 "context %s in socket %d\n", rte_errno,
1452                                 name, socket_id);
1453
1454                 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
1455
1456                 if (app_sa_prm.enable != 0) {
1457                         rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
1458                                 socket_id);
1459                         if (rc != 0)
1460                                 rte_exit(EXIT_FAILURE,
1461                                         "failed to init outbound SAs\n");
1462                 }
1463         } else
1464                 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
1465                         "specified\n");
1466 }
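
/*
 * Illustrative sketch of a hypothetical caller (not the application's actual
 * start-up code): sa_init() terminates the process on any failure, so there
 * is no error code to handle; it only expects that ctx->sa_in/sa_out are
 * still NULL and that the global rule arrays were populated by the parser.
 */
static void
setup_sa_db_sketch(struct socket_ctx *sctx, int32_t socket_id)
{
        /* sort the rule arrays first so SPI lookups via bsearch() work */
        sa_sort_arr();
        sa_init(sctx, socket_id);
}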
1467
1468 int
1469 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1470 {
1471         struct ipsec_mbuf_metadata *priv;
1472         struct ipsec_sa *sa;
1473
1474         priv = get_priv(m);
1475         sa = priv->sa;
1476         if (sa != NULL)
1477                 return (sa_ctx->sa[sa_idx].spi == sa->spi);
1478
1479         RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1480         return 0;
1481 }
1482
1483 void
1484 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1485                 void *sa_arr[], uint16_t nb_pkts)
1486 {
1487         uint32_t i;
1488         void *result_sa;
1489         struct ipsec_sa *sa;
1490
1491         sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
1492
1493         /*
1494          * Mark the need for inline offload fallback on the LSB of the SA
1495          * pointer. Thanks to the packet grouping mechanism used by
1496          * ipsec_process, packets marked for fallback form a separate group.
1497          *
1498          * Because the tagged pointer is no longer safe to dereference, it is
1499          * cast to a generic pointer to prevent unintentional use. Use
1500          * ipsec_mask_saptr to recover a valid struct pointer.
1501          */
1502         for (i = 0; i < nb_pkts; i++) {
1503                 if (sa_arr[i] == NULL)
1504                         continue;
1505
1506                 result_sa = sa = sa_arr[i];
1507                 if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
1508                         sa->fallback_sessions > 0) {
1509                         uintptr_t intsa = (uintptr_t)sa;
1510                         intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
1511                         result_sa = (void *)intsa;
1512                 }
1513                 sa_arr[i] = result_sa;
1514         }
1515 }
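
/*
 * Illustrative sketch (assumption about behaviour, see ipsec_mask_saptr() in
 * ipsec.h for the real helper): a consumer that received a pointer tagged
 * above must clear the marker bit before dereferencing it.
 */
static inline struct ipsec_sa *
mask_saptr_sketch(void *sa_ptr)
{
        uintptr_t intsa = (uintptr_t)sa_ptr;

        /* drop the fallback marker stored in the least significant bit */
        intsa &= ~(uintptr_t)IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
        return (struct ipsec_sa *)intsa;
}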
1516
1517 void
1518 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1519                 void *sa[], uint16_t nb_pkts)
1520 {
1521         uint32_t i;
1522
1523         for (i = 0; i < nb_pkts; i++)
1524                 sa[i] = &sa_ctx->sa[sa_idx[i]];
1525 }
1526
1527 /*
1528  * Select HW offloads to be used.
1529  */
1530 int
1531 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1532                 uint64_t *tx_offloads)
1533 {
1534         struct ipsec_sa *rule;
1535         uint32_t idx_sa;
1536         enum rte_security_session_action_type rule_type;
1537
1538         *rx_offloads = 0;
1539         *tx_offloads = 0;
1540
1541         /* Check for inbound rules that use offloads and use this port */
1542         for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1543                 rule = &sa_in[idx_sa];
1544                 rule_type = ipsec_get_action_type(rule);
1545                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1546                                 rule_type ==
1547                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1548                                 && rule->portid == port_id)
1549                         *rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
1550         }
1551
1552         /* Check for outbound rules that use offloads and use this port */
1553         for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1554                 rule = &sa_out[idx_sa];
1555                 rule_type = ipsec_get_action_type(rule);
1556                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1557                                 rule_type ==
1558                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1559                                 && rule->portid == port_id)
1560                         *tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
1561         }
1562         return 0;
1563 }
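
/*
 * Illustrative usage sketch (hypothetical caller): the flags reported above
 * are meant to be OR-ed into the port configuration before the port is
 * configured and started.
 */
static void
apply_sec_offloads_sketch(uint16_t port_id, struct rte_eth_conf *conf)
{
        uint64_t rx_offloads, tx_offloads;

        sa_check_offloads(port_id, &rx_offloads, &tx_offloads);
        conf->rxmode.offloads |= rx_offloads;
        conf->txmode.offloads |= tx_offloads;
}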
1564
1565 void
1566 sa_sort_arr(void)
1567 {
1568         qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
1569         qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
1570 }