examples/ipsec-secgw: support header reconstruction
dpdk.git: examples/ipsec-secgw/sa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 /*
6  * Security Associations
7  */
8 #include <sys/types.h>
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
12
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
19 #include <rte_ip.h>
20 #include <rte_random.h>
21 #include <rte_ethdev.h>
22 #include <rte_malloc.h>
23
24 #include "ipsec.h"
25 #include "esp.h"
26 #include "parser.h"
27
28 #define IPDEFTTL 64
29
30 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
31
32 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
33
34 struct supported_cipher_algo {
35         const char *keyword;
36         enum rte_crypto_cipher_algorithm algo;
37         uint16_t iv_len;
38         uint16_t block_size;
39         uint16_t key_len;
40 };
41
42 struct supported_auth_algo {
43         const char *keyword;
44         enum rte_crypto_auth_algorithm algo;
45         uint16_t digest_len;
46         uint16_t key_len;
47         uint8_t key_not_req;
48 };
49
50 struct supported_aead_algo {
51         const char *keyword;
52         enum rte_crypto_aead_algorithm algo;
53         uint16_t iv_len;
54         uint16_t block_size;
55         uint16_t digest_len;
56         uint16_t key_len;
57         uint8_t aad_len;
58 };
59
60
61 const struct supported_cipher_algo cipher_algos[] = {
62         {
63                 .keyword = "null",
64                 .algo = RTE_CRYPTO_CIPHER_NULL,
65                 .iv_len = 0,
66                 .block_size = 4,
67                 .key_len = 0
68         },
69         {
70                 .keyword = "aes-128-cbc",
71                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
72                 .iv_len = 16,
73                 .block_size = 16,
74                 .key_len = 16
75         },
76         {
77                 .keyword = "aes-256-cbc",
78                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
79                 .iv_len = 16,
80                 .block_size = 16,
81                 .key_len = 32
82         },
83         {
84                 .keyword = "aes-128-ctr",
85                 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
86                 .iv_len = 8,
87                 .block_size = 4,
88                 .key_len = 20
89         },
90         {
91                 .keyword = "3des-cbc",
92                 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
93                 .iv_len = 8,
94                 .block_size = 8,
95                 .key_len = 24
96         }
97 };
98
99 const struct supported_auth_algo auth_algos[] = {
100         {
101                 .keyword = "null",
102                 .algo = RTE_CRYPTO_AUTH_NULL,
103                 .digest_len = 0,
104                 .key_len = 0,
105                 .key_not_req = 1
106         },
107         {
108                 .keyword = "sha1-hmac",
109                 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
110                 .digest_len = 12,
111                 .key_len = 20
112         },
113         {
114                 .keyword = "sha256-hmac",
115                 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
116                 .digest_len = 12,
117                 .key_len = 32
118         }
119 };
120
121 const struct supported_aead_algo aead_algos[] = {
122         {
123                 .keyword = "aes-128-gcm",
124                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
125                 .iv_len = 8,
126                 .block_size = 4,
127                 .key_len = 20,
128                 .digest_len = 16,
129                 .aad_len = 8,
130         }
131 };
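/*
 * Note: for aes-128-ctr and aes-128-gcm the key_len above (20 bytes) covers
 * a 16-byte key plus a trailing 4-byte nonce/salt; parse_sa_tokens() below
 * splits the last 4 bytes off into rule->salt and shrinks cipher_key_len
 * accordingly.
 */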
132
133 static struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
134 static uint32_t nb_sa_out;
135
136 static struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
137 static uint32_t nb_sa_in;
138
139 static const struct supported_cipher_algo *
140 find_match_cipher_algo(const char *cipher_keyword)
141 {
142         size_t i;
143
144         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
145                 const struct supported_cipher_algo *algo =
146                         &cipher_algos[i];
147
148                 if (strcmp(cipher_keyword, algo->keyword) == 0)
149                         return algo;
150         }
151
152         return NULL;
153 }
154
155 static const struct supported_auth_algo *
156 find_match_auth_algo(const char *auth_keyword)
157 {
158         size_t i;
159
160         for (i = 0; i < RTE_DIM(auth_algos); i++) {
161                 const struct supported_auth_algo *algo =
162                         &auth_algos[i];
163
164                 if (strcmp(auth_keyword, algo->keyword) == 0)
165                         return algo;
166         }
167
168         return NULL;
169 }
170
171 static const struct supported_aead_algo *
172 find_match_aead_algo(const char *aead_keyword)
173 {
174         size_t i;
175
176         for (i = 0; i < RTE_DIM(aead_algos); i++) {
177                 const struct supported_aead_algo *algo =
178                         &aead_algos[i];
179
180                 if (strcmp(aead_keyword, algo->keyword) == 0)
181                         return algo;
182         }
183
184         return NULL;
185 }
186
187 /** parse_key_string
188  *  parse an x:x:x:x... colon-separated hex-byte key string into uint8_t *key
189  *  return:
190  *  > 0: number of bytes parsed
191  *  0:   failed
192  */
193 static uint32_t
194 parse_key_string(const char *key_str, uint8_t *key)
195 {
196         const char *pt_start = key_str, *pt_end = key_str;
197         uint32_t nb_bytes = 0;
198
199         while (pt_end != NULL) {
200                 char sub_str[3] = {0};
201
202                 pt_end = strchr(pt_start, ':');
203
204                 if (pt_end == NULL) {
205                         if (strlen(pt_start) > 2)
206                                 return 0;
207                         strncpy(sub_str, pt_start, 2);
208                 } else {
209                         if (pt_end - pt_start > 2)
210                                 return 0;
211
212                         strncpy(sub_str, pt_start, pt_end - pt_start);
213                         pt_start = pt_end + 1;
214                 }
215
216                 key[nb_bytes++] = strtol(sub_str, NULL, 16);
217         }
218
219         return nb_bytes;
220 }
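/*
 * Illustrative example: parsing the string "de:ad:be:ef" fills key[0..3]
 * with {0xde, 0xad, 0xbe, 0xef} and returns 4; any byte group longer than
 * two hex digits makes the function return 0 (failure).
 */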
221
222 void
223 parse_sa_tokens(char **tokens, uint32_t n_tokens,
224         struct parse_status *status)
225 {
226         struct ipsec_sa *rule = NULL;
227         uint32_t ti; /* token index */
228         uint32_t *ri; /* rule index */
229         uint32_t cipher_algo_p = 0;
230         uint32_t auth_algo_p = 0;
231         uint32_t aead_algo_p = 0;
232         uint32_t src_p = 0;
233         uint32_t dst_p = 0;
234         uint32_t mode_p = 0;
235         uint32_t type_p = 0;
236         uint32_t portid_p = 0;
237
238         if (strcmp(tokens[0], "in") == 0) {
239                 ri = &nb_sa_in;
240
241                 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
242                         "too many sa rules, abort insertion\n");
243                 if (status->status < 0)
244                         return;
245
246                 rule = &sa_in[*ri];
247         } else {
248                 ri = &nb_sa_out;
249
250                 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
251                         "too many sa rules, abort insertion\n");
252                 if (status->status < 0)
253                         return;
254
255                 rule = &sa_out[*ri];
256         }
257
258         /* spi number */
259         APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
260         if (status->status < 0)
261                 return;
262         if (atoi(tokens[1]) == INVALID_SPI)
263                 return;
264         rule->spi = atoi(tokens[1]);
265
266         for (ti = 2; ti < n_tokens; ti++) {
267                 if (strcmp(tokens[ti], "mode") == 0) {
268                         APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
269                         if (status->status < 0)
270                                 return;
271
272                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
273                         if (status->status < 0)
274                                 return;
275
276                         if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
277                                 rule->flags = IP4_TUNNEL;
278                         else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
279                                 rule->flags = IP6_TUNNEL;
280                         else if (strcmp(tokens[ti], "transport") == 0)
281                                 rule->flags = TRANSPORT;
282                         else {
283                                 APP_CHECK(0, status, "unrecognized "
284                                         "input \"%s\"", tokens[ti]);
285                                 return;
286                         }
287
288                         mode_p = 1;
289                         continue;
290                 }
291
292                 if (strcmp(tokens[ti], "cipher_algo") == 0) {
293                         const struct supported_cipher_algo *algo;
294                         uint32_t key_len;
295
296                         APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
297                                 status);
298                         if (status->status < 0)
299                                 return;
300
301                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
302                         if (status->status < 0)
303                                 return;
304
305                         algo = find_match_cipher_algo(tokens[ti]);
306
307                         APP_CHECK(algo != NULL, status, "unrecognized "
308                                 "input \"%s\"", tokens[ti]);
309
310                         rule->cipher_algo = algo->algo;
311                         rule->block_size = algo->block_size;
312                         rule->iv_len = algo->iv_len;
313                         rule->cipher_key_len = algo->key_len;
314
315                         /* for NULL algorithm, no cipher key required */
316                         if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
317                                 cipher_algo_p = 1;
318                                 continue;
319                         }
320
321                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
322                         if (status->status < 0)
323                                 return;
324
325                         APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
326                                 status, "unrecognized input \"%s\", "
327                                 "expect \"cipher_key\"", tokens[ti]);
328                         if (status->status < 0)
329                                 return;
330
331                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
332                         if (status->status < 0)
333                                 return;
334
335                         key_len = parse_key_string(tokens[ti],
336                                 rule->cipher_key);
337                         APP_CHECK(key_len == rule->cipher_key_len, status,
338                                 "unrecognized input \"%s\"", tokens[ti]);
339                         if (status->status < 0)
340                                 return;
341
342                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
343                                 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
344                                 rule->salt = (uint32_t)rte_rand();
345
346                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
347                                 key_len -= 4;
348                                 rule->cipher_key_len = key_len;
349                                 memcpy(&rule->salt,
350                                         &rule->cipher_key[key_len], 4);
351                         }
352
353                         cipher_algo_p = 1;
354                         continue;
355                 }
356
357                 if (strcmp(tokens[ti], "auth_algo") == 0) {
358                         const struct supported_auth_algo *algo;
359                         uint32_t key_len;
360
361                         APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
362                                 status);
363                         if (status->status < 0)
364                                 return;
365
366                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
367                         if (status->status < 0)
368                                 return;
369
370                         algo = find_match_auth_algo(tokens[ti]);
371                         APP_CHECK(algo != NULL, status, "unrecognized "
372                                 "input \"%s\"", tokens[ti]);
373
374                         rule->auth_algo = algo->algo;
375                         rule->auth_key_len = algo->key_len;
376                         rule->digest_len = algo->digest_len;
377
378                         /* NULL algorithm and combined algos do not
379                          * require auth key
380                          */
381                         if (algo->key_not_req) {
382                                 auth_algo_p = 1;
383                                 continue;
384                         }
385
386                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
387                         if (status->status < 0)
388                                 return;
389
390                         APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
391                                 status, "unrecognized input \"%s\", "
392                                 "expect \"auth_key\"", tokens[ti]);
393                         if (status->status < 0)
394                                 return;
395
396                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
397                         if (status->status < 0)
398                                 return;
399
400                         key_len = parse_key_string(tokens[ti],
401                                 rule->auth_key);
402                         APP_CHECK(key_len == rule->auth_key_len, status,
403                                 "unrecognized input \"%s\"", tokens[ti]);
404                         if (status->status < 0)
405                                 return;
406
407                         auth_algo_p = 1;
408                         continue;
409                 }
410
411                 if (strcmp(tokens[ti], "aead_algo") == 0) {
412                         const struct supported_aead_algo *algo;
413                         uint32_t key_len;
414
415                         APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
416                                 status);
417                         if (status->status < 0)
418                                 return;
419
420                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
421                         if (status->status < 0)
422                                 return;
423
424                         algo = find_match_aead_algo(tokens[ti]);
425
426                         APP_CHECK(algo != NULL, status, "unrecognized "
427                                 "input \"%s\"", tokens[ti]);
428
429                         rule->aead_algo = algo->algo;
430                         rule->cipher_key_len = algo->key_len;
431                         rule->digest_len = algo->digest_len;
432                         rule->aad_len = algo->aad_len;
433                         rule->block_size = algo->block_size;
434                         rule->iv_len = algo->iv_len;
435
436                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
437                         if (status->status < 0)
438                                 return;
439
440                         APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
441                                 status, "unrecognized input \"%s\", "
442                                 "expect \"aead_key\"", tokens[ti]);
443                         if (status->status < 0)
444                                 return;
445
446                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
447                         if (status->status < 0)
448                                 return;
449
450                         key_len = parse_key_string(tokens[ti],
451                                 rule->cipher_key);
452                         APP_CHECK(key_len == rule->cipher_key_len, status,
453                                 "unrecognized input \"%s\"", tokens[ti]);
454                         if (status->status < 0)
455                                 return;
456
457                         key_len -= 4;
458                         rule->cipher_key_len = key_len;
459                         memcpy(&rule->salt,
460                                 &rule->cipher_key[key_len], 4);
461
462                         aead_algo_p = 1;
463                         continue;
464                 }
465
466                 if (strcmp(tokens[ti], "src") == 0) {
467                         APP_CHECK_PRESENCE(src_p, tokens[ti], status);
468                         if (status->status < 0)
469                                 return;
470
471                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
472                         if (status->status < 0)
473                                 return;
474
475                         if (IS_IP4_TUNNEL(rule->flags)) {
476                                 struct in_addr ip;
477
478                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
479                                         &ip, NULL) == 0, status,
480                                         "unrecognized input \"%s\", "
481                                         "expect valid ipv4 addr",
482                                         tokens[ti]);
483                                 if (status->status < 0)
484                                         return;
485                                 rule->src.ip.ip4 = rte_bswap32(
486                                         (uint32_t)ip.s_addr);
487                         } else if (IS_IP6_TUNNEL(rule->flags)) {
488                                 struct in6_addr ip;
489
490                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
491                                         NULL) == 0, status,
492                                         "unrecognized input \"%s\", "
493                                         "expect valid ipv6 addr",
494                                         tokens[ti]);
495                                 if (status->status < 0)
496                                         return;
497                                 memcpy(rule->src.ip.ip6.ip6_b,
498                                         ip.s6_addr, 16);
499                         } else if (IS_TRANSPORT(rule->flags)) {
500                                 APP_CHECK(0, status, "unrecognized input "
501                                         "\"%s\"", tokens[ti]);
502                                 return;
503                         }
504
505                         src_p = 1;
506                         continue;
507                 }
508
509                 if (strcmp(tokens[ti], "dst") == 0) {
510                         APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
511                         if (status->status < 0)
512                                 return;
513
514                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
515                         if (status->status < 0)
516                                 return;
517
518                         if (IS_IP4_TUNNEL(rule->flags)) {
519                                 struct in_addr ip;
520
521                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
522                                         &ip, NULL) == 0, status,
523                                         "unrecognized input \"%s\", "
524                                         "expect valid ipv4 addr",
525                                         tokens[ti]);
526                                 if (status->status < 0)
527                                         return;
528                                 rule->dst.ip.ip4 = rte_bswap32(
529                                         (uint32_t)ip.s_addr);
530                         } else if (IS_IP6_TUNNEL(rule->flags)) {
531                                 struct in6_addr ip;
532
533                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
534                                         NULL) == 0, status,
535                                         "unrecognized input \"%s\", "
536                                         "expect valid ipv6 addr",
537                                         tokens[ti]);
538                                 if (status->status < 0)
539                                         return;
540                                 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
541                         } else if (IS_TRANSPORT(rule->flags)) {
542                                 APP_CHECK(0, status, "unrecognized "
543                                         "input \"%s\"", tokens[ti]);
544                                 return;
545                         }
546
547                         dst_p = 1;
548                         continue;
549                 }
550
551                 if (strcmp(tokens[ti], "type") == 0) {
552                         APP_CHECK_PRESENCE(type_p, tokens[ti], status);
553                         if (status->status < 0)
554                                 return;
555
556                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
557                         if (status->status < 0)
558                                 return;
559
560                         if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
561                                 rule->type =
562                                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
563                         else if (strcmp(tokens[ti],
564                                         "inline-protocol-offload") == 0)
565                                 rule->type =
566                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
567                         else if (strcmp(tokens[ti],
568                                         "lookaside-protocol-offload") == 0)
569                                 rule->type =
570                                 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
571                         else if (strcmp(tokens[ti], "no-offload") == 0)
572                                 rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
573                         else {
574                                 APP_CHECK(0, status, "Invalid input \"%s\"",
575                                                 tokens[ti]);
576                                 return;
577                         }
578
579                         type_p = 1;
580                         continue;
581                 }
582
583                 if (strcmp(tokens[ti], "port_id") == 0) {
584                         APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
585                         if (status->status < 0)
586                                 return;
587                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
588                         if (status->status < 0)
589                                 return;
590                         APP_CHECK_TOKEN_IS_NUM(tokens, ti, status);
591                         if (status->status < 0)
592                                 return;
                            rule->portid = atoi(tokens[ti]);
593                         portid_p = 1;
594                         continue;
595                 }
596
597                 /* unrecognizable input */
598                 APP_CHECK(0, status, "unrecognized input \"%s\"",
599                         tokens[ti]);
600                 return;
601         }
602
603         if (aead_algo_p) {
604                 APP_CHECK(cipher_algo_p == 0, status,
605                                 "AEAD used, no need for cipher options");
606                 if (status->status < 0)
607                         return;
608
609                 APP_CHECK(auth_algo_p == 0, status,
610                                 "AEAD used, no need for auth options");
611                 if (status->status < 0)
612                         return;
613         } else {
614                 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
615                 if (status->status < 0)
616                         return;
617
618                 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
619                 if (status->status < 0)
620                         return;
621         }
622
623         APP_CHECK(mode_p == 1, status, "missing mode option");
624         if (status->status < 0)
625                 return;
626
627         if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
628                 printf("Missing portid option, falling back to non-offload\n");
629
630         if (!type_p || !portid_p) {
631                 rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
632                 rule->portid = -1;
633         }
634
635         *ri = *ri + 1;
636 }
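/*
 * Illustrative SA rule as it would appear in the configuration file (keys
 * and addresses are made up, and the caller is assumed to strip the leading
 * "sa" keyword before handing the remaining tokens to parse_sa_tokens());
 * shown wrapped here for readability:
 *
 * sa out 5 cipher_algo aes-128-cbc
 *     cipher_key a0:a1:a2:a3:a4:a5:a6:a7:a8:a9:aa:ab:ac:ad:ae:af
 *     auth_algo sha1-hmac
 *     auth_key a0:a1:a2:a3:a4:a5:a6:a7:a8:a9:aa:ab:ac:ad:ae:af:b0:b1:b2:b3
 *     mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 */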
637
638 static void
639 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
640 {
641         uint32_t i;
642         uint8_t a, b, c, d;
643
644         printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
645
646         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
647                 if (cipher_algos[i].algo == sa->cipher_algo &&
648                                 cipher_algos[i].key_len == sa->cipher_key_len) {
649                         printf("%s ", cipher_algos[i].keyword);
650                         break;
651                 }
652         }
653
654         for (i = 0; i < RTE_DIM(auth_algos); i++) {
655                 if (auth_algos[i].algo == sa->auth_algo) {
656                         printf("%s ", auth_algos[i].keyword);
657                         break;
658                 }
659         }
660
661         for (i = 0; i < RTE_DIM(aead_algos); i++) {
662                 if (aead_algos[i].algo == sa->aead_algo) {
663                         printf("%s ", aead_algos[i].keyword);
664                         break;
665                 }
666         }
667
668         printf("mode:");
669
670         switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
671         case IP4_TUNNEL:
672                 printf("IP4Tunnel ");
673                 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
674                 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
675                 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
676                 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
677                 break;
678         case IP6_TUNNEL:
679                 printf("IP6Tunnel ");
680                 for (i = 0; i < 16; i++) {
681                         if (i % 2 && i != 15)
682                                 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
683                         else
684                                 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
685                 }
686                 printf(" ");
687                 for (i = 0; i < 16; i++) {
688                         if (i % 2 && i != 15)
689                                 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
690                         else
691                                 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
692                 }
693                 break;
694         case TRANSPORT:
695                 printf("Transport ");
696                 break;
697         }
698         printf(" type:");
699         switch (sa->type) {
700         case RTE_SECURITY_ACTION_TYPE_NONE:
701                 printf("no-offload ");
702                 break;
703         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
704                 printf("inline-crypto-offload ");
705                 break;
706         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
707                 printf("inline-protocol-offload ");
708                 break;
709         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
710                 printf("lookaside-protocol-offload ");
711                 break;
712         }
713         printf("\n");
714 }
715
716 struct sa_ctx {
717         void *satbl; /* pointer to array of rte_ipsec_sa objects */
718         struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
719         union {
720                 struct {
721                         struct rte_crypto_sym_xform a;
722                         struct rte_crypto_sym_xform b;
723                 };
724         } xf[IPSEC_SA_MAX_ENTRIES];
725 };
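/*
 * The xf[] pair holds the symmetric crypto transform chain of each SA:
 * sa_add_rules() below uses only 'a' for AEAD, while for cipher+auth it
 * chains auth(verify) -> cipher(decrypt) for inbound and
 * cipher(encrypt) -> auth(generate) for outbound, with 'a' always being
 * the head of the chain exposed through sa->xforms.
 */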
726
727 static struct sa_ctx *
728 sa_create(const char *name, int32_t socket_id)
729 {
730         char s[PATH_MAX];
731         struct sa_ctx *sa_ctx;
732         uint32_t mz_size;
733         const struct rte_memzone *mz;
734
735         snprintf(s, sizeof(s), "%s_%u", name, socket_id);
736
737         /* Create SA array table */
738         printf("Creating SA context with %u maximum entries on socket %d\n",
739                         IPSEC_SA_MAX_ENTRIES, socket_id);
740
741         mz_size = sizeof(struct sa_ctx);
742         mz = rte_memzone_reserve(s, mz_size, socket_id,
743                         RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
744         if (mz == NULL) {
745                 printf("Failed to allocate SA DB memory\n");
746                 rte_errno = ENOMEM;
747                 return NULL;
748         }
749
750         sa_ctx = (struct sa_ctx *)mz->addr;
751
752         return sa_ctx;
753 }
754
755 static int
756 check_eth_dev_caps(uint16_t portid, uint32_t inbound)
757 {
758         struct rte_eth_dev_info dev_info;
759
760         rte_eth_dev_info_get(portid, &dev_info);
761
762         if (inbound) {
763                 if ((dev_info.rx_offload_capa &
764                                 DEV_RX_OFFLOAD_SECURITY) == 0) {
765                         RTE_LOG(WARNING, PORT,
766                                 "hardware RX IPSec offload is not supported\n");
767                         return -EINVAL;
768                 }
769
770         } else { /* outbound */
771                 if ((dev_info.tx_offload_capa &
772                                 DEV_TX_OFFLOAD_SECURITY) == 0) {
773                         RTE_LOG(WARNING, PORT,
774                                 "hardware TX IPSec offload is not supported\n");
775                         return -EINVAL;
776                 }
777         }
778         return 0;
779 }
780
781 /*
782  * Helper function: tries to determine next_proto for an SPI
783  * by searching through the SP rules.
784  */
785 static int
786 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
787                 struct ip_addr ip_addr[2], uint32_t mask[2])
788 {
789         int32_t rc4, rc6;
790
791         rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
792                                 ip_addr, mask);
793         rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
794                                 ip_addr, mask);
795
796         if (rc4 >= 0) {
797                 if (rc6 >= 0) {
798                         RTE_LOG(ERR, IPSEC,
799                                 "%s: SPI %u used simultaneously by "
800                                 "IPv4(%d) and IPv6 (%d) SP rules\n",
801                                 __func__, spi, rc4, rc6);
802                         return -EINVAL;
803                 } else
804                         return IPPROTO_IPIP;
805         } else if (rc6 < 0) {
806                 RTE_LOG(ERR, IPSEC,
807                         "%s: SPI %u is not used by any SP rule\n",
808                         __func__, spi);
809                 return -EINVAL;
810         } else
811                 return IPPROTO_IPV6;
812 }
813
814 /*
815  * Helper function for getting source and destination IP addresses
816  * from the SP. Needed for inline crypto transport mode, as addresses are
817  * not provided in the config file for that mode. It checks whether an SP
818  * exists for the current SA and, based on the protocol type returned,
819  * stores the appropriate addresses obtained from the SP into the SA.
820  */
821 static int
822 sa_add_address_inline_crypto(struct ipsec_sa *sa)
823 {
824         int protocol;
825         struct ip_addr ip_addr[2];
826         uint32_t mask[2];
827
828         protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
829         if (protocol < 0)
830                 return protocol;
831         else if (protocol == IPPROTO_IPIP) {
832                 sa->flags |= IP4_TRANSPORT;
833                 if (mask[0] == IP4_FULL_MASK &&
834                                 mask[1] == IP4_FULL_MASK &&
835                                 ip_addr[0].ip.ip4 != 0 &&
836                                 ip_addr[1].ip.ip4 != 0) {
837
838                         sa->src.ip.ip4 = ip_addr[0].ip.ip4;
839                         sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
840                 } else {
841                         RTE_LOG(ERR, IPSEC,
842                         "%s: No valid address or mask entry in"
843                         " IPv4 SP rule for SPI %u\n",
844                         __func__, sa->spi);
845                         return -EINVAL;
846                 }
847         } else if (protocol == IPPROTO_IPV6) {
848                 sa->flags |= IP6_TRANSPORT;
849                 if (mask[0] == IP6_FULL_MASK &&
850                                 mask[1] == IP6_FULL_MASK &&
851                                 (ip_addr[0].ip.ip6.ip6[0] != 0 ||
852                                 ip_addr[0].ip.ip6.ip6[1] != 0) &&
853                                 (ip_addr[1].ip.ip6.ip6[0] != 0 ||
854                                 ip_addr[1].ip.ip6.ip6[1] != 0)) {
855
856                         sa->src.ip.ip6 = ip_addr[0].ip.ip6;
857                         sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
858                 } else {
859                         RTE_LOG(ERR, IPSEC,
860                         "%s: No valid address or mask entry in"
861                         " IPv6 SP rule for SPI %u\n",
862                         __func__, sa->spi);
863                         return -EINVAL;
864                 }
865         }
866         return 0;
867 }
868
869 static int
870 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
871                 uint32_t nb_entries, uint32_t inbound)
872 {
873         struct ipsec_sa *sa;
874         uint32_t i, idx;
875         uint16_t iv_length, aad_length;
876         int inline_status;
877
878         /* for ESN, the upper 32 bits of the SQN also need to be part of the AAD */
879         aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
880
881         for (i = 0; i < nb_entries; i++) {
882                 idx = SPI2IDX(entries[i].spi);
883                 sa = &sa_ctx->sa[idx];
884                 if (sa->spi != 0) {
885                         printf("Index %u already in use by SPI %u\n",
886                                         idx, sa->spi);
887                         return -EINVAL;
888                 }
889                 *sa = entries[i];
890                 sa->seq = 0;
891
892                 if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
893                         sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
894                         if (check_eth_dev_caps(sa->portid, inbound))
895                                 return -EINVAL;
896                 }
897
898                 sa->direction = (inbound == 1) ?
899                                 RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
900                                 RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
901
902                 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
903                 case IP4_TUNNEL:
904                         sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
905                         sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
906                         break;
907                 case TRANSPORT:
908                         if (sa->type ==
909                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
910                                 inline_status =
911                                         sa_add_address_inline_crypto(sa);
912                                 if (inline_status < 0)
913                                         return inline_status;
914                         }
915                         break;
916                 }
917
918                 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
919                         iv_length = 16;
920
921                         sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
922                         sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
923                         sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
924                         sa_ctx->xf[idx].a.aead.key.length =
925                                 sa->cipher_key_len;
926                         sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
927                                 RTE_CRYPTO_AEAD_OP_DECRYPT :
928                                 RTE_CRYPTO_AEAD_OP_ENCRYPT;
929                         sa_ctx->xf[idx].a.next = NULL;
930                         sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
931                         sa_ctx->xf[idx].a.aead.iv.length = iv_length;
932                         sa_ctx->xf[idx].a.aead.aad_length =
933                                 sa->aad_len + aad_length;
934                         sa_ctx->xf[idx].a.aead.digest_length =
935                                 sa->digest_len;
936
937                         sa->xforms = &sa_ctx->xf[idx].a;
938
939                         print_one_sa_rule(sa, inbound);
940                 } else {
941                         switch (sa->cipher_algo) {
942                         case RTE_CRYPTO_CIPHER_NULL:
943                         case RTE_CRYPTO_CIPHER_3DES_CBC:
944                         case RTE_CRYPTO_CIPHER_AES_CBC:
945                                 iv_length = sa->iv_len;
946                                 break;
947                         case RTE_CRYPTO_CIPHER_AES_CTR:
948                                 iv_length = 16;
949                                 break;
950                         default:
951                                 RTE_LOG(ERR, IPSEC_ESP,
952                                                 "unsupported cipher algorithm %u\n",
953                                                 sa->cipher_algo);
954                                 return -EINVAL;
955                         }
956
957                         if (inbound) {
958                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
959                                 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
960                                 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
961                                 sa_ctx->xf[idx].b.cipher.key.length =
962                                         sa->cipher_key_len;
963                                 sa_ctx->xf[idx].b.cipher.op =
964                                         RTE_CRYPTO_CIPHER_OP_DECRYPT;
965                                 sa_ctx->xf[idx].b.next = NULL;
966                                 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
967                                 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
968
969                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
970                                 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
971                                 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
972                                 sa_ctx->xf[idx].a.auth.key.length =
973                                         sa->auth_key_len;
974                                 sa_ctx->xf[idx].a.auth.digest_length =
975                                         sa->digest_len;
976                                 sa_ctx->xf[idx].a.auth.op =
977                                         RTE_CRYPTO_AUTH_OP_VERIFY;
978                         } else { /* outbound */
979                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
980                                 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
981                                 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
982                                 sa_ctx->xf[idx].a.cipher.key.length =
983                                         sa->cipher_key_len;
984                                 sa_ctx->xf[idx].a.cipher.op =
985                                         RTE_CRYPTO_CIPHER_OP_ENCRYPT;
986                                 sa_ctx->xf[idx].a.next = NULL;
987                                 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
988                                 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
989
990                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
991                                 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
992                                 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
993                                 sa_ctx->xf[idx].b.auth.key.length =
994                                         sa->auth_key_len;
995                                 sa_ctx->xf[idx].b.auth.digest_length =
996                                         sa->digest_len;
997                                 sa_ctx->xf[idx].b.auth.op =
998                                         RTE_CRYPTO_AUTH_OP_GENERATE;
999                         }
1000
1001                         sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1002                         sa_ctx->xf[idx].b.next = NULL;
1003                         sa->xforms = &sa_ctx->xf[idx].a;
1004
1005                         print_one_sa_rule(sa, inbound);
1006                 }
1007         }
1008
1009         return 0;
1010 }
1011
1012 static inline int
1013 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1014                 uint32_t nb_entries)
1015 {
1016         return sa_add_rules(sa_ctx, entries, nb_entries, 0);
1017 }
1018
1019 static inline int
1020 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1021                 uint32_t nb_entries)
1022 {
1023         return sa_add_rules(sa_ctx, entries, nb_entries, 1);
1024 }
1025
1026 /*
1027  * helper function, fills parameters that are identical for all SAs
1028  */
1029 static void
1030 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1031         const struct app_sa_prm *app_prm)
1032 {
1033         memset(prm, 0, sizeof(*prm));
1034
1035         prm->flags = app_prm->flags;
1036         prm->ipsec_xform.options.esn = app_prm->enable_esn;
1037         prm->replay_win_sz = app_prm->window_size;
1038 }
1039
1040 static int
1041 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1042         const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1043 {
1044         int32_t rc;
1045
1046         /*
1047          * Try to get the next proto for the SPI by searching for that
1048          * SPI in the SPD. Probably not the optimal way, but there seems
1049          * to be nothing better right now.
1050          */
1051         rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1052         if (rc < 0)
1053                 return rc;
1054
1055         fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1056         prm->userdata = (uintptr_t)ss;
1057
1058         /* setup ipsec xform */
1059         prm->ipsec_xform.spi = ss->spi;
1060         prm->ipsec_xform.salt = ss->salt;
1061         prm->ipsec_xform.direction = ss->direction;
1062         prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1063         prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1064                 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1065                 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
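                /*
                 * Enable tunnel header reconstruction as per RFC 4301:
                 * propagate ECN and copy DSCP between inner and outer headers.
                 */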
1066         prm->ipsec_xform.options.ecn = 1;
1067         prm->ipsec_xform.options.copy_dscp = 1;
1068
1069         if (IS_IP4_TUNNEL(ss->flags)) {
1070                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1071                 prm->tun.hdr_len = sizeof(*v4);
1072                 prm->tun.next_proto = rc;
1073                 prm->tun.hdr = v4;
1074         } else if (IS_IP6_TUNNEL(ss->flags)) {
1075                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1076                 prm->tun.hdr_len = sizeof(*v6);
1077                 prm->tun.next_proto = rc;
1078                 prm->tun.hdr = v6;
1079         } else {
1080                 /* transport mode */
1081                 prm->trs.proto = rc;
1082         }
1083
1084         /* setup crypto section */
1085         prm->crypto_xform = ss->xforms;
1086         return 0;
1087 }
1088
1089 static void
1090 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
1091         const struct ipsec_sa *lsa)
1092 {
1093         ss->sa = sa;
1094         ss->type = lsa->type;
1095
1096         /* setup crypto section */
1097         if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
1098                 ss->crypto.ses = lsa->crypto_session;
1099         /* setup session action type */
1100         } else {
1101                 ss->security.ses = lsa->sec_session;
1102                 ss->security.ctx = lsa->security_ctx;
1103                 ss->security.ol_flags = lsa->ol_flags;
1104         }
1105 }
1106
1107 /*
1108  * Initialise related rte_ipsec_sa object.
1109  */
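/*
 * The v4/v6 headers built below act as the outer (tunnel) header templates
 * that librte_ipsec uses when prepending the tunnel header to outbound
 * packets; they are passed via prm->tun.hdr in fill_ipsec_sa_prm().
 */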
1110 static int
1111 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
1112 {
1113         int rc;
1114         struct rte_ipsec_sa_prm prm;
1115         struct rte_ipv4_hdr v4  = {
1116                 .version_ihl = IPVERSION << 4 |
1117                         sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1118                 .time_to_live = IPDEFTTL,
1119                 .next_proto_id = IPPROTO_ESP,
1120                 .src_addr = lsa->src.ip.ip4,
1121                 .dst_addr = lsa->dst.ip.ip4,
1122         };
1123         struct rte_ipv6_hdr v6 = {
1124                 .vtc_flow = htonl(IP6_VERSION << 28),
1125                 .proto = IPPROTO_ESP,
1126         };
1127
1128         if (IS_IP6_TUNNEL(lsa->flags)) {
1129                 memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1130                 memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1131         }
1132
1133         rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1134         if (rc == 0)
1135                 rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1136         if (rc < 0)
1137                 return rc;
1138
1139         fill_ipsec_session(&lsa->ips, sa, lsa);
1140         return 0;
1141 }
1142
1143 /*
1144  * Allocate space for and initialise rte_ipsec_sa structures,
1145  * one per session.
1146  */
1147 static int
1148 ipsec_satbl_init(struct sa_ctx *ctx, const struct ipsec_sa *ent,
1149         uint32_t nb_ent, int32_t socket)
1150 {
1151         int32_t rc, sz;
1152         uint32_t i, idx;
1153         size_t tsz;
1154         struct rte_ipsec_sa *sa;
1155         struct ipsec_sa *lsa;
1156         struct rte_ipsec_sa_prm prm;
1157
1158         /* determine SA size */
1159         idx = SPI2IDX(ent[0].spi);
1160         fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1161         sz = rte_ipsec_sa_size(&prm);
1162         if (sz < 0) {
1163                 RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1164                         "failed to determine SA size, error code: %d\n",
1165                         __func__, ctx, nb_ent, socket, sz);
1166                 return sz;
1167         }
1168
1169         tsz = sz * nb_ent;
1170
1171         ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1172         if (ctx->satbl == NULL) {
1173                 RTE_LOG(ERR, IPSEC,
1174                         "%s(%p, %u, %d): failed to allocate %zu bytes\n",
1175                         __func__,  ctx, nb_ent, socket, tsz);
1176                 return -ENOMEM;
1177         }
1178
1179         rc = 0;
1180         for (i = 0; i != nb_ent && rc == 0; i++) {
1181
1182                 idx = SPI2IDX(ent[i].spi);
1183
1184                 sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1185                 lsa = ctx->sa + idx;
1186
1187                 rc = ipsec_sa_init(lsa, sa, sz);
1188         }
1189
1190         return rc;
1191 }
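/*
 * Note: satbl is a flat array of nb_ent rte_ipsec_sa objects, each sz bytes
 * and indexed by rule order (i), whereas the application-level ipsec_sa
 * entries in ctx->sa are located through SPI2IDX(); ipsec_sa_init() links
 * the two via lsa->ips.
 */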
1192
1193 /*
1194  * Walk through all SA rules to find an SA with the given SPI
1195  */
1196 int
1197 sa_spi_present(uint32_t spi, int inbound)
1198 {
1199         uint32_t i, num;
1200         const struct ipsec_sa *sar;
1201
1202         if (inbound != 0) {
1203                 sar = sa_in;
1204                 num = nb_sa_in;
1205         } else {
1206                 sar = sa_out;
1207                 num = nb_sa_out;
1208         }
1209
1210         for (i = 0; i != num; i++) {
1211                 if (sar[i].spi == spi)
1212                         return i;
1213         }
1214
1215         return -ENOENT;
1216 }
1217
1218 void
1219 sa_init(struct socket_ctx *ctx, int32_t socket_id)
1220 {
1221         int32_t rc;
1222         const char *name;
1223
1224         if (ctx == NULL)
1225                 rte_exit(EXIT_FAILURE, "NULL context.\n");
1226
1227         if (ctx->sa_in != NULL)
1228                 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
1229                                 "initialized\n", socket_id);
1230
1231         if (ctx->sa_out != NULL)
1232                 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
1233                                 "initialized\n", socket_id);
1234
1235         if (nb_sa_in > 0) {
1236                 name = "sa_in";
1237                 ctx->sa_in = sa_create(name, socket_id);
1238                 if (ctx->sa_in == NULL)
1239                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1240                                 "context %s in socket %d\n", rte_errno,
1241                                 name, socket_id);
1242
1243                 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
1244
1245                 if (app_sa_prm.enable != 0) {
1246                         rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
1247                                 socket_id);
1248                         if (rc != 0)
1249                                 rte_exit(EXIT_FAILURE,
1250                                         "failed to init inbound SAs\n");
1251                 }
1252         } else
1253                 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1254
1255         if (nb_sa_out > 0) {
1256                 name = "sa_out";
1257                 ctx->sa_out = sa_create(name, socket_id);
1258                 if (ctx->sa_out == NULL)
1259                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1260                                 "context %s in socket %d\n", rte_errno,
1261                                 name, socket_id);
1262
1263                 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
1264
1265                 if (app_sa_prm.enable != 0) {
1266                         rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
1267                                 socket_id);
1268                         if (rc != 0)
1269                                 rte_exit(EXIT_FAILURE,
1270                                         "failed to init outbound SAs\n");
1271                 }
1272         } else
1273                 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
1274                         "specified\n");
1275 }
1276
1277 int
1278 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1279 {
1280         struct ipsec_mbuf_metadata *priv;
1281         struct ipsec_sa *sa;
1282
1283         priv = get_priv(m);
1284         sa = priv->sa;
1285         if (sa != NULL)
1286                 return (sa_ctx->sa[sa_idx].spi == sa->spi);
1287
1288         RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1289         return 0;
1290 }
1291
1292 static inline void
1293 single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
1294                 struct ipsec_sa **sa_ret)
1295 {
1296         struct rte_esp_hdr *esp;
1297         struct ip *ip;
1298         uint32_t *src4_addr;
1299         uint8_t *src6_addr;
1300         struct ipsec_sa *sa;
1301
1302         *sa_ret = NULL;
1303
1304         ip = rte_pktmbuf_mtod(pkt, struct ip *);
1305         esp = rte_pktmbuf_mtod_offset(pkt, struct rte_esp_hdr *, pkt->l3_len);
1306
1307         if (esp->spi == INVALID_SPI)
1308                 return;
1309
1310         sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
1311         if (rte_be_to_cpu_32(esp->spi) != sa->spi)
1312                 return;
1313
1314         switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1315         case IP4_TUNNEL:
1316                 src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
1317                 if ((ip->ip_v == IPVERSION) &&
1318                                 (sa->src.ip.ip4 == *src4_addr) &&
1319                                 (sa->dst.ip.ip4 == *(src4_addr + 1)))
1320                         *sa_ret = sa;
1321                 break;
1322         case IP6_TUNNEL:
1323                 src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
1324                 if ((ip->ip_v == IP6_VERSION) &&
1325                                 !memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
1326                                 !memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
1327                         *sa_ret = sa;
1328                 break;
1329         case TRANSPORT:
1330                 *sa_ret = sa;
1331         }
1332 }
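/*
 * The tunnel address checks above read the destination address at
 * src4_addr + 1 (and src6_addr + 16 for IPv6), relying on ip_dst/ip6_dst
 * immediately following ip_src/ip6_src in the respective header layouts.
 */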
1333
1334 void
1335 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1336                 struct ipsec_sa *sa[], uint16_t nb_pkts)
1337 {
1338         uint32_t i;
1339
1340         for (i = 0; i < nb_pkts; i++)
1341                 single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
1342 }
1343
1344 void
1345 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1346                 struct ipsec_sa *sa[], uint16_t nb_pkts)
1347 {
1348         uint32_t i;
1349
1350         for (i = 0; i < nb_pkts; i++)
1351                 sa[i] = &sa_ctx->sa[sa_idx[i]];
1352 }
1353
1354 /*
1355  * Select HW offloads to be used.
1356  */
1357 int
1358 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1359                 uint64_t *tx_offloads)
1360 {
1361         struct ipsec_sa *rule;
1362         uint32_t idx_sa;
1363
1364         *rx_offloads = 0;
1365         *tx_offloads = 0;
1366
1367         /* Check for inbound rules that use offloads and use this port */
1368         for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1369                 rule = &sa_in[idx_sa];
1370                 if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1371                                 rule->type ==
1372                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1373                                 && rule->portid == port_id)
1374                         *rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
1375         }
1376
1377         /* Check for outbound rules that use offloads and use this port */
1378         for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1379                 rule = &sa_out[idx_sa];
1380                 if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1381                                 rule->type ==
1382                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1383                                 && rule->portid == port_id)
1384                         *tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
1385         }
1386         return 0;
1387 }