examples/ipsec-secgw: support UDP encap for inline crypto
[dpdk.git] / examples / ipsec-secgw / sa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 /*
6  * Security Associations
7  */
8 #include <sys/types.h>
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
12
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
19 #include <rte_ip.h>
20 #include <rte_udp.h>
21 #include <rte_random.h>
22 #include <rte_ethdev.h>
23 #include <rte_malloc.h>
24
25 #include "ipsec.h"
26 #include "esp.h"
27 #include "parser.h"
28 #include "sad.h"
29
30 #define IPDEFTTL 64
31
32 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
33
34 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
35
36 #define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
37
38 struct supported_cipher_algo {
39         const char *keyword;
40         enum rte_crypto_cipher_algorithm algo;
41         uint16_t iv_len;
42         uint16_t block_size;
43         uint16_t key_len;
44 };
45
46 struct supported_auth_algo {
47         const char *keyword;
48         enum rte_crypto_auth_algorithm algo;
49         uint16_t digest_len;
50         uint16_t key_len;
51         uint8_t key_not_req;
52 };
53
54 struct supported_aead_algo {
55         const char *keyword;
56         enum rte_crypto_aead_algorithm algo;
57         uint16_t iv_len;
58         uint16_t block_size;
59         uint16_t digest_len;
60         uint16_t key_len;
61         uint8_t aad_len;
62 };
63
64
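/* Note: for aes-128-ctr the key_len of 20 bytes covers the 16-byte key plus
 * the 4-byte nonce/salt carried at the end of the configured key string.
 */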
65 const struct supported_cipher_algo cipher_algos[] = {
66         {
67                 .keyword = "null",
68                 .algo = RTE_CRYPTO_CIPHER_NULL,
69                 .iv_len = 0,
70                 .block_size = 4,
71                 .key_len = 0
72         },
73         {
74                 .keyword = "aes-128-cbc",
75                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
76                 .iv_len = 16,
77                 .block_size = 16,
78                 .key_len = 16
79         },
80         {
81                 .keyword = "aes-192-cbc",
82                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
83                 .iv_len = 16,
84                 .block_size = 16,
85                 .key_len = 24
86         },
87         {
88                 .keyword = "aes-256-cbc",
89                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
90                 .iv_len = 16,
91                 .block_size = 16,
92                 .key_len = 32
93         },
94         {
95                 .keyword = "aes-128-ctr",
96                 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
97                 .iv_len = 8,
98                 .block_size = 4,
99                 .key_len = 20
100         },
101         {
102                 .keyword = "3des-cbc",
103                 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
104                 .iv_len = 8,
105                 .block_size = 8,
106                 .key_len = 24
107         }
108 };
109
110 const struct supported_auth_algo auth_algos[] = {
111         {
112                 .keyword = "null",
113                 .algo = RTE_CRYPTO_AUTH_NULL,
114                 .digest_len = 0,
115                 .key_len = 0,
116                 .key_not_req = 1
117         },
118         {
119                 .keyword = "sha1-hmac",
120                 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
121                 .digest_len = 12,
122                 .key_len = 20
123         },
124         {
125                 .keyword = "sha256-hmac",
126                 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
127                 .digest_len = 16,
128                 .key_len = 32
129         }
130 };
131
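/* Note: key_len for the GCM entries below includes the 4-byte salt appended
 * to the key in the configuration file; it is split off when the "aead_key"
 * token is parsed.
 */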
132 const struct supported_aead_algo aead_algos[] = {
133         {
134                 .keyword = "aes-128-gcm",
135                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
136                 .iv_len = 8,
137                 .block_size = 4,
138                 .key_len = 20,
139                 .digest_len = 16,
140                 .aad_len = 8,
141         },
142         {
143                 .keyword = "aes-192-gcm",
144                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
145                 .iv_len = 8,
146                 .block_size = 4,
147                 .key_len = 28,
148                 .digest_len = 16,
149                 .aad_len = 8,
150         },
151         {
152                 .keyword = "aes-256-gcm",
153                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
154                 .iv_len = 8,
155                 .block_size = 4,
156                 .key_len = 36,
157                 .digest_len = 16,
158                 .aad_len = 8,
159         }
160 };
161
162 #define SA_INIT_NB      128
163
164 static uint32_t nb_crypto_sessions;
165 struct ipsec_sa *sa_out;
166 uint32_t nb_sa_out;
167 static uint32_t sa_out_sz;
168 static struct ipsec_sa_cnt sa_out_cnt;
169
170 struct ipsec_sa *sa_in;
171 uint32_t nb_sa_in;
172 static uint32_t sa_in_sz;
173 static struct ipsec_sa_cnt sa_in_cnt;
174
175 static const struct supported_cipher_algo *
176 find_match_cipher_algo(const char *cipher_keyword)
177 {
178         size_t i;
179
180         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
181                 const struct supported_cipher_algo *algo =
182                         &cipher_algos[i];
183
184                 if (strcmp(cipher_keyword, algo->keyword) == 0)
185                         return algo;
186         }
187
188         return NULL;
189 }
190
191 static const struct supported_auth_algo *
192 find_match_auth_algo(const char *auth_keyword)
193 {
194         size_t i;
195
196         for (i = 0; i < RTE_DIM(auth_algos); i++) {
197                 const struct supported_auth_algo *algo =
198                         &auth_algos[i];
199
200                 if (strcmp(auth_keyword, algo->keyword) == 0)
201                         return algo;
202         }
203
204         return NULL;
205 }
206
207 static const struct supported_aead_algo *
208 find_match_aead_algo(const char *aead_keyword)
209 {
210         size_t i;
211
212         for (i = 0; i < RTE_DIM(aead_algos); i++) {
213                 const struct supported_aead_algo *algo =
214                         &aead_algos[i];
215
216                 if (strcmp(aead_keyword, algo->keyword) == 0)
217                         return algo;
218         }
219
220         return NULL;
221 }
222
223 /** parse_key_string
224  *  parse an x:x:x:x... hex byte key string into uint8_t *key
225  *  return:
226  *  > 0: number of bytes parsed
227  *  0:   failed
228  */
229 static uint32_t
230 parse_key_string(const char *key_str, uint8_t *key)
231 {
232         const char *pt_start = key_str, *pt_end = key_str;
233         uint32_t nb_bytes = 0;
234
235         while (pt_end != NULL) {
236                 char sub_str[3] = {0};
237
238                 pt_end = strchr(pt_start, ':');
239
240                 if (pt_end == NULL) {
241                         if (strlen(pt_start) > 2)
242                                 return 0;
243                         strncpy(sub_str, pt_start, 2);
244                 } else {
245                         if (pt_end - pt_start > 2)
246                                 return 0;
247
248                         strncpy(sub_str, pt_start, pt_end - pt_start);
249                         pt_start = pt_end + 1;
250                 }
251
252                 key[nb_bytes++] = strtol(sub_str, NULL, 16);
253         }
254
255         return nb_bytes;
256 }
257
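/* Allocate the SA table on first use, or double its size once the current
 * allocation is full.
 */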
258 static int
259 extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
260 {
261         if (*sa_tbl == NULL) {
262                 *sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
263                 if (*sa_tbl == NULL)
264                         return -1;
265                 *cur_sz = SA_INIT_NB;
266                 return 0;
267         }
268
269         if (cur_cnt >= *cur_sz) {
270                 *sa_tbl = realloc(*sa_tbl,
271                         *cur_sz * sizeof(struct ipsec_sa) * 2);
272                 if (*sa_tbl == NULL)
273                         return -1;
274                 /* clean reallocated extra space */
275                 memset(&(*sa_tbl)[*cur_sz], 0,
276                         *cur_sz * sizeof(struct ipsec_sa));
277                 *cur_sz *= 2;
278         }
279
280         return 0;
281 }
282
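/* Parse the tokens of one SA rule ("in" or "out") from the configuration file
 * into a new entry of the inbound or outbound SA table.
 */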
283 void
284 parse_sa_tokens(char **tokens, uint32_t n_tokens,
285         struct parse_status *status)
286 {
287         struct ipsec_sa *rule = NULL;
288         struct rte_ipsec_session *ips;
289         uint32_t ti; /*token index*/
290         uint32_t *ri /*rule index*/;
291         struct ipsec_sa_cnt *sa_cnt;
292         uint32_t cipher_algo_p = 0;
293         uint32_t auth_algo_p = 0;
294         uint32_t aead_algo_p = 0;
295         uint32_t src_p = 0;
296         uint32_t dst_p = 0;
297         uint32_t mode_p = 0;
298         uint32_t type_p = 0;
299         uint32_t portid_p = 0;
300         uint32_t fallback_p = 0;
301         int16_t status_p = 0;
302         uint16_t udp_encap_p = 0;
303
304         if (strcmp(tokens[0], "in") == 0) {
305                 ri = &nb_sa_in;
306                 sa_cnt = &sa_in_cnt;
307                 if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
308                         return;
309                 rule = &sa_in[*ri];
310                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
311         } else {
312                 ri = &nb_sa_out;
313                 sa_cnt = &sa_out_cnt;
314                 if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
315                         return;
316                 rule = &sa_out[*ri];
317                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
318         }
319
320         /* spi number */
321         APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
322         if (status->status < 0)
323                 return;
324         if (atoi(tokens[1]) == INVALID_SPI)
325                 return;
326         rule->spi = atoi(tokens[1]);
327         rule->portid = UINT16_MAX;
328         ips = ipsec_get_primary_session(rule);
329
330         for (ti = 2; ti < n_tokens; ti++) {
331                 if (strcmp(tokens[ti], "mode") == 0) {
332                         APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
333                         if (status->status < 0)
334                                 return;
335
336                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
337                         if (status->status < 0)
338                                 return;
339
340                         if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
341                                 sa_cnt->nb_v4++;
342                                 rule->flags = IP4_TUNNEL;
343                         } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
344                                 sa_cnt->nb_v6++;
345                                 rule->flags = IP6_TUNNEL;
346                         } else if (strcmp(tokens[ti], "transport") == 0) {
347                                 sa_cnt->nb_v4++;
348                                 sa_cnt->nb_v6++;
349                                 rule->flags = TRANSPORT;
350                         } else {
351                                 APP_CHECK(0, status, "unrecognized "
352                                         "input \"%s\"", tokens[ti]);
353                                 return;
354                         }
355
356                         mode_p = 1;
357                         continue;
358                 }
359
360                 if (strcmp(tokens[ti], "cipher_algo") == 0) {
361                         const struct supported_cipher_algo *algo;
362                         uint32_t key_len;
363
364                         APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
365                                 status);
366                         if (status->status < 0)
367                                 return;
368
369                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
370                         if (status->status < 0)
371                                 return;
372
373                         algo = find_match_cipher_algo(tokens[ti]);
374
375                         APP_CHECK(algo != NULL, status, "unrecognized "
376                                 "input \"%s\"", tokens[ti]);
377
378                         if (status->status < 0)
379                                 return;
380
381                         rule->cipher_algo = algo->algo;
382                         rule->block_size = algo->block_size;
383                         rule->iv_len = algo->iv_len;
384                         rule->cipher_key_len = algo->key_len;
385
386                         /* for NULL algorithm, no cipher key required */
387                         if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
388                                 cipher_algo_p = 1;
389                                 continue;
390                         }
391
392                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
393                         if (status->status < 0)
394                                 return;
395
396                         APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
397                                 status, "unrecognized input \"%s\", "
398                                 "expect \"cipher_key\"", tokens[ti]);
399                         if (status->status < 0)
400                                 return;
401
402                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
403                         if (status->status < 0)
404                                 return;
405
406                         key_len = parse_key_string(tokens[ti],
407                                 rule->cipher_key);
408                         APP_CHECK(key_len == rule->cipher_key_len, status,
409                                 "unrecognized input \"%s\"", tokens[ti]);
410                         if (status->status < 0)
411                                 return;
412
413                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
414                                 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
415                                 rule->salt = (uint32_t)rte_rand();
416
417                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
418                                 key_len -= 4;
419                                 rule->cipher_key_len = key_len;
420                                 memcpy(&rule->salt,
421                                         &rule->cipher_key[key_len], 4);
422                         }
423
424                         cipher_algo_p = 1;
425                         continue;
426                 }
427
428                 if (strcmp(tokens[ti], "auth_algo") == 0) {
429                         const struct supported_auth_algo *algo;
430                         uint32_t key_len;
431
432                         APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
433                                 status);
434                         if (status->status < 0)
435                                 return;
436
437                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
438                         if (status->status < 0)
439                                 return;
440
441                         algo = find_match_auth_algo(tokens[ti]);
442                         APP_CHECK(algo != NULL, status, "unrecognized "
443                                 "input \"%s\"", tokens[ti]);
444
445                         if (status->status < 0)
446                                 return;
447
448                         rule->auth_algo = algo->algo;
449                         rule->auth_key_len = algo->key_len;
450                         rule->digest_len = algo->digest_len;
451
452                         /* NULL algorithm and combined algos do not
453                          * require auth key
454                          */
455                         if (algo->key_not_req) {
456                                 auth_algo_p = 1;
457                                 continue;
458                         }
459
460                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
461                         if (status->status < 0)
462                                 return;
463
464                         APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
465                                 status, "unrecognized input \"%s\", "
466                                 "expect \"auth_key\"", tokens[ti]);
467                         if (status->status < 0)
468                                 return;
469
470                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
471                         if (status->status < 0)
472                                 return;
473
474                         key_len = parse_key_string(tokens[ti],
475                                 rule->auth_key);
476                         APP_CHECK(key_len == rule->auth_key_len, status,
477                                 "unrecognized input \"%s\"", tokens[ti]);
478                         if (status->status < 0)
479                                 return;
480
481                         auth_algo_p = 1;
482                         continue;
483                 }
484
485                 if (strcmp(tokens[ti], "aead_algo") == 0) {
486                         const struct supported_aead_algo *algo;
487                         uint32_t key_len;
488
489                         APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
490                                 status);
491                         if (status->status < 0)
492                                 return;
493
494                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
495                         if (status->status < 0)
496                                 return;
497
498                         algo = find_match_aead_algo(tokens[ti]);
499
500                         APP_CHECK(algo != NULL, status, "unrecognized "
501                                 "input \"%s\"", tokens[ti]);
502
503                         if (status->status < 0)
504                                 return;
505
506                         rule->aead_algo = algo->algo;
507                         rule->cipher_key_len = algo->key_len;
508                         rule->digest_len = algo->digest_len;
509                         rule->aad_len = algo->aad_len;
510                         rule->block_size = algo->block_size;
511                         rule->iv_len = algo->iv_len;
512
513                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
514                         if (status->status < 0)
515                                 return;
516
517                         APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
518                                 status, "unrecognized input \"%s\", "
519                                 "expect \"aead_key\"", tokens[ti]);
520                         if (status->status < 0)
521                                 return;
522
523                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
524                         if (status->status < 0)
525                                 return;
526
527                         key_len = parse_key_string(tokens[ti],
528                                 rule->cipher_key);
529                         APP_CHECK(key_len == rule->cipher_key_len, status,
530                                 "unrecognized input \"%s\"", tokens[ti]);
531                         if (status->status < 0)
532                                 return;
533
534                         key_len -= 4;
535                         rule->cipher_key_len = key_len;
536                         memcpy(&rule->salt,
537                                 &rule->cipher_key[key_len], 4);
538
539                         aead_algo_p = 1;
540                         continue;
541                 }
542
543                 if (strcmp(tokens[ti], "src") == 0) {
544                         APP_CHECK_PRESENCE(src_p, tokens[ti], status);
545                         if (status->status < 0)
546                                 return;
547
548                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
549                         if (status->status < 0)
550                                 return;
551
552                         if (IS_IP4_TUNNEL(rule->flags)) {
553                                 struct in_addr ip;
554
555                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
556                                         &ip, NULL) == 0, status,
557                                         "unrecognized input \"%s\", "
558                                         "expect valid ipv4 addr",
559                                         tokens[ti]);
560                                 if (status->status < 0)
561                                         return;
562                                 rule->src.ip.ip4 = rte_bswap32(
563                                         (uint32_t)ip.s_addr);
564                         } else if (IS_IP6_TUNNEL(rule->flags)) {
565                                 struct in6_addr ip;
566
567                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
568                                         NULL) == 0, status,
569                                         "unrecognized input \"%s\", "
570                                         "expect valid ipv6 addr",
571                                         tokens[ti]);
572                                 if (status->status < 0)
573                                         return;
574                                 memcpy(rule->src.ip.ip6.ip6_b,
575                                         ip.s6_addr, 16);
576                         } else if (IS_TRANSPORT(rule->flags)) {
577                                 APP_CHECK(0, status, "unrecognized input "
578                                         "\"%s\"", tokens[ti]);
579                                 return;
580                         }
581
582                         src_p = 1;
583                         continue;
584                 }
585
586                 if (strcmp(tokens[ti], "dst") == 0) {
587                         APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
588                         if (status->status < 0)
589                                 return;
590
591                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
592                         if (status->status < 0)
593                                 return;
594
595                         if (IS_IP4_TUNNEL(rule->flags)) {
596                                 struct in_addr ip;
597
598                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
599                                         &ip, NULL) == 0, status,
600                                         "unrecognized input \"%s\", "
601                                         "expect valid ipv4 addr",
602                                         tokens[ti]);
603                                 if (status->status < 0)
604                                         return;
605                                 rule->dst.ip.ip4 = rte_bswap32(
606                                         (uint32_t)ip.s_addr);
607                         } else if (IS_IP6_TUNNEL(rule->flags)) {
608                                 struct in6_addr ip;
609
610                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
611                                         NULL) == 0, status,
612                                         "unrecognized input \"%s\", "
613                                         "expect valid ipv6 addr",
614                                         tokens[ti]);
615                                 if (status->status < 0)
616                                         return;
617                                 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
618                         } else if (IS_TRANSPORT(rule->flags)) {
619                                 APP_CHECK(0, status, "unrecognized "
620                                         "input \"%s\"", tokens[ti]);
621                                 return;
622                         }
623
624                         dst_p = 1;
625                         continue;
626                 }
627
628                 if (strcmp(tokens[ti], "type") == 0) {
629                         APP_CHECK_PRESENCE(type_p, tokens[ti], status);
630                         if (status->status < 0)
631                                 return;
632
633                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
634                         if (status->status < 0)
635                                 return;
636
637                         if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
638                                 ips->type =
639                                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
640                         else if (strcmp(tokens[ti],
641                                         "inline-protocol-offload") == 0)
642                                 ips->type =
643                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
644                         else if (strcmp(tokens[ti],
645                                         "lookaside-protocol-offload") == 0)
646                                 ips->type =
647                                 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
648                         else if (strcmp(tokens[ti], "no-offload") == 0)
649                                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
650                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
651                                 ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
652                         else {
653                                 APP_CHECK(0, status, "Invalid input \"%s\"",
654                                                 tokens[ti]);
655                                 return;
656                         }
657
658                         type_p = 1;
659                         continue;
660                 }
661
662                 if (strcmp(tokens[ti], "port_id") == 0) {
663                         APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
664                         if (status->status < 0)
665                                 return;
666                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
667                         if (status->status < 0)
668                                 return;
669                         if (rule->portid == UINT16_MAX)
670                                 rule->portid = atoi(tokens[ti]);
671                         else if (rule->portid != atoi(tokens[ti])) {
672                                 APP_CHECK(0, status,
673                                         "portid %s not matching with already assigned portid %u",
674                                         tokens[ti], rule->portid);
675                                 return;
676                         }
677                         portid_p = 1;
678                         continue;
679                 }
680
681                 if (strcmp(tokens[ti], "mss") == 0) {
682                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
683                         if (status->status < 0)
684                                 return;
685                         rule->mss = atoi(tokens[ti]);
686                         if (status->status < 0)
687                                 return;
688                         continue;
689                 }
690
691                 if (strcmp(tokens[ti], "fallback") == 0) {
692                         struct rte_ipsec_session *fb;
693
694                         APP_CHECK(app_sa_prm.enable, status, "Fallback session "
695                                 "not allowed for legacy mode.");
696                         if (status->status < 0)
697                                 return;
698                         APP_CHECK(ips->type ==
699                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
700                                 "Fallback session is allowed only if the "
701                                 "primary session is of type inline-crypto-offload.");
702                         if (status->status < 0)
703                                 return;
704                         APP_CHECK(rule->direction ==
705                                 RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
706                                 "Fallback session not allowed for egress "
707                                 "rule");
708                         if (status->status < 0)
709                                 return;
710                         APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
711                         if (status->status < 0)
712                                 return;
713                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
714                         if (status->status < 0)
715                                 return;
716                         fb = ipsec_get_fallback_session(rule);
717                         if (strcmp(tokens[ti], "lookaside-none") == 0)
718                                 fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
719                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
720                                 fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
721                         else {
722                                 APP_CHECK(0, status, "unrecognized fallback "
723                                         "type %s.", tokens[ti]);
724                                 return;
725                         }
726
727                         rule->fallback_sessions = 1;
728                         nb_crypto_sessions++;
729                         fallback_p = 1;
730                         continue;
731                 }
732                 if (strcmp(tokens[ti], "flow-direction") == 0) {
733                         switch (ips->type) {
734                         case RTE_SECURITY_ACTION_TYPE_NONE:
735                         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
736                                 rule->fdir_flag = 1;
737                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
738                                 if (status->status < 0)
739                                         return;
740                                 if (rule->portid == UINT16_MAX)
741                                         rule->portid = atoi(tokens[ti]);
742                                 else if (rule->portid != atoi(tokens[ti])) {
743                                         APP_CHECK(0, status,
744                                                 "portid %s not matching with already assigned portid %u",
745                                                 tokens[ti], rule->portid);
746                                         return;
747                                 }
748                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
749                                 if (status->status < 0)
750                                         return;
751                                 rule->fdir_qid = atoi(tokens[ti]);
752                                 /* validating portid and queueid */
753                                 status_p = check_flow_params(rule->portid,
754                                                 rule->fdir_qid);
755                                 if (status_p < 0) {
756                                         printf("port id %u / queue id %u is "
757                                                 "not valid\n", rule->portid,
758                                                  rule->fdir_qid);
759                                 }
760                                 break;
761                         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
762                         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
763                         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
764                         default:
765                                 APP_CHECK(0, status,
766                                         "flow director not supported for security session type %d",
767                                         ips->type);
768                                 return;
769                         }
770                         continue;
771                 }
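                /* "udp-encap": enable RFC 3948 UDP encapsulation of ESP
                 * (NAT traversal) for this SA.
                 */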
772                 if (strcmp(tokens[ti], "udp-encap") == 0) {
773                         switch (ips->type) {
774                         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
775                         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
776                                 APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
777                                                    status);
778                                 if (status->status < 0)
779                                         return;
780
781                                 rule->udp_encap = 1;
782                                 app_sa_prm.udp_encap = 1;
783                                 udp_encap_p = 1;
784                                 break;
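                        /* For inline crypto the ESP/UDP headers are built in
                         * software, so record the ports to use in the
                         * encapsulation header (4500 is the IANA-assigned
                         * UDP-encapsulated ESP port, RFC 3948).
                         */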
785                         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
786                                 rule->udp_encap = 1;
787                                 rule->udp.sport = 0;
788                                 rule->udp.dport = 4500;
789                                 break;
790                         default:
791                                 APP_CHECK(0, status,
792                                         "UDP encapsulation not supported for "
793                                         "security session type %d",
794                                         ips->type);
795                                 return;
796                         }
797                         continue;
798                 }
799
800                 /* unrecognizable input */
801                 APP_CHECK(0, status, "unrecognized input \"%s\"",
802                         tokens[ti]);
803                 return;
804         }
805
806         if (aead_algo_p) {
807                 APP_CHECK(cipher_algo_p == 0, status,
808                                 "AEAD used, no need for cipher options");
809                 if (status->status < 0)
810                         return;
811
812                 APP_CHECK(auth_algo_p == 0, status,
813                                 "AEAD used, no need for auth options");
814                 if (status->status < 0)
815                         return;
816         } else {
817                 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
818                 if (status->status < 0)
819                         return;
820
821                 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
822                 if (status->status < 0)
823                         return;
824         }
825
826         APP_CHECK(mode_p == 1, status, "missing mode option");
827         if (status->status < 0)
828                 return;
829
830         if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
831                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
832                 printf("Missing portid option, falling back to non-offload\n");
833
834         if (!type_p || (!portid_p && ips->type !=
835                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
836                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
837         }
838
839         nb_crypto_sessions++;
840         *ri = *ri + 1;
841 }
842
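/* Print one SA rule in human readable form: algorithms, mode, tunnel
 * endpoints, session type and fallback/flow-director settings.
 */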
843 static void
844 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
845 {
846         uint32_t i;
847         uint8_t a, b, c, d;
848         const struct rte_ipsec_session *ips;
849         const struct rte_ipsec_session *fallback_ips;
850
851         printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
852
853         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
854                 if (cipher_algos[i].algo == sa->cipher_algo &&
855                                 cipher_algos[i].key_len == sa->cipher_key_len) {
856                         printf("%s ", cipher_algos[i].keyword);
857                         break;
858                 }
859         }
860
861         for (i = 0; i < RTE_DIM(auth_algos); i++) {
862                 if (auth_algos[i].algo == sa->auth_algo) {
863                         printf("%s ", auth_algos[i].keyword);
864                         break;
865                 }
866         }
867
868         for (i = 0; i < RTE_DIM(aead_algos); i++) {
869                 if (aead_algos[i].algo == sa->aead_algo &&
870                                 aead_algos[i].key_len-4 == sa->cipher_key_len) {
871                         printf("%s ", aead_algos[i].keyword);
872                         break;
873                 }
874         }
875
876         printf("mode:");
877         if (sa->udp_encap)
878                 printf("UDP encapsulated ");
879
880         switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
881         case IP4_TUNNEL:
882                 printf("IP4Tunnel ");
883                 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
884                 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
885                 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
886                 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
887                 break;
888         case IP6_TUNNEL:
889                 printf("IP6Tunnel ");
890                 for (i = 0; i < 16; i++) {
891                         if (i % 2 && i != 15)
892                                 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
893                         else
894                                 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
895                 }
896                 printf(" ");
897                 for (i = 0; i < 16; i++) {
898                         if (i % 2 && i != 15)
899                                 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
900                         else
901                                 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
902                 }
903                 break;
904         case TRANSPORT:
905                 printf("Transport ");
906                 break;
907         }
908
909         ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
910         printf(" type:");
911         switch (ips->type) {
912         case RTE_SECURITY_ACTION_TYPE_NONE:
913                 printf("no-offload ");
914                 break;
915         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
916                 printf("inline-crypto-offload ");
917                 break;
918         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
919                 printf("inline-protocol-offload ");
920                 break;
921         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
922                 printf("lookaside-protocol-offload ");
923                 break;
924         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
925                 printf("cpu-crypto-accelerated ");
926                 break;
927         }
928
929         fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
930         if (fallback_ips != NULL && sa->fallback_sessions > 0) {
931                 printf("inline fallback: ");
932                 switch (fallback_ips->type) {
933                 case RTE_SECURITY_ACTION_TYPE_NONE:
934                         printf("lookaside-none");
935                         break;
936                 case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
937                         printf("cpu-crypto-accelerated");
938                         break;
939                 default:
940                         printf("invalid");
941                         break;
942                 }
943         }
944         if (sa->fdir_flag == 1)
945                 printf("flow-direction port %d queue %d", sa->portid,
946                                 sa->fdir_qid);
947
948         printf("\n");
949 }
950
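/* Allocate one SA context for a socket: the crypto transforms are placed in a
 * named memzone and the SA array itself in zmalloc'd memory.
 */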
951 static struct sa_ctx *
952 sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
953 {
954         char s[PATH_MAX];
955         struct sa_ctx *sa_ctx;
956         uint32_t mz_size;
957         const struct rte_memzone *mz;
958
959         snprintf(s, sizeof(s), "%s_%u", name, socket_id);
960
961         /* Create SA context */
962         printf("Creating SA context with %u maximum entries on socket %d\n",
963                         nb_sa, socket_id);
964
965         mz_size = sizeof(struct ipsec_xf) * nb_sa;
966         mz = rte_memzone_reserve(s, mz_size, socket_id,
967                         RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
968         if (mz == NULL) {
969                 printf("Failed to allocate SA XFORM memory\n");
970                 rte_errno = ENOMEM;
971                 return NULL;
972         }
973
974         sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
975                 sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
976
977         if (sa_ctx == NULL) {
978                 printf("Failed to allocate SA CTX memory\n");
979                 rte_errno = ENOMEM;
980                 rte_memzone_free(mz);
981                 return NULL;
982         }
983
984         sa_ctx->xf = (struct ipsec_xf *)mz->addr;
985         sa_ctx->nb_sa = nb_sa;
986
987         return sa_ctx;
988 }
989
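/* Check that the port provides the RX/TX security offload (and, for egress
 * SAs with an MSS configured, TCP TSO) required by inline sessions.
 */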
990 static int
991 check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
992 {
993         struct rte_eth_dev_info dev_info;
994         int retval;
995
996         retval = rte_eth_dev_info_get(portid, &dev_info);
997         if (retval != 0) {
998                 RTE_LOG(ERR, IPSEC,
999                         "Error getting device (port %u) info: %s\n",
1000                         portid, strerror(-retval));
1001
1002                 return retval;
1003         }
1004
1005         if (inbound) {
1006                 if ((dev_info.rx_offload_capa &
1007                                 RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
1008                         RTE_LOG(WARNING, PORT,
1009                                 "hardware RX IPSec offload is not supported\n");
1010                         return -EINVAL;
1011                 }
1012
1013         } else { /* outbound */
1014                 if ((dev_info.tx_offload_capa &
1015                                 RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
1016                         RTE_LOG(WARNING, PORT,
1017                                 "hardware TX IPSec offload is not supported\n");
1018                         return -EINVAL;
1019                 }
1020                 if (tso && (dev_info.tx_offload_capa &
1021                                 RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
1022                         RTE_LOG(WARNING, PORT,
1023                                 "hardware TCP TSO offload is not supported\n");
1024                         return -EINVAL;
1025                 }
1026         }
1027         return 0;
1028 }
1029
1030 /*
1031  * Helper function, tries to determine next_proto for SPI
1032  * by searching through SP rules.
1033  */
1034 static int
1035 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
1036                 struct ip_addr ip_addr[2], uint32_t mask[2])
1037 {
1038         int32_t rc4, rc6;
1039
1040         rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1041                                 ip_addr, mask);
1042         rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1043                                 ip_addr, mask);
1044
1045         if (rc4 >= 0) {
1046                 if (rc6 >= 0) {
1047                         RTE_LOG(ERR, IPSEC,
1048                                 "%s: SPI %u used simultaneously by "
1049                                 "IPv4 (%d) and IPv6 (%d) SP rules\n",
1050                                 __func__, spi, rc4, rc6);
1051                         return -EINVAL;
1052                 } else
1053                         return IPPROTO_IPIP;
1054         } else if (rc6 < 0) {
1055                 RTE_LOG(ERR, IPSEC,
1056                         "%s: SPI %u is not used by any SP rule\n",
1057                         __func__, spi);
1058                 return -EINVAL;
1059         } else
1060                 return IPPROTO_IPV6;
1061 }
1062
1063 /*
1064  * Helper function for getting source and destination IP addresses
1065  * from SP. Needed for inline crypto transport mode, as addresses are not
1066  * provided in the config file for that mode. It checks whether an SP exists
1067  * for the current SA and, based on the protocol type returned, stores the
1068  * appropriate addresses from the SP into the SA.
1069  */
1070 static int
1071 sa_add_address_inline_crypto(struct ipsec_sa *sa)
1072 {
1073         int protocol;
1074         struct ip_addr ip_addr[2];
1075         uint32_t mask[2];
1076
1077         protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
1078         if (protocol < 0)
1079                 return protocol;
1080         else if (protocol == IPPROTO_IPIP) {
1081                 sa->flags |= IP4_TRANSPORT;
1082                 if (mask[0] == IP4_FULL_MASK &&
1083                                 mask[1] == IP4_FULL_MASK &&
1084                                 ip_addr[0].ip.ip4 != 0 &&
1085                                 ip_addr[1].ip.ip4 != 0) {
1086
1087                         sa->src.ip.ip4 = ip_addr[0].ip.ip4;
1088                         sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
1089                 } else {
1090                         RTE_LOG(ERR, IPSEC,
1091                         "%s: No valid address or mask entry in"
1092                         " IPv4 SP rule for SPI %u\n",
1093                         __func__, sa->spi);
1094                         return -EINVAL;
1095                 }
1096         } else if (protocol == IPPROTO_IPV6) {
1097                 sa->flags |= IP6_TRANSPORT;
1098                 if (mask[0] == IP6_FULL_MASK &&
1099                                 mask[1] == IP6_FULL_MASK &&
1100                                 (ip_addr[0].ip.ip6.ip6[0] != 0 ||
1101                                 ip_addr[0].ip.ip6.ip6[1] != 0) &&
1102                                 (ip_addr[1].ip.ip6.ip6[0] != 0 ||
1103                                 ip_addr[1].ip.ip6.ip6[1] != 0)) {
1104
1105                         sa->src.ip.ip6 = ip_addr[0].ip.ip6;
1106                         sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
1107                 } else {
1108                         RTE_LOG(ERR, IPSEC,
1109                         "%s: No valid address or mask entry in"
1110                         " IPv6 SP rule for SPI %u\n",
1111                         __func__, sa->spi);
1112                         return -EINVAL;
1113                 }
1114         }
1115         return 0;
1116 }
1117
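/* Copy parsed SA entries into the SA context, build the crypto transforms for
 * each SA and create inline sessions / flow rules where requested.
 */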
1118 static int
1119 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1120                 uint32_t nb_entries, uint32_t inbound,
1121                 struct socket_ctx *skt_ctx)
1122 {
1123         struct ipsec_sa *sa;
1124         uint32_t i, idx;
1125         uint16_t iv_length, aad_length;
1126         int inline_status;
1127         int32_t rc;
1128         struct rte_ipsec_session *ips;
1129
1130         /* for ESN, the upper 32 bits of the SQN also need to be part of the AAD */
1131         aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
1132
1133         for (i = 0; i < nb_entries; i++) {
1134                 idx = i;
1135                 sa = &sa_ctx->sa[idx];
1136                 if (sa->spi != 0) {
1137                         printf("Index %u already in use by SPI %u\n",
1138                                         idx, sa->spi);
1139                         return -EINVAL;
1140                 }
1141                 *sa = entries[i];
1142
1143                 if (inbound) {
1144                         rc = ipsec_sad_add(&sa_ctx->sad, sa);
1145                         if (rc != 0)
1146                                 return rc;
1147                 }
1148
1149                 sa->seq = 0;
1150                 ips = ipsec_get_primary_session(sa);
1151
1152                 if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1153                         ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1154                         if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
1155                                 return -EINVAL;
1156                 }
1157
1158                 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1159                 case IP4_TUNNEL:
1160                         sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
1161                         sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
1162                         break;
1163                 case TRANSPORT:
1164                         if (ips->type ==
1165                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1166                                 inline_status =
1167                                         sa_add_address_inline_crypto(sa);
1168                                 if (inline_status < 0)
1169                                         return inline_status;
1170                         }
1171                         break;
1172                 }
1173
1174                 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
1175                         iv_length = 12;
1176
1177                         sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1178                         sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
1179                         sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
1180                         sa_ctx->xf[idx].a.aead.key.length =
1181                                 sa->cipher_key_len;
1182                         sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
1183                                 RTE_CRYPTO_AEAD_OP_DECRYPT :
1184                                 RTE_CRYPTO_AEAD_OP_ENCRYPT;
1185                         sa_ctx->xf[idx].a.next = NULL;
1186                         sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
1187                         sa_ctx->xf[idx].a.aead.iv.length = iv_length;
1188                         sa_ctx->xf[idx].a.aead.aad_length =
1189                                 sa->aad_len + aad_length;
1190                         sa_ctx->xf[idx].a.aead.digest_length =
1191                                 sa->digest_len;
1192
1193                         sa->xforms = &sa_ctx->xf[idx].a;
1194                 } else {
1195                         switch (sa->cipher_algo) {
1196                         case RTE_CRYPTO_CIPHER_NULL:
1197                         case RTE_CRYPTO_CIPHER_3DES_CBC:
1198                         case RTE_CRYPTO_CIPHER_AES_CBC:
1199                                 iv_length = sa->iv_len;
1200                                 break;
1201                         case RTE_CRYPTO_CIPHER_AES_CTR:
1202                                 iv_length = 16;
1203                                 break;
1204                         default:
1205                                 RTE_LOG(ERR, IPSEC_ESP,
1206                                                 "unsupported cipher algorithm %u\n",
1207                                                 sa->cipher_algo);
1208                                 return -EINVAL;
1209                         }
1210
1211                         if (inbound) {
1212                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1213                                 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
1214                                 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
1215                                 sa_ctx->xf[idx].b.cipher.key.length =
1216                                         sa->cipher_key_len;
1217                                 sa_ctx->xf[idx].b.cipher.op =
1218                                         RTE_CRYPTO_CIPHER_OP_DECRYPT;
1219                                 sa_ctx->xf[idx].b.next = NULL;
1220                                 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
1221                                 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
1222
1223                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1224                                 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
1225                                 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
1226                                 sa_ctx->xf[idx].a.auth.key.length =
1227                                         sa->auth_key_len;
1228                                 sa_ctx->xf[idx].a.auth.digest_length =
1229                                         sa->digest_len;
1230                                 sa_ctx->xf[idx].a.auth.op =
1231                                         RTE_CRYPTO_AUTH_OP_VERIFY;
1232                         } else { /* outbound */
1233                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1234                                 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
1235                                 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
1236                                 sa_ctx->xf[idx].a.cipher.key.length =
1237                                         sa->cipher_key_len;
1238                                 sa_ctx->xf[idx].a.cipher.op =
1239                                         RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1240                                 sa_ctx->xf[idx].a.next = NULL;
1241                                 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
1242                                 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
1243
1244                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1245                                 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
1246                                 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
1247                                 sa_ctx->xf[idx].b.auth.key.length =
1248                                         sa->auth_key_len;
1249                                 sa_ctx->xf[idx].b.auth.digest_length =
1250                                         sa->digest_len;
1251                                 sa_ctx->xf[idx].b.auth.op =
1252                                         RTE_CRYPTO_AUTH_OP_GENERATE;
1253                         }
1254
1255                         sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1256                         sa_ctx->xf[idx].b.next = NULL;
1257                         sa->xforms = &sa_ctx->xf[idx].a;
1258                 }
1259
1260                 if (ips->type ==
1261                         RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1262                         ips->type ==
1263                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1264                         rc = create_inline_session(skt_ctx, sa, ips);
1265                         if (rc != 0) {
1266                                 RTE_LOG(ERR, IPSEC_ESP,
1267                                         "create_inline_session() failed\n");
1268                                 return -EINVAL;
1269                         }
1270                 }
1271
1272                 if (sa->fdir_flag && inbound) {
1273                         rc = create_ipsec_esp_flow(sa);
1274                         if (rc != 0)
1275                                 RTE_LOG(ERR, IPSEC_ESP,
1276                                         "create_ipsec_esp_flow() failed\n");
1277                 }
1278                 print_one_sa_rule(sa, inbound);
1279         }
1280
1281         return 0;
1282 }
1283
1284 static inline int
1285 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1286                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1287 {
1288         return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
1289 }
1290
1291 static inline int
1292 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1293                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1294 {
1295         return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
1296 }
1297
1298 /*
1299  * helper function, fills parameters that are identical for all SAs
1300  */
1301 static void
1302 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1303         const struct app_sa_prm *app_prm)
1304 {
1305         memset(prm, 0, sizeof(*prm));
1306
1307         prm->flags = app_prm->flags;
1308         prm->ipsec_xform.options.esn = app_prm->enable_esn;
1309         prm->ipsec_xform.replay_win_sz = app_prm->window_size;
1310 }
1311
1312 static int
1313 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1314         const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1315 {
1316         int32_t rc;
1317
1318         /*
1319          * Try to get the next proto for this SPI by searching for that
1320          * SPI in the SPD. Probably not the optimal way, but there seems
1321          * to be nothing better right now.
1322          */
1323         rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1324         if (rc < 0)
1325                 return rc;
1326
1327         fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1328         prm->userdata = (uintptr_t)ss;
1329
1330         /* setup ipsec xform */
1331         prm->ipsec_xform.spi = ss->spi;
1332         prm->ipsec_xform.salt = ss->salt;
1333         prm->ipsec_xform.direction = ss->direction;
1334         prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1335         prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1336                 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1337                 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
1338         prm->ipsec_xform.options.udp_encap = ss->udp_encap;
1339         prm->ipsec_xform.options.ecn = 1;
1340         prm->ipsec_xform.options.copy_dscp = 1;
1341
1342         if (IS_IP4_TUNNEL(ss->flags)) {
1343                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1344                 prm->tun.hdr_len = sizeof(*v4);
1345                 prm->tun.next_proto = rc;
1346                 prm->tun.hdr = v4;
1347         } else if (IS_IP6_TUNNEL(ss->flags)) {
1348                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1349                 prm->tun.hdr_len = sizeof(*v6);
1350                 prm->tun.next_proto = rc;
1351                 prm->tun.hdr = v6;
1352         } else {
1353                 /* transport mode */
1354                 prm->trs.proto = rc;
1355         }
1356
1357         /* setup crypto section */
1358         prm->crypto_xform = ss->xforms;
1359         return 0;
1360 }
1361
1362 static int
1363 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
1364 {
1365         int32_t rc = 0;
1366
1367         ss->sa = sa;
1368
1369         if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1370                 ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
1371                 if (ss->security.ses != NULL) {
1372                         rc = rte_ipsec_session_prepare(ss);
1373                         if (rc != 0)
1374                                 memset(ss, 0, sizeof(*ss));
1375                 }
1376         }
1377
1378         return rc;
1379 }
1380
1381 /*
1382  * Initialise related rte_ipsec_sa object.
1383  */
1384 static int
1385 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
1386 {
1387         int rc;
1388         struct rte_ipsec_sa_prm prm;
1389         struct rte_ipsec_session *ips;
1390         struct rte_ipv4_hdr v4  = {
1391                 .version_ihl = IPVERSION << 4 |
1392                         sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1393                 .time_to_live = IPDEFTTL,
1394                 .next_proto_id = IPPROTO_ESP,
1395                 .src_addr = lsa->src.ip.ip4,
1396                 .dst_addr = lsa->dst.ip.ip4,
1397         };
1398         struct rte_ipv6_hdr v6 = {
1399                 .vtc_flow = htonl(IP6_VERSION << 28),
1400                 .proto = IPPROTO_ESP,
1401         };
1402
1403         if (IS_IP6_TUNNEL(lsa->flags)) {
1404                 memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1405                 memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1406         }
1407
1408         rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1409         if (rc == 0)
1410                 rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1411         if (rc < 0)
1412                 return rc;
1413
1414         /* init primary processing session */
1415         ips = ipsec_get_primary_session(lsa);
1416         rc = fill_ipsec_session(ips, sa);
1417         if (rc != 0)
1418                 return rc;
1419
1420         /* init inline fallback processing session */
1421         if (lsa->fallback_sessions == 1)
1422                 rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
1423
1424         return rc;
1425 }
1426
1427 /*
1428  * Allocate space and init rte_ipsec_sa structures,
1429  * one per session.
1430  */
1431 static int
1432 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
1433 {
1434         int32_t rc, sz;
1435         uint32_t i, idx;
1436         size_t tsz;
1437         struct rte_ipsec_sa *sa;
1438         struct ipsec_sa *lsa;
1439         struct rte_ipsec_sa_prm prm;
1440
1441         /* determine SA size */
1442         idx = 0;
1443         fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1444         sz = rte_ipsec_sa_size(&prm);
1445         if (sz < 0) {
1446                 RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1447                         "failed to determine SA size, error code: %d\n",
1448                         __func__, ctx, nb_ent, socket, sz);
1449                 return sz;
1450         }
1451
1452         tsz = sz * nb_ent;
1453
1454         ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1455         if (ctx->satbl == NULL) {
1456                 RTE_LOG(ERR, IPSEC,
1457                         "%s(%p, %u, %d): failed to allocate %zu bytes\n",
1458                         __func__,  ctx, nb_ent, socket, tsz);
1459                 return -ENOMEM;
1460         }
1461
1462         rc = 0;
1463         for (i = 0; i != nb_ent && rc == 0; i++) {
1464
1465                 idx = i;
1466
1467                 sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1468                 lsa = ctx->sa + idx;
1469
1470                 rc = ipsec_sa_init(lsa, sa, sz);
1471         }
1472
1473         return rc;
1474 }
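
/*
 * Illustration only (not part of the upstream file): rte_ipsec_sa objects are
 * opaque and variable-sized, so ctx->satbl above is a flat byte array rather
 * than a true C array. A hypothetical helper like the one below makes the
 * byte-offset indexing used in ipsec_satbl_init() explicit; 'sa_sz' is the
 * per-entry size obtained from rte_ipsec_sa_size().
 */
static inline struct rte_ipsec_sa *
satbl_entry_sketch(void *satbl, size_t sa_sz, uint32_t i)
{
	/* entry 'i' starts 'sa_sz * i' bytes into the flat table */
	return (struct rte_ipsec_sa *)((uintptr_t)satbl + sa_sz * i);
}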
1475
1476 static int
1477 sa_cmp(const void *p, const void *q)
1478 {
1479         uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
1480         uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
1481
1482         return (spi1 > spi2) - (spi1 < spi2); /* avoid unsigned wraparound */
1483 }
1484
1485 /*
1486  * Find an SA with the given SPI via binary search (arrays are sorted by SPI)
1487  */
1488 int
1489 sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
1490 {
1491         uint32_t num;
1492         struct ipsec_sa *sa;
1493         struct ipsec_sa tmpl;
1494         const struct ipsec_sa *sar;
1495
1496         sar = sa_ctx->sa;
1497         if (inbound != 0)
1498                 num = nb_sa_in;
1499         else
1500                 num = nb_sa_out;
1501
1502         tmpl.spi = spi;
1503
1504         sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
1505         if (sa != NULL)
1506                 return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
1507
1508         return -ENOENT;
1509 }
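
/*
 * Usage sketch (illustration only): sa_spi_present() relies on bsearch(), so
 * the SA arrays must already be sorted by SPI (see sa_sort_arr() below). The
 * hypothetical helper shows how a caller might interpret the return value.
 */
static inline int
spi_lookup_sketch(struct sa_ctx *sa_ctx, uint32_t spi)
{
	int idx;

	idx = sa_spi_present(sa_ctx, spi, 1 /* inbound */);
	if (idx < 0)
		return 0;	/* -ENOENT: no SA configured for this SPI */

	/* idx is the position of the matching entry in sa_ctx->sa[] */
	return 1;
}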
1510
1511 void
1512 sa_init(struct socket_ctx *ctx, int32_t socket_id)
1513 {
1514         int32_t rc;
1515         const char *name;
1516
1517         if (ctx == NULL)
1518                 rte_exit(EXIT_FAILURE, "NULL context.\n");
1519
1520         if (ctx->sa_in != NULL)
1521                 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %d already "
1522                                 "initialized\n", socket_id);
1523
1524         if (ctx->sa_out != NULL)
1525                 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %d already "
1526                                 "initialized\n", socket_id);
1527
1528         if (nb_sa_in > 0) {
1529                 name = "sa_in";
1530                 ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
1531                 if (ctx->sa_in == NULL)
1532                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1533                                 "context %s in socket %d\n", rte_errno,
1534                                 name, socket_id);
1535
1536                 rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
1537                                 &sa_in_cnt);
1538                 if (rc != 0)
1539                         rte_exit(EXIT_FAILURE, "failed to init SAD\n");
1540
1541                 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
1542
1543                 if (app_sa_prm.enable != 0) {
1544                         rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
1545                                 socket_id);
1546                         if (rc != 0)
1547                                 rte_exit(EXIT_FAILURE,
1548                                         "failed to init inbound SAs\n");
1549                 }
1550         } else
1551                 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1552
1553         if (nb_sa_out > 0) {
1554                 name = "sa_out";
1555                 ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
1556                 if (ctx->sa_out == NULL)
1557                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1558                                 "context %s in socket %d\n", rte_errno,
1559                                 name, socket_id);
1560
1561                 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
1562
1563                 if (app_sa_prm.enable != 0) {
1564                         rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
1565                                 socket_id);
1566                         if (rc != 0)
1567                                 rte_exit(EXIT_FAILURE,
1568                                         "failed to init outbound SAs\n");
1569                 }
1570         } else
1571                 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
1572                         "specified\n");
1573 }
1574
1575 int
1576 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1577 {
1578         struct ipsec_mbuf_metadata *priv;
1579         struct ipsec_sa *sa;
1580
1581         priv = get_priv(m);
1582         sa = priv->sa;
1583         if (sa != NULL)
1584                 return (sa_ctx->sa[sa_idx].spi == sa->spi);
1585
1586         RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1587         return 0;
1588 }
1589
1590 void
1591 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1592                 void *sa_arr[], uint16_t nb_pkts)
1593 {
1594         uint32_t i;
1595         void *result_sa;
1596         struct ipsec_sa *sa;
1597
1598         sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
1599
1600         /*
1601          * Mark the need for an inline offload fallback in the LSB of the SA
1602          * pointer. Thanks to the packet grouping mechanism used by
1603          * ipsec_process, packets marked for fallback will form a separate group.
1604          *
1605          * Because the tagged SA pointer is not safe to dereference, it is cast
1606          * to a generic pointer to prevent unintentional use. Use
1607          * ipsec_mask_saptr() to recover a valid struct pointer.
1608          */
1609         for (i = 0; i < nb_pkts; i++) {
1610                 if (sa_arr[i] == NULL)
1611                         continue;
1612
1613                 result_sa = sa = sa_arr[i];
1614                 if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
1615                         sa->fallback_sessions > 0) {
1616                         uintptr_t intsa = (uintptr_t)sa;
1617                         intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
1618                         result_sa = (void *)intsa;
1619                 }
1620                 sa_arr[i] = result_sa;
1621         }
1622 }
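
/*
 * Illustration only (not part of the upstream file): consumers of sa_arr[]
 * must clear the fallback flag from the LSB before dereferencing the pointer;
 * the real helper for that is ipsec_mask_saptr() from ipsec.h. The sketch
 * below shows the assumed pattern.
 */
static inline struct ipsec_sa *
untag_sa_sketch(void *tagged, int *fallback)
{
	uintptr_t v = (uintptr_t)tagged;

	/* LSB set means "process this packet with the fallback session" */
	*fallback = (v & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) != 0;
	return (struct ipsec_sa *)(v & ~(uintptr_t)IPSEC_SA_OFFLOAD_FALLBACK_FLAG);
}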
1623
1624 void
1625 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1626                 void *sa[], uint16_t nb_pkts)
1627 {
1628         uint32_t i;
1629
1630         for (i = 0; i < nb_pkts; i++)
1631                 sa[i] = &sa_ctx->sa[sa_idx[i]];
1632 }
1633
1634 /*
1635  * Select HW offloads to be used.
1636  */
1637 int
1638 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1639                 uint64_t *tx_offloads)
1640 {
1641         struct ipsec_sa *rule;
1642         uint32_t idx_sa;
1643         enum rte_security_session_action_type rule_type;
1644
1645         *rx_offloads = 0;
1646         *tx_offloads = 0;
1647
1648         /* Check for inbound rules that use offloads and use this port */
1649         for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1650                 rule = &sa_in[idx_sa];
1651                 rule_type = ipsec_get_action_type(rule);
1652                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1653                                 rule_type ==
1654                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1655                                 && rule->portid == port_id)
1656                         *rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
1657         }
1658
1659         /* Check for outbound rules that use offloads and use this port */
1660         for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1661                 rule = &sa_out[idx_sa];
1662                 rule_type = ipsec_get_action_type(rule);
1663                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1664                                 rule_type ==
1665                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1666                                 && rule->portid == port_id) {
1667                         *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
1668                         if (rule->mss)
1669                                 *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1670                 }
1671         }
1672         return 0;
1673 }
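
/*
 * Usage sketch (illustration only, hypothetical helper): the masks returned
 * by sa_check_offloads() are meant to be OR-ed into the port configuration
 * before rte_eth_dev_configure(), roughly as shown below.
 */
static inline int
port_offloads_sketch(uint16_t port_id, struct rte_eth_conf *conf,
		uint16_t nb_rxq, uint16_t nb_txq)
{
	uint64_t rx_offloads, tx_offloads;

	sa_check_offloads(port_id, &rx_offloads, &tx_offloads);
	conf->rxmode.offloads |= rx_offloads;
	conf->txmode.offloads |= tx_offloads;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf);
}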
1674
1675 void
1676 sa_sort_arr(void)
1677 {
1678         qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
1679         qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
1680 }
1681
1682 uint32_t
1683 get_nb_crypto_sessions(void)
1684 {
1685         return nb_crypto_sessions;
1686 }