examples/ipsec-secgw: support TCP TSO
examples/ipsec-secgw/sa.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 /*
6  * Security Associations
7  */
8 #include <sys/types.h>
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
12
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
19 #include <rte_ip.h>
20 #include <rte_random.h>
21 #include <rte_ethdev.h>
22 #include <rte_malloc.h>
23
24 #include "ipsec.h"
25 #include "esp.h"
26 #include "parser.h"
27 #include "sad.h"
28
29 #define IPDEFTTL 64
30
31 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
32
33 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
34
35 #define MBUF_NO_SEC_OFFLOAD(m) (((m)->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
36
37 struct supported_cipher_algo {
38         const char *keyword;
39         enum rte_crypto_cipher_algorithm algo;
40         uint16_t iv_len;
41         uint16_t block_size;
42         uint16_t key_len;
43 };
44
45 struct supported_auth_algo {
46         const char *keyword;
47         enum rte_crypto_auth_algorithm algo;
48         uint16_t digest_len;
49         uint16_t key_len;
50         uint8_t key_not_req;
51 };
52
53 struct supported_aead_algo {
54         const char *keyword;
55         enum rte_crypto_aead_algorithm algo;
56         uint16_t iv_len;
57         uint16_t block_size;
58         uint16_t digest_len;
59         uint16_t key_len;
60         uint8_t aad_len;
61 };
62
63
64 const struct supported_cipher_algo cipher_algos[] = {
65         {
66                 .keyword = "null",
67                 .algo = RTE_CRYPTO_CIPHER_NULL,
68                 .iv_len = 0,
69                 .block_size = 4,
70                 .key_len = 0
71         },
72         {
73                 .keyword = "aes-128-cbc",
74                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
75                 .iv_len = 16,
76                 .block_size = 16,
77                 .key_len = 16
78         },
79         {
80                 .keyword = "aes-192-cbc",
81                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
82                 .iv_len = 16,
83                 .block_size = 16,
84                 .key_len = 24
85         },
86         {
87                 .keyword = "aes-256-cbc",
88                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
89                 .iv_len = 16,
90                 .block_size = 16,
91                 .key_len = 32
92         },
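        /*
         * Note: for aes-ctr the configured key carries a 4-byte nonce/salt
         * at its end, so key_len is the real key size plus 4 (16 + 4 = 20).
         */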
93         {
94                 .keyword = "aes-128-ctr",
95                 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
96                 .iv_len = 8,
97                 .block_size = 4,
98                 .key_len = 20
99         },
100         {
101                 .keyword = "3des-cbc",
102                 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
103                 .iv_len = 8,
104                 .block_size = 8,
105                 .key_len = 24
106         }
107 };
108
109 const struct supported_auth_algo auth_algos[] = {
110         {
111                 .keyword = "null",
112                 .algo = RTE_CRYPTO_AUTH_NULL,
113                 .digest_len = 0,
114                 .key_len = 0,
115                 .key_not_req = 1
116         },
117         {
118                 .keyword = "sha1-hmac",
119                 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
120                 .digest_len = 12,
121                 .key_len = 20
122         },
123         {
124                 .keyword = "sha256-hmac",
125                 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
126                 .digest_len = 16,
127                 .key_len = 32
128         }
129 };
130
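/*
 * As with aes-ctr above, key_len for the GCM entries includes the 4-byte salt
 * appended to the configured key (e.g. 16 + 4 = 20 for aes-128-gcm).
 */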
131 const struct supported_aead_algo aead_algos[] = {
132         {
133                 .keyword = "aes-128-gcm",
134                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
135                 .iv_len = 8,
136                 .block_size = 4,
137                 .key_len = 20,
138                 .digest_len = 16,
139                 .aad_len = 8,
140         },
141         {
142                 .keyword = "aes-192-gcm",
143                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
144                 .iv_len = 8,
145                 .block_size = 4,
146                 .key_len = 28,
147                 .digest_len = 16,
148                 .aad_len = 8,
149         },
150         {
151                 .keyword = "aes-256-gcm",
152                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
153                 .iv_len = 8,
154                 .block_size = 4,
155                 .key_len = 36,
156                 .digest_len = 16,
157                 .aad_len = 8,
158         }
159 };
160
161 #define SA_INIT_NB      128
162
163 static uint32_t nb_crypto_sessions;
164 struct ipsec_sa *sa_out;
165 uint32_t nb_sa_out;
166 static uint32_t sa_out_sz;
167 static struct ipsec_sa_cnt sa_out_cnt;
168
169 struct ipsec_sa *sa_in;
170 uint32_t nb_sa_in;
171 static uint32_t sa_in_sz;
172 static struct ipsec_sa_cnt sa_in_cnt;
173
174 static const struct supported_cipher_algo *
175 find_match_cipher_algo(const char *cipher_keyword)
176 {
177         size_t i;
178
179         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
180                 const struct supported_cipher_algo *algo =
181                         &cipher_algos[i];
182
183                 if (strcmp(cipher_keyword, algo->keyword) == 0)
184                         return algo;
185         }
186
187         return NULL;
188 }
189
190 static const struct supported_auth_algo *
191 find_match_auth_algo(const char *auth_keyword)
192 {
193         size_t i;
194
195         for (i = 0; i < RTE_DIM(auth_algos); i++) {
196                 const struct supported_auth_algo *algo =
197                         &auth_algos[i];
198
199                 if (strcmp(auth_keyword, algo->keyword) == 0)
200                         return algo;
201         }
202
203         return NULL;
204 }
205
206 static const struct supported_aead_algo *
207 find_match_aead_algo(const char *aead_keyword)
208 {
209         size_t i;
210
211         for (i = 0; i < RTE_DIM(aead_algos); i++) {
212                 const struct supported_aead_algo *algo =
213                         &aead_algos[i];
214
215                 if (strcmp(aead_keyword, algo->keyword) == 0)
216                         return algo;
217         }
218
219         return NULL;
220 }
221
222 /** parse_key_string
223  *  parse an x:x:x:x... hex byte key string into uint8_t *key
224  *  return:
225  *  > 0: number of bytes parsed
226  *  0:   failed
227  */
228 static uint32_t
229 parse_key_string(const char *key_str, uint8_t *key)
230 {
231         const char *pt_start = key_str, *pt_end = key_str;
232         uint32_t nb_bytes = 0;
233
234         while (pt_end != NULL) {
235                 char sub_str[3] = {0};
236
237                 pt_end = strchr(pt_start, ':');
238
239                 if (pt_end == NULL) {
240                         if (strlen(pt_start) > 2)
241                                 return 0;
242                         strncpy(sub_str, pt_start, 2);
243                 } else {
244                         if (pt_end - pt_start > 2)
245                                 return 0;
246
247                         strncpy(sub_str, pt_start, pt_end - pt_start);
248                         pt_start = pt_end + 1;
249                 }
250
251                 key[nb_bytes++] = strtol(sub_str, NULL, 16);
252         }
253
254         return nb_bytes;
255 }
256
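/*
 * Grow an SA table on demand: allocate SA_INIT_NB entries on first use, then
 * double the capacity (zeroing the newly added half) whenever it fills up.
 */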
257 static int
258 extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
259 {
260         if (*sa_tbl == NULL) {
261                 *sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
262                 if (*sa_tbl == NULL)
263                         return -1;
264                 *cur_sz = SA_INIT_NB;
265                 return 0;
266         }
267
268         if (cur_cnt >= *cur_sz) {
269                 *sa_tbl = realloc(*sa_tbl,
270                         *cur_sz * sizeof(struct ipsec_sa) * 2);
271                 if (*sa_tbl == NULL)
272                         return -1;
273                 /* clean reallocated extra space */
274                 memset(&(*sa_tbl)[*cur_sz], 0,
275                         *cur_sz * sizeof(struct ipsec_sa));
276                 *cur_sz *= 2;
277         }
278
279         return 0;
280 }
281
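/*
 * Parse one SA rule: tokens[0] is the direction ("in"/"out"), tokens[1] the
 * SPI, and the remaining tokens are keyword/value pairs handled below.
 * An illustrative outbound token stream (key bytes and addresses are
 * placeholders):
 *
 *   out 5 cipher_algo aes-128-cbc cipher_key a0:a1:...:af
 *       auth_algo sha1-hmac auth_key b0:b1:...:b3 mode ipv4-tunnel
 *       src 172.16.1.5 dst 172.16.2.5
 */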
282 void
283 parse_sa_tokens(char **tokens, uint32_t n_tokens,
284         struct parse_status *status)
285 {
286         struct ipsec_sa *rule = NULL;
287         struct rte_ipsec_session *ips;
288         uint32_t ti; /*token index*/
289         uint32_t *ri /*rule index*/;
290         struct ipsec_sa_cnt *sa_cnt;
291         uint32_t cipher_algo_p = 0;
292         uint32_t auth_algo_p = 0;
293         uint32_t aead_algo_p = 0;
294         uint32_t src_p = 0;
295         uint32_t dst_p = 0;
296         uint32_t mode_p = 0;
297         uint32_t type_p = 0;
298         uint32_t portid_p = 0;
299         uint32_t fallback_p = 0;
300         int16_t status_p = 0;
301         uint16_t udp_encap_p = 0;
302
303         if (strcmp(tokens[0], "in") == 0) {
304                 ri = &nb_sa_in;
305                 sa_cnt = &sa_in_cnt;
306                 if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
307                         return;
308                 rule = &sa_in[*ri];
309                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
310         } else {
311                 ri = &nb_sa_out;
312                 sa_cnt = &sa_out_cnt;
313                 if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
314                         return;
315                 rule = &sa_out[*ri];
316                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
317         }
318
319         /* spi number */
320         APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
321         if (status->status < 0)
322                 return;
323         if (atoi(tokens[1]) == INVALID_SPI)
324                 return;
325         rule->spi = atoi(tokens[1]);
326         rule->portid = UINT16_MAX;
327         ips = ipsec_get_primary_session(rule);
328
329         for (ti = 2; ti < n_tokens; ti++) {
330                 if (strcmp(tokens[ti], "mode") == 0) {
331                         APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
332                         if (status->status < 0)
333                                 return;
334
335                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
336                         if (status->status < 0)
337                                 return;
338
339                         if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
340                                 sa_cnt->nb_v4++;
341                                 rule->flags = IP4_TUNNEL;
342                         } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
343                                 sa_cnt->nb_v6++;
344                                 rule->flags = IP6_TUNNEL;
345                         } else if (strcmp(tokens[ti], "transport") == 0) {
346                                 sa_cnt->nb_v4++;
347                                 sa_cnt->nb_v6++;
348                                 rule->flags = TRANSPORT;
349                         } else {
350                                 APP_CHECK(0, status, "unrecognized "
351                                         "input \"%s\"", tokens[ti]);
352                                 return;
353                         }
354
355                         mode_p = 1;
356                         continue;
357                 }
358
359                 if (strcmp(tokens[ti], "cipher_algo") == 0) {
360                         const struct supported_cipher_algo *algo;
361                         uint32_t key_len;
362
363                         APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
364                                 status);
365                         if (status->status < 0)
366                                 return;
367
368                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
369                         if (status->status < 0)
370                                 return;
371
372                         algo = find_match_cipher_algo(tokens[ti]);
373
374                         APP_CHECK(algo != NULL, status, "unrecognized "
375                                 "input \"%s\"", tokens[ti]);
376
377                         if (status->status < 0)
378                                 return;
379
380                         rule->cipher_algo = algo->algo;
381                         rule->block_size = algo->block_size;
382                         rule->iv_len = algo->iv_len;
383                         rule->cipher_key_len = algo->key_len;
384
385                         /* for NULL algorithm, no cipher key required */
386                         if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
387                                 cipher_algo_p = 1;
388                                 continue;
389                         }
390
391                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
392                         if (status->status < 0)
393                                 return;
394
395                         APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
396                                 status, "unrecognized input \"%s\", "
397                                 "expect \"cipher_key\"", tokens[ti]);
398                         if (status->status < 0)
399                                 return;
400
401                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
402                         if (status->status < 0)
403                                 return;
404
405                         key_len = parse_key_string(tokens[ti],
406                                 rule->cipher_key);
407                         APP_CHECK(key_len == rule->cipher_key_len, status,
408                                 "unrecognized input \"%s\"", tokens[ti]);
409                         if (status->status < 0)
410                                 return;
411
412                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
413                                 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
414                                 rule->salt = (uint32_t)rte_rand();
415
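                        /*
                         * For aes-ctr the last 4 bytes of the configured key
                         * are the nonce/salt: strip them off the key proper.
                         */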
416                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
417                                 key_len -= 4;
418                                 rule->cipher_key_len = key_len;
419                                 memcpy(&rule->salt,
420                                         &rule->cipher_key[key_len], 4);
421                         }
422
423                         cipher_algo_p = 1;
424                         continue;
425                 }
426
427                 if (strcmp(tokens[ti], "auth_algo") == 0) {
428                         const struct supported_auth_algo *algo;
429                         uint32_t key_len;
430
431                         APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
432                                 status);
433                         if (status->status < 0)
434                                 return;
435
436                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
437                         if (status->status < 0)
438                                 return;
439
440                         algo = find_match_auth_algo(tokens[ti]);
441                         APP_CHECK(algo != NULL, status, "unrecognized "
442                                 "input \"%s\"", tokens[ti]);
443
444                         if (status->status < 0)
445                                 return;
446
447                         rule->auth_algo = algo->algo;
448                         rule->auth_key_len = algo->key_len;
449                         rule->digest_len = algo->digest_len;
450
451                         /* NULL algorithm and combined algos do not
452                          * require auth key
453                          */
454                         if (algo->key_not_req) {
455                                 auth_algo_p = 1;
456                                 continue;
457                         }
458
459                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
460                         if (status->status < 0)
461                                 return;
462
463                         APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
464                                 status, "unrecognized input \"%s\", "
465                                 "expect \"auth_key\"", tokens[ti]);
466                         if (status->status < 0)
467                                 return;
468
469                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
470                         if (status->status < 0)
471                                 return;
472
473                         key_len = parse_key_string(tokens[ti],
474                                 rule->auth_key);
475                         APP_CHECK(key_len == rule->auth_key_len, status,
476                                 "unrecognized input \"%s\"", tokens[ti]);
477                         if (status->status < 0)
478                                 return;
479
480                         auth_algo_p = 1;
481                         continue;
482                 }
483
484                 if (strcmp(tokens[ti], "aead_algo") == 0) {
485                         const struct supported_aead_algo *algo;
486                         uint32_t key_len;
487
488                         APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
489                                 status);
490                         if (status->status < 0)
491                                 return;
492
493                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
494                         if (status->status < 0)
495                                 return;
496
497                         algo = find_match_aead_algo(tokens[ti]);
498
499                         APP_CHECK(algo != NULL, status, "unrecognized "
500                                 "input \"%s\"", tokens[ti]);
501
502                         if (status->status < 0)
503                                 return;
504
505                         rule->aead_algo = algo->algo;
506                         rule->cipher_key_len = algo->key_len;
507                         rule->digest_len = algo->digest_len;
508                         rule->aad_len = algo->aad_len;
509                         rule->block_size = algo->block_size;
510                         rule->iv_len = algo->iv_len;
511
512                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
513                         if (status->status < 0)
514                                 return;
515
516                         APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
517                                 status, "unrecognized input \"%s\", "
518                                 "expect \"aead_key\"", tokens[ti]);
519                         if (status->status < 0)
520                                 return;
521
522                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
523                         if (status->status < 0)
524                                 return;
525
526                         key_len = parse_key_string(tokens[ti],
527                                 rule->cipher_key);
528                         APP_CHECK(key_len == rule->cipher_key_len, status,
529                                 "unrecognized input \"%s\"", tokens[ti]);
530                         if (status->status < 0)
531                                 return;
532
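                        /*
                         * For GCM the configured key also carries the 4-byte
                         * salt at its end: split it off into rule->salt.
                         */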
533                         key_len -= 4;
534                         rule->cipher_key_len = key_len;
535                         memcpy(&rule->salt,
536                                 &rule->cipher_key[key_len], 4);
537
538                         aead_algo_p = 1;
539                         continue;
540                 }
541
542                 if (strcmp(tokens[ti], "src") == 0) {
543                         APP_CHECK_PRESENCE(src_p, tokens[ti], status);
544                         if (status->status < 0)
545                                 return;
546
547                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
548                         if (status->status < 0)
549                                 return;
550
551                         if (IS_IP4_TUNNEL(rule->flags)) {
552                                 struct in_addr ip;
553
554                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
555                                         &ip, NULL) == 0, status,
556                                         "unrecognized input \"%s\", "
557                                         "expect valid ipv4 addr",
558                                         tokens[ti]);
559                                 if (status->status < 0)
560                                         return;
561                                 rule->src.ip.ip4 = rte_bswap32(
562                                         (uint32_t)ip.s_addr);
563                         } else if (IS_IP6_TUNNEL(rule->flags)) {
564                                 struct in6_addr ip;
565
566                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
567                                         NULL) == 0, status,
568                                         "unrecognized input \"%s\", "
569                                         "expect valid ipv6 addr",
570                                         tokens[ti]);
571                                 if (status->status < 0)
572                                         return;
573                                 memcpy(rule->src.ip.ip6.ip6_b,
574                                         ip.s6_addr, 16);
575                         } else if (IS_TRANSPORT(rule->flags)) {
576                                 APP_CHECK(0, status, "unrecognized input "
577                                         "\"%s\"", tokens[ti]);
578                                 return;
579                         }
580
581                         src_p = 1;
582                         continue;
583                 }
584
585                 if (strcmp(tokens[ti], "dst") == 0) {
586                         APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
587                         if (status->status < 0)
588                                 return;
589
590                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
591                         if (status->status < 0)
592                                 return;
593
594                         if (IS_IP4_TUNNEL(rule->flags)) {
595                                 struct in_addr ip;
596
597                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
598                                         &ip, NULL) == 0, status,
599                                         "unrecognized input \"%s\", "
600                                         "expect valid ipv4 addr",
601                                         tokens[ti]);
602                                 if (status->status < 0)
603                                         return;
604                                 rule->dst.ip.ip4 = rte_bswap32(
605                                         (uint32_t)ip.s_addr);
606                         } else if (IS_IP6_TUNNEL(rule->flags)) {
607                                 struct in6_addr ip;
608
609                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
610                                         NULL) == 0, status,
611                                         "unrecognized input \"%s\", "
612                                         "expect valid ipv6 addr",
613                                         tokens[ti]);
614                                 if (status->status < 0)
615                                         return;
616                                 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
617                         } else if (IS_TRANSPORT(rule->flags)) {
618                                 APP_CHECK(0, status, "unrecognized "
619                                         "input \"%s\"", tokens[ti]);
620                                 return;
621                         }
622
623                         dst_p = 1;
624                         continue;
625                 }
626
627                 if (strcmp(tokens[ti], "type") == 0) {
628                         APP_CHECK_PRESENCE(type_p, tokens[ti], status);
629                         if (status->status < 0)
630                                 return;
631
632                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
633                         if (status->status < 0)
634                                 return;
635
636                         if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
637                                 ips->type =
638                                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
639                         else if (strcmp(tokens[ti],
640                                         "inline-protocol-offload") == 0)
641                                 ips->type =
642                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
643                         else if (strcmp(tokens[ti],
644                                         "lookaside-protocol-offload") == 0)
645                                 ips->type =
646                                 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
647                         else if (strcmp(tokens[ti], "no-offload") == 0)
648                                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
649                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
650                                 ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
651                         else {
652                                 APP_CHECK(0, status, "Invalid input \"%s\"",
653                                                 tokens[ti]);
654                                 return;
655                         }
656
657                         type_p = 1;
658                         continue;
659                 }
660
661                 if (strcmp(tokens[ti], "port_id") == 0) {
662                         APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
663                         if (status->status < 0)
664                                 return;
665                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
666                         if (status->status < 0)
667                                 return;
668                         if (rule->portid == UINT16_MAX)
669                                 rule->portid = atoi(tokens[ti]);
670                         else if (rule->portid != atoi(tokens[ti])) {
671                                 APP_CHECK(0, status,
672                                         "portid %s does not match already assigned portid %u",
673                                         tokens[ti], rule->portid);
674                                 return;
675                         }
676                         portid_p = 1;
677                         continue;
678                 }
679
680                 if (strcmp(tokens[ti], "mss") == 0) {
681                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
682                         if (status->status < 0)
683                                 return;
684                         rule->mss = atoi(tokens[ti]);
685                         if (status->status < 0)
686                                 return;
687                         continue;
688                 }
689
690                 if (strcmp(tokens[ti], "fallback") == 0) {
691                         struct rte_ipsec_session *fb;
692
693                         APP_CHECK(app_sa_prm.enable, status, "Fallback session "
694                                 "not allowed for legacy mode.");
695                         if (status->status < 0)
696                                 return;
697                         APP_CHECK(ips->type ==
698                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
699                                 "Fallback session is only allowed when the "
700                                 "primary session type is inline-crypto-offload.");
701                         if (status->status < 0)
702                                 return;
703                         APP_CHECK(rule->direction ==
704                                 RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
705                                 "Fallback session not allowed for egress "
706                                 "rule");
707                         if (status->status < 0)
708                                 return;
709                         APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
710                         if (status->status < 0)
711                                 return;
712                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
713                         if (status->status < 0)
714                                 return;
715                         fb = ipsec_get_fallback_session(rule);
716                         if (strcmp(tokens[ti], "lookaside-none") == 0)
717                                 fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
718                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
719                                 fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
720                         else {
721                                 APP_CHECK(0, status, "unrecognized fallback "
722                                         "type %s.", tokens[ti]);
723                                 return;
724                         }
725
726                         rule->fallback_sessions = 1;
727                         nb_crypto_sessions++;
728                         fallback_p = 1;
729                         continue;
730                 }
731                 if (strcmp(tokens[ti], "flow-direction") == 0) {
732                         switch (ips->type) {
733                         case RTE_SECURITY_ACTION_TYPE_NONE:
734                         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
735                                 rule->fdir_flag = 1;
736                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
737                                 if (status->status < 0)
738                                         return;
739                                 if (rule->portid == UINT16_MAX)
740                                         rule->portid = atoi(tokens[ti]);
741                                 else if (rule->portid != atoi(tokens[ti])) {
742                                         APP_CHECK(0, status,
743                                                 "portid %s does not match already assigned portid %u",
744                                                 tokens[ti], rule->portid);
745                                         return;
746                                 }
747                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
748                                 if (status->status < 0)
749                                         return;
750                                 rule->fdir_qid = atoi(tokens[ti]);
751                                 /* validating portid and queueid */
752                                 status_p = check_flow_params(rule->portid,
753                                                 rule->fdir_qid);
754                                 if (status_p < 0) {
755                                         printf("port id %u / queue id %u is "
756                                                 "not valid\n", rule->portid,
757                                                  rule->fdir_qid);
758                                 }
759                                 break;
760                         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
761                         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
762                         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
763                         default:
764                                 APP_CHECK(0, status,
765                                         "flow director not supported for security session type %d",
766                                         ips->type);
767                                 return;
768                         }
769                         continue;
770                 }
771                 if (strcmp(tokens[ti], "udp-encap") == 0) {
772                         switch (ips->type) {
773                         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
774                         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
775                                 APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
776                                                    status);
777                                 if (status->status < 0)
778                                         return;
779
780                                 rule->udp_encap = 1;
781                                 app_sa_prm.udp_encap = 1;
782                                 udp_encap_p = 1;
783                                 break;
784                         default:
785                                 APP_CHECK(0, status,
786                                         "UDP encapsulation not supported for "
787                                         "security session type %d",
788                                         ips->type);
789                                 return;
790                         }
791                         continue;
792                 }
793
794                 /* unrecognized input */
795                 APP_CHECK(0, status, "unrecognized input \"%s\"",
796                         tokens[ti]);
797                 return;
798         }
799
800         if (aead_algo_p) {
801                 APP_CHECK(cipher_algo_p == 0, status,
802                                 "AEAD used, no need for cipher options");
803                 if (status->status < 0)
804                         return;
805
806                 APP_CHECK(auth_algo_p == 0, status,
807                                 "AEAD used, no need for auth options");
808                 if (status->status < 0)
809                         return;
810         } else {
811                 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
812                 if (status->status < 0)
813                         return;
814
815                 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
816                 if (status->status < 0)
817                         return;
818         }
819
820         APP_CHECK(mode_p == 1, status, "missing mode option");
821         if (status->status < 0)
822                 return;
823
824         if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
825                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
826                 printf("Missing portid option, falling back to non-offload\n");
827
828         if (!type_p || (!portid_p && ips->type !=
829                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
830                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
831         }
832
833         nb_crypto_sessions++;
834         *ri = *ri + 1;
835 }
836
837 static void
838 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
839 {
840         uint32_t i;
841         uint8_t a, b, c, d;
842         const struct rte_ipsec_session *ips;
843         const struct rte_ipsec_session *fallback_ips;
844
845         printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
846
847         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
848                 if (cipher_algos[i].algo == sa->cipher_algo &&
849                                 cipher_algos[i].key_len == sa->cipher_key_len) {
850                         printf("%s ", cipher_algos[i].keyword);
851                         break;
852                 }
853         }
854
855         for (i = 0; i < RTE_DIM(auth_algos); i++) {
856                 if (auth_algos[i].algo == sa->auth_algo) {
857                         printf("%s ", auth_algos[i].keyword);
858                         break;
859                 }
860         }
861
862         for (i = 0; i < RTE_DIM(aead_algos); i++) {
863                 if (aead_algos[i].algo == sa->aead_algo &&
864                                 aead_algos[i].key_len-4 == sa->cipher_key_len) {
865                         printf("%s ", aead_algos[i].keyword);
866                         break;
867                 }
868         }
869
870         printf("mode:");
871
872         switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
873         case IP4_TUNNEL:
874                 printf("IP4Tunnel ");
875                 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
876                 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
877                 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
878                 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
879                 break;
880         case IP6_TUNNEL:
881                 printf("IP6Tunnel ");
882                 for (i = 0; i < 16; i++) {
883                         if (i % 2 && i != 15)
884                                 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
885                         else
886                                 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
887                 }
888                 printf(" ");
889                 for (i = 0; i < 16; i++) {
890                         if (i % 2 && i != 15)
891                                 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
892                         else
893                                 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
894                 }
895                 break;
896         case TRANSPORT:
897                 printf("Transport ");
898                 break;
899         }
900
901         ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
902         printf(" type:");
903         switch (ips->type) {
904         case RTE_SECURITY_ACTION_TYPE_NONE:
905                 printf("no-offload ");
906                 break;
907         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
908                 printf("inline-crypto-offload ");
909                 break;
910         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
911                 printf("inline-protocol-offload ");
912                 break;
913         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
914                 printf("lookaside-protocol-offload ");
915                 break;
916         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
917                 printf("cpu-crypto-accelerated ");
918                 break;
919         }
920
921         fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
922         if (fallback_ips != NULL && sa->fallback_sessions > 0) {
923                 printf("inline fallback: ");
924                 switch (fallback_ips->type) {
925                 case RTE_SECURITY_ACTION_TYPE_NONE:
926                         printf("lookaside-none");
927                         break;
928                 case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
929                         printf("cpu-crypto-accelerated");
930                         break;
931                 default:
932                         printf("invalid");
933                         break;
934                 }
935         }
936         if (sa->fdir_flag == 1)
937                 printf("flow-direction port %d queue %d", sa->portid,
938                                 sa->fdir_qid);
939
940         printf("\n");
941 }
942
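/*
 * Allocate a per-socket SA context: the crypto transform array lives in a
 * named memzone, the sa_ctx structure (including its SA array) in
 * rte_zmalloc()'d memory.
 */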
943 static struct sa_ctx *
944 sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
945 {
946         char s[PATH_MAX];
947         struct sa_ctx *sa_ctx;
948         uint32_t mz_size;
949         const struct rte_memzone *mz;
950
951         snprintf(s, sizeof(s), "%s_%u", name, socket_id);
952
953         /* Create SA context */
954         printf("Creating SA context with %u maximum entries on socket %d\n",
955                         nb_sa, socket_id);
956
957         mz_size = sizeof(struct ipsec_xf) * nb_sa;
958         mz = rte_memzone_reserve(s, mz_size, socket_id,
959                         RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
960         if (mz == NULL) {
961                 printf("Failed to allocate SA XFORM memory\n");
962                 rte_errno = ENOMEM;
963                 return NULL;
964         }
965
966         sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
967                 sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
968
969         if (sa_ctx == NULL) {
970                 printf("Failed to allocate SA CTX memory\n");
971                 rte_errno = ENOMEM;
972                 rte_memzone_free(mz);
973                 return NULL;
974         }
975
976         sa_ctx->xf = (struct ipsec_xf *)mz->addr;
977         sa_ctx->nb_sa = nb_sa;
978
979         return sa_ctx;
980 }
981
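/*
 * Verify the port can handle inline IPsec: RX security offload for inbound
 * SAs, TX security offload for outbound ones (plus TCP TSO when an MSS is
 * configured).
 */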
982 static int
983 check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
984 {
985         struct rte_eth_dev_info dev_info;
986         int retval;
987
988         retval = rte_eth_dev_info_get(portid, &dev_info);
989         if (retval != 0) {
990                 RTE_LOG(ERR, IPSEC,
991                         "Error during getting device (port %u) info: %s\n",
992                         portid, strerror(-retval));
993
994                 return retval;
995         }
996
997         if (inbound) {
998                 if ((dev_info.rx_offload_capa &
999                                 RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
1000                         RTE_LOG(WARNING, PORT,
1001                                 "hardware RX IPSec offload is not supported\n");
1002                         return -EINVAL;
1003                 }
1004
1005         } else { /* outbound */
1006                 if ((dev_info.tx_offload_capa &
1007                                 RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
1008                         RTE_LOG(WARNING, PORT,
1009                                 "hardware TX IPSec offload is not supported\n");
1010                         return -EINVAL;
1011                 }
1012                 if (tso && (dev_info.tx_offload_capa &
1013                                 RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
1014                         RTE_LOG(WARNING, PORT,
1015                                 "hardware TCP TSO offload is not supported\n");
1016                         return -EINVAL;
1017                 }
1018         }
1019         return 0;
1020 }
1021
1022 /*
1023  * Helper function, tries to determine next_proto for SPI
1024  * by searching through SP rules.
1025  */
1026 static int
1027 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
1028                 struct ip_addr ip_addr[2], uint32_t mask[2])
1029 {
1030         int32_t rc4, rc6;
1031
1032         rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1033                                 ip_addr, mask);
1034         rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1035                                 ip_addr, mask);
1036
1037         if (rc4 >= 0) {
1038                 if (rc6 >= 0) {
1039                         RTE_LOG(ERR, IPSEC,
1040                                 "%s: SPI %u used simultaneously by "
1041                                 "IPv4 (%d) and IPv6 (%d) SP rules\n",
1042                                 __func__, spi, rc4, rc6);
1043                         return -EINVAL;
1044                 } else
1045                         return IPPROTO_IPIP;
1046         } else if (rc6 < 0) {
1047                 RTE_LOG(ERR, IPSEC,
1048                         "%s: SPI %u is not used by any SP rule\n",
1049                         __func__, spi);
1050                 return -EINVAL;
1051         } else
1052                 return IPPROTO_IPV6;
1053 }
1054
1055 /*
1056  * Helper function for getting source and destination IP addresses
1057  * from the SP. Needed for inline crypto transport mode, as addresses are not
1058  * provided in the config file for that mode. It checks whether an SP exists
1059  * for the current SA and, based on the protocol returned, stores the
1060  * appropriate addresses from the SP into the SA.
1061  */
1062 static int
1063 sa_add_address_inline_crypto(struct ipsec_sa *sa)
1064 {
1065         int protocol;
1066         struct ip_addr ip_addr[2];
1067         uint32_t mask[2];
1068
1069         protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
1070         if (protocol < 0)
1071                 return protocol;
1072         else if (protocol == IPPROTO_IPIP) {
1073                 sa->flags |= IP4_TRANSPORT;
1074                 if (mask[0] == IP4_FULL_MASK &&
1075                                 mask[1] == IP4_FULL_MASK &&
1076                                 ip_addr[0].ip.ip4 != 0 &&
1077                                 ip_addr[1].ip.ip4 != 0) {
1078
1079                         sa->src.ip.ip4 = ip_addr[0].ip.ip4;
1080                         sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
1081                 } else {
1082                         RTE_LOG(ERR, IPSEC,
1083                         "%s: No valid address or mask entry in"
1084                         " IPv4 SP rule for SPI %u\n",
1085                         __func__, sa->spi);
1086                         return -EINVAL;
1087                 }
1088         } else if (protocol == IPPROTO_IPV6) {
1089                 sa->flags |= IP6_TRANSPORT;
1090                 if (mask[0] == IP6_FULL_MASK &&
1091                                 mask[1] == IP6_FULL_MASK &&
1092                                 (ip_addr[0].ip.ip6.ip6[0] != 0 ||
1093                                 ip_addr[0].ip.ip6.ip6[1] != 0) &&
1094                                 (ip_addr[1].ip.ip6.ip6[0] != 0 ||
1095                                 ip_addr[1].ip.ip6.ip6[1] != 0)) {
1096
1097                         sa->src.ip.ip6 = ip_addr[0].ip.ip6;
1098                         sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
1099                 } else {
1100                         RTE_LOG(ERR, IPSEC,
1101                         "%s: No valid address or mask entry in"
1102                         " IPv6 SP rule for SPI %u\n",
1103                         __func__, sa->spi);
1104                         return -EINVAL;
1105                 }
1106         }
1107         return 0;
1108 }
1109
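/*
 * Copy the parsed SA entries into the SA context: add inbound SAs to the SAD,
 * verify inline offload capabilities, build the crypto/AEAD transform chains
 * and create inline sessions and flow rules where requested.
 */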
1110 static int
1111 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1112                 uint32_t nb_entries, uint32_t inbound,
1113                 struct socket_ctx *skt_ctx)
1114 {
1115         struct ipsec_sa *sa;
1116         uint32_t i, idx;
1117         uint16_t iv_length, aad_length;
1118         int inline_status;
1119         int32_t rc;
1120         struct rte_ipsec_session *ips;
1121
1122         /* for ESN, the upper 32 bits of the SQN also need to be part of the AAD */
1123         aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
1124
1125         for (i = 0; i < nb_entries; i++) {
1126                 idx = i;
1127                 sa = &sa_ctx->sa[idx];
1128                 if (sa->spi != 0) {
1129                         printf("Index %u already in use by SPI %u\n",
1130                                         idx, sa->spi);
1131                         return -EINVAL;
1132                 }
1133                 *sa = entries[i];
1134
1135                 if (inbound) {
1136                         rc = ipsec_sad_add(&sa_ctx->sad, sa);
1137                         if (rc != 0)
1138                                 return rc;
1139                 }
1140
1141                 sa->seq = 0;
1142                 ips = ipsec_get_primary_session(sa);
1143
1144                 if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1145                         ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1146                         if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
1147                                 return -EINVAL;
1148                 }
1149
1150                 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1151                 case IP4_TUNNEL:
1152                         sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
1153                         sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
1154                         break;
1155                 case TRANSPORT:
1156                         if (ips->type ==
1157                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1158                                 inline_status =
1159                                         sa_add_address_inline_crypto(sa);
1160                                 if (inline_status < 0)
1161                                         return inline_status;
1162                         }
1163                         break;
1164                 }
1165
1166                 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
1167                         iv_length = 12;
1168
1169                         sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1170                         sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
1171                         sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
1172                         sa_ctx->xf[idx].a.aead.key.length =
1173                                 sa->cipher_key_len;
1174                         sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
1175                                 RTE_CRYPTO_AEAD_OP_DECRYPT :
1176                                 RTE_CRYPTO_AEAD_OP_ENCRYPT;
1177                         sa_ctx->xf[idx].a.next = NULL;
1178                         sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
1179                         sa_ctx->xf[idx].a.aead.iv.length = iv_length;
1180                         sa_ctx->xf[idx].a.aead.aad_length =
1181                                 sa->aad_len + aad_length;
1182                         sa_ctx->xf[idx].a.aead.digest_length =
1183                                 sa->digest_len;
1184
1185                         sa->xforms = &sa_ctx->xf[idx].a;
1186                 } else {
1187                         switch (sa->cipher_algo) {
1188                         case RTE_CRYPTO_CIPHER_NULL:
1189                         case RTE_CRYPTO_CIPHER_3DES_CBC:
1190                         case RTE_CRYPTO_CIPHER_AES_CBC:
1191                                 iv_length = sa->iv_len;
1192                                 break;
1193                         case RTE_CRYPTO_CIPHER_AES_CTR:
1194                                 iv_length = 16;
1195                                 break;
1196                         default:
1197                                 RTE_LOG(ERR, IPSEC_ESP,
1198                                                 "unsupported cipher algorithm %u\n",
1199                                                 sa->cipher_algo);
1200                                 return -EINVAL;
1201                         }
1202
1203                         if (inbound) {
1204                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1205                                 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
1206                                 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
1207                                 sa_ctx->xf[idx].b.cipher.key.length =
1208                                         sa->cipher_key_len;
1209                                 sa_ctx->xf[idx].b.cipher.op =
1210                                         RTE_CRYPTO_CIPHER_OP_DECRYPT;
1211                                 sa_ctx->xf[idx].b.next = NULL;
1212                                 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
1213                                 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
1214
1215                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1216                                 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
1217                                 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
1218                                 sa_ctx->xf[idx].a.auth.key.length =
1219                                         sa->auth_key_len;
1220                                 sa_ctx->xf[idx].a.auth.digest_length =
1221                                         sa->digest_len;
1222                                 sa_ctx->xf[idx].a.auth.op =
1223                                         RTE_CRYPTO_AUTH_OP_VERIFY;
1224                         } else { /* outbound */
1225                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1226                                 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
1227                                 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
1228                                 sa_ctx->xf[idx].a.cipher.key.length =
1229                                         sa->cipher_key_len;
1230                                 sa_ctx->xf[idx].a.cipher.op =
1231                                         RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1232                                 sa_ctx->xf[idx].a.next = NULL;
1233                                 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
1234                                 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
1235
1236                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1237                                 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
1238                                 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
1239                                 sa_ctx->xf[idx].b.auth.key.length =
1240                                         sa->auth_key_len;
1241                                 sa_ctx->xf[idx].b.auth.digest_length =
1242                                         sa->digest_len;
1243                                 sa_ctx->xf[idx].b.auth.op =
1244                                         RTE_CRYPTO_AUTH_OP_GENERATE;
1245                         }
1246
1247                         sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1248                         sa_ctx->xf[idx].b.next = NULL;
1249                         sa->xforms = &sa_ctx->xf[idx].a;
1250                 }
1251
1252                 if (ips->type ==
1253                         RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1254                         ips->type ==
1255                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1256                         rc = create_inline_session(skt_ctx, sa, ips);
1257                         if (rc != 0) {
1258                                 RTE_LOG(ERR, IPSEC_ESP,
1259                                         "create_inline_session() failed\n");
1260                                 return -EINVAL;
1261                         }
1262                 }
1263
1264                 if (sa->fdir_flag && inbound) {
1265                         rc = create_ipsec_esp_flow(sa);
1266                         if (rc != 0)
1267                                 RTE_LOG(ERR, IPSEC_ESP,
1268                                         "create_ipsec_esp_flow() failed\n");
1269                 }
1270                 print_one_sa_rule(sa, inbound);
1271         }
1272
1273         return 0;
1274 }
1275
1276 static inline int
1277 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1278                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1279 {
1280         return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
1281 }
1282
1283 static inline int
1284 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1285                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1286 {
1287         return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
1288 }
1289
1290 /*
1291  * helper function, fills parameters that are identical for all SAs
1292  */
1293 static void
1294 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1295         const struct app_sa_prm *app_prm)
1296 {
1297         memset(prm, 0, sizeof(*prm));
1298
1299         prm->flags = app_prm->flags;
1300         prm->ipsec_xform.options.esn = app_prm->enable_esn;
1301         prm->ipsec_xform.replay_win_sz = app_prm->window_size;
1302 }
1303
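/*
 * Fill the librte_ipsec SA parameters for one SA: common application options,
 * the IPsec xform, tunnel or transport specifics and the crypto transforms.
 */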
1304 static int
1305 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1306         const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1307 {
1308         int32_t rc;
1309
1310         /*
1311          * Try to get the next proto for the SPI by searching for that SPI in the SPD.
1312          * Probably not the optimal way, but nothing better is available
1313          * right now.
1314          */
1315         rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1316         if (rc < 0)
1317                 return rc;
1318
1319         fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1320         prm->userdata = (uintptr_t)ss;
1321
1322         /* setup ipsec xform */
1323         prm->ipsec_xform.spi = ss->spi;
1324         prm->ipsec_xform.salt = ss->salt;
1325         prm->ipsec_xform.direction = ss->direction;
1326         prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1327         prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1328                 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1329                 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
1330         prm->ipsec_xform.options.ecn = 1;
1331         prm->ipsec_xform.options.copy_dscp = 1;
1332
1333         if (IS_IP4_TUNNEL(ss->flags)) {
1334                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1335                 prm->tun.hdr_len = sizeof(*v4);
1336                 prm->tun.next_proto = rc;
1337                 prm->tun.hdr = v4;
1338         } else if (IS_IP6_TUNNEL(ss->flags)) {
1339                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1340                 prm->tun.hdr_len = sizeof(*v6);
1341                 prm->tun.next_proto = rc;
1342                 prm->tun.hdr = v6;
1343         } else {
1344                 /* transport mode */
1345                 prm->trs.proto = rc;
1346         }
1347
1348         /* setup crypto section */
1349         prm->crypto_xform = ss->xforms;
1350         return 0;
1351 }
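/*
 * Resulting layout, by SA type: IPv4/IPv6 tunnel SAs carry the tunnel type
 * plus a prebuilt outer header template in prm->tun (hdr, hdr_len) and the
 * inner next-protocol obtained from the SPD lookup above, while
 * transport-mode SAs only record that next-protocol in prm->trs.proto.
 * The crypto transform chain built in sa_add_rules() is reused as is via
 * prm->crypto_xform.
 */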
1352
1353 static int
1354 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
1355 {
1356         int32_t rc = 0;
1357
1358         ss->sa = sa;
1359
1360         if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1361                 ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
1362                 if (ss->security.ses != NULL) {
1363                         rc = rte_ipsec_session_prepare(ss);
1364                         if (rc != 0)
1365                                 memset(ss, 0, sizeof(*ss));
1366                 }
1367         }
1368
1369         return rc;
1370 }
1371
1372 /*
1373  * Initialise the related rte_ipsec_sa object.
1374  */
1375 static int
1376 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
1377 {
1378         int rc;
1379         struct rte_ipsec_sa_prm prm;
1380         struct rte_ipsec_session *ips;
1381         struct rte_ipv4_hdr v4  = {
1382                 .version_ihl = IPVERSION << 4 |
1383                         sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1384                 .time_to_live = IPDEFTTL,
1385                 .next_proto_id = IPPROTO_ESP,
1386                 .src_addr = lsa->src.ip.ip4,
1387                 .dst_addr = lsa->dst.ip.ip4,
1388         };
1389         struct rte_ipv6_hdr v6 = {
1390                 .vtc_flow = htonl(IP6_VERSION << 28),
1391                 .proto = IPPROTO_ESP,
1392         };
1393
1394         if (IS_IP6_TUNNEL(lsa->flags)) {
1395                 memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1396                 memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1397         }
1398
1399         rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1400         if (rc == 0)
1401                 rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1402         if (rc < 0)
1403                 return rc;
1404
1405         /* init primary processing session */
1406         ips = ipsec_get_primary_session(lsa);
1407         rc = fill_ipsec_session(ips, sa);
1408         if (rc != 0)
1409                 return rc;
1410
1411         /* init inline fallback processing session */
1412         if (lsa->fallback_sessions == 1)
1413                 rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
1414
1415         return rc;
1416 }
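/*
 * The v4/v6 headers built above serve as outer-header templates for
 * tunnel-mode SAs: fill_ipsec_sa_prm() hands them to librte_ipsec via
 * prm->tun.hdr, and the library prepends a copy of that template when
 * encapsulating outbound packets.
 */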
1417
1418 /*
1419  * Allocate space and init rte_ipsec_sa structures,
1420  * one per session.
1421  */
1422 static int
1423 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
1424 {
1425         int32_t rc, sz;
1426         uint32_t i, idx;
1427         size_t tsz;
1428         struct rte_ipsec_sa *sa;
1429         struct ipsec_sa *lsa;
1430         struct rte_ipsec_sa_prm prm;
1431
1432         /* determine SA size */
1433         idx = 0;
1434         fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1435         sz = rte_ipsec_sa_size(&prm);
1436         if (sz < 0) {
1437                 RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1438                         "failed to determine SA size, error code: %d\n",
1439                         __func__, ctx, nb_ent, socket, sz);
1440                 return sz;
1441         }
1442
1443         tsz = sz * nb_ent;
1444
1445         ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1446         if (ctx->satbl == NULL) {
1447                 RTE_LOG(ERR, IPSEC,
1448                         "%s(%p, %u, %d): failed to allocate %zu bytes\n",
1449                         __func__,  ctx, nb_ent, socket, tsz);
1450                 return -ENOMEM;
1451         }
1452
1453         rc = 0;
1454         for (i = 0; i != nb_ent && rc == 0; i++) {
1455
1456                 idx = i;
1457
1458                 sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1459                 lsa = ctx->sa + idx;
1460
1461                 rc = ipsec_sa_init(lsa, sa, sz);
1462         }
1463
1464         return rc;
1465 }
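/*
 * Note: rte_ipsec_sa_size() is queried once, for entry 0, and the result
 * is used as a fixed stride for the whole table, so every SA in satbl is
 * assumed to need the same amount of space:
 *
 *     entry i is located at (uintptr_t)ctx->satbl + (size_t)sz * i
 */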
1466
1467 static int
1468 sa_cmp(const void *p, const void *q)
1469 {
1470         uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
1471         uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
1472
1473         return (spi1 < spi2) ? -1 : (spi1 > spi2);
1474 }
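/*
 * sa_cmp() orders SAs by ascending SPI.  It is shared by qsort() in
 * sa_sort_arr() and by bsearch() in sa_spi_present(); bsearch() only
 * works if the rule array was sorted with this comparator beforehand.
 */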
1475
1476 /*
1477  * Binary-search the SA rules (sorted by SPI) for an SA with the given SPI
1478  */
1479 int
1480 sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
1481 {
1482         uint32_t num;
1483         struct ipsec_sa *sa;
1484         struct ipsec_sa tmpl;
1485         const struct ipsec_sa *sar;
1486
1487         sar = sa_ctx->sa;
1488         if (inbound != 0)
1489                 num = nb_sa_in;
1490         else
1491                 num = nb_sa_out;
1492
1493         tmpl.spi = spi;
1494
1495         sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
1496         if (sa != NULL)
1497                 return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
1498
1499         return -ENOENT;
1500 }
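/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * reject a configuration that references an unknown inbound SPI.
 *
 *     if (sa_spi_present(ctx->sa_in, spi, 1) < 0)
 *             rte_exit(EXIT_FAILURE, "no inbound SA with SPI %u\n", spi);
 *
 * On success the return value is the rule's index in the inbound SA array.
 */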
1501
1502 void
1503 sa_init(struct socket_ctx *ctx, int32_t socket_id)
1504 {
1505         int32_t rc;
1506         const char *name;
1507
1508         if (ctx == NULL)
1509                 rte_exit(EXIT_FAILURE, "NULL context.\n");
1510
1511         if (ctx->sa_in != NULL)
1512                 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
1513                                 "initialized\n", socket_id);
1514
1515         if (ctx->sa_out != NULL)
1516                 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
1517                                 "initialized\n", socket_id);
1518
1519         if (nb_sa_in > 0) {
1520                 name = "sa_in";
1521                 ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
1522                 if (ctx->sa_in == NULL)
1523                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1524                                 "context %s in socket %d\n", rte_errno,
1525                                 name, socket_id);
1526
1527                 rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
1528                                 &sa_in_cnt);
1529                 if (rc != 0)
1530                         rte_exit(EXIT_FAILURE, "failed to init SAD\n");
1531
1532                 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
1533
1534                 if (app_sa_prm.enable != 0) {
1535                         rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
1536                                 socket_id);
1537                         if (rc != 0)
1538                                 rte_exit(EXIT_FAILURE,
1539                                         "failed to init inbound SAs\n");
1540                 }
1541         } else
1542                 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1543
1544         if (nb_sa_out > 0) {
1545                 name = "sa_out";
1546                 ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
1547                 if (ctx->sa_out == NULL)
1548                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1549                                 "context %s in socket %d\n", rte_errno,
1550                                 name, socket_id);
1551
1552                 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
1553
1554                 if (app_sa_prm.enable != 0) {
1555                         rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
1556                                 socket_id);
1557                         if (rc != 0)
1558                                 rte_exit(EXIT_FAILURE,
1559                                         "failed to init outbound SAs\n");
1560                 }
1561         } else
1562                 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
1563                         "specified\n");
1564 }
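/*
 * sa_init() is the per-socket entry point: it creates the inbound and
 * outbound SA databases, builds the inbound SAD used by sad_lookup(),
 * and, when librte_ipsec mode is enabled (app_sa_prm.enable != 0), also
 * allocates the per-SA rte_ipsec_sa table via ipsec_satbl_init().
 */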
1565
1566 int
1567 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1568 {
1569         struct ipsec_mbuf_metadata *priv;
1570         struct ipsec_sa *sa;
1571
1572         priv = get_priv(m);
1573         sa = priv->sa;
1574         if (sa != NULL)
1575                 return (sa_ctx->sa[sa_idx].spi == sa->spi);
1576
1577         RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1578         return 0;
1579 }
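/*
 * inbound_sa_check() verifies that the SA which actually processed the
 * packet (saved in the mbuf private data) carries the same SPI as the SA
 * selected by the security-policy lookup (sa_idx), i.e. that the packet
 * was not accepted under the wrong policy.
 */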
1580
1581 void
1582 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1583                 void *sa_arr[], uint16_t nb_pkts)
1584 {
1585         uint32_t i;
1586         void *result_sa;
1587         struct ipsec_sa *sa;
1588
1589         sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
1590
1591         /*
1592          * Mark the need for inline offload fallback in the LSB of the SA pointer.
1593          * Thanks to the packet grouping mechanism used by ipsec_process,
1594          * packets marked for fallback processing will form a separate group.
1595          *
1596          * Because the tagged SA pointer is not safe to dereference, it is cast
1597          * to a generic pointer to prevent unintentional use. Use ipsec_mask_saptr
1598          * to recover a valid struct pointer.
1599          */
1600         for (i = 0; i < nb_pkts; i++) {
1601                 if (sa_arr[i] == NULL)
1602                         continue;
1603
1604                 result_sa = sa = sa_arr[i];
1605                 if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
1606                         sa->fallback_sessions > 0) {
1607                         uintptr_t intsa = (uintptr_t)sa;
1608                         intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
1609                         result_sa = (void *)intsa;
1610                 }
1611                 sa_arr[i] = result_sa;
1612         }
1613 }
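/*
 * Consumer-side sketch (illustrative only; the application's own
 * ipsec_mask_saptr() helper performs the equivalent clearing):
 *
 *     uintptr_t v = (uintptr_t)sa_arr[i];
 *     int needs_fallback = (v & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) != 0;
 *     struct ipsec_sa *sa = (struct ipsec_sa *)
 *                     (v & ~(uintptr_t)IPSEC_SA_OFFLOAD_FALLBACK_FLAG);
 */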
1614
1615 void
1616 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1617                 void *sa[], uint16_t nb_pkts)
1618 {
1619         uint32_t i;
1620
1621         for (i = 0; i < nb_pkts; i++)
1622                 sa[i] = &sa_ctx->sa[sa_idx[i]];
1623 }
1624
1625 /*
1626  * Select HW offloads to be used.
1627  */
1628 int
1629 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1630                 uint64_t *tx_offloads)
1631 {
1632         struct ipsec_sa *rule;
1633         uint32_t idx_sa;
1634         enum rte_security_session_action_type rule_type;
1635
1636         *rx_offloads = 0;
1637         *tx_offloads = 0;
1638
1639         /* Check for inbound rules that use offloads and use this port */
1640         for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1641                 rule = &sa_in[idx_sa];
1642                 rule_type = ipsec_get_action_type(rule);
1643                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1644                                 rule_type ==
1645                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1646                                 && rule->portid == port_id)
1647                         *rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
1648         }
1649
1650         /* Check for outbound rules that use offloads and use this port */
1651         for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1652                 rule = &sa_out[idx_sa];
1653                 rule_type = ipsec_get_action_type(rule);
1654                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1655                                 rule_type ==
1656                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1657                                 && rule->portid == port_id) {
1658                         *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
1659                         if (rule->mss)
1660                                 *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1661                 }
1662         }
1663         return 0;
1664 }
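/*
 * Offload selection above: any inline (crypto or protocol) rule bound to
 * the port requests RTE_ETH_RX_OFFLOAD_SECURITY / RTE_ETH_TX_OFFLOAD_SECURITY
 * on that port, and an outbound inline rule with a non-zero MSS additionally
 * requests RTE_ETH_TX_OFFLOAD_TCP_TSO so that TCP segmentation can be
 * offloaded to the NIC for those SAs.
 */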
1665
1666 void
1667 sa_sort_arr(void)
1668 {
1669         qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
1670         qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
1671 }
1672
1673 uint32_t
1674 get_nb_crypto_sessions(void)
1675 {
1676         return nb_crypto_sessions;
1677 }