dpdk.git: examples/ipsec-secgw/sa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 /*
6  * Security Associations
7  */
8 #include <sys/types.h>
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
12
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
19 #include <rte_ip.h>
20 #include <rte_random.h>
21 #include <rte_ethdev.h>
22 #include <rte_malloc.h>
23
24 #include "ipsec.h"
25 #include "esp.h"
26 #include "parser.h"
27 #include "sad.h"
28
29 #define IPDEFTTL 64
30
31 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
32
33 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
34
35 #define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
36
37 struct supported_cipher_algo {
38         const char *keyword;
39         enum rte_crypto_cipher_algorithm algo;
40         uint16_t iv_len;
41         uint16_t block_size;
42         uint16_t key_len;
43 };
44
45 struct supported_auth_algo {
46         const char *keyword;
47         enum rte_crypto_auth_algorithm algo;
48         uint16_t digest_len;
49         uint16_t key_len;
50         uint8_t key_not_req;
51 };
52
53 struct supported_aead_algo {
54         const char *keyword;
55         enum rte_crypto_aead_algorithm algo;
56         uint16_t iv_len;
57         uint16_t block_size;
58         uint16_t digest_len;
59         uint16_t key_len;
60         uint8_t aad_len;
61 };
62
63
64 const struct supported_cipher_algo cipher_algos[] = {
65         {
66                 .keyword = "null",
67                 .algo = RTE_CRYPTO_CIPHER_NULL,
68                 .iv_len = 0,
69                 .block_size = 4,
70                 .key_len = 0
71         },
72         {
73                 .keyword = "aes-128-cbc",
74                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
75                 .iv_len = 16,
76                 .block_size = 16,
77                 .key_len = 16
78         },
79         {
80                 .keyword = "aes-192-cbc",
81                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
82                 .iv_len = 16,
83                 .block_size = 16,
84                 .key_len = 24
85         },
86         {
87                 .keyword = "aes-256-cbc",
88                 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
89                 .iv_len = 16,
90                 .block_size = 16,
91                 .key_len = 32
92         },
93         {
94                 .keyword = "aes-128-ctr",
95                 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
96                 .iv_len = 8,
97                 .block_size = 4,
98                 .key_len = 20
99         },
100         {
101                 .keyword = "3des-cbc",
102                 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
103                 .iv_len = 8,
104                 .block_size = 8,
105                 .key_len = 24
106         }
107 };
108
109 const struct supported_auth_algo auth_algos[] = {
110         {
111                 .keyword = "null",
112                 .algo = RTE_CRYPTO_AUTH_NULL,
113                 .digest_len = 0,
114                 .key_len = 0,
115                 .key_not_req = 1
116         },
117         {
118                 .keyword = "sha1-hmac",
119                 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
120                 .digest_len = 12,
121                 .key_len = 20
122         },
123         {
124                 .keyword = "sha256-hmac",
125                 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
126                 .digest_len = 16,
127                 .key_len = 32
128         }
129 };
130
131 const struct supported_aead_algo aead_algos[] = {
132         {
133                 .keyword = "aes-128-gcm",
134                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
135                 .iv_len = 8,
136                 .block_size = 4,
137                 .key_len = 20,
138                 .digest_len = 16,
139                 .aad_len = 8,
140         },
141         {
142                 .keyword = "aes-192-gcm",
143                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
144                 .iv_len = 8,
145                 .block_size = 4,
146                 .key_len = 28,
147                 .digest_len = 16,
148                 .aad_len = 8,
149         },
150         {
151                 .keyword = "aes-256-gcm",
152                 .algo = RTE_CRYPTO_AEAD_AES_GCM,
153                 .iv_len = 8,
154                 .block_size = 4,
155                 .key_len = 36,
156                 .digest_len = 16,
157                 .aad_len = 8,
158         }
159 };
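/*
 * Note: for the aes-128-ctr and aes-*-gcm entries above, key_len counts a
 * trailing 4-byte salt/nonce in addition to the raw key; the parser below
 * strips those bytes from the configured key and stores them in the SA
 * salt field.
 */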
160
161 #define SA_INIT_NB      128
162
163 static uint32_t nb_crypto_sessions;
164 struct ipsec_sa *sa_out;
165 uint32_t nb_sa_out;
166 static uint32_t sa_out_sz;
167 static struct ipsec_sa_cnt sa_out_cnt;
168
169 struct ipsec_sa *sa_in;
170 uint32_t nb_sa_in;
171 static uint32_t sa_in_sz;
172 static struct ipsec_sa_cnt sa_in_cnt;
173
174 static const struct supported_cipher_algo *
175 find_match_cipher_algo(const char *cipher_keyword)
176 {
177         size_t i;
178
179         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
180                 const struct supported_cipher_algo *algo =
181                         &cipher_algos[i];
182
183                 if (strcmp(cipher_keyword, algo->keyword) == 0)
184                         return algo;
185         }
186
187         return NULL;
188 }
189
190 static const struct supported_auth_algo *
191 find_match_auth_algo(const char *auth_keyword)
192 {
193         size_t i;
194
195         for (i = 0; i < RTE_DIM(auth_algos); i++) {
196                 const struct supported_auth_algo *algo =
197                         &auth_algos[i];
198
199                 if (strcmp(auth_keyword, algo->keyword) == 0)
200                         return algo;
201         }
202
203         return NULL;
204 }
205
206 static const struct supported_aead_algo *
207 find_match_aead_algo(const char *aead_keyword)
208 {
209         size_t i;
210
211         for (i = 0; i < RTE_DIM(aead_algos); i++) {
212                 const struct supported_aead_algo *algo =
213                         &aead_algos[i];
214
215                 if (strcmp(aead_keyword, algo->keyword) == 0)
216                         return algo;
217         }
218
219         return NULL;
220 }
221
222 /** parse_key_string
223  *  parse x:x:x:x.... hex number key string into uint8_t *key
224  *  return:
225  *  > 0: number of bytes parsed
226  *  0:   failed
227  */
228 static uint32_t
229 parse_key_string(const char *key_str, uint8_t *key)
230 {
231         const char *pt_start = key_str, *pt_end = key_str;
232         uint32_t nb_bytes = 0;
233
234         while (pt_end != NULL) {
235                 char sub_str[3] = {0};
236
237                 pt_end = strchr(pt_start, ':');
238
239                 if (pt_end == NULL) {
240                         if (strlen(pt_start) > 2)
241                                 return 0;
242                         strncpy(sub_str, pt_start, 2);
243                 } else {
244                         if (pt_end - pt_start > 2)
245                                 return 0;
246
247                         strncpy(sub_str, pt_start, pt_end - pt_start);
248                         pt_start = pt_end + 1;
249                 }
250
251                 key[nb_bytes++] = strtol(sub_str, NULL, 16);
252         }
253
254         return nb_bytes;
255 }
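/*
 * Illustrative use of parse_key_string(): parsing "de:ad:be:ef" fills
 * key[0..3] with 0xde 0xad 0xbe 0xef and returns 4; any byte group longer
 * than two hex digits makes it return 0.
 */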
256
257 static int
258 extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
259 {
260         if (*sa_tbl == NULL) {
261                 *sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
262                 if (*sa_tbl == NULL)
263                         return -1;
264                 *cur_sz = SA_INIT_NB;
265                 return 0;
266         }
267
268         if (cur_cnt >= *cur_sz) {
269                 *sa_tbl = realloc(*sa_tbl,
270                         *cur_sz * sizeof(struct ipsec_sa) * 2);
271                 if (*sa_tbl == NULL)
272                         return -1;
273                 /* clean reallocated extra space */
274                 memset(&(*sa_tbl)[*cur_sz], 0,
275                         *cur_sz * sizeof(struct ipsec_sa));
276                 *cur_sz *= 2;
277         }
278
279         return 0;
280 }
281
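/*
 * Parse one tokenized SA rule and append it to the inbound or outbound SA
 * table: tokens[0] is "in" or "out", tokens[1] the SPI, followed by
 * keyword/value pairs such as mode, cipher_algo/cipher_key,
 * auth_algo/auth_key, aead_algo/aead_key, src, dst, type, port_id,
 * fallback, flow-direction and udp-encap.
 * Illustrative token sequence (not taken from a real config):
 *   in 5 cipher_algo null auth_algo null mode ipv4-tunnel
 *   src 172.16.1.5 dst 172.16.2.5 type no-offload
 */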
282 void
283 parse_sa_tokens(char **tokens, uint32_t n_tokens,
284         struct parse_status *status)
285 {
286         struct ipsec_sa *rule = NULL;
287         struct rte_ipsec_session *ips;
288         uint32_t ti; /* token index */
289         uint32_t *ri; /* rule index */
290         struct ipsec_sa_cnt *sa_cnt;
291         uint32_t cipher_algo_p = 0;
292         uint32_t auth_algo_p = 0;
293         uint32_t aead_algo_p = 0;
294         uint32_t src_p = 0;
295         uint32_t dst_p = 0;
296         uint32_t mode_p = 0;
297         uint32_t type_p = 0;
298         uint32_t portid_p = 0;
299         uint32_t fallback_p = 0;
300         int16_t status_p = 0;
301         uint16_t udp_encap_p = 0;
302
303         if (strcmp(tokens[0], "in") == 0) {
304                 ri = &nb_sa_in;
305                 sa_cnt = &sa_in_cnt;
306                 if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
307                         return;
308                 rule = &sa_in[*ri];
309                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
310         } else {
311                 ri = &nb_sa_out;
312                 sa_cnt = &sa_out_cnt;
313                 if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
314                         return;
315                 rule = &sa_out[*ri];
316                 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
317         }
318
319         /* spi number */
320         APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
321         if (status->status < 0)
322                 return;
323         if (atoi(tokens[1]) == INVALID_SPI)
324                 return;
325         rule->spi = atoi(tokens[1]);
326         rule->portid = UINT16_MAX;
327         ips = ipsec_get_primary_session(rule);
328
329         for (ti = 2; ti < n_tokens; ti++) {
330                 if (strcmp(tokens[ti], "mode") == 0) {
331                         APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
332                         if (status->status < 0)
333                                 return;
334
335                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
336                         if (status->status < 0)
337                                 return;
338
339                         if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
340                                 sa_cnt->nb_v4++;
341                                 rule->flags = IP4_TUNNEL;
342                         } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
343                                 sa_cnt->nb_v6++;
344                                 rule->flags = IP6_TUNNEL;
345                         } else if (strcmp(tokens[ti], "transport") == 0) {
346                                 sa_cnt->nb_v4++;
347                                 sa_cnt->nb_v6++;
348                                 rule->flags = TRANSPORT;
349                         } else {
350                                 APP_CHECK(0, status, "unrecognized "
351                                         "input \"%s\"", tokens[ti]);
352                                 return;
353                         }
354
355                         mode_p = 1;
356                         continue;
357                 }
358
359                 if (strcmp(tokens[ti], "cipher_algo") == 0) {
360                         const struct supported_cipher_algo *algo;
361                         uint32_t key_len;
362
363                         APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
364                                 status);
365                         if (status->status < 0)
366                                 return;
367
368                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
369                         if (status->status < 0)
370                                 return;
371
372                         algo = find_match_cipher_algo(tokens[ti]);
373
374                         APP_CHECK(algo != NULL, status, "unrecognized "
375                                 "input \"%s\"", tokens[ti]);
376
377                         if (status->status < 0)
378                                 return;
379
380                         rule->cipher_algo = algo->algo;
381                         rule->block_size = algo->block_size;
382                         rule->iv_len = algo->iv_len;
383                         rule->cipher_key_len = algo->key_len;
384
385                         /* for NULL algorithm, no cipher key required */
386                         if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
387                                 cipher_algo_p = 1;
388                                 continue;
389                         }
390
391                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
392                         if (status->status < 0)
393                                 return;
394
395                         APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
396                                 status, "unrecognized input \"%s\", "
397                                 "expect \"cipher_key\"", tokens[ti]);
398                         if (status->status < 0)
399                                 return;
400
401                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
402                         if (status->status < 0)
403                                 return;
404
405                         key_len = parse_key_string(tokens[ti],
406                                 rule->cipher_key);
407                         APP_CHECK(key_len == rule->cipher_key_len, status,
408                                 "unrecognized input \"%s\"", tokens[ti]);
409                         if (status->status < 0)
410                                 return;
411
412                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
413                                 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
414                                 rule->salt = (uint32_t)rte_rand();
415
416                         if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
417                                 key_len -= 4;
418                                 rule->cipher_key_len = key_len;
419                                 memcpy(&rule->salt,
420                                         &rule->cipher_key[key_len], 4);
421                         }
422
423                         cipher_algo_p = 1;
424                         continue;
425                 }
426
427                 if (strcmp(tokens[ti], "auth_algo") == 0) {
428                         const struct supported_auth_algo *algo;
429                         uint32_t key_len;
430
431                         APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
432                                 status);
433                         if (status->status < 0)
434                                 return;
435
436                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
437                         if (status->status < 0)
438                                 return;
439
440                         algo = find_match_auth_algo(tokens[ti]);
441                         APP_CHECK(algo != NULL, status, "unrecognized "
442                                 "input \"%s\"", tokens[ti]);
443
444                         if (status->status < 0)
445                                 return;
446
447                         rule->auth_algo = algo->algo;
448                         rule->auth_key_len = algo->key_len;
449                         rule->digest_len = algo->digest_len;
450
451                         /* NULL algorithm and combined algos do not
452                          * require auth key
453                          */
454                         if (algo->key_not_req) {
455                                 auth_algo_p = 1;
456                                 continue;
457                         }
458
459                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
460                         if (status->status < 0)
461                                 return;
462
463                         APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
464                                 status, "unrecognized input \"%s\", "
465                                 "expect \"auth_key\"", tokens[ti]);
466                         if (status->status < 0)
467                                 return;
468
469                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
470                         if (status->status < 0)
471                                 return;
472
473                         key_len = parse_key_string(tokens[ti],
474                                 rule->auth_key);
475                         APP_CHECK(key_len == rule->auth_key_len, status,
476                                 "unrecognized input \"%s\"", tokens[ti]);
477                         if (status->status < 0)
478                                 return;
479
480                         auth_algo_p = 1;
481                         continue;
482                 }
483
484                 if (strcmp(tokens[ti], "aead_algo") == 0) {
485                         const struct supported_aead_algo *algo;
486                         uint32_t key_len;
487
488                         APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
489                                 status);
490                         if (status->status < 0)
491                                 return;
492
493                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
494                         if (status->status < 0)
495                                 return;
496
497                         algo = find_match_aead_algo(tokens[ti]);
498
499                         APP_CHECK(algo != NULL, status, "unrecognized "
500                                 "input \"%s\"", tokens[ti]);
501
502                         if (status->status < 0)
503                                 return;
504
505                         rule->aead_algo = algo->algo;
506                         rule->cipher_key_len = algo->key_len;
507                         rule->digest_len = algo->digest_len;
508                         rule->aad_len = algo->aad_len;
509                         rule->block_size = algo->block_size;
510                         rule->iv_len = algo->iv_len;
511
512                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
513                         if (status->status < 0)
514                                 return;
515
516                         APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
517                                 status, "unrecognized input \"%s\", "
518                                 "expect \"aead_key\"", tokens[ti]);
519                         if (status->status < 0)
520                                 return;
521
522                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
523                         if (status->status < 0)
524                                 return;
525
526                         key_len = parse_key_string(tokens[ti],
527                                 rule->cipher_key);
528                         APP_CHECK(key_len == rule->cipher_key_len, status,
529                                 "unrecognized input \"%s\"", tokens[ti]);
530                         if (status->status < 0)
531                                 return;
532
533                         key_len -= 4;
534                         rule->cipher_key_len = key_len;
535                         memcpy(&rule->salt,
536                                 &rule->cipher_key[key_len], 4);
537
538                         aead_algo_p = 1;
539                         continue;
540                 }
541
542                 if (strcmp(tokens[ti], "src") == 0) {
543                         APP_CHECK_PRESENCE(src_p, tokens[ti], status);
544                         if (status->status < 0)
545                                 return;
546
547                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
548                         if (status->status < 0)
549                                 return;
550
551                         if (IS_IP4_TUNNEL(rule->flags)) {
552                                 struct in_addr ip;
553
554                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
555                                         &ip, NULL) == 0, status,
556                                         "unrecognized input \"%s\", "
557                                         "expect valid ipv4 addr",
558                                         tokens[ti]);
559                                 if (status->status < 0)
560                                         return;
561                                 rule->src.ip.ip4 = rte_bswap32(
562                                         (uint32_t)ip.s_addr);
563                         } else if (IS_IP6_TUNNEL(rule->flags)) {
564                                 struct in6_addr ip;
565
566                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
567                                         NULL) == 0, status,
568                                         "unrecognized input \"%s\", "
569                                         "expect valid ipv6 addr",
570                                         tokens[ti]);
571                                 if (status->status < 0)
572                                         return;
573                                 memcpy(rule->src.ip.ip6.ip6_b,
574                                         ip.s6_addr, 16);
575                         } else if (IS_TRANSPORT(rule->flags)) {
576                                 APP_CHECK(0, status, "unrecognized input "
577                                         "\"%s\"", tokens[ti]);
578                                 return;
579                         }
580
581                         src_p = 1;
582                         continue;
583                 }
584
585                 if (strcmp(tokens[ti], "dst") == 0) {
586                         APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
587                         if (status->status < 0)
588                                 return;
589
590                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
591                         if (status->status < 0)
592                                 return;
593
594                         if (IS_IP4_TUNNEL(rule->flags)) {
595                                 struct in_addr ip;
596
597                                 APP_CHECK(parse_ipv4_addr(tokens[ti],
598                                         &ip, NULL) == 0, status,
599                                         "unrecognized input \"%s\", "
600                                         "expect valid ipv4 addr",
601                                         tokens[ti]);
602                                 if (status->status < 0)
603                                         return;
604                                 rule->dst.ip.ip4 = rte_bswap32(
605                                         (uint32_t)ip.s_addr);
606                         } else if (IS_IP6_TUNNEL(rule->flags)) {
607                                 struct in6_addr ip;
608
609                                 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
610                                         NULL) == 0, status,
611                                         "unrecognized input \"%s\", "
612                                         "expect valid ipv6 addr",
613                                         tokens[ti]);
614                                 if (status->status < 0)
615                                         return;
616                                 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
617                         } else if (IS_TRANSPORT(rule->flags)) {
618                                 APP_CHECK(0, status, "unrecognized "
619                                         "input \"%s\"", tokens[ti]);
620                                 return;
621                         }
622
623                         dst_p = 1;
624                         continue;
625                 }
626
627                 if (strcmp(tokens[ti], "type") == 0) {
628                         APP_CHECK_PRESENCE(type_p, tokens[ti], status);
629                         if (status->status < 0)
630                                 return;
631
632                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
633                         if (status->status < 0)
634                                 return;
635
636                         if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
637                                 ips->type =
638                                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
639                         else if (strcmp(tokens[ti],
640                                         "inline-protocol-offload") == 0)
641                                 ips->type =
642                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
643                         else if (strcmp(tokens[ti],
644                                         "lookaside-protocol-offload") == 0)
645                                 ips->type =
646                                 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
647                         else if (strcmp(tokens[ti], "no-offload") == 0)
648                                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
649                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
650                                 ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
651                         else {
652                                 APP_CHECK(0, status, "Invalid input \"%s\"",
653                                                 tokens[ti]);
654                                 return;
655                         }
656
657                         type_p = 1;
658                         continue;
659                 }
660
661                 if (strcmp(tokens[ti], "port_id") == 0) {
662                         APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
663                         if (status->status < 0)
664                                 return;
665                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
666                         if (status->status < 0)
667                                 return;
668                         if (rule->portid == UINT16_MAX)
669                                 rule->portid = atoi(tokens[ti]);
670                         else if (rule->portid != atoi(tokens[ti])) {
671                                 APP_CHECK(0, status,
672                                         "portid %s not matching with already assigned portid %u",
673                                         tokens[ti], rule->portid);
674                                 return;
675                         }
676                         portid_p = 1;
677                         continue;
678                 }
679
680                 if (strcmp(tokens[ti], "fallback") == 0) {
681                         struct rte_ipsec_session *fb;
682
683                         APP_CHECK(app_sa_prm.enable, status, "Fallback session "
684                                 "not allowed for legacy mode.");
685                         if (status->status < 0)
686                                 return;
687                         APP_CHECK(ips->type ==
688                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
689                                 "Fallback session allowed if primary session "
690                                 "is of type inline-crypto-offload only.");
691                         if (status->status < 0)
692                                 return;
693                         APP_CHECK(rule->direction ==
694                                 RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
695                                 "Fallback session not allowed for egress "
696                                 "rule");
697                         if (status->status < 0)
698                                 return;
699                         APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
700                         if (status->status < 0)
701                                 return;
702                         INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
703                         if (status->status < 0)
704                                 return;
705                         fb = ipsec_get_fallback_session(rule);
706                         if (strcmp(tokens[ti], "lookaside-none") == 0)
707                                 fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
708                         else if (strcmp(tokens[ti], "cpu-crypto") == 0)
709                                 fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
710                         else {
711                                 APP_CHECK(0, status, "unrecognized fallback "
712                                         "type %s.", tokens[ti]);
713                                 return;
714                         }
715
716                         rule->fallback_sessions = 1;
717                         nb_crypto_sessions++;
718                         fallback_p = 1;
719                         continue;
720                 }
721                 if (strcmp(tokens[ti], "flow-direction") == 0) {
722                         switch (ips->type) {
723                         case RTE_SECURITY_ACTION_TYPE_NONE:
724                         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
725                                 rule->fdir_flag = 1;
726                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
727                                 if (status->status < 0)
728                                         return;
729                                 if (rule->portid == UINT16_MAX)
730                                         rule->portid = atoi(tokens[ti]);
731                                 else if (rule->portid != atoi(tokens[ti])) {
732                                         APP_CHECK(0, status,
733                                                 "portid %s not matching with already assigned portid %u",
734                                                 tokens[ti], rule->portid);
735                                         return;
736                                 }
737                                 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
738                                 if (status->status < 0)
739                                         return;
740                                 rule->fdir_qid = atoi(tokens[ti]);
741                                 /* validating portid and queueid */
742                                 status_p = check_flow_params(rule->portid,
743                                                 rule->fdir_qid);
744                                 if (status_p < 0) {
745                                         printf("port id %u / queue id %u is "
746                                                 "not valid\n", rule->portid,
747                                                  rule->fdir_qid);
748                                 }
749                                 break;
750                         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
751                         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
752                         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
753                         default:
754                                 APP_CHECK(0, status,
755                                         "flow director not supported for security session type %d",
756                                         ips->type);
757                                 return;
758                         }
759                         continue;
760                 }
761                 if (strcmp(tokens[ti], "udp-encap") == 0) {
762                         APP_CHECK(ips->type ==
763                                 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
764                                 status, "UDP encapsulation is allowed if the "
765                                 "session is of type lookaside-protocol-offload "
766                                 "only.");
767                         if (status->status < 0)
768                                 return;
769                         APP_CHECK_PRESENCE(udp_encap_p, tokens[ti], status);
770                         if (status->status < 0)
771                                 return;
772
773                         rule->udp_encap = 1;
774                         app_sa_prm.udp_encap = 1;
775                         udp_encap_p = 1;
776                         continue;
777                 }
778
779                 /* unrecognized input */
780                 APP_CHECK(0, status, "unrecognized input \"%s\"",
781                         tokens[ti]);
782                 return;
783         }
784
785         if (aead_algo_p) {
786                 APP_CHECK(cipher_algo_p == 0, status,
787                                 "AEAD used, no need for cipher options");
788                 if (status->status < 0)
789                         return;
790
791                 APP_CHECK(auth_algo_p == 0, status,
792                                 "AEAD used, no need for auth options");
793                 if (status->status < 0)
794                         return;
795         } else {
796                 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
797                 if (status->status < 0)
798                         return;
799
800                 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
801                 if (status->status < 0)
802                         return;
803         }
804
805         APP_CHECK(mode_p == 1, status, "missing mode option");
806         if (status->status < 0)
807                 return;
808
809         if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
810                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
811                 printf("Missing portid option, falling back to non-offload\n");
812
813         if (!type_p || (!portid_p && ips->type !=
814                         RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
815                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
816         }
817
818         nb_crypto_sessions++;
819         *ri = *ri + 1;
820 }
821
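/*
 * Pretty-print one parsed SA rule: SPI, matched algorithm keywords, mode
 * (tunnel endpoints or transport), session type and, when configured,
 * the fallback session and flow-direction port/queue.
 */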
822 static void
823 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
824 {
825         uint32_t i;
826         uint8_t a, b, c, d;
827         const struct rte_ipsec_session *ips;
828         const struct rte_ipsec_session *fallback_ips;
829
830         printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
831
832         for (i = 0; i < RTE_DIM(cipher_algos); i++) {
833                 if (cipher_algos[i].algo == sa->cipher_algo &&
834                                 cipher_algos[i].key_len == sa->cipher_key_len) {
835                         printf("%s ", cipher_algos[i].keyword);
836                         break;
837                 }
838         }
839
840         for (i = 0; i < RTE_DIM(auth_algos); i++) {
841                 if (auth_algos[i].algo == sa->auth_algo) {
842                         printf("%s ", auth_algos[i].keyword);
843                         break;
844                 }
845         }
846
847         for (i = 0; i < RTE_DIM(aead_algos); i++) {
848                 if (aead_algos[i].algo == sa->aead_algo &&
849                                 aead_algos[i].key_len-4 == sa->cipher_key_len) {
850                         printf("%s ", aead_algos[i].keyword);
851                         break;
852                 }
853         }
854
855         printf("mode:");
856
857         switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
858         case IP4_TUNNEL:
859                 printf("IP4Tunnel ");
860                 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
861                 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
862                 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
863                 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
864                 break;
865         case IP6_TUNNEL:
866                 printf("IP6Tunnel ");
867                 for (i = 0; i < 16; i++) {
868                         if (i % 2 && i != 15)
869                                 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
870                         else
871                                 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
872                 }
873                 printf(" ");
874                 for (i = 0; i < 16; i++) {
875                         if (i % 2 && i != 15)
876                                 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
877                         else
878                                 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
879                 }
880                 break;
881         case TRANSPORT:
882                 printf("Transport ");
883                 break;
884         }
885
886         ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
887         printf(" type:");
888         switch (ips->type) {
889         case RTE_SECURITY_ACTION_TYPE_NONE:
890                 printf("no-offload ");
891                 break;
892         case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
893                 printf("inline-crypto-offload ");
894                 break;
895         case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
896                 printf("inline-protocol-offload ");
897                 break;
898         case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
899                 printf("lookaside-protocol-offload ");
900                 break;
901         case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
902                 printf("cpu-crypto-accelerated ");
903                 break;
904         }
905
906         fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
907         if (fallback_ips != NULL && sa->fallback_sessions > 0) {
908                 printf("inline fallback: ");
909                 switch (fallback_ips->type) {
910                 case RTE_SECURITY_ACTION_TYPE_NONE:
911                         printf("lookaside-none");
912                         break;
913                 case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
914                         printf("cpu-crypto-accelerated");
915                         break;
916                 default:
917                         printf("invalid");
918                         break;
919                 }
920         }
921         if (sa->fdir_flag == 1)
922                 printf("flow-direction port %d queue %d", sa->portid,
923                                 sa->fdir_qid);
924
925         printf("\n");
926 }
927
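/*
 * Allocate the per-socket SA context: the crypto transform array is placed
 * in its own memzone, the context and SA entries in zmalloc'd memory.
 */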
928 static struct sa_ctx *
929 sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
930 {
931         char s[PATH_MAX];
932         struct sa_ctx *sa_ctx;
933         uint32_t mz_size;
934         const struct rte_memzone *mz;
935
936         snprintf(s, sizeof(s), "%s_%u", name, socket_id);
937
938         /* Create SA context */
939         printf("Creating SA context with %u maximum entries on socket %d\n",
940                         nb_sa, socket_id);
941
942         mz_size = sizeof(struct ipsec_xf) * nb_sa;
943         mz = rte_memzone_reserve(s, mz_size, socket_id,
944                         RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
945         if (mz == NULL) {
946                 printf("Failed to allocate SA XFORM memory\n");
947                 rte_errno = ENOMEM;
948                 return NULL;
949         }
950
951         sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
952                 sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
953
954         if (sa_ctx == NULL) {
955                 printf("Failed to allocate SA CTX memory\n");
956                 rte_errno = ENOMEM;
957                 rte_memzone_free(mz);
958                 return NULL;
959         }
960
961         sa_ctx->xf = (struct ipsec_xf *)mz->addr;
962         sa_ctx->nb_sa = nb_sa;
963
964         return sa_ctx;
965 }
966
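/*
 * Check that the port advertises DEV_RX_OFFLOAD_SECURITY (inbound) or
 * DEV_TX_OFFLOAD_SECURITY (outbound) before an inline session is used on it.
 */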
967 static int
968 check_eth_dev_caps(uint16_t portid, uint32_t inbound)
969 {
970         struct rte_eth_dev_info dev_info;
971         int retval;
972
973         retval = rte_eth_dev_info_get(portid, &dev_info);
974         if (retval != 0) {
975                 RTE_LOG(ERR, IPSEC,
976                         "Error during getting device (port %u) info: %s\n",
977                         portid, strerror(-retval));
978
979                 return retval;
980         }
981
982         if (inbound) {
983                 if ((dev_info.rx_offload_capa &
984                                 DEV_RX_OFFLOAD_SECURITY) == 0) {
985                         RTE_LOG(WARNING, PORT,
986                                 "hardware RX IPSec offload is not supported\n");
987                         return -EINVAL;
988                 }
989
990         } else { /* outbound */
991                 if ((dev_info.tx_offload_capa &
992                                 DEV_TX_OFFLOAD_SECURITY) == 0) {
993                         RTE_LOG(WARNING, PORT,
994                                 "hardware TX IPSec offload is not supported\n");
995                         return -EINVAL;
996                 }
997         }
998         return 0;
999 }
1000
1001 /*
1002  * Helper function, tries to determine next_proto for SPI
1003  * by searching through SP rules.
1004  */
1005 static int
1006 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
1007                 struct ip_addr ip_addr[2], uint32_t mask[2])
1008 {
1009         int32_t rc4, rc6;
1010
1011         rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1012                                 ip_addr, mask);
1013         rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1014                                 ip_addr, mask);
1015
1016         if (rc4 >= 0) {
1017                 if (rc6 >= 0) {
1018                         RTE_LOG(ERR, IPSEC,
1019                                 "%s: SPI %u used simultaeously by "
1020                                 "IPv4(%d) and IPv6 (%d) SP rules\n",
1021                                 __func__, spi, rc4, rc6);
1022                         return -EINVAL;
1023                 } else
1024                         return IPPROTO_IPIP;
1025         } else if (rc6 < 0) {
1026                 RTE_LOG(ERR, IPSEC,
1027                         "%s: SPI %u is not used by any SP rule\n",
1028                         __func__, spi);
1029                 return -EINVAL;
1030         } else
1031                 return IPPROTO_IPV6;
1032 }
1033
1034 /*
1035  * Helper function for getting source and destination IP addresses
1036  * from SP. Needed for inline crypto transport mode, as addresses are not
1037  * provided in the config file for that mode. It checks whether an SP for the
1038  * current SA exists and, based on the protocol type returned, stores the
1039  * appropriate addresses obtained from the SP into the SA.
1040  */
1041 static int
1042 sa_add_address_inline_crypto(struct ipsec_sa *sa)
1043 {
1044         int protocol;
1045         struct ip_addr ip_addr[2];
1046         uint32_t mask[2];
1047
1048         protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
1049         if (protocol < 0)
1050                 return protocol;
1051         else if (protocol == IPPROTO_IPIP) {
1052                 sa->flags |= IP4_TRANSPORT;
1053                 if (mask[0] == IP4_FULL_MASK &&
1054                                 mask[1] == IP4_FULL_MASK &&
1055                                 ip_addr[0].ip.ip4 != 0 &&
1056                                 ip_addr[1].ip.ip4 != 0) {
1057
1058                         sa->src.ip.ip4 = ip_addr[0].ip.ip4;
1059                         sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
1060                 } else {
1061                         RTE_LOG(ERR, IPSEC,
1062                         "%s: No valid address or mask entry in"
1063                         " IPv4 SP rule for SPI %u\n",
1064                         __func__, sa->spi);
1065                         return -EINVAL;
1066                 }
1067         } else if (protocol == IPPROTO_IPV6) {
1068                 sa->flags |= IP6_TRANSPORT;
1069                 if (mask[0] == IP6_FULL_MASK &&
1070                                 mask[1] == IP6_FULL_MASK &&
1071                                 (ip_addr[0].ip.ip6.ip6[0] != 0 ||
1072                                 ip_addr[0].ip.ip6.ip6[1] != 0) &&
1073                                 (ip_addr[1].ip.ip6.ip6[0] != 0 ||
1074                                 ip_addr[1].ip.ip6.ip6[1] != 0)) {
1075
1076                         sa->src.ip.ip6 = ip_addr[0].ip.ip6;
1077                         sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
1078                 } else {
1079                         RTE_LOG(ERR, IPSEC,
1080                         "%s: No valid address or mask entry in"
1081                         " IPv6 SP rule for SPI %u\n",
1082                         __func__, sa->spi);
1083                         return -EINVAL;
1084                 }
1085         }
1086         return 0;
1087 }
1088
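/*
 * Copy the parsed SA entries into the SA context: add inbound SAs to the
 * SAD, convert tunnel addresses to network byte order, build the crypto
 * transform chain (a single AEAD transform, or cipher+auth ordered by
 * direction) and create inline sessions where the rule requests them.
 */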
1089 static int
1090 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1091                 uint32_t nb_entries, uint32_t inbound,
1092                 struct socket_ctx *skt_ctx)
1093 {
1094         struct ipsec_sa *sa;
1095         uint32_t i, idx;
1096         uint16_t iv_length, aad_length;
1097         int inline_status;
1098         int32_t rc;
1099         struct rte_ipsec_session *ips;
1100
1101         /* for ESN upper 32 bits of SQN also need to be part of AAD */
1102         aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
1103
1104         for (i = 0; i < nb_entries; i++) {
1105                 idx = i;
1106                 sa = &sa_ctx->sa[idx];
1107                 if (sa->spi != 0) {
1108                         printf("Index %u already in use by SPI %u\n",
1109                                         idx, sa->spi);
1110                         return -EINVAL;
1111                 }
1112                 *sa = entries[i];
1113
1114                 if (inbound) {
1115                         rc = ipsec_sad_add(&sa_ctx->sad, sa);
1116                         if (rc != 0)
1117                                 return rc;
1118                 }
1119
1120                 sa->seq = 0;
1121                 ips = ipsec_get_primary_session(sa);
1122
1123                 if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1124                         ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1125                         if (check_eth_dev_caps(sa->portid, inbound))
1126                                 return -EINVAL;
1127                 }
1128
1129                 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1130                 case IP4_TUNNEL:
1131                         sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
1132                         sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
1133                         break;
1134                 case TRANSPORT:
1135                         if (ips->type ==
1136                                 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1137                                 inline_status =
1138                                         sa_add_address_inline_crypto(sa);
1139                                 if (inline_status < 0)
1140                                         return inline_status;
1141                         }
1142                         break;
1143                 }
1144
1145                 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
1146                         iv_length = 12;
1147
1148                         sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1149                         sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
1150                         sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
1151                         sa_ctx->xf[idx].a.aead.key.length =
1152                                 sa->cipher_key_len;
1153                         sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
1154                                 RTE_CRYPTO_AEAD_OP_DECRYPT :
1155                                 RTE_CRYPTO_AEAD_OP_ENCRYPT;
1156                         sa_ctx->xf[idx].a.next = NULL;
1157                         sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
1158                         sa_ctx->xf[idx].a.aead.iv.length = iv_length;
1159                         sa_ctx->xf[idx].a.aead.aad_length =
1160                                 sa->aad_len + aad_length;
1161                         sa_ctx->xf[idx].a.aead.digest_length =
1162                                 sa->digest_len;
1163
1164                         sa->xforms = &sa_ctx->xf[idx].a;
1165                 } else {
1166                         switch (sa->cipher_algo) {
1167                         case RTE_CRYPTO_CIPHER_NULL:
1168                         case RTE_CRYPTO_CIPHER_3DES_CBC:
1169                         case RTE_CRYPTO_CIPHER_AES_CBC:
1170                                 iv_length = sa->iv_len;
1171                                 break;
1172                         case RTE_CRYPTO_CIPHER_AES_CTR:
1173                                 iv_length = 16;
1174                                 break;
1175                         default:
1176                                 RTE_LOG(ERR, IPSEC_ESP,
1177                                                 "unsupported cipher algorithm %u\n",
1178                                                 sa->cipher_algo);
1179                                 return -EINVAL;
1180                         }
1181
1182                         if (inbound) {
1183                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1184                                 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
1185                                 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
1186                                 sa_ctx->xf[idx].b.cipher.key.length =
1187                                         sa->cipher_key_len;
1188                                 sa_ctx->xf[idx].b.cipher.op =
1189                                         RTE_CRYPTO_CIPHER_OP_DECRYPT;
1190                                 sa_ctx->xf[idx].b.next = NULL;
1191                                 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
1192                                 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
1193
1194                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1195                                 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
1196                                 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
1197                                 sa_ctx->xf[idx].a.auth.key.length =
1198                                         sa->auth_key_len;
1199                                 sa_ctx->xf[idx].a.auth.digest_length =
1200                                         sa->digest_len;
1201                                 sa_ctx->xf[idx].a.auth.op =
1202                                         RTE_CRYPTO_AUTH_OP_VERIFY;
1203                         } else { /* outbound */
1204                                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1205                                 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
1206                                 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
1207                                 sa_ctx->xf[idx].a.cipher.key.length =
1208                                         sa->cipher_key_len;
1209                                 sa_ctx->xf[idx].a.cipher.op =
1210                                         RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1211                                 sa_ctx->xf[idx].a.next = NULL;
1212                                 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
1213                                 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
1214
1215                                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1216                                 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
1217                                 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
1218                                 sa_ctx->xf[idx].b.auth.key.length =
1219                                         sa->auth_key_len;
1220                                 sa_ctx->xf[idx].b.auth.digest_length =
1221                                         sa->digest_len;
1222                                 sa_ctx->xf[idx].b.auth.op =
1223                                         RTE_CRYPTO_AUTH_OP_GENERATE;
1224                         }
1225
1226                         sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1227                         sa_ctx->xf[idx].b.next = NULL;
1228                         sa->xforms = &sa_ctx->xf[idx].a;
1229                 }
1230
1231                 if (ips->type ==
1232                         RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1233                         ips->type ==
1234                         RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1235                         rc = create_inline_session(skt_ctx, sa, ips);
1236                         if (rc != 0) {
1237                                 RTE_LOG(ERR, IPSEC_ESP,
1238                                         "create_inline_session() failed\n");
1239                                 return -EINVAL;
1240                         }
1241                 }
1242
1243                 if (sa->fdir_flag && inbound) {
1244                         rc = create_ipsec_esp_flow(sa);
1245                         if (rc != 0)
1246                                 RTE_LOG(ERR, IPSEC_ESP,
1247                                         "create_ipsec_esp_flow() failed\n");
1248                 }
1249                 print_one_sa_rule(sa, inbound);
1250         }
1251
1252         return 0;
1253 }
1254
1255 static inline int
1256 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1257                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1258 {
1259         return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
1260 }
1261
1262 static inline int
1263 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1264                 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1265 {
1266         return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
1267 }
1268
1269 /*
1270  * helper function, fills parameters that are identical for all SAs
1271  */
1272 static void
1273 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1274         const struct app_sa_prm *app_prm)
1275 {
1276         memset(prm, 0, sizeof(*prm));
1277
1278         prm->flags = app_prm->flags;
1279         prm->ipsec_xform.options.esn = app_prm->enable_esn;
1280         prm->ipsec_xform.replay_win_sz = app_prm->window_size;
1281 }
1282
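/*
 * Fill the rte_ipsec_sa_prm for one SA: look up the inner protocol via the
 * SP rules, then set up the ipsec xform, tunnel or transport specifics and
 * the crypto transform chain prepared earlier.
 */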
1283 static int
1284 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1285         const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1286 {
1287         int32_t rc;
1288
1289         /*
1290          * Try to get SPI next proto by searching that SPI in SPD.
1291          * Probably not the optimal way, but nothing better seems
1292          * available right now.
1293          */
1294         rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1295         if (rc < 0)
1296                 return rc;
1297
1298         fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1299         prm->userdata = (uintptr_t)ss;
1300
1301         /* setup ipsec xform */
1302         prm->ipsec_xform.spi = ss->spi;
1303         prm->ipsec_xform.salt = ss->salt;
1304         prm->ipsec_xform.direction = ss->direction;
1305         prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1306         prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1307                 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1308                 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
1309         prm->ipsec_xform.options.ecn = 1;
1310         prm->ipsec_xform.options.copy_dscp = 1;
1311
1312         if (IS_IP4_TUNNEL(ss->flags)) {
1313                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1314                 prm->tun.hdr_len = sizeof(*v4);
1315                 prm->tun.next_proto = rc;
1316                 prm->tun.hdr = v4;
1317         } else if (IS_IP6_TUNNEL(ss->flags)) {
1318                 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1319                 prm->tun.hdr_len = sizeof(*v6);
1320                 prm->tun.next_proto = rc;
1321                 prm->tun.hdr = v6;
1322         } else {
1323                 /* transport mode */
1324                 prm->trs.proto = rc;
1325         }
1326
1327         /* setup crypto section */
1328         prm->crypto_xform = ss->xforms;
1329         return 0;
1330 }
1331
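/*
 * Attach the rte_ipsec_sa to the session and, for inline sessions that
 * already have a security session created, prepare the session for use.
 */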
1332 static int
1333 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
1334 {
1335         int32_t rc = 0;
1336
1337         ss->sa = sa;
1338
1339         if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1340                 ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
1341                 if (ss->security.ses != NULL) {
1342                         rc = rte_ipsec_session_prepare(ss);
1343                         if (rc != 0)
1344                                 memset(ss, 0, sizeof(*ss));
1345                 }
1346         }
1347
1348         return rc;
1349 }
1350
1351 /*
1352  * Initialise the related rte_ipsec_sa object and its processing sessions.
1353  */
1354 static int
1355 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
1356 {
1357         int rc;
1358         struct rte_ipsec_sa_prm prm;
1359         struct rte_ipsec_session *ips;
1360         struct rte_ipv4_hdr v4  = {
1361                 .version_ihl = IPVERSION << 4 |
1362                         sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1363                 .time_to_live = IPDEFTTL,
1364                 .next_proto_id = IPPROTO_ESP,
1365                 .src_addr = lsa->src.ip.ip4,
1366                 .dst_addr = lsa->dst.ip.ip4,
1367         };
1368         struct rte_ipv6_hdr v6 = {
1369                 .vtc_flow = htonl(IP6_VERSION << 28),
1370                 .proto = IPPROTO_ESP,
1371         };
1372
1373         if (IS_IP6_TUNNEL(lsa->flags)) {
1374                 memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1375                 memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1376         }
1377
1378         rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1379         if (rc == 0)
1380                 rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1381         if (rc < 0)
1382                 return rc;
1383
1384         /* init primary processing session */
1385         ips = ipsec_get_primary_session(lsa);
1386         rc = fill_ipsec_session(ips, sa);
1387         if (rc != 0)
1388                 return rc;
1389
1390         /* init inline fallback processing session */
1391         if (lsa->fallback_sessions == 1)
1392                 rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
1393
1394         return rc;
1395 }
1396
1397 /*
1398  * Allocate space for and initialise the rte_ipsec_sa structures,
1399  * one per session.
1400  */
1401 static int
1402 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
1403 {
1404         int32_t rc, sz;
1405         uint32_t i, idx;
1406         size_t tsz;
1407         struct rte_ipsec_sa *sa;
1408         struct ipsec_sa *lsa;
1409         struct rte_ipsec_sa_prm prm;
1410
1411         /* determine SA size */
1412         idx = 0;
1413         fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1414         sz = rte_ipsec_sa_size(&prm);
1415         if (sz < 0) {
1416                 RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1417                         "failed to determine SA size, error code: %d\n",
1418                         __func__, ctx, nb_ent, socket, sz);
1419                 return sz;
1420         }
1421
1422         tsz = sz * nb_ent;
1423
1424         ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1425         if (ctx->satbl == NULL) {
1426                 RTE_LOG(ERR, IPSEC,
1427                         "%s(%p, %u, %d): failed to allocate %zu bytes\n",
1428                         __func__,  ctx, nb_ent, socket, tsz);
1429                 return -ENOMEM;
1430         }
1431
1432         rc = 0;
1433         for (i = 0; i != nb_ent && rc == 0; i++) {
1434
1435                 idx = i;
1436
1437                 sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1438                 lsa = ctx->sa + idx;
1439
1440                 rc = ipsec_sa_init(lsa, sa, sz);
1441         }
1442
1443         return rc;
1444 }
1445
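/*
 * SPI based ordering of SA entries, used by qsort() and bsearch().
 */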
1446 static int
1447 sa_cmp(const void *p, const void *q)
1448 {
1449         uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
1450         uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
1451
1452         return (spi1 > spi2) - (spi1 < spi2);
1453 }
1454
1455 /*
1456  * Binary-search the SPI-sorted SA rules for an SA with the given SPI.
1457  */
1458 int
1459 sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
1460 {
1461         uint32_t num;
1462         struct ipsec_sa *sa;
1463         struct ipsec_sa tmpl;
1464         const struct ipsec_sa *sar;
1465
1466         sar = sa_ctx->sa;
1467         if (inbound != 0)
1468                 num = nb_sa_in;
1469         else
1470                 num = nb_sa_out;
1471
1472         tmpl.spi = spi;
1473
1474         sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
1475         if (sa != NULL)
1476                 return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
1477
1478         return -ENOENT;
1479 }
1480
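/*
 * Create the inbound and outbound SA databases for the given socket
 * (the inbound path also creates its SAD), populate them with the parsed
 * rules and, if app_sa_prm.enable is set, initialise the corresponding
 * rte_ipsec_sa tables.
 */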
1481 void
1482 sa_init(struct socket_ctx *ctx, int32_t socket_id)
1483 {
1484         int32_t rc;
1485         const char *name;
1486
1487         if (ctx == NULL)
1488                 rte_exit(EXIT_FAILURE, "NULL context.\n");
1489
1490         if (ctx->sa_in != NULL)
1491                 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
1492                                 "initialized\n", socket_id);
1493
1494         if (ctx->sa_out != NULL)
1495                 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
1496                                 "initialized\n", socket_id);
1497
1498         if (nb_sa_in > 0) {
1499                 name = "sa_in";
1500                 ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
1501                 if (ctx->sa_in == NULL)
1502                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1503                                 "context %s in socket %d\n", rte_errno,
1504                                 name, socket_id);
1505
1506                 rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
1507                                 &sa_in_cnt);
1508                 if (rc != 0)
1509                         rte_exit(EXIT_FAILURE, "failed to init SAD\n");
1510
1511                 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
1512
1513                 if (app_sa_prm.enable != 0) {
1514                         rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
1515                                 socket_id);
1516                         if (rc != 0)
1517                                 rte_exit(EXIT_FAILURE,
1518                                         "failed to init inbound SAs\n");
1519                 }
1520         } else
1521                 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1522
1523         if (nb_sa_out > 0) {
1524                 name = "sa_out";
1525                 ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
1526                 if (ctx->sa_out == NULL)
1527                         rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1528                                 "context %s in socket %d\n", rte_errno,
1529                                 name, socket_id);
1530
1531                 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
1532
1533                 if (app_sa_prm.enable != 0) {
1534                         rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
1535                                 socket_id);
1536                         if (rc != 0)
1537                                 rte_exit(EXIT_FAILURE,
1538                                         "failed to init outbound SAs\n");
1539                 }
1540         } else
1541                 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
1542                         "specified\n");
1543 }
1544
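/*
 * Check that the SA stored in the packet private data has the same SPI
 * as the SA at index sa_idx in the SA context.
 */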
1545 int
1546 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1547 {
1548         struct ipsec_mbuf_metadata *priv;
1549         struct ipsec_sa *sa;
1550
1551         priv = get_priv(m);
1552         sa = priv->sa;
1553         if (sa != NULL)
1554                 return (sa_ctx->sa[sa_idx].spi == sa->spi);
1555
1556         RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1557         return 0;
1558 }
1559
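/*
 * Look up the SAs for a burst of inbound packets via the SAD and mark
 * those that need inline offload fallback (see the comment below).
 */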
1560 void
1561 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1562                 void *sa_arr[], uint16_t nb_pkts)
1563 {
1564         uint32_t i;
1565         void *result_sa;
1566         struct ipsec_sa *sa;
1567
1568         sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
1569
1570         /*
1571          * Mark the need for inline offload fallback on the LSB of the SA
1572          * pointer. Thanks to the packet grouping mechanism used by
1573          * ipsec_process, packets marked for fallback form a separate group.
1574          *
1575          * Because the marked pointer is no longer safe to dereference, it
1576          * is cast to a generic pointer to prevent unintentional use. Use
1577          * ipsec_mask_saptr to get a valid struct pointer back.
1578          */
1579         for (i = 0; i < nb_pkts; i++) {
1580                 if (sa_arr[i] == NULL)
1581                         continue;
1582
1583                 result_sa = sa = sa_arr[i];
1584                 if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
1585                         sa->fallback_sessions > 0) {
1586                         uintptr_t intsa = (uintptr_t)sa;
1587                         intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
1588                         result_sa = (void *)intsa;
1589                 }
1590                 sa_arr[i] = result_sa;
1591         }
1592 }
1593
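/*
 * Outbound lookup is a plain index into the SA table: sa_idx[] holds the
 * SA index selected for each packet.
 */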
1594 void
1595 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1596                 void *sa[], uint16_t nb_pkts)
1597 {
1598         uint32_t i;
1599
1600         for (i = 0; i < nb_pkts; i++)
1601                 sa[i] = &sa_ctx->sa[sa_idx[i]];
1602 }
1603
1604 /*
1605  * Select HW offloads to be used.
1606  */
1607 int
1608 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1609                 uint64_t *tx_offloads)
1610 {
1611         struct ipsec_sa *rule;
1612         uint32_t idx_sa;
1613         enum rte_security_session_action_type rule_type;
1614
1615         *rx_offloads = 0;
1616         *tx_offloads = 0;
1617
1618         /* Check for inbound rules that use offloads and use this port */
1619         for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1620                 rule = &sa_in[idx_sa];
1621                 rule_type = ipsec_get_action_type(rule);
1622                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1623                                 rule_type ==
1624                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1625                                 && rule->portid == port_id)
1626                         *rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
1627         }
1628
1629         /* Check for outbound rules that use offloads and use this port */
1630         for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1631                 rule = &sa_out[idx_sa];
1632                 rule_type = ipsec_get_action_type(rule);
1633                 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1634                                 rule_type ==
1635                                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1636                                 && rule->portid == port_id)
1637                         *tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
1638         }
1639         return 0;
1640 }
1641
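/*
 * Sort both SA arrays by SPI so that sa_spi_present() can use bsearch().
 */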
1642 void
1643 sa_sort_arr(void)
1644 {
1645         qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
1646         qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
1647 }
1648
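/* Accessor for the nb_crypto_sessions counter. */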
1649 uint32_t
1650 get_nb_crypto_sessions(void)
1651 {
1652         return nb_crypto_sessions;
1653 }