1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
6 * Security Associations
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
20 #include <rte_random.h>
21 #include <rte_ethdev.h>
/*
 * Descriptor types for the cipher/auth/AEAD algorithms this application
 * supports.  NOTE(review): this listing is elided — each struct also
 * carries additional fields (e.g. keyword string, key/iv/digest lengths)
 * referenced by the parsing code below; confirm against the full source.
 */
29 struct supported_cipher_algo {
31 enum rte_crypto_cipher_algorithm algo; /* rte_cryptodev cipher id */
37 struct supported_auth_algo {
39 enum rte_crypto_auth_algorithm algo; /* rte_cryptodev auth id */
45 struct supported_aead_algo {
47 enum rte_crypto_aead_algorithm algo; /* rte_cryptodev AEAD id */
/*
 * Ciphers accepted by the "cipher_algo" SA-rule token.  Keywords are
 * matched verbatim by find_match_cipher_algo().
 */
56 const struct supported_cipher_algo cipher_algos[] = {
59 .algo = RTE_CRYPTO_CIPHER_NULL,
65 .keyword = "aes-128-cbc",
66 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
72 .keyword = "aes-128-ctr",
73 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
75 .block_size = 16, /* XXX AESNI MB limitation, should be 4 */
/*
 * Authentication algorithms accepted by the "auth_algo" SA-rule token.
 */
80 const struct supported_auth_algo auth_algos[] = {
83 .algo = RTE_CRYPTO_AUTH_NULL,
89 .keyword = "sha1-hmac",
90 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
95 .keyword = "sha256-hmac",
96 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
/*
 * AEAD algorithms accepted by the "aead_algo" SA-rule token.  AEAD rules
 * replace the separate cipher/auth options (enforced in parse_sa_tokens).
 */
102 const struct supported_aead_algo aead_algos[] = {
104 .keyword = "aes-128-gcm",
105 .algo = RTE_CRYPTO_AEAD_AES_GCM,
/* Parsed outbound SA rules, filled in by parse_sa_tokens(). */
114 struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
/* Parsed inbound SA rules, filled in by parse_sa_tokens(). */
117 struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
120 static const struct supported_cipher_algo *
121 find_match_cipher_algo(const char *cipher_keyword)
125 for (i = 0; i < RTE_DIM(cipher_algos); i++) {
126 const struct supported_cipher_algo *algo =
129 if (strcmp(cipher_keyword, algo->keyword) == 0)
136 static const struct supported_auth_algo *
137 find_match_auth_algo(const char *auth_keyword)
141 for (i = 0; i < RTE_DIM(auth_algos); i++) {
142 const struct supported_auth_algo *algo =
145 if (strcmp(auth_keyword, algo->keyword) == 0)
152 static const struct supported_aead_algo *
153 find_match_aead_algo(const char *aead_keyword)
157 for (i = 0; i < RTE_DIM(aead_algos); i++) {
158 const struct supported_aead_algo *algo =
161 if (strcmp(aead_keyword, algo->keyword) == 0)
169 * parse x:x:x:x.... hex number key string into uint8_t *key
171 * > 0: number of bytes parsed
/*
 * Parse an "xx:xx:xx:..." hex-byte key string into key[].
 * Returns the number of bytes written, or 0 when any colon-separated
 * segment is longer than two characters.  Non-hex segments are not
 * rejected; strtol() silently yields 0 for them (matches original
 * behavior).
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *cursor = key_str;
	uint32_t count = 0;
	int last = 0;

	while (!last) {
		char byte_str[3] = {0};
		const char *sep = strchr(cursor, ':');

		if (sep == NULL) {
			/* final segment: bounded copy of at most 2 chars */
			if (strlen(cursor) > 2)
				return 0;
			strncpy(byte_str, cursor, 2);
			last = 1;
		} else {
			if (sep - cursor > 2)
				return 0;
			strncpy(byte_str, cursor, sep - cursor);
			cursor = sep + 1;
		}

		key[count++] = (uint8_t)strtol(byte_str, NULL, 16);
	}

	return count;
}
/*
 * Parse one tokenized SA rule ("in"/"out", SPI, then keyword options) into
 * the global sa_in[]/sa_out[] tables.  Errors are reported through *status
 * via the APP_CHECK* macros; each check is followed by an early bail-out
 * when status->status went negative.  NOTE(review): this listing is elided —
 * statements between the visible fragments (returns, rule assignment,
 * closing braces) are missing here.
 */
204 parse_sa_tokens(char **tokens, uint32_t n_tokens,
205 struct parse_status *status)
207 struct ipsec_sa *rule = NULL;
208 uint32_t ti; /*token index*/
209 uint32_t *ri /*rule index*/;
/* presence flags: each option may appear at most once per rule */
210 uint32_t cipher_algo_p = 0;
211 uint32_t auth_algo_p = 0;
212 uint32_t aead_algo_p = 0;
217 uint32_t portid_p = 0;
/* tokens[0] selects direction and thus which rule table/index to use */
219 if (strcmp(tokens[0], "in") == 0) {
222 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
223 "too many sa rules, abort insertion\n");
224 if (status->status < 0)
231 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
232 "too many sa rules, abort insertion\n");
233 if (status->status < 0)
/* tokens[1] is the SPI; must be numeric and not the INVALID_SPI sentinel */
240 APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
241 if (status->status < 0)
243 if (atoi(tokens[1]) == INVALID_SPI)
245 rule->spi = atoi(tokens[1]);
/* remaining tokens are keyword/value option pairs */
247 for (ti = 2; ti < n_tokens; ti++) {
248 if (strcmp(tokens[ti], "mode") == 0) {
249 APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
250 if (status->status < 0)
253 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
254 if (status->status < 0)
257 if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
258 rule->flags = IP4_TUNNEL;
259 else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
260 rule->flags = IP6_TUNNEL;
261 else if (strcmp(tokens[ti], "transport") == 0)
262 rule->flags = TRANSPORT;
264 APP_CHECK(0, status, "unrecognized "
265 "input \"%s\"", tokens[ti]);
/* cipher option: algo keyword, then (unless NULL cipher) "cipher_key" + key */
273 if (strcmp(tokens[ti], "cipher_algo") == 0) {
274 const struct supported_cipher_algo *algo;
277 APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
279 if (status->status < 0)
282 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
283 if (status->status < 0)
286 algo = find_match_cipher_algo(tokens[ti]);
288 APP_CHECK(algo != NULL, status, "unrecognized "
289 "input \"%s\"", tokens[ti]);
291 rule->cipher_algo = algo->algo;
292 rule->block_size = algo->block_size;
293 rule->iv_len = algo->iv_len;
294 rule->cipher_key_len = algo->key_len;
296 /* for NULL algorithm, no cipher key required */
297 if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
302 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
303 if (status->status < 0)
306 APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
307 status, "unrecognized input \"%s\", "
308 "expect \"cipher_key\"", tokens[ti]);
309 if (status->status < 0)
312 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
313 if (status->status < 0)
316 key_len = parse_key_string(tokens[ti],
/* parsed key length must equal the table's expected length */
318 APP_CHECK(key_len == rule->cipher_key_len, status,
319 "unrecognized input \"%s\"", tokens[ti]);
320 if (status->status < 0)
/* CBC uses a random salt; CTR appends a 4-byte nonce/salt to the key */
323 if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC)
324 rule->salt = (uint32_t)rte_rand();
326 if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
328 rule->cipher_key_len = key_len;
330 &rule->cipher_key[key_len], 4);
/* auth option: algo keyword, then (unless key_not_req) "auth_key" + key */
337 if (strcmp(tokens[ti], "auth_algo") == 0) {
338 const struct supported_auth_algo *algo;
341 APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
343 if (status->status < 0)
346 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
347 if (status->status < 0)
350 algo = find_match_auth_algo(tokens[ti]);
351 APP_CHECK(algo != NULL, status, "unrecognized "
352 "input \"%s\"", tokens[ti]);
354 rule->auth_algo = algo->algo;
355 rule->auth_key_len = algo->key_len;
356 rule->digest_len = algo->digest_len;
358 /* NULL algorithm and combined algos do not
361 if (algo->key_not_req) {
366 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
367 if (status->status < 0)
370 APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
371 status, "unrecognized input \"%s\", "
372 "expect \"auth_key\"", tokens[ti]);
373 if (status->status < 0)
376 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
377 if (status->status < 0)
380 key_len = parse_key_string(tokens[ti],
382 APP_CHECK(key_len == rule->auth_key_len, status,
383 "unrecognized input \"%s\"", tokens[ti]);
384 if (status->status < 0)
/* AEAD option: replaces cipher+auth; key is stored in cipher_key */
391 if (strcmp(tokens[ti], "aead_algo") == 0) {
392 const struct supported_aead_algo *algo;
395 APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
397 if (status->status < 0)
400 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
401 if (status->status < 0)
404 algo = find_match_aead_algo(tokens[ti]);
406 APP_CHECK(algo != NULL, status, "unrecognized "
407 "input \"%s\"", tokens[ti]);
409 rule->aead_algo = algo->algo;
410 rule->cipher_key_len = algo->key_len;
411 rule->digest_len = algo->digest_len;
412 rule->aad_len = algo->aad_len;
413 rule->block_size = algo->block_size;
414 rule->iv_len = algo->iv_len;
416 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
417 if (status->status < 0)
420 APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
421 status, "unrecognized input \"%s\", "
422 "expect \"aead_key\"", tokens[ti]);
423 if (status->status < 0)
426 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
427 if (status->status < 0)
430 key_len = parse_key_string(tokens[ti],
432 APP_CHECK(key_len == rule->cipher_key_len, status,
433 "unrecognized input \"%s\"", tokens[ti]);
434 if (status->status < 0)
/* last 4 parsed bytes appear to become the salt — TODO confirm */
438 rule->cipher_key_len = key_len;
440 &rule->cipher_key[key_len], 4);
/* tunnel endpoint source address; only valid for tunnel modes */
446 if (strcmp(tokens[ti], "src") == 0) {
447 APP_CHECK_PRESENCE(src_p, tokens[ti], status);
448 if (status->status < 0)
451 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
452 if (status->status < 0)
455 if (rule->flags == IP4_TUNNEL) {
458 APP_CHECK(parse_ipv4_addr(tokens[ti],
459 &ip, NULL) == 0, status,
460 "unrecognized input \"%s\", "
461 "expect valid ipv4 addr",
463 if (status->status < 0)
465 rule->src.ip.ip4 = rte_bswap32(
466 (uint32_t)ip.s_addr);
467 } else if (rule->flags == IP6_TUNNEL) {
470 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
472 "unrecognized input \"%s\", "
473 "expect valid ipv6 addr",
475 if (status->status < 0)
477 memcpy(rule->src.ip.ip6.ip6_b,
479 } else if (rule->flags == TRANSPORT) {
480 APP_CHECK(0, status, "unrecognized input "
481 "\"%s\"", tokens[ti]);
/* tunnel endpoint destination address; only valid for tunnel modes */
489 if (strcmp(tokens[ti], "dst") == 0) {
490 APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
491 if (status->status < 0)
494 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
495 if (status->status < 0)
498 if (rule->flags == IP4_TUNNEL) {
501 APP_CHECK(parse_ipv4_addr(tokens[ti],
502 &ip, NULL) == 0, status,
503 "unrecognized input \"%s\", "
504 "expect valid ipv4 addr",
506 if (status->status < 0)
508 rule->dst.ip.ip4 = rte_bswap32(
509 (uint32_t)ip.s_addr);
510 } else if (rule->flags == IP6_TUNNEL) {
513 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
515 "unrecognized input \"%s\", "
516 "expect valid ipv6 addr",
518 if (status->status < 0)
520 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
521 } else if (rule->flags == TRANSPORT) {
522 APP_CHECK(0, status, "unrecognized "
523 "input \"%s\"", tokens[ti]);
/* security action type: inline/lookaside offload or plain crypto */
531 if (strcmp(tokens[ti], "type") == 0) {
532 APP_CHECK_PRESENCE(type_p, tokens[ti], status);
533 if (status->status < 0)
536 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
537 if (status->status < 0)
540 if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
542 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
543 else if (strcmp(tokens[ti],
544 "inline-protocol-offload") == 0)
546 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
547 else if (strcmp(tokens[ti],
548 "lookaside-protocol-offload") == 0)
550 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
551 else if (strcmp(tokens[ti], "no-offload") == 0)
552 rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
554 APP_CHECK(0, status, "Invalid input \"%s\"",
/* ethernet port the offloaded SA is bound to */
563 if (strcmp(tokens[ti], "port_id") == 0) {
564 APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
565 if (status->status < 0)
567 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
568 if (status->status < 0)
570 rule->portid = atoi(tokens[ti]);
571 if (status->status < 0)
577 /* unrecognizeable input */
578 APP_CHECK(0, status, "unrecognized input \"%s\"",
/* post-loop validation: AEAD excludes cipher/auth, else both required */
584 APP_CHECK(cipher_algo_p == 0, status,
585 "AEAD used, no need for cipher options");
586 if (status->status < 0)
589 APP_CHECK(auth_algo_p == 0, status,
590 "AEAD used, no need for auth options");
591 if (status->status < 0)
594 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
595 if (status->status < 0)
598 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
599 if (status->status < 0)
603 APP_CHECK(mode_p == 1, status, "missing mode option");
604 if (status->status < 0)
/* offload without a port id falls back to non-offload processing */
607 if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
608 printf("Missing portid option, falling back to non-offload\n");
610 if (!type_p || !portid_p) {
611 rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
/*
 * Pretty-print a single SA: SPI, matched algorithm keywords, and tunnel
 * endpoints (IPv4 dotted quad or IPv6 hex groups).  NOTE(review): listing
 * is elided — loop bodies and the switch over sa->flags are incomplete.
 */
619 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
624 printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
/* reverse-lookup each algo id back to its config keyword */
626 for (i = 0; i < RTE_DIM(cipher_algos); i++) {
627 if (cipher_algos[i].algo == sa->cipher_algo) {
628 printf("%s ", cipher_algos[i].keyword);
633 for (i = 0; i < RTE_DIM(auth_algos); i++) {
634 if (auth_algos[i].algo == sa->auth_algo) {
635 printf("%s ", auth_algos[i].keyword);
640 for (i = 0; i < RTE_DIM(aead_algos); i++) {
641 if (aead_algos[i].algo == sa->aead_algo) {
642 printf("%s ", aead_algos[i].keyword);
/* IPv4 tunnel: bytes printed d.c.b.a, i.e. reversed from the helper */
651 printf("IP4Tunnel ");
652 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
653 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
654 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
655 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
/* IPv6 tunnel: two hex bytes per group, ':' between groups */
658 printf("IP6Tunnel ");
659 for (i = 0; i < 16; i++) {
660 if (i % 2 && i != 15)
661 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
663 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
666 for (i = 0; i < 16; i++) {
667 if (i % 2 && i != 15)
668 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
670 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
/*
 * NOTE(review): the enclosing "struct sa_ctx {" header is elided above.
 * The context pairs each SA slot with its two chained crypto transforms.
 */
681 struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
684 struct rte_crypto_sym_xform a; /* first transform in the chain */
685 struct rte_crypto_sym_xform b; /* second transform (NULL next for AEAD) */
687 } xf[IPSEC_SA_MAX_ENTRIES];
/*
 * Allocate and return a per-socket SA context from a named memzone.
 * Returns NULL on reservation failure (elided branch after the printf).
 */
690 static struct sa_ctx *
691 sa_create(const char *name, int32_t socket_id)
694 struct sa_ctx *sa_ctx;
696 const struct rte_memzone *mz;
/* memzone name is "<name>_<socket_id>" so each socket gets its own DB */
698 snprintf(s, sizeof(s), "%s_%u", name, socket_id);
700 /* Create SA array table */
701 printf("Creating SA context with %u maximum entries\n",
702 IPSEC_SA_MAX_ENTRIES);
704 mz_size = sizeof(struct sa_ctx);
705 mz = rte_memzone_reserve(s, mz_size, socket_id,
706 RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
708 printf("Failed to allocate SA DB memory\n");
713 sa_ctx = (struct sa_ctx *)mz->addr;
/*
 * Verify that port `portid` advertises the RX (inbound) or TX (outbound)
 * security offload capability.  NOTE(review): return statements are
 * elided; the warnings suggest a non-zero return on missing capability.
 */
719 check_eth_dev_caps(uint16_t portid, uint32_t inbound)
721 struct rte_eth_dev_info dev_info;
723 rte_eth_dev_info_get(portid, &dev_info);
726 if ((dev_info.rx_offload_capa &
727 DEV_RX_OFFLOAD_SECURITY) == 0) {
728 RTE_LOG(WARNING, PORT,
729 "hardware RX IPSec offload is not supported\n");
733 } else { /* outbound */
734 if ((dev_info.tx_offload_capa &
735 DEV_TX_OFFLOAD_SECURITY) == 0) {
736 RTE_LOG(WARNING, PORT,
737 "hardware TX IPSec offload is not supported\n");
/*
 * Install parsed SA entries into sa_ctx: copy each rule into the slot
 * derived from its SPI, then build the rte_crypto_sym_xform chain
 * (single AEAD xform, or cipher+auth pair ordered by direction).
 * NOTE(review): listing is elided — the entry copy, several assignments
 * and break/continue statements between fragments are missing here.
 */
746 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
747 uint32_t nb_entries, uint32_t inbound)
753 for (i = 0; i < nb_entries; i++) {
/* SPI maps to a fixed slot; collisions abort this entry */
754 idx = SPI2IDX(entries[i].spi);
755 sa = &sa_ctx->sa[idx];
757 printf("Index %u already in use by SPI %u\n",
/* inline offloads additionally require device capability + direction */
764 if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
765 sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
766 if (check_eth_dev_caps(sa->portid, inbound))
770 sa->direction = (inbound == 1) ?
771 RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
772 RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
/* tunnel endpoints converted to network byte order once, here */
776 sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
777 sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
/* AEAD: one combined xform, op chosen by direction */
780 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
783 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
784 sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
785 sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
786 sa_ctx->xf[idx].a.aead.key.length =
788 sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
789 RTE_CRYPTO_AEAD_OP_DECRYPT :
790 RTE_CRYPTO_AEAD_OP_ENCRYPT;
791 sa_ctx->xf[idx].a.next = NULL;
792 sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
793 sa_ctx->xf[idx].a.aead.iv.length = iv_length;
794 sa_ctx->xf[idx].a.aead.aad_length =
796 sa_ctx->xf[idx].a.aead.digest_length =
799 sa->xforms = &sa_ctx->xf[idx].a;
801 print_one_sa_rule(sa, inbound);
/* non-AEAD: pick IV length per cipher, then build cipher+auth chain */
803 switch (sa->cipher_algo) {
804 case RTE_CRYPTO_CIPHER_NULL:
805 case RTE_CRYPTO_CIPHER_AES_CBC:
806 iv_length = sa->iv_len;
808 case RTE_CRYPTO_CIPHER_AES_CTR:
812 RTE_LOG(ERR, IPSEC_ESP,
813 "unsupported cipher algorithm %u\n",
/* inbound: decrypt (xform b) is applied after verify (xform a) */
819 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
820 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
821 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
822 sa_ctx->xf[idx].b.cipher.key.length =
824 sa_ctx->xf[idx].b.cipher.op =
825 RTE_CRYPTO_CIPHER_OP_DECRYPT;
826 sa_ctx->xf[idx].b.next = NULL;
827 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
828 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
830 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
831 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
832 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
833 sa_ctx->xf[idx].a.auth.key.length =
835 sa_ctx->xf[idx].a.auth.digest_length =
837 sa_ctx->xf[idx].a.auth.op =
838 RTE_CRYPTO_AUTH_OP_VERIFY;
839 } else { /* outbound */
/* outbound: encrypt first (xform a), then generate digest (xform b) */
840 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
841 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
842 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
843 sa_ctx->xf[idx].a.cipher.key.length =
845 sa_ctx->xf[idx].a.cipher.op =
846 RTE_CRYPTO_CIPHER_OP_ENCRYPT;
847 sa_ctx->xf[idx].a.next = NULL;
848 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
849 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
851 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
852 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
853 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
854 sa_ctx->xf[idx].b.auth.key.length =
856 sa_ctx->xf[idx].b.auth.digest_length =
858 sa_ctx->xf[idx].b.auth.op =
859 RTE_CRYPTO_AUTH_OP_GENERATE;
/* chain a -> b regardless of direction; SA points at the chain head */
862 sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
863 sa_ctx->xf[idx].b.next = NULL;
864 sa->xforms = &sa_ctx->xf[idx].a;
866 print_one_sa_rule(sa, inbound);
/* Install outbound SA rules (inbound flag = 0). */
874 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
877 return sa_add_rules(sa_ctx, entries, nb_entries, 0);
/* Install inbound SA rules (inbound flag = 1). */
881 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
884 return sa_add_rules(sa_ctx, entries, nb_entries, 1);
/*
 * One-time per-socket SA database initialization: create the inbound and
 * outbound contexts and install the globally parsed rules.  Aborts the
 * process (rte_exit) on NULL context, double init, or creation failure.
 */
888 sa_init(struct socket_ctx *ctx, int32_t socket_id)
893 rte_exit(EXIT_FAILURE, "NULL context.\n");
895 if (ctx->sa_in != NULL)
896 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
897 "initialized\n", socket_id);
899 if (ctx->sa_out != NULL)
900 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
901 "initialized\n", socket_id);
905 ctx->sa_in = sa_create(name, socket_id);
906 if (ctx->sa_in == NULL)
907 rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
908 "context %s in socket %d\n", rte_errno,
/* rules were parsed earlier into sa_in[]/sa_out[]; warn when empty */
911 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
913 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
917 ctx->sa_out = sa_create(name, socket_id);
918 if (ctx->sa_out == NULL)
919 rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
920 "context %s in socket %d\n", rte_errno,
923 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
925 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
/*
 * Check that packet m was processed by the SA at sa_idx: compare the SPI
 * recorded in the mbuf's private metadata against the table entry.
 */
930 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
932 struct ipsec_mbuf_metadata *priv;
/* private metadata lives immediately after the rte_mbuf header */
934 priv = RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
936 return (sa_ctx->sa[sa_idx].spi == priv->sa->spi);
/*
 * Locate the SA for one inbound ESP packet: find the ESP header after the
 * IPv4/IPv6 header, index the SADB by SPI, then confirm SPI and tunnel
 * endpoints match.  NOTE(review): *sa_ret assignments and returns are
 * elided from this listing.
 */
940 single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
941 struct ipsec_sa **sa_ret)
951 ip = rte_pktmbuf_mtod(pkt, struct ip *);
952 if (ip->ip_v == IPVERSION)
953 esp = (struct esp_hdr *)(ip + 1);
955 esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);
957 if (esp->spi == INVALID_SPI)
/* SPI is carried big-endian on the wire; convert before indexing */
960 sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
961 if (rte_be_to_cpu_32(esp->spi) != sa->spi)
/* src and dst are adjacent in both headers, so one pointer covers both */
966 src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
967 if ((ip->ip_v == IPVERSION) &&
968 (sa->src.ip.ip4 == *src4_addr) &&
969 (sa->dst.ip.ip4 == *(src4_addr + 1)))
973 src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
974 if ((ip->ip_v == IP6_VERSION) &&
975 !memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
976 !memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
/* Resolve the SA for each inbound packet in the burst. */
985 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
986 struct ipsec_sa *sa[], uint16_t nb_pkts)
990 for (i = 0; i < nb_pkts; i++)
991 single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
/* Outbound path: SA indices are known from routing, so index directly. */
995 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
996 struct ipsec_sa *sa[], uint16_t nb_pkts)
1000 for (i = 0; i < nb_pkts; i++)
1001 sa[i] = &sa_ctx->sa[sa_idx[i]];