X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fsa.c;h=e8f25985ce4aa107d036c1a89169d954b231e612;hb=c7e6d808e5163916181a59b2aaecf25c9c09b24c;hp=1f025a777108a13662f4dad6e872a3d910e53796;hpb=2cf67788ae895b4e3c05059e1feb06f6d3d8865e;p=dpdk.git

diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 1f025a7771..e8f25985ce 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 
 /*
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -32,7 +33,7 @@
 
 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
 
-#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
+#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
 
 struct supported_cipher_algo {
     const char *keyword;
@@ -45,6 +46,7 @@
 struct supported_auth_algo {
     const char *keyword;
     enum rte_crypto_auth_algorithm algo;
+    uint16_t iv_len;
     uint16_t digest_len;
     uint16_t key_len;
     uint8_t key_not_req;
@@ -76,6 +78,13 @@ const struct supported_cipher_algo cipher_algos[] = {
         .block_size = 16,
         .key_len = 16
     },
+    {
+        .keyword = "aes-192-cbc",
+        .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+        .iv_len = 16,
+        .block_size = 16,
+        .key_len = 24
+    },
     {
         .keyword = "aes-256-cbc",
         .algo = RTE_CRYPTO_CIPHER_AES_CBC,
@@ -90,6 +99,20 @@ const struct supported_cipher_algo cipher_algos[] = {
         .block_size = 4,
         .key_len = 20
     },
+    {
+        .keyword = "aes-192-ctr",
+        .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+        .iv_len = 16,
+        .block_size = 16,
+        .key_len = 28
+    },
+    {
+        .keyword = "aes-256-ctr",
+        .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+        .iv_len = 16,
+        .block_size = 16,
+        .key_len = 36
+    },
     {
         .keyword = "3des-cbc",
         .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
@@ -118,6 +141,31 @@ const struct supported_auth_algo auth_algos[] = {
         .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
         .digest_len = 16,
         .key_len = 32
+    },
+    {
+        .keyword = "sha384-hmac",
+        .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+        .digest_len = 24,
+        .key_len = 48
+    },
+    {
+        .keyword = "sha512-hmac",
+        .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+        .digest_len = 32,
+        .key_len = 64
+    },
+    {
+        .keyword = "aes-gmac",
+        .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+        .iv_len = 8,
+        .digest_len = 16,
+        .key_len = 20
+    },
+    {
+        .keyword = "aes-xcbc-mac-96",
+        .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+        .digest_len = 12,
+        .key_len = 16
     }
 };
 
@@ -130,19 +178,74 @@ const struct supported_aead_algo aead_algos[] = {
         .key_len = 20,
         .digest_len = 16,
         .aad_len = 8,
+    },
+    {
+        .keyword = "aes-192-gcm",
+        .algo = RTE_CRYPTO_AEAD_AES_GCM,
+        .iv_len = 8,
+        .block_size = 4,
+        .key_len = 28,
+        .digest_len = 16,
+        .aad_len = 8,
+    },
+    {
+        .keyword = "aes-256-gcm",
+        .algo = RTE_CRYPTO_AEAD_AES_GCM,
+        .iv_len = 8,
+        .block_size = 4,
+        .key_len = 36,
+        .digest_len = 16,
+        .aad_len = 8,
+    },
+    {
+        .keyword = "aes-128-ccm",
+        .algo = RTE_CRYPTO_AEAD_AES_CCM,
+        .iv_len = 8,
+        .block_size = 4,
+        .key_len = 20,
+        .digest_len = 16,
+        .aad_len = 8,
+    },
+    {
+        .keyword = "aes-192-ccm",
+        .algo = RTE_CRYPTO_AEAD_AES_CCM,
+        .iv_len = 8,
+        .block_size = 4,
+        .key_len = 28,
+        .digest_len = 16,
+        .aad_len = 8,
+    },
+    {
+        .keyword = "aes-256-ccm",
+        .algo = RTE_CRYPTO_AEAD_AES_CCM,
+        .iv_len = 8,
+        .block_size = 4,
+        .key_len = 36,
+        .digest_len = 16,
+        .aad_len = 8,
+    },
+    {
+        .keyword = "chacha20-poly1305",
+        .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+        .iv_len = 12,
+        .block_size = 64,
+        .key_len = 36,
+        .digest_len = 16,
+        .aad_len = 8,
     }
 };
 
 #define SA_INIT_NB 128
 
-static struct ipsec_sa *sa_out;
+static uint32_t nb_crypto_sessions;
+struct ipsec_sa *sa_out;
+uint32_t nb_sa_out;
 static uint32_t sa_out_sz;
-static uint32_t nb_sa_out;
 static struct ipsec_sa_cnt sa_out_cnt;
 
-static struct ipsec_sa *sa_in;
+struct ipsec_sa *sa_in;
+uint32_t nb_sa_in;
 static uint32_t sa_in_sz;
-static uint32_t nb_sa_in;
 static struct ipsec_sa_cnt sa_in_cnt;
 
 static const struct supported_cipher_algo *
@@ -271,6 +374,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
     uint32_t type_p = 0;
     uint32_t portid_p = 0;
     uint32_t fallback_p = 0;
+    int16_t status_p = 0;
+    uint16_t udp_encap_p = 0;
 
     if (strcmp(tokens[0], "in") == 0) {
         ri = &nb_sa_in;
@@ -294,7 +399,9 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
         return;
     if (atoi(tokens[1]) == INVALID_SPI)
         return;
+    rule->flags = 0;
     rule->spi = atoi(tokens[1]);
+    rule->portid = UINT16_MAX;
     ips = ipsec_get_primary_session(rule);
 
     for (ti = 2; ti < n_tokens; ti++) {
@@ -309,14 +416,14 @@
 
             if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
                 sa_cnt->nb_v4++;
-                rule->flags = IP4_TUNNEL;
+                rule->flags |= IP4_TUNNEL;
             } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
                 sa_cnt->nb_v6++;
-                rule->flags = IP6_TUNNEL;
+                rule->flags |= IP6_TUNNEL;
             } else if (strcmp(tokens[ti], "transport") == 0) {
                 sa_cnt->nb_v4++;
                 sa_cnt->nb_v6++;
-                rule->flags = TRANSPORT;
+                rule->flags |= TRANSPORT;
             } else {
                 APP_CHECK(0, status, "unrecognized "
                     "input \"%s\"", tokens[ti]);
@@ -327,6 +434,11 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
             continue;
         }
 
+        if (strcmp(tokens[ti], "telemetry") == 0) {
+            rule->flags |= SA_TELEMETRY_ENABLE;
+            continue;
+        }
+
         if (strcmp(tokens[ti], "cipher_algo") == 0) {
             const struct supported_cipher_algo *algo;
             uint32_t key_len;
@@ -448,6 +560,14 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
             if (status->status < 0)
                 return;
 
+            if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+                key_len -= 4;
+                rule->auth_key_len = key_len;
+                rule->iv_len = algo->iv_len;
+                memcpy(&rule->salt,
+                    &rule->auth_key[key_len], 4);
+            }
+
             auth_algo_p = 1;
             continue;
         }
@@ -617,6 +737,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
                     RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
             else if (strcmp(tokens[ti], "no-offload") == 0)
                 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
+            else if (strcmp(tokens[ti], "cpu-crypto") == 0)
+                ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
             else {
                 APP_CHECK(0, status, "Invalid input \"%s\"",
                         tokens[ti]);
@@ -634,13 +756,38 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
             INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
             if (status->status < 0)
                 return;
-            rule->portid = atoi(tokens[ti]);
-            if (status->status < 0)
+            if (rule->portid == UINT16_MAX)
+                rule->portid = atoi(tokens[ti]);
+            else if (rule->portid != atoi(tokens[ti])) {
+                APP_CHECK(0, status,
+                    "portid %s not matching with already assigned portid %u",
+                    tokens[ti], rule->portid);
                 return;
+            }
             portid_p = 1;
             continue;
         }
 
+        if (strcmp(tokens[ti], "mss") == 0) {
+            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+            if (status->status < 0)
+                return;
+            rule->mss = atoi(tokens[ti]);
+            if (status->status < 0)
+                return;
+            continue;
+        }
+
+        if (strcmp(tokens[ti], "esn") == 0) {
+            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+            if (status->status < 0)
+                return;
+            rule->esn = atoll(tokens[ti]);
+            if (status->status < 0)
+                return;
+            continue;
+        }
+
         if (strcmp(tokens[ti], "fallback") == 0) {
             struct rte_ipsec_session *fb;
 
@@ -667,20 +814,90 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
             if (status->status < 0)
                 return;
             fb = ipsec_get_fallback_session(rule);
-            if (strcmp(tokens[ti], "lookaside-none") == 0) {
+            if (strcmp(tokens[ti], "lookaside-none") == 0)
                 fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
-            } else {
+            else if (strcmp(tokens[ti], "cpu-crypto") == 0)
+                fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
+            else {
                 APP_CHECK(0, status, "unrecognized fallback "
                     "type %s.", tokens[ti]);
                 return;
             }
             rule->fallback_sessions = 1;
+            nb_crypto_sessions++;
             fallback_p = 1;
             continue;
         }
+        if (strcmp(tokens[ti], "flow-direction") == 0) {
+            switch (ips->type) {
+            case RTE_SECURITY_ACTION_TYPE_NONE:
+            case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+                rule->fdir_flag = 1;
+                INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+                if (status->status < 0)
+                    return;
+                if (rule->portid == UINT16_MAX)
+                    rule->portid = atoi(tokens[ti]);
+                else if (rule->portid != atoi(tokens[ti])) {
+                    APP_CHECK(0, status,
+                        "portid %s not matching with already assigned portid %u",
+                        tokens[ti], rule->portid);
+                    return;
+                }
+                INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+                if (status->status < 0)
+                    return;
+                rule->fdir_qid = atoi(tokens[ti]);
+                /* validating portid and queueid */
+                status_p = check_flow_params(rule->portid,
+                        rule->fdir_qid);
+                if (status_p < 0) {
+                    printf("port id %u / queue id %u is "
+                        "not valid\n", rule->portid,
+                         rule->fdir_qid);
+                }
+                break;
+            case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+            case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+            case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+            default:
+                APP_CHECK(0, status,
+                    "flow director not supported for security session type %d",
+                    ips->type);
+                return;
+            }
+            continue;
+        }
+        if (strcmp(tokens[ti], "udp-encap") == 0) {
+            switch (ips->type) {
+            case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+            case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+                APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
+                           status);
+                if (status->status < 0)
+                    return;
+
+                rule->udp_encap = 1;
+                app_sa_prm.udp_encap = 1;
+                udp_encap_p = 1;
+                break;
+            case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+                rule->udp_encap = 1;
+                rule->udp.sport = 0;
+                rule->udp.dport = 4500;
+                break;
+            default:
+                APP_CHECK(0, status,
+                    "UDP encapsulation not supported for "
+                    "security session type %d",
+                    ips->type);
+                return;
+            }
+            continue;
+        }
 
-        /* unrecognizeable input */
+        /* unrecognizable input */
         APP_CHECK(0, status, "unrecognized input \"%s\"",
             tokens[ti]);
         return;
@@ -710,14 +927,16 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
     if (status->status < 0)
         return;
 
-    if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
+    if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
+            RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
         printf("Missing portid option, falling back to non-offload\n");
 
-    if (!type_p || !portid_p) {
+    if (!type_p || (!portid_p && ips->type !=
+            RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
         ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
-        rule->portid = -1;
     }
 
+    nb_crypto_sessions++;
     *ri = *ri + 1;
 }
 
@@ -747,13 +966,16 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
     }
 
     for (i = 0; i < RTE_DIM(aead_algos); i++) {
-        if (aead_algos[i].algo == sa->aead_algo) {
+        if (aead_algos[i].algo == sa->aead_algo &&
+                aead_algos[i].key_len-4 == sa->cipher_key_len) {
             printf("%s ", aead_algos[i].keyword);
             break;
         }
     }
 
     printf("mode:");
+    if (sa->udp_encap)
+        printf("UDP encapsulated ");
 
     switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
     case IP4_TUNNEL:
@@ -799,32 +1021,33 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
     case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
         printf("lookaside-protocol-offload ");
         break;
+    case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+        printf("cpu-crypto-accelerated ");
+        break;
     }
 
     fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
     if (fallback_ips != NULL && sa->fallback_sessions > 0) {
         printf("inline fallback: ");
-        if (fallback_ips->type == RTE_SECURITY_ACTION_TYPE_NONE)
+        switch (fallback_ips->type) {
+        case RTE_SECURITY_ACTION_TYPE_NONE:
             printf("lookaside-none");
-        else
+            break;
+        case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+            printf("cpu-crypto-accelerated");
+            break;
+        default:
             printf("invalid");
+            break;
+        }
     }
+    if (sa->fdir_flag == 1)
+        printf("flow-direction port %d queue %d", sa->portid,
+                sa->fdir_qid);
+
     printf("\n");
 }
 
-struct ipsec_xf {
-    struct rte_crypto_sym_xform a;
-    struct rte_crypto_sym_xform b;
-};
-
-struct sa_ctx {
-    void *satbl; /* pointer to array of rte_ipsec_sa objects*/
-    struct ipsec_sad sad;
-    struct ipsec_xf *xf;
-    uint32_t nb_sa;
-    struct ipsec_sa sa[];
-};
-
 static struct sa_ctx *
 sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
 {
@@ -865,7 +1088,7 @@ sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
 }
 
 static int
-check_eth_dev_caps(uint16_t portid, uint32_t inbound)
+check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
 {
     struct rte_eth_dev_info dev_info;
     int retval;
@@ -881,7 +1104,7 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 
     if (inbound) {
         if ((dev_info.rx_offload_capa &
-                DEV_RX_OFFLOAD_SECURITY) == 0) {
+                RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
             RTE_LOG(WARNING, PORT,
                 "hardware RX IPSec offload is not supported\n");
             return -EINVAL;
@@ -889,11 +1112,17 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
         }
     } else { /* outbound */
         if ((dev_info.tx_offload_capa &
-                DEV_TX_OFFLOAD_SECURITY) == 0) {
+                RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
             RTE_LOG(WARNING, PORT,
                 "hardware TX IPSec offload is not supported\n");
             return -EINVAL;
         }
+        if (tso && (dev_info.tx_offload_capa &
+                RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
+            RTE_LOG(WARNING, PORT,
+                "hardware TCP TSO offload is not supported\n");
+            return -EINVAL;
+        }
     }
     return 0;
 }
@@ -916,7 +1145,7 @@ get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
     if (rc4 >= 0) {
         if (rc6 >= 0) {
             RTE_LOG(ERR, IPSEC,
-                "%s: SPI %u used simultaeously by "
+                "%s: SPI %u used simultaneously by "
                 "IPv4(%d) and IPv6 (%d) SP rules\n",
                 __func__, spi, rc4, rc6);
             return -EINVAL;
@@ -1022,11 +1251,10 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
         if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
             ips->type ==
             RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
-            if (check_eth_dev_caps(sa->portid, inbound))
+            if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
                 return -EINVAL;
         }
 
-
         switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
         case IP4_TUNNEL:
             sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
@@ -1043,9 +1271,15 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
             break;
         }
 
-        if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
-            struct rte_ipsec_session *ips;
-            iv_length = 12;
+
+        if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM ||
+            sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
+            sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
+
+            if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+                iv_length = 11;
+            else
+                iv_length = 12;
 
             sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
             sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
@@ -1064,29 +1298,13 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
                 sa->digest_len;
 
             sa->xforms = &sa_ctx->xf[idx].a;
-
-            ips = ipsec_get_primary_session(sa);
-            if (ips->type ==
-                RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
-                ips->type ==
-                RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
-                rc = create_inline_session(skt_ctx, sa, ips);
-                if (rc != 0) {
-                    RTE_LOG(ERR, IPSEC_ESP,
-                        "create_inline_session() failed\n");
-                    return -EINVAL;
-                }
-            }
-            print_one_sa_rule(sa, inbound);
         } else {
             switch (sa->cipher_algo) {
             case RTE_CRYPTO_CIPHER_NULL:
             case RTE_CRYPTO_CIPHER_3DES_CBC:
             case RTE_CRYPTO_CIPHER_AES_CBC:
-                iv_length = sa->iv_len;
-                break;
             case RTE_CRYPTO_CIPHER_AES_CTR:
-                iv_length = 16;
+                iv_length = sa->iv_len;
                 break;
             default:
                 RTE_LOG(ERR, IPSEC_ESP,
@@ -1095,6 +1313,10 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
                 return -EINVAL;
             }
 
+            /* AES_GMAC uses salt like AEAD algorithms */
+            if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
+                iv_length = 12;
+
             if (inbound) {
                 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
@@ -1116,6 +1338,9 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
                     sa->digest_len;
                 sa_ctx->xf[idx].a.auth.op =
                     RTE_CRYPTO_AUTH_OP_VERIFY;
+                sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
+                sa_ctx->xf[idx].a.auth.iv.length = iv_length;
+
             } else { /* outbound */
                 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
@@ -1137,14 +1362,42 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
                     sa->digest_len;
                 sa_ctx->xf[idx].b.auth.op =
                     RTE_CRYPTO_AUTH_OP_GENERATE;
+                sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
+                sa_ctx->xf[idx].b.auth.iv.length = iv_length;
+
             }
 
-            sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
-            sa_ctx->xf[idx].b.next = NULL;
-            sa->xforms = &sa_ctx->xf[idx].a;
+            if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+                sa->xforms = inbound ?
+                    &sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
+                sa->xforms->next = NULL;
+
+            } else {
+                sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
+                sa_ctx->xf[idx].b.next = NULL;
+                sa->xforms = &sa_ctx->xf[idx].a;
+            }
+        }
+
+        if (ips->type ==
+            RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+            ips->type ==
+            RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+            rc = create_inline_session(skt_ctx, sa, ips);
+            if (rc != 0) {
+                RTE_LOG(ERR, IPSEC_ESP,
+                    "create_inline_session() failed\n");
+                return -EINVAL;
+            }
+        }
 
-            print_one_sa_rule(sa, inbound);
+        if (sa->fdir_flag && inbound) {
+            rc = create_ipsec_esp_flow(sa);
+            if (rc != 0)
+                RTE_LOG(ERR, IPSEC_ESP,
+                    "create_ipsec_esp_flow() failed\n");
         }
+        print_one_sa_rule(sa, inbound);
     }
 
     return 0;
@@ -1204,6 +1457,7 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
     prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
         RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
         RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
+    prm->ipsec_xform.options.udp_encap = ss->udp_encap;
     prm->ipsec_xform.options.ecn = 1;
     prm->ipsec_xform.options.copy_dscp = 1;
 
@@ -1279,6 +1533,9 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
     if (rc < 0)
         return rc;
 
+    if (lsa->flags & SA_TELEMETRY_ENABLE)
+        rte_ipsec_telemetry_sa_add(sa);
+
     /* init primary processing session */
     ips = ipsec_get_primary_session(lsa);
     rc = fill_ipsec_session(ips, sa);
@@ -1293,7 +1550,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
 }
 
 /*
- * Allocate space and init rte_ipsec_sa strcutures,
+ * Allocate space and init rte_ipsec_sa structures,
  * one per session.
  */
 static int
@@ -1509,10 +1766,18 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
     struct ipsec_sa *rule;
     uint32_t idx_sa;
     enum rte_security_session_action_type rule_type;
+    struct rte_eth_dev_info dev_info;
+    int ret;
 
     *rx_offloads = 0;
     *tx_offloads = 0;
 
+    ret = rte_eth_dev_info_get(port_id, &dev_info);
+    if (ret != 0)
+        rte_exit(EXIT_FAILURE,
+            "Error during getting device (port %u) info: %s\n",
+            port_id, strerror(-ret));
+
     /* Check for inbound rules that use offloads and use this port */
     for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
         rule = &sa_in[idx_sa];
@@ -1521,18 +1786,45 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
             rule_type ==
                 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
                 && rule->portid == port_id)
-            *rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+            *rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
     }
 
     /* Check for outbound rules that use offloads and use this port */
     for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
         rule = &sa_out[idx_sa];
         rule_type = ipsec_get_action_type(rule);
-        if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
-                rule_type ==
-                RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
-                && rule->portid == port_id)
-            *tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+        switch (rule_type) {
+        case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+            /* Checksum offload is not needed for inline protocol as
+             * all processing for Outbound IPSec packets will be
+             * implicitly taken care and for non-IPSec packets,
+             * there is no need of IPv4 Checksum offload.
+             */
+            if (rule->portid == port_id) {
+                *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+                if (rule->mss)
+                    *tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+                             RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
+            }
+            break;
+        case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+            if (rule->portid == port_id) {
+                *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+                if (rule->mss)
+                    *tx_offloads |=
+                        RTE_ETH_TX_OFFLOAD_TCP_TSO;
+                *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+            }
+            break;
+        default:
+            /* Enable IPv4 checksum offload even if one of lookaside
+             * SA's are present.
+             */
+            if (dev_info.tx_offload_capa &
+                RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+                *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+            break;
+        }
     }
     return 0;
 }
@@ -1543,3 +1835,9 @@ sa_sort_arr(void)
     qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
     qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
 }
+
+uint32_t
+get_nb_crypto_sessions(void)
+{
+    return nb_crypto_sessions;
+}
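Note: below is a minimal, illustrative sketch of SA configuration lines that the extended parser above would accept, following the rule syntax documented in the ipsec-secgw guide and in the usage comment at the top of sa.c. All SPIs, keys, addresses and port/queue numbers are placeholder values chosen for the example, not values taken from this patch:

sa out 5 aead_algo aes-256-gcm aead_key 2b:7e:15:16:28:ae:d2:a6:ab:f7:15:88:09:cf:4f:3c:2b:7e:15:16:28:ae:d2:a6:ab:f7:15:88:09:cf:4f:3c:de:ad:be:ef mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 type inline-protocol-offload port_id 0 udp-encap mss 1400 esn 1

sa in 6 aead_algo chacha20-poly1305 aead_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f:10:11:12:13:14:15:16:17:18:19:1a:1b:1c:1d:1e:1f:a0:a1:a2:a3 mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 type cpu-crypto flow-direction 0 1 telemetry

Both aead_key strings carry 36 bytes (a 32-byte key followed by a 4-byte salt), matching key_len = 36 in aead_algos[]. Per the switch statements added by this patch, flow-direction is only accepted for the none and cpu-crypto session types, while udp-encap is limited to the protocol-offload and inline-crypto types; the telemetry keyword sets SA_TELEMETRY_ENABLE, which makes ipsec_sa_init() register the SA via rte_ipsec_telemetry_sa_add().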