diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 4f0d20d14d..e8f25985ce 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -46,6 +46,7 @@ struct supported_cipher_algo {
 struct supported_auth_algo {
 	const char *keyword;
 	enum rte_crypto_auth_algorithm algo;
+	uint16_t iv_len;
 	uint16_t digest_len;
 	uint16_t key_len;
 	uint8_t key_not_req;
@@ -98,6 +99,20 @@ const struct supported_cipher_algo cipher_algos[] = {
 		.block_size = 4,
 		.key_len = 20
 	},
+	{
+		.keyword = "aes-192-ctr",
+		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+		.iv_len = 16,
+		.block_size = 16,
+		.key_len = 28
+	},
+	{
+		.keyword = "aes-256-ctr",
+		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+		.iv_len = 16,
+		.block_size = 16,
+		.key_len = 36
+	},
 	{
 		.keyword = "3des-cbc",
 		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
@@ -126,6 +141,31 @@ const struct supported_auth_algo auth_algos[] = {
 		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
 		.digest_len = 16,
 		.key_len = 32
+	},
+	{
+		.keyword = "sha384-hmac",
+		.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+		.digest_len = 24,
+		.key_len = 48
+	},
+	{
+		.keyword = "sha512-hmac",
+		.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+		.digest_len = 32,
+		.key_len = 64
+	},
+	{
+		.keyword = "aes-gmac",
+		.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+		.iv_len = 8,
+		.digest_len = 16,
+		.key_len = 20
+	},
+	{
+		.keyword = "aes-xcbc-mac-96",
+		.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+		.digest_len = 12,
+		.key_len = 16
 	}
 };
@@ -156,6 +196,42 @@ const struct supported_aead_algo aead_algos[] = {
 		.key_len = 36,
 		.digest_len = 16,
 		.aad_len = 8,
+	},
+	{
+		.keyword = "aes-128-ccm",
+		.algo = RTE_CRYPTO_AEAD_AES_CCM,
+		.iv_len = 8,
+		.block_size = 4,
+		.key_len = 20,
+		.digest_len = 16,
+		.aad_len = 8,
+	},
+	{
+		.keyword = "aes-192-ccm",
+		.algo = RTE_CRYPTO_AEAD_AES_CCM,
+		.iv_len = 8,
+		.block_size = 4,
+		.key_len = 28,
+		.digest_len = 16,
+		.aad_len = 8,
+	},
+	{
+		.keyword = "aes-256-ccm",
+		.algo = RTE_CRYPTO_AEAD_AES_CCM,
+		.iv_len = 8,
+		.block_size = 4,
+		.key_len = 36,
+		.digest_len = 16,
+		.aad_len = 8,
+	},
+	{
+		.keyword = "chacha20-poly1305",
+		.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+		.iv_len = 12,
+		.block_size = 64,
+		.key_len = 36,
+		.digest_len = 16,
+		.aad_len = 8,
 	}
 };
@@ -484,6 +560,14 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			if (status->status < 0)
 				return;
 
+			if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+				key_len -= 4;
+				rule->auth_key_len = key_len;
+				rule->iv_len = algo->iv_len;
+				memcpy(&rule->salt,
+					&rule->auth_key[key_len], 4);
+			}
+
 			auth_algo_p = 1;
 			continue;
 		}
@@ -694,6 +778,16 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			continue;
 		}
 
+		if (strcmp(tokens[ti], "esn") == 0) {
+			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+			if (status->status < 0)
+				return;
+			rule->esn = atoll(tokens[ti]);
+			if (status->status < 0)
+				return;
+			continue;
+		}
+
 		if (strcmp(tokens[ti], "fallback") == 0) {
 			struct rte_ipsec_session *fb;
 
@@ -803,7 +897,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			continue;
 		}
 
-		/* unrecognizeable input */
+		/* unrecognizable input */
 		APP_CHECK(0, status, "unrecognized input \"%s\"",
 			tokens[ti]);
 		return;
@@ -1051,7 +1145,7 @@ get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
 	if (rc4 >= 0) {
 		if (rc6 >= 0) {
 			RTE_LOG(ERR, IPSEC,
-				"%s: SPI %u used simultaeously by "
+				"%s: SPI %u used simultaneously by "
 				"IPv4(%d) and IPv6 (%d) SP rules\n",
 				__func__, spi, rc4, rc6);
 			return -EINVAL;
@@ -1177,8 +1271,15 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 			break;
 		}
 
-		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
-			iv_length = 12;
+
+		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM ||
+			sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
+			sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
+
+			if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+				iv_length = 11;
+			else
+				iv_length = 12;
 
 			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
@@ -1202,10 +1303,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 			case RTE_CRYPTO_CIPHER_NULL:
 			case RTE_CRYPTO_CIPHER_3DES_CBC:
 			case RTE_CRYPTO_CIPHER_AES_CBC:
-				iv_length = sa->iv_len;
-				break;
 			case RTE_CRYPTO_CIPHER_AES_CTR:
-				iv_length = 16;
+				iv_length = sa->iv_len;
 				break;
 			default:
 				RTE_LOG(ERR, IPSEC_ESP,
@@ -1214,6 +1313,10 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				return -EINVAL;
 			}
 
+			/* AES_GMAC uses salt like AEAD algorithms */
+			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
+				iv_length = 12;
+
 			if (inbound) {
 				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
@@ -1235,6 +1338,9 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 					sa->digest_len;
 				sa_ctx->xf[idx].a.auth.op =
 					RTE_CRYPTO_AUTH_OP_VERIFY;
+				sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
+				sa_ctx->xf[idx].a.auth.iv.length = iv_length;
+
 			} else { /* outbound */
 				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
@@ -1256,11 +1362,21 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 					sa->digest_len;
 				sa_ctx->xf[idx].b.auth.op =
 					RTE_CRYPTO_AUTH_OP_GENERATE;
+				sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
+				sa_ctx->xf[idx].b.auth.iv.length = iv_length;
+
 			}
 
-			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
-			sa_ctx->xf[idx].b.next = NULL;
-			sa->xforms = &sa_ctx->xf[idx].a;
+			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+				sa->xforms = inbound ?
+					&sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
+				sa->xforms->next = NULL;
+
+			} else {
+				sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
+				sa_ctx->xf[idx].b.next = NULL;
+				sa->xforms = &sa_ctx->xf[idx].a;
+			}
 		}
 
 		if (ips->type ==
@@ -1434,7 +1550,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
 }
 
 /*
- * Allocate space and init rte_ipsec_sa strcutures,
+ * Allocate space and init rte_ipsec_sa structures,
  * one per session.
  */
 static int
@@ -1650,10 +1766,18 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 	struct ipsec_sa *rule;
 	uint32_t idx_sa;
 	enum rte_security_session_action_type rule_type;
+	struct rte_eth_dev_info dev_info;
+	int ret;
 
 	*rx_offloads = 0;
 	*tx_offloads = 0;
 
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		rte_exit(EXIT_FAILURE,
+			"Error during getting device (port %u) info: %s\n",
+			port_id, strerror(-ret));
+
 	/* Check for inbound rules that use offloads and use this port */
 	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
 		rule = &sa_in[idx_sa];
@@ -1669,13 +1793,37 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
 		rule = &sa_out[idx_sa];
 		rule_type = ipsec_get_action_type(rule);
-		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
-				rule_type ==
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
-				&& rule->portid == port_id) {
-			*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
-			if (rule->mss)
-				*tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+		switch (rule_type) {
+		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+			/* Checksum offload is not needed for inline protocol,
+			 * as all processing for outbound IPSec packets is
+			 * taken care of implicitly, and for non-IPSec packets
+			 * there is no need for IPv4 checksum offload.
+			 */
+			if (rule->portid == port_id) {
+				*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+				if (rule->mss)
+					*tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+							RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
+			}
+			break;
+		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+			if (rule->portid == port_id) {
+				*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+				if (rule->mss)
+					*tx_offloads |=
+						RTE_ETH_TX_OFFLOAD_TCP_TSO;
+				*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+			}
+			break;
+		default:
+			/* Enable IPv4 checksum offload even if only
+			 * lookaside SAs are present.
+			 */
+			if (dev_info.tx_offload_capa &
+					RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+				*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+			break;
 		}
 	}
 	return 0;
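
The aes-gmac handling in parse_sa_tokens() above relies on a convention that is easy to miss when writing an SA configuration: the 20-byte key supplied for the "aes-gmac" entry is not all key material. The parser keeps the first 16 bytes as the AES key and copies the trailing 4 bytes into rule->salt; that salt, combined with the 8-byte per-packet IV (algo->iv_len), forms the 12-byte IV that sa_add_rules() programs for RTE_CRYPTO_AUTH_AES_GMAC. A minimal standalone sketch of that split follows; struct sa_fields and split_gmac_key() are illustrative stand-ins for the ipsec_sa fields and parser logic, not names from sa.c:

/*
 * Standalone sketch (not part of sa.c): how the trailing 4 bytes of the
 * configured 20-byte aes-gmac key become the salt. struct sa_fields is a
 * hypothetical stand-in for the ipsec_sa fields the parser touches.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sa_fields {
	uint8_t auth_key[20];
	uint16_t auth_key_len;
	uint32_t salt;
};

static void
split_gmac_key(struct sa_fields *sa, const uint8_t *key, size_t len)
{
	memcpy(sa->auth_key, key, len);
	/* mirrors the parser: key_len -= 4; memcpy(&rule->salt, ...) */
	sa->auth_key_len = (uint16_t)(len - 4);
	memcpy(&sa->salt, &sa->auth_key[sa->auth_key_len], 4);
}

int
main(void)
{
	/* 16 bytes of AES key followed by 4 bytes of salt */
	static const uint8_t cfg_key[20] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		0xde, 0xad, 0xbe, 0xef
	};
	struct sa_fields sa;

	split_gmac_key(&sa, cfg_key, sizeof(cfg_key));
	/* the salt is copied as raw bytes, so the printed value is endian-dependent */
	printf("auth_key_len=%u salt=0x%08x\n",
		(unsigned)sa.auth_key_len, (unsigned)sa.salt);
	return 0;
}

This is also why key_len is 20 in the auth_algos[] table while rule->auth_key_len ends up as 16, and why AES-GMAC ends up as a single auth xform (with xforms->next = NULL) rather than the usual cipher+auth chain in sa_add_rules().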