#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
+#include <rte_udp.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
-#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
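+/* True when the PMD did not apply security offload to the received mbuf */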
+#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
struct supported_cipher_algo {
const char *keyword;
struct supported_auth_algo {
const char *keyword;
enum rte_crypto_auth_algorithm algo;
+ uint16_t iv_len;
uint16_t digest_len;
uint16_t key_len;
uint8_t key_not_req;
.block_size = 4,
.key_len = 20
},
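+ /* For CTR, key_len includes a 4-byte nonce salt appended to the cipher key */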
+ {
+ .keyword = "aes-192-ctr",
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .iv_len = 16,
+ .block_size = 16,
+ .key_len = 28
+ },
+ {
+ .keyword = "aes-256-ctr",
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .iv_len = 16,
+ .block_size = 16,
+ .key_len = 36
+ },
{
.keyword = "3des-cbc",
.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.digest_len = 16,
.key_len = 32
+ },
+ {
+ .keyword = "sha384-hmac",
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .digest_len = 24,
+ .key_len = 48
+ },
+ {
+ .keyword = "sha512-hmac",
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .digest_len = 32,
+ .key_len = 64
+ },
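+ /* GMAC: 16-byte key plus 4-byte salt; the 8-byte per-packet IV completes the nonce */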
+ {
+ .keyword = "aes-gmac",
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .iv_len = 8,
+ .digest_len = 16,
+ .key_len = 20
+ },
+ {
+ .keyword = "aes-xcbc-mac-96",
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .digest_len = 12,
+ .key_len = 16
}
};
.key_len = 36,
.digest_len = 16,
.aad_len = 8,
+ },
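+ /* As with GCM, AEAD key_len counts the 4-byte salt appended to the key */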
+ {
+ .keyword = "aes-128-ccm",
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .iv_len = 8,
+ .block_size = 4,
+ .key_len = 20,
+ .digest_len = 16,
+ .aad_len = 8,
+ },
+ {
+ .keyword = "aes-192-ccm",
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .iv_len = 8,
+ .block_size = 4,
+ .key_len = 28,
+ .digest_len = 16,
+ .aad_len = 8,
+ },
+ {
+ .keyword = "aes-256-ccm",
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .iv_len = 8,
+ .block_size = 4,
+ .key_len = 36,
+ .digest_len = 16,
+ .aad_len = 8,
+ },
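+ /* ChaCha20-Poly1305: 32-byte key plus 4-byte salt */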
+ {
+ .keyword = "chacha20-poly1305",
+ .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+ .iv_len = 12,
+ .block_size = 64,
+ .key_len = 36,
+ .digest_len = 16,
+ .aad_len = 8,
}
};
#define SA_INIT_NB 128
+static uint32_t nb_crypto_sessions;
struct ipsec_sa *sa_out;
uint32_t nb_sa_out;
static uint32_t sa_out_sz;
uint32_t portid_p = 0;
uint32_t fallback_p = 0;
int16_t status_p = 0;
+ uint16_t udp_encap_p = 0;
if (strcmp(tokens[0], "in") == 0) {
ri = &nb_sa_in;
return;
if (atoi(tokens[1]) == INVALID_SPI)
return;
+ rule->flags = 0;
rule->spi = atoi(tokens[1]);
rule->portid = UINT16_MAX;
ips = ipsec_get_primary_session(rule);
if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
sa_cnt->nb_v4++;
- rule->flags = IP4_TUNNEL;
+ rule->flags |= IP4_TUNNEL;
} else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
sa_cnt->nb_v6++;
- rule->flags = IP6_TUNNEL;
+ rule->flags |= IP6_TUNNEL;
} else if (strcmp(tokens[ti], "transport") == 0) {
sa_cnt->nb_v4++;
sa_cnt->nb_v6++;
- rule->flags = TRANSPORT;
+ rule->flags |= TRANSPORT;
} else {
APP_CHECK(0, status, "unrecognized "
"input \"%s\"", tokens[ti]);
continue;
}
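+ /* per-SA telemetry; the SA is registered via rte_ipsec_telemetry_sa_add() later */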
+ if (strcmp(tokens[ti], "telemetry") == 0) {
+ rule->flags |= SA_TELEMETRY_ENABLE;
+ continue;
+ }
+
if (strcmp(tokens[ti], "cipher_algo") == 0) {
const struct supported_cipher_algo *algo;
uint32_t key_len;
if (status->status < 0)
return;
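+ /* GMAC keys carry a 4-byte salt at the end, like the AEAD algorithms */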
+ if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ key_len -= 4;
+ rule->auth_key_len = key_len;
+ rule->iv_len = algo->iv_len;
+ memcpy(&rule->salt,
+ &rule->auth_key[key_len], 4);
+ }
+
auth_algo_p = 1;
continue;
}
continue;
}
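+ /* 'mss <n>': TCP MSS, used to enable TSO on outbound inline paths */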
+ if (strcmp(tokens[ti], "mss") == 0) {
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+ rule->mss = atoi(tokens[ti]);
+ if (status->status < 0)
+ return;
+ continue;
+ }
+
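+ /* 'esn <n>': initial extended sequence number for the SA */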
+ if (strcmp(tokens[ti], "esn") == 0) {
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+ rule->esn = atoll(tokens[ti]);
+ if (status->status < 0)
+ return;
+ continue;
+ }
+
if (strcmp(tokens[ti], "fallback") == 0) {
struct rte_ipsec_session *fb;
}
rule->fallback_sessions = 1;
+ nb_crypto_sessions++;
fallback_p = 1;
continue;
}
}
continue;
}
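+ /* NAT-T style UDP encapsulation of ESP (RFC 3948) */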
+ if (strcmp(tokens[ti], "udp-encap") == 0) {
+ switch (ips->type) {
+ case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
+ status);
+ if (status->status < 0)
+ return;
+ rule->udp_encap = 1;
+ app_sa_prm.udp_encap = 1;
+ udp_encap_p = 1;
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ rule->udp_encap = 1;
+ rule->udp.sport = 0;
+ rule->udp.dport = 4500;
+ break;
+ default:
+ APP_CHECK(0, status,
+ "UDP encapsulation not supported for "
+ "security session type %d",
+ ips->type);
+ return;
+ }
+ continue;
+ }
+
- /* unrecognizeable input */
+ /* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
return;
ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
}
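+ /* note which session action types are in use; worker selection depends on these flags */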
+ if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ wrkr_flags |= INL_CR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ wrkr_flags |= INL_PR_F;
+ else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+ wrkr_flags |= LA_PR_F;
+ else
+ wrkr_flags |= LA_ANY_F;
+
+ nb_crypto_sessions++;
*ri = *ri + 1;
}
}
printf("mode:");
+ if (sa->udp_encap)
+ printf("UDP encapsulated ");
switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
}
static int
-check_eth_dev_caps(uint16_t portid, uint32_t inbound)
+check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
{
struct rte_eth_dev_info dev_info;
int retval;
if (inbound) {
if ((dev_info.rx_offload_capa &
- DEV_RX_OFFLOAD_SECURITY) == 0) {
+ RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
RTE_LOG(WARNING, PORT,
"hardware RX IPSec offload is not supported\n");
return -EINVAL;
} else { /* outbound */
if ((dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_SECURITY) == 0) {
+ RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
RTE_LOG(WARNING, PORT,
"hardware TX IPSec offload is not supported\n");
return -EINVAL;
}
+ if (tso && (dev_info.tx_offload_capa &
+ RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
+ RTE_LOG(WARNING, PORT,
+ "hardware TCP TSO offload is not supported\n");
+ return -EINVAL;
+ }
}
return 0;
}
if (rc4 >= 0) {
if (rc6 >= 0) {
RTE_LOG(ERR, IPSEC,
- "%s: SPI %u used simultaeously by "
+ "%s: SPI %u used simultaneously by "
"IPv4(%d) and IPv6 (%d) SP rules\n",
__func__, spi, rc4, rc6);
return -EINVAL;
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
uint32_t nb_entries, uint32_t inbound,
- struct socket_ctx *skt_ctx)
+ struct socket_ctx *skt_ctx,
+ struct ipsec_ctx *ips_ctx[])
{
struct ipsec_sa *sa;
uint32_t i, idx;
if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
- if (check_eth_dev_caps(sa->portid, inbound))
+ if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
return -EINVAL;
}
break;
}
- if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
- iv_length = 12;
+
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM ||
+ sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
+ sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
+
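+ /* CCM uses an 11-byte nonce; GCM and ChaCha20-Poly1305 use 12 bytes */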
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+ iv_length = 11;
+ else
+ iv_length = 12;
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
- iv_length = sa->iv_len;
- break;
case RTE_CRYPTO_CIPHER_AES_CTR:
- iv_length = 16;
+ iv_length = sa->iv_len;
break;
default:
RTE_LOG(ERR, IPSEC_ESP,
return -EINVAL;
}
+ /* AES_GMAC uses salt like AEAD algorithms */
+ if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ iv_length = 12;
+
if (inbound) {
sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
sa->digest_len;
sa_ctx->xf[idx].a.auth.op =
RTE_CRYPTO_AUTH_OP_VERIFY;
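+ /* auth IV (only meaningful for GMAC here) sits at IV_OFFSET, like cipher IVs */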
+ sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.auth.iv.length = iv_length;
+
} else { /* outbound */
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
sa->digest_len;
sa_ctx->xf[idx].b.auth.op =
RTE_CRYPTO_AUTH_OP_GENERATE;
+ sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].b.auth.iv.length = iv_length;
+
}
- sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
- sa_ctx->xf[idx].b.next = NULL;
- sa->xforms = &sa_ctx->xf[idx].a;
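+ /* GMAC is authentication-only: a single xform, with no cipher stage chained */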
+ if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ sa->xforms = inbound ?
+ &sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
+ sa->xforms->next = NULL;
+
+ } else {
+ sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
+ sa_ctx->xf[idx].b.next = NULL;
+ sa->xforms = &sa_ctx->xf[idx].a;
+ }
}
if (ips->type ==
"create_inline_session() failed\n");
return -EINVAL;
}
+ } else {
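+ /* lookaside sessions are created here at SA-add time, rather than lazily */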
+ rc = create_lookaside_session(ips_ctx, skt_ctx, sa, ips);
+ if (rc != 0) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "create_lookaside_session() failed\n");
+ return -EINVAL;
+ }
}
if (sa->fdir_flag && inbound) {
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries, struct socket_ctx *skt_ctx)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx,
+ struct ipsec_ctx *ips_ctx[])
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries, struct socket_ctx *skt_ctx)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx,
+ struct ipsec_ctx *ips_ctx[])
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx);
}
/*
prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
+ prm->ipsec_xform.options.udp_encap = ss->udp_encap;
prm->ipsec_xform.options.ecn = 1;
prm->ipsec_xform.options.copy_dscp = 1;
ss->sa = sa;
- if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
- ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
- if (ss->security.ses != NULL) {
- rc = rte_ipsec_session_prepare(ss);
- if (rc != 0)
- memset(ss, 0, sizeof(*ss));
- }
- }
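+ /* sessions now exist for all action types, so prepare unconditionally */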
+ rc = rte_ipsec_session_prepare(ss);
+ if (rc != 0)
+ memset(ss, 0, sizeof(*ss));
return rc;
}
if (rc < 0)
return rc;
+ if (lsa->flags & SA_TELEMETRY_ENABLE)
+ rte_ipsec_telemetry_sa_add(sa);
+
/* init primary processing session */
ips = ipsec_get_primary_session(lsa);
rc = fill_ipsec_session(ips, sa);
}
/*
- * Allocate space and init rte_ipsec_sa strcutures,
+ * Allocate space and init rte_ipsec_sa structures,
* one per session.
*/
static int
}
void
-sa_init(struct socket_ctx *ctx, int32_t socket_id)
+sa_init(struct socket_ctx *ctx, int32_t socket_id,
+ struct lcore_conf *lcore_conf)
{
int32_t rc;
const char *name;
+ uint32_t lcore_id;
+ struct ipsec_ctx *ipsec_ctx[RTE_MAX_LCORE];
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
&sa_in_cnt);
if (rc != 0)
rte_exit(EXIT_FAILURE, "failed to init SAD\n");
-
- sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
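+ /* hand each lcore's inbound context to sa_add_rules for session creation */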
+ RTE_LCORE_FOREACH(lcore_id)
+ ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
+ sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
+ RTE_LCORE_FOREACH(lcore_id)
+ ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound;
+ sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
struct ipsec_sa *rule;
uint32_t idx_sa;
enum rte_security_session_action_type rule_type;
+ struct rte_eth_dev_info dev_info;
+ int ret;
*rx_offloads = 0;
*tx_offloads = 0;
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ port_id, strerror(-ret));
+
/* Check for inbound rules that use offloads and use this port */
for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
rule = &sa_in[idx_sa];
rule_type ==
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
&& rule->portid == port_id)
- *rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
+ *rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
}
/* Check for outbound rules that use offloads and use this port */
for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
rule = &sa_out[idx_sa];
rule_type = ipsec_get_action_type(rule);
- if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
- rule_type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
- && rule->portid == port_id)
- *tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
+ switch (rule_type) {
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ /* Checksum offload is not needed for inline protocol:
+ * all outbound IPsec processing is handled implicitly,
+ * and non-IPsec packets do not need IPv4 checksum
+ * offload.
+ */
+ if (rule->portid == port_id) {
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ if (rule->mss)
+ *tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
+ }
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ if (rule->portid == port_id) {
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
+ if (rule->mss)
+ *tx_offloads |=
+ RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+ }
+ break;
+ default:
+ /* Enable IPv4 checksum offload when at least one
+ * lookaside SA is present.
+ */
+ if (dev_info.tx_offload_capa &
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+ break;
+ }
}
return 0;
}
qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
}
+
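+/* Crypto sessions required, counted during SA parsing
+ * (one per SA plus one per configured fallback session).
+ */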
+uint32_t
+get_nb_crypto_sessions(void)
+{
+ return nb_crypto_sessions;
+}