#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
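+/* true when the HW did not apply inline security processing to this mbuf */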
+#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
+
struct supported_auth_algo {
const char *keyword;
enum rte_crypto_auth_algorithm algo;
{
.keyword = "sha256-hmac",
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
- .digest_len = 12,
+ .digest_len = 16, /* truncated to 128 bits per RFC 4868 (HMAC-SHA-256-128) */
.key_len = 32
}
};
struct parse_status *status)
{
struct ipsec_sa *rule = NULL;
+ struct rte_ipsec_session *ips;
uint32_t ti; /*token index*/
uint32_t *ri /*rule index*/;
uint32_t cipher_algo_p = 0;
uint32_t mode_p = 0;
uint32_t type_p = 0;
uint32_t portid_p = 0;
+ uint32_t fallback_p = 0;
if (strcmp(tokens[0], "in") == 0) {
ri = &nb_sa_in;
if (status->status < 0)
return;
rule = &sa_in[*ri];
+ rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
} else {
ri = &nb_sa_out;
if (status->status < 0)
return;
rule = &sa_out[*ri];
+ rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
}
/* spi number */
if (atoi(tokens[1]) == INVALID_SPI)
return;
rule->spi = atoi(tokens[1]);
+ ips = ipsec_get_primary_session(rule);
for (ti = 2; ti < n_tokens; ti++) {
if (strcmp(tokens[ti], "mode") == 0) {
if (status->status < 0)
return;
if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
- rule->type =
+ ips->type =
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
else if (strcmp(tokens[ti],
"inline-protocol-offload") == 0)
- rule->type =
+ ips->type =
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
else if (strcmp(tokens[ti],
"lookaside-protocol-offload") == 0)
- rule->type =
+ ips->type =
RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
else if (strcmp(tokens[ti], "no-offload") == 0)
- rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
+ ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
else {
APP_CHECK(0, status, "Invalid input \"%s\"",
tokens[ti]);
return;
}
+ if (strcmp(tokens[ti], "fallback") == 0) {
+ struct rte_ipsec_session *fb;
+
+ APP_CHECK(app_sa_prm.enable, status, "Fallback session "
+ "not allowed for legacy mode.");
+ if (status->status < 0)
+ return;
+ APP_CHECK(ips->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
+ "Fallback session allowed if primary session "
+ "is of type inline-crypto-offload only.");
+ if (status->status < 0)
+ return;
+ APP_CHECK(rule->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
+ "Fallback session not allowed for egress "
+ "rule");
+ if (status->status < 0)
+ return;
+ APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
+ if (status->status < 0)
+ return;
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+ fb = ipsec_get_fallback_session(rule);
+ if (strcmp(tokens[ti], "lookaside-none") == 0) {
+ fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
+ } else {
+ APP_CHECK(0, status, "unrecognized fallback "
+ "type %s.", tokens[ti]);
+ return;
+ }
+
+ rule->fallback_sessions = 1;
+ fallback_p = 1;
+ continue;
+ }
+
/* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
if (status->status < 0)
return;
- if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
+ if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
printf("Missing portid option, falling back to non-offload\n");
if (!type_p || !portid_p) {
- rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
+ ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
rule->portid = -1;
}
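For reference, a rule exercising the new keyword might look like the following line in the SA configuration file (SPI, key, and addresses are illustrative). Note that "fallback" is accepted only for ingress rules whose primary session type is inline-crypto-offload, and only with the librte_ipsec code path enabled (app_sa_prm.enable, the -l option):

```
sa in 117 aead_algo aes-128-gcm aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 type inline-crypto-offload fallback lookaside-none port_id 0
```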
{
uint32_t i;
uint8_t a, b, c, d;
+ const struct rte_ipsec_session *ips;
+ const struct rte_ipsec_session *fallback_ips;
printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
printf("Transport ");
break;
}
+
+ ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
printf(" type:");
- switch (sa->type) {
+ switch (ips->type) {
case RTE_SECURITY_ACTION_TYPE_NONE:
printf("no-offload ");
break;
printf("lookaside-protocol-offload ");
break;
}
+
+ fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
+ if (sa->fallback_sessions > 0) {
+ printf("inline fallback: ");
+ if (fallback_ips->type == RTE_SECURITY_ACTION_TYPE_NONE)
+ printf("lookaside-none");
+ else
+ printf("invalid");
+ }
printf("\n");
}
RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
if (mz == NULL) {
printf("Failed to allocate SA DB memory\n");
- rte_errno = -ENOMEM;
+ rte_errno = ENOMEM;
return NULL;
}
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
struct rte_eth_dev_info dev_info;
+ int retval;
- rte_eth_dev_info_get(portid, &dev_info);
+ retval = rte_eth_dev_info_get(portid, &dev_info);
+ if (retval != 0) {
+ RTE_LOG(ERR, IPSEC,
+ "Error during getting device (port %u) info: %s\n",
+ portid, strerror(-retval));
+
+ return retval;
+ }
if (inbound) {
if ((dev_info.rx_offload_capa &
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries, uint32_t inbound)
+ uint32_t nb_entries, uint32_t inbound,
+ struct socket_ctx *skt_ctx)
{
struct ipsec_sa *sa;
uint32_t i, idx;
uint16_t iv_length, aad_length;
int inline_status;
+ int32_t rc;
+ struct rte_ipsec_session *ips;
/* for ESN upper 32 bits of SQN also need to be part of AAD */
aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
}
*sa = entries[i];
sa->seq = 0;
+ ips = ipsec_get_primary_session(sa);
- if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
- sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
if (check_eth_dev_caps(sa->portid, inbound))
return -EINVAL;
}
- sa->direction = (inbound == 1) ?
- RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
- RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
break;
case TRANSPORT:
- if (sa->type ==
+ if (ips->type ==
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
inline_status =
sa_add_address_inline_crypto(sa);
}
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
- iv_length = 16;
+ iv_length = 12; /* 4-byte salt + 8-byte IV: the RFC 4106 AES-GCM nonce */
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
sa->xforms = &sa_ctx->xf[idx].a;
+ ips = ipsec_get_primary_session(sa);
+ if (ips->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ ips->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ rc = create_inline_session(skt_ctx, sa, ips);
+ if (rc != 0) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "create_inline_session() failed\n");
+ return -EINVAL;
+ }
+ }
print_one_sa_rule(sa, inbound);
} else {
switch (sa->cipher_algo) {
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 0);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- uint32_t nb_entries)
+ uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
- return sa_add_rules(sa_ctx, entries, nb_entries, 1);
+ return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}
/*
prm->flags = app_prm->flags;
prm->ipsec_xform.options.esn = app_prm->enable_esn;
- prm->replay_win_sz = app_prm->window_size;
+ prm->ipsec_xform.replay_win_sz = app_prm->window_size;
}
static int
return 0;
}
-static void
-fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
- const struct ipsec_sa *lsa)
+static int
+fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
{
+ int32_t rc = 0;
+
ss->sa = sa;
- ss->type = lsa->type;
- /* setup crypto section */
- if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
- ss->crypto.ses = lsa->crypto_session;
- /* setup session action type */
- } else {
- ss->security.ses = lsa->sec_session;
- ss->security.ctx = lsa->security_ctx;
- ss->security.ol_flags = lsa->ol_flags;
+ if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+ if (ss->security.ses != NULL) {
+ rc = rte_ipsec_session_prepare(ss);
+ if (rc != 0)
+ memset(ss, 0, sizeof(*ss));
+ }
}
+
+ return rc;
}
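A prepared session is then usable directly on the datapath. A minimal sketch, assuming a hypothetical wrapper (not part of this patch) around the librte_ipsec calls that fill_ipsec_session() relies on:

```c
#include <rte_ipsec.h>

/* Hypothetical helper: bind an SA to a session and process one burst. */
static int
session_run(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
		struct rte_mbuf *mb[], uint16_t num)
{
	int rc;

	ss->sa = sa;
	/* validates the sa/security-session binding, as above */
	rc = rte_ipsec_session_prepare(ss);
	if (rc != 0)
		return rc;

	/* returns the number of packets successfully processed */
	return rte_ipsec_pkt_process(ss, mb, num);
}
```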
/*
{
int rc;
struct rte_ipsec_sa_prm prm;
+ struct rte_ipsec_session *ips;
struct rte_ipv4_hdr v4 = {
.version_ihl = IPVERSION << 4 |
sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
if (rc < 0)
return rc;
- fill_ipsec_session(&lsa->ips, sa, lsa);
- return 0;
+ /* init primary processing session */
+ ips = ipsec_get_primary_session(lsa);
+ rc = fill_ipsec_session(ips, sa);
+ if (rc != 0)
+ return rc;
+
+ /* init inline fallback processing session */
+ if (lsa->fallback_sessions == 1)
+ rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
+
+ return rc;
}
/*
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
+ sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
"context %s in socket %d\n", rte_errno,
name, socket_id);
- sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
+ sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
static inline void
single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
- struct ipsec_sa **sa_ret)
+ void **sa_ret)
{
struct rte_esp_hdr *esp;
struct ip *ip;
uint32_t *src4_addr;
uint8_t *src6_addr;
struct ipsec_sa *sa;
+ void *result_sa;
*sa_ret = NULL;
if (esp->spi == INVALID_SPI)
return;
- sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
+ result_sa = sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
if (rte_be_to_cpu_32(esp->spi) != sa->spi)
return;
+ /*
+ * Mark the need for an inline offload fallback in the LSB of the SA
+ * pointer. Thanks to the packet grouping mechanism that ipsec_process
+ * uses, packets marked for fallback processing will form a separate
+ * group.
+ *
+ * Because the tagged SA pointer is not safe to dereference, it is cast
+ * to a generic pointer to prevent unintentional use. Use
+ * ipsec_mask_saptr to recover a valid struct pointer.
+ */
+ if (MBUF_NO_SEC_OFFLOAD(pkt) && sa->fallback_sessions > 0) {
+ uintptr_t intsa = (uintptr_t)sa;
+ intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
+ result_sa = (void *)intsa;
+ }
+
switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
if ((ip->ip_v == IPVERSION) &&
(sa->src.ip.ip4 == *src4_addr) &&
(sa->dst.ip.ip4 == *(src4_addr + 1)))
- *sa_ret = sa;
+ *sa_ret = result_sa;
break;
case IP6_TUNNEL:
src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
if ((ip->ip_v == IP6_VERSION) &&
!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
- *sa_ret = sa;
+ *sa_ret = result_sa;
break;
case TRANSPORT:
- *sa_ret = sa;
+ *sa_ret = result_sa;
}
}
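Consumers of the tagged pointer must clear the flag bit before dereferencing it. A minimal sketch of such a helper, mirroring what ipsec_mask_saptr in ipsec.h is expected to do (the flag is assumed to occupy bit 0):

```c
#define IPSEC_SA_OFFLOAD_FALLBACK_FLAG (1)

/* Strip the fallback flag from the LSB and return a usable SA pointer. */
static inline struct ipsec_sa *
ipsec_mask_saptr(void *ptr)
{
	uintptr_t i = (uintptr_t)ptr;
	static const uintptr_t mask = IPSEC_SA_OFFLOAD_FALLBACK_FLAG;

	return (struct ipsec_sa *)(i & ~mask);
}
```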
void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
- struct ipsec_sa *sa[], uint16_t nb_pkts)
+ void *sa[], uint16_t nb_pkts)
{
uint32_t i;
void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
- struct ipsec_sa *sa[], uint16_t nb_pkts)
+ void *sa[], uint16_t nb_pkts)
{
uint32_t i;
{
struct ipsec_sa *rule;
uint32_t idx_sa;
+ enum rte_security_session_action_type rule_type;
*rx_offloads = 0;
*tx_offloads = 0;
/* Check for inbound rules that use offloads and use this port */
for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
rule = &sa_in[idx_sa];
- if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
- rule->type ==
+ rule_type = ipsec_get_action_type(rule);
+ if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ rule_type ==
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
&& rule->portid == port_id)
*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
/* Check for outbound rules that use offloads and use this port */
for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
rule = &sa_out[idx_sa];
- if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
- rule->type ==
+ rule_type = ipsec_get_action_type(rule);
+ if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ rule_type ==
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
&& rule->portid == port_id)
*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
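For context, a sketch of how the flags computed here would typically be folded into port setup; configure_port_security, nb_rxq, and nb_txq are placeholder names, not part of this patch:

```c
#include <rte_ethdev.h>
#include <rte_debug.h>

static void
configure_port_security(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	uint64_t rx_offloads = 0, tx_offloads = 0;
	struct rte_eth_conf port_conf = { 0 };

	/* fold SA-driven security requirements into the port offloads */
	sa_check_offloads(port_id, &rx_offloads, &tx_offloads);
	port_conf.rxmode.offloads |= rx_offloads;
	port_conf.txmode.offloads |= tx_offloads;

	if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf) < 0)
		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
}
```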