/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
*/
/*
#define SA_INIT_NB 128
-static struct ipsec_sa *sa_out;
+struct ipsec_sa *sa_out;
+uint32_t nb_sa_out;
static uint32_t sa_out_sz;
-static uint32_t nb_sa_out;
static struct ipsec_sa_cnt sa_out_cnt;
-static struct ipsec_sa *sa_in;
+struct ipsec_sa *sa_in;
+uint32_t nb_sa_in;
static uint32_t sa_in_sz;
-static uint32_t nb_sa_in;
static struct ipsec_sa_cnt sa_in_cnt;
static const struct supported_cipher_algo *
RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
else if (strcmp(tokens[ti], "no-offload") == 0)
ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
+ else if (strcmp(tokens[ti], "cpu-crypto") == 0)
+ ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
else {
APP_CHECK(0, status, "Invalid input \"%s\"",
tokens[ti]);
if (status->status < 0)
return;
- if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
+ if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
+ RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
printf("Missing portid option, falling back to non-offload\n");
- if (!type_p || !portid_p) {
+ if (!type_p || (!portid_p && ips->type !=
+ RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
rule->portid = -1;
}
case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
printf("lookaside-protocol-offload ");
break;
+ case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+ printf("cpu-crypto-accelerated ");
+ break;
}
fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
if (fallback_ips != NULL && sa->fallback_sessions > 0) {
printf("inline fallback: ");
- if (fallback_ips->type == RTE_SECURITY_ACTION_TYPE_NONE)
+ switch (fallback_ips->type) {
+ case RTE_SECURITY_ACTION_TYPE_NONE:
printf("lookaside-none");
- else
+ break;
+ case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+ printf("cpu-crypto-accelerated");
+ break;
+ default:
printf("invalid");
+ break;
+ }
}
printf("\n");
}
-struct ipsec_xf {
- struct rte_crypto_sym_xform a;
- struct rte_crypto_sym_xform b;
-};
-
-struct sa_ctx {
- void *satbl; /* pointer to array of rte_ipsec_sa objects*/
- struct ipsec_sad sad;
- struct ipsec_xf *xf;
- uint32_t nb_sa;
- struct ipsec_sa sa[];
-};
-
static struct sa_ctx *
sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
{
return -EINVAL;
}
-
switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
}
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
- struct rte_ipsec_session *ips;
iv_length = 12;
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
sa->digest_len;
sa->xforms = &sa_ctx->xf[idx].a;
-
- ips = ipsec_get_primary_session(sa);
- if (ips->type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
- ips->type ==
- RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
- rc = create_inline_session(skt_ctx, sa, ips);
- if (rc != 0) {
- RTE_LOG(ERR, IPSEC_ESP,
- "create_inline_session() failed\n");
- return -EINVAL;
- }
- }
- print_one_sa_rule(sa, inbound);
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
sa_ctx->xf[idx].b.next = NULL;
sa->xforms = &sa_ctx->xf[idx].a;
+ }
- print_one_sa_rule(sa, inbound);
+ if (ips->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ ips->type ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ rc = create_inline_session(skt_ctx, sa, ips);
+ if (rc != 0) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "create_inline_session() failed\n");
+ return -EINVAL;
+ }
}
+
+ print_one_sa_rule(sa, inbound);
}
return 0;