Introduce librte_ipsec library.
The library builds on the existing DPDK cryptodev and security APIs
to provide the application with a transparent IPsec processing API.
This initial commit provides the base API to manage
IPsec Security Association (SA) objects.
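
To illustrate the intended flow, below is a minimal usage sketch.
The create_sa() helper is hypothetical: it assumes the caller has
already filled the SA parameters, and it needs rte_ipsec_sa.h and
rte_malloc.h:

    static struct rte_ipsec_sa *
    create_sa(const struct rte_ipsec_sa_prm *prm)
    {
        struct rte_ipsec_sa *sa;
        int sz;

        /* determine how much space an SA with these parameters needs */
        sz = rte_ipsec_sa_size(prm);
        if (sz < 0)
            return NULL;

        /* any properly aligned memory will do */
        sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
        if (sa == NULL)
            return NULL;

        if (rte_ipsec_sa_init(sa, prm, sz) < 0) {
            rte_free(sa);
            return NULL;
        }
        return sa;
    }

Releasing an SA is the reverse: rte_ipsec_sa_fini(sa) followed by
freeing the memory.
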
Signed-off-by: Mohammad Abdul Awal <mohammad.abdul.awal@intel.com>
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
F: lib/librte_gso/
F: doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+IPsec - EXPERIMENTAL
+M: Konstantin Ananyev <konstantin.ananyev@intel.com>
+T: git://dpdk.org/next/dpdk-next-crypto
+F: lib/librte_ipsec/
+M: Bernard Iremonger <bernard.iremonger@intel.com>
+F: test/test/test_ipsec.c
+
Flow Classify - EXPERIMENTAL
M: Bernard Iremonger <bernard.iremonger@intel.com>
F: lib/librte_flow_classify/
F: app/pdump/
F: doc/guides/tools/pdump.rst
-
Packet Framework
----------------
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
# allow load BPF from ELF files (requires libelf)
CONFIG_RTE_LIBRTE_BPF_ELF=n
+#
+# Compile librte_ipsec
+#
+CONFIG_RTE_LIBRTE_IPSEC=y
+
#
# Compile the test application
#
DEPDIRS-librte_gso += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_BPF) += librte_bpf
DEPDIRS-librte_bpf := librte_eal librte_mempool librte_mbuf librte_ethdev
+DIRS-$(CONFIG_RTE_LIBRTE_IPSEC) += librte_ipsec
+DEPDIRS-librte_ipsec := librte_eal librte_mbuf librte_cryptodev librte_security
DIRS-$(CONFIG_RTE_LIBRTE_TELEMETRY) += librte_telemetry
DEPDIRS-librte_telemetry := librte_eal librte_metrics librte_ethdev
--- /dev/null
+++ b/lib/librte_ipsec/Makefile
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_ipsec.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_net -lrte_cryptodev -lrte_security
+
+EXPORT_MAP := rte_ipsec_version.map
+
+LIBABIVER := 1
+
+# all sources are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += sa.c
+
+# install header files
+SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec_sa.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
--- /dev/null
+++ b/lib/librte_ipsec/ipsec_sqn.h
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IPSEC_SQN_H_
+#define _IPSEC_SQN_H_
+
+#define WINDOW_BUCKET_BITS 6 /* 64 bits (one uint64_t) per bucket */
+#define WINDOW_BUCKET_SIZE (1 << WINDOW_BUCKET_BITS)
+#define WINDOW_BIT_LOC_MASK (WINDOW_BUCKET_SIZE - 1)
+
+/* minimum number of buckets, must be a power of 2 */
+#define WINDOW_BUCKET_MIN 2
+#define WINDOW_BUCKET_MAX (INT16_MAX + 1)
+
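+/*
+ * an SA uses 64-bit (extended) sequence numbers
+ * when its sqn mask covers all 64 bits
+ */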
+#define IS_ESN(sa) ((sa)->sqn_mask == UINT64_MAX)
+
+/*
+ * For a given replay window size, calculate the required number of
+ * 64-bit buckets (e.g. wsz = 128 needs two buckets, which also
+ * satisfies WINDOW_BUCKET_MIN).
+ */
+static uint32_t
+replay_num_bucket(uint32_t wsz)
+{
+ uint32_t nb;
+
+ nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
+ WINDOW_BUCKET_SIZE);
+ nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);
+
+ return nb;
+}
+
+/**
+ * Based on the number of buckets, calculate the size required for the
+ * structure that holds replay window and sequence number (RSN) information.
+ */
+static size_t
+rsn_size(uint32_t nb_bucket)
+{
+ size_t sz;
+ struct replay_sqn *rsn;
+
+ sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
+ sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
+ return sz;
+}
+
+#endif /* _IPSEC_SQN_H_ */
--- /dev/null
+++ b/lib/librte_ipsec/meson.build
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+
+sources = files('sa.c')
+
+install_headers = files('rte_ipsec_sa.h')
+
+deps += ['mbuf', 'net', 'cryptodev', 'security']
--- /dev/null
+++ b/lib/librte_ipsec/rte_ipsec_sa.h
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_IPSEC_SA_H_
+#define _RTE_IPSEC_SA_H_
+
+/**
+ * @file rte_ipsec_sa.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Defines API to manage IPsec Security Association (SA) objects.
+ */
+
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_security.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * An opaque structure to represent Security Association (SA).
+ */
+struct rte_ipsec_sa;
+
+/**
+ * SA initialization parameters.
+ */
+struct rte_ipsec_sa_prm {
+
+ uint64_t userdata; /**< provided and interpreted by user */
+ uint64_t flags; /**< see RTE_IPSEC_SAFLAG_* below */
+ /** ipsec configuration */
+ struct rte_security_ipsec_xform ipsec_xform;
+ /** crypto session configuration */
+ struct rte_crypto_sym_xform *crypto_xform;
+ union {
+ struct {
+ uint8_t hdr_len; /**< tunnel header len */
+ uint8_t hdr_l3_off; /**< offset for IPv4/IPv6 header */
+ uint8_t next_proto; /**< next header protocol */
+ const void *hdr; /**< tunnel header template */
+ } tun; /**< tunnel mode related parameters */
+ struct {
+ uint8_t proto; /**< next header protocol */
+ } trs; /**< transport mode related parameters */
+ };
+
+ /**
+	 * Window size to enable sequence replay attack handling.
+	 * Replay checking is disabled if the window size is 0.
+ */
+ uint32_t replay_win_sz;
+};
+
+/**
+ * SA type is a 64-bit value that contains the following information:
+ * - IP version (IPv4/IPv6)
+ * - IPsec proto (ESP/AH)
+ * - inbound/outbound
+ * - mode (TRANSPORT/TUNNEL)
+ * - for TUNNEL mode, the outer IP version (IPv4/IPv6)
+ * ...
+ */
+
+enum {
+ RTE_SATP_LOG2_IPV,
+ RTE_SATP_LOG2_PROTO,
+ RTE_SATP_LOG2_DIR,
+ RTE_SATP_LOG2_MODE,
+ RTE_SATP_LOG2_NUM
+};
+
+#define RTE_IPSEC_SATP_IPV_MASK (1ULL << RTE_SATP_LOG2_IPV)
+#define RTE_IPSEC_SATP_IPV4 (0ULL << RTE_SATP_LOG2_IPV)
+#define RTE_IPSEC_SATP_IPV6 (1ULL << RTE_SATP_LOG2_IPV)
+
+#define RTE_IPSEC_SATP_PROTO_MASK (1ULL << RTE_SATP_LOG2_PROTO)
+#define RTE_IPSEC_SATP_PROTO_AH (0ULL << RTE_SATP_LOG2_PROTO)
+#define RTE_IPSEC_SATP_PROTO_ESP (1ULL << RTE_SATP_LOG2_PROTO)
+
+#define RTE_IPSEC_SATP_DIR_MASK (1ULL << RTE_SATP_LOG2_DIR)
+#define RTE_IPSEC_SATP_DIR_IB (0ULL << RTE_SATP_LOG2_DIR)
+#define RTE_IPSEC_SATP_DIR_OB (1ULL << RTE_SATP_LOG2_DIR)
+
+#define RTE_IPSEC_SATP_MODE_MASK (3ULL << RTE_SATP_LOG2_MODE)
+#define RTE_IPSEC_SATP_MODE_TRANS (0ULL << RTE_SATP_LOG2_MODE)
+#define RTE_IPSEC_SATP_MODE_TUNLV4 (1ULL << RTE_SATP_LOG2_MODE)
+#define RTE_IPSEC_SATP_MODE_TUNLV6 (2ULL << RTE_SATP_LOG2_MODE)
+
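+/*
+ * For example (illustrative only): an inbound ESP SA in IPv4
+ * tunnel mode carrying IPv4 inner packets has the type:
+ * RTE_IPSEC_SATP_IPV4 | RTE_IPSEC_SATP_PROTO_ESP |
+ * RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4
+ */
+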
+/**
+ * Get the type of the given SA.
+ * @param sa
+ *   Pointer to the SA object.
+ * @return
+ * SA type value.
+ */
+uint64_t __rte_experimental
+rte_ipsec_sa_type(const struct rte_ipsec_sa *sa);
+
+/**
+ * Calculate required SA size based on provided input parameters.
+ * @param prm
+ *   Parameters that will be used to initialise the SA object.
+ * @return
+ * - Actual size required for SA with given parameters.
+ * - -EINVAL if the parameters are invalid.
+ */
+int __rte_experimental
+rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm);
+
+/**
+ * Initialise the SA based on provided input parameters.
+ * @param sa
+ * SA object to initialise.
+ * @param prm
+ * Parameters used to initialise given SA object.
+ * @param size
+ *   Size of the provided buffer for the SA object.
+ * @return
+ * - Actual size of SA object if operation completed successfully.
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOSPC if the size of the provided buffer is not big enough.
+ */
+int __rte_experimental
+rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+ uint32_t size);
+
+/**
+ * Cleanup the given SA.
+ * @param sa
+ * Pointer to SA object to de-initialize.
+ */
+void __rte_experimental
+rte_ipsec_sa_fini(struct rte_ipsec_sa *sa);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IPSEC_SA_H_ */
--- /dev/null
+++ b/lib/librte_ipsec/rte_ipsec_version.map
+EXPERIMENTAL {
+ global:
+
+ rte_ipsec_sa_fini;
+ rte_ipsec_sa_init;
+ rte_ipsec_sa_size;
+ rte_ipsec_sa_type;
+
+ local: *;
+};
--- /dev/null
+++ b/lib/librte_ipsec/sa.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_ipsec_sa.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+
+/* some helper structures */
+struct crypto_xform {
+ struct rte_crypto_auth_xform *auth;
+ struct rte_crypto_cipher_xform *cipher;
+ struct rte_crypto_aead_xform *aead;
+};
+
+/*
+ * Helper routine that fills the internal crypto_xform structure.
+ */
+static int
+fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
+ const struct rte_ipsec_sa_prm *prm)
+{
+ struct rte_crypto_sym_xform *xf, *xfn;
+
+ memset(xform, 0, sizeof(*xform));
+
+ xf = prm->crypto_xform;
+ if (xf == NULL)
+ return -EINVAL;
+
+ xfn = xf->next;
+
+ /* for AEAD just one xform required */
+ if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xfn != NULL)
+ return -EINVAL;
+ xform->aead = &xf->aead;
+ /*
+ * CIPHER+AUTH xforms are expected in strict order,
+ * depending on SA direction:
+ * inbound: AUTH+CIPHER
+ * outbound: CIPHER+AUTH
+ */
+ } else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
+
+ /* wrong order or no cipher */
+ if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
+ xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return -EINVAL;
+
+ xform->auth = &xf->auth;
+ xform->cipher = &xfn->cipher;
+
+ } else {
+
+ /* wrong order or no auth */
+ if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
+ xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ return -EINVAL;
+
+ xform->cipher = &xf->cipher;
+ xform->auth = &xfn->auth;
+ }
+
+ return 0;
+}
+
+uint64_t __rte_experimental
+rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
+{
+ return sa->type;
+}
+
+static int32_t
+ipsec_sa_size(uint32_t wsz, uint64_t type, uint32_t *nb_bucket)
+{
+ uint32_t n, sz;
+
+ n = 0;
+ if (wsz != 0 && (type & RTE_IPSEC_SATP_DIR_MASK) ==
+ RTE_IPSEC_SATP_DIR_IB)
+ n = replay_num_bucket(wsz);
+
+ if (n > WINDOW_BUCKET_MAX)
+ return -EINVAL;
+
+ *nb_bucket = n;
+
+ sz = rsn_size(n);
+ sz += sizeof(struct rte_ipsec_sa);
+ return sz;
+}
+
+void __rte_experimental
+rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
+{
+ memset(sa, 0, sa->size);
+}
+
+static int
+fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
+{
+ uint64_t tp;
+
+ tp = 0;
+
+ if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+ tp |= RTE_IPSEC_SATP_PROTO_AH;
+ else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+ tp |= RTE_IPSEC_SATP_PROTO_ESP;
+ else
+ return -EINVAL;
+
+ if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+ tp |= RTE_IPSEC_SATP_DIR_OB;
+ else if (prm->ipsec_xform.direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+ tp |= RTE_IPSEC_SATP_DIR_IB;
+ else
+ return -EINVAL;
+
+ if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+ if (prm->ipsec_xform.tunnel.type ==
+ RTE_SECURITY_IPSEC_TUNNEL_IPV4)
+ tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
+ else if (prm->ipsec_xform.tunnel.type ==
+ RTE_SECURITY_IPSEC_TUNNEL_IPV6)
+ tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
+ else
+ return -EINVAL;
+
+ if (prm->tun.next_proto == IPPROTO_IPIP)
+ tp |= RTE_IPSEC_SATP_IPV4;
+ else if (prm->tun.next_proto == IPPROTO_IPV6)
+ tp |= RTE_IPSEC_SATP_IPV6;
+ else
+ return -EINVAL;
+ } else if (prm->ipsec_xform.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
+ tp |= RTE_IPSEC_SATP_MODE_TRANS;
+ if (prm->trs.proto == IPPROTO_IPIP)
+ tp |= RTE_IPSEC_SATP_IPV4;
+ else if (prm->trs.proto == IPPROTO_IPV6)
+ tp |= RTE_IPSEC_SATP_IPV6;
+ else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ *type = tp;
+ return 0;
+}
+
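+/*
+ * The esp_*_init() helpers below pre-compute per-SA constants,
+ * in particular the crypto operation offset/length template
+ * (sa->ctp). These are templates only: the data-path code that
+ * builds crypto ops is expected to adjust them per packet.
+ */
+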
+static void
+esp_inb_init(struct rte_ipsec_sa *sa)
+{
+	/* these params may differ when new algorithms are supported */
+ sa->ctp.auth.offset = 0;
+ sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
+ sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
+ sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
+}
+
+static void
+esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
+{
+ sa->proto = prm->tun.next_proto;
+ esp_inb_init(sa);
+}
+
+static void
+esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
+{
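+	/* RFC 4303: the first packet on an SA carries sequence number 1 */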
+ sa->sqn.outb = 1;
+
+	/* these params may differ when new algorithms are supported */
+ sa->ctp.auth.offset = hlen;
+ sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
+ if (sa->aad_len != 0) {
+ sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
+ sa->iv_len;
+ sa->ctp.cipher.length = 0;
+ } else {
+ sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
+ sa->ctp.cipher.length = sa->iv_len;
+ }
+}
+
+static void
+esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
+{
+ sa->proto = prm->tun.next_proto;
+ sa->hdr_len = prm->tun.hdr_len;
+ sa->hdr_l3_off = prm->tun.hdr_l3_off;
+ memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
+
+ esp_outb_init(sa, sa->hdr_len);
+}
+
+static int
+esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+ const struct crypto_xform *cxf)
+{
+ static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+ RTE_IPSEC_SATP_MODE_MASK;
+
+ if (cxf->aead != NULL) {
+		/* RFC 4106: AES-GCM is the only supported AEAD */
+ if (cxf->aead->algo != RTE_CRYPTO_AEAD_AES_GCM)
+ return -EINVAL;
+ sa->icv_len = cxf->aead->digest_length;
+ sa->iv_ofs = cxf->aead->iv.offset;
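+		/* RFC 4106: 8 bytes of IV are carried in each packet */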
+ sa->iv_len = sizeof(uint64_t);
+ sa->pad_align = IPSEC_PAD_AES_GCM;
+ } else {
+ sa->icv_len = cxf->auth->digest_length;
+ sa->iv_ofs = cxf->cipher->iv.offset;
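+		/*
+		 * with ESN the high-order 32 bits of the sqn take part
+		 * in the ICV computation (RFC 4303), reserve room for them
+		 */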
+ sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
+ if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_NULL) {
+ sa->pad_align = IPSEC_PAD_NULL;
+ sa->iv_len = 0;
+ } else if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+ sa->pad_align = IPSEC_PAD_AES_CBC;
+ sa->iv_len = IPSEC_MAX_IV_SIZE;
+ } else
+ return -EINVAL;
+ }
+
+ sa->udata = prm->userdata;
+ sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
+ sa->salt = prm->ipsec_xform.salt;
+
+ switch (sa->type & msk) {
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ esp_inb_tun_init(sa, prm);
+ break;
+ case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+ esp_inb_init(sa);
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+ esp_outb_tun_init(sa, prm);
+ break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+ esp_outb_init(sa, 0);
+ break;
+ }
+
+ return 0;
+}
+
+int __rte_experimental
+rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
+{
+ uint64_t type;
+ uint32_t nb;
+ int32_t rc;
+
+ if (prm == NULL)
+ return -EINVAL;
+
+ /* determine SA type */
+ rc = fill_sa_type(prm, &type);
+ if (rc != 0)
+ return rc;
+
+ /* determine required size */
+ return ipsec_sa_size(prm->replay_win_sz, type, &nb);
+}
+
+int __rte_experimental
+rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+ uint32_t size)
+{
+ int32_t rc, sz;
+ uint32_t nb;
+ uint64_t type;
+ struct crypto_xform cxf;
+
+ if (sa == NULL || prm == NULL)
+ return -EINVAL;
+
+ /* determine SA type */
+ rc = fill_sa_type(prm, &type);
+ if (rc != 0)
+ return rc;
+
+ /* determine required size */
+ sz = ipsec_sa_size(prm->replay_win_sz, type, &nb);
+ if (sz < 0)
+ return sz;
+ else if (size < (uint32_t)sz)
+ return -ENOSPC;
+
+	/* only ESP is supported right now */
+ if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+ return -EINVAL;
+
+ if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ prm->tun.hdr_len > sizeof(sa->hdr))
+ return -EINVAL;
+
+ rc = fill_crypto_xform(&cxf, type, prm);
+ if (rc != 0)
+ return rc;
+
+ /* initialize SA */
+
+ memset(sa, 0, sz);
+ sa->type = type;
+ sa->size = sz;
+
+ /* check for ESN flag */
+ sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
+ UINT32_MAX : UINT64_MAX;
+
+ rc = esp_sa_init(sa, prm, &cxf);
+	if (rc != 0) {
+		rte_ipsec_sa_fini(sa);
+		return rc;
+	}
+
+ /* fill replay window related fields */
+ if (nb != 0) {
+ sa->replay.win_sz = prm->replay_win_sz;
+ sa->replay.nb_bucket = nb;
+ sa->replay.bucket_index_mask = sa->replay.nb_bucket - 1;
+ sa->sqn.inb = (struct replay_sqn *)(sa + 1);
+ }
+
+ return sz;
+}
--- /dev/null
+++ b/lib/librte_ipsec/sa.h
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _SA_H_
+#define _SA_H_
+
+#define IPSEC_MAX_HDR_SIZE 64
+#define IPSEC_MAX_IV_SIZE 16
+#define IPSEC_MAX_IV_QWORD (IPSEC_MAX_IV_SIZE / sizeof(uint64_t))
+
+/*
+ * padding alignment for different algorithms:
+ * AES-CBC pads to its 16B block size, the rest use the minimal
+ * 4B alignment required by ESP.
+ */
+enum {
+ IPSEC_PAD_DEFAULT = 4,
+ IPSEC_PAD_AES_CBC = IPSEC_MAX_IV_SIZE,
+ IPSEC_PAD_AES_GCM = IPSEC_PAD_DEFAULT,
+ IPSEC_PAD_NULL = IPSEC_PAD_DEFAULT,
+};
+
+/* these definitions probably have to be in rte_crypto_sym.h */
+union sym_op_ofslen {
+ uint64_t raw;
+ struct {
+ uint32_t offset;
+ uint32_t length;
+ };
+};
+
+union sym_op_data {
+#ifdef __SIZEOF_INT128__
+ __uint128_t raw;
+#endif
+ struct {
+ uint8_t *va;
+ rte_iova_t pa;
+ };
+};
+
+struct replay_sqn {
+ uint64_t sqn;
+ __extension__ uint64_t window[0];
+};
+
+struct rte_ipsec_sa {
+ uint64_t type; /* type of given SA */
+ uint64_t udata; /* user defined */
+	uint32_t size; /* size of the given SA object */
+ uint32_t spi;
+ /* sqn calculations related */
+ uint64_t sqn_mask;
+ struct {
+ uint32_t win_sz;
+ uint16_t nb_bucket;
+ uint16_t bucket_index_mask;
+ } replay;
+ /* template for crypto op fields */
+ struct {
+ union sym_op_ofslen cipher;
+ union sym_op_ofslen auth;
+ } ctp;
+ uint32_t salt;
+ uint8_t proto; /* next proto */
+ uint8_t aad_len;
+ uint8_t hdr_len;
+ uint8_t hdr_l3_off;
+ uint8_t icv_len;
+ uint8_t sqh_len;
+ uint8_t iv_ofs; /* offset for algo-specific IV inside crypto op */
+ uint8_t iv_len;
+ uint8_t pad_align;
+
+ /* template for tunnel header */
+ uint8_t hdr[IPSEC_MAX_HDR_SIZE];
+
+	/*
+	 * sqn and replay window: outbound SAs keep a plain 64-bit
+	 * counter, inbound SAs point to the replay_sqn structure
+	 * placed right after the SA
+	 */
+ union {
+ uint64_t outb;
+ struct replay_sqn *inb;
+ } sqn;
+
+} __rte_cache_aligned;
+
+#endif /* _SA_H_ */
'kni', 'latencystats', 'lpm', 'member',
'power', 'pdump', 'rawdev',
'reorder', 'sched', 'security', 'vhost',
+	# ipsec lib depends on crypto and security
+ 'ipsec',
# add pkt framework libs which use other libs from above
'port', 'table', 'pipeline',
# flow_classify lib depends on pkt framework table lib
_LDLIBS-$(CONFIG_RTE_LIBRTE_BPF) += -lelf
endif
+_LDLIBS-$(CONFIG_RTE_LIBRTE_IPSEC) += -lrte_ipsec
+
_LDLIBS-y += --whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile