1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2020 Intel Corporation
8 #include <rte_rwlock.h>
10 #define IPSEC_MAX_HDR_SIZE 64 /* max length of prepended tunnel header template (hdr[] in SA) */
11 #define IPSEC_MAX_IV_SIZE 16 /* max IV length in bytes across supported algorithms */
12 #define IPSEC_MAX_IV_QWORD (IPSEC_MAX_IV_SIZE / sizeof(uint64_t)) /* max IV length in 64-bit words */
13 #define TUN_HDR_MSK (RTE_IPSEC_SATP_ECN_MASK | RTE_IPSEC_SATP_DSCP_MASK) /* SA-type bits (ECN|DSCP) relevant to tunnel header construction */
15 /* padding alignment for different algorithms */
17 IPSEC_PAD_DEFAULT = 4,
18 IPSEC_PAD_3DES_CBC = 8,
19 IPSEC_PAD_AES_CBC = IPSEC_MAX_IV_SIZE,
20 IPSEC_PAD_AES_CTR = IPSEC_PAD_DEFAULT,
21 IPSEC_PAD_AES_GCM = IPSEC_PAD_DEFAULT,
22 IPSEC_PAD_NULL = IPSEC_PAD_DEFAULT,
25 /* iv sizes for different algorithms */
27 IPSEC_IV_SIZE_DEFAULT = IPSEC_MAX_IV_SIZE,
28 IPSEC_AES_CTR_IV_SIZE = sizeof(uint64_t),
29 /* TripleDES supports IV size of 32bits or 64bits but the library
30 * only supports 64bits.
32 IPSEC_3DES_IV_SIZE = sizeof(uint64_t),
35 /* these definitions probably have to be in rte_crypto_sym.h */
45 #ifdef __SIZEOF_INT128__
54 #define REPLAY_SQN_NUM 2 /* replay_sqn copies kept per SA (size of rsn[] array) */
55 #define REPLAY_SQN_NEXT(n) ((n) ^ 1) /* toggle between the two copies: 0 <-> 1 */
60 __extension__ uint64_t window[0];
63 /* IPSEC SA supported algorithms */
75 uint64_t type; /* type of given SA */
76 uint64_t udata; /* user defined */
77 uint32_t size; /* size of given sa object */
79 /* sqn calculations related */
84 uint16_t bucket_index_mask;
86 /* template for crypto op fields */
88 union sym_op_ofslen cipher;
89 union sym_op_ofslen auth;
91 /* cpu-crypto offsets */
92 union rte_crypto_sym_ofs cofs;
93 /* tx_offload template for tunnel mbuf */
100 uint8_t proto; /* next proto */
106 uint8_t iv_ofs; /* offset for algo-specific IV inside crypto op */
111 /* template for tunnel header */
112 uint8_t hdr[IPSEC_MAX_HDR_SIZE];
115 * sqn and replay window
116 * In case of SA handled by multiple threads *sqn* cacheline
117 * could be shared by multiple cores.
118 * To minimise performance impact, we try to locate it in a separate
119 * place from other frequently accessed data.
124 uint32_t rdidx; /* read index */
125 uint32_t wridx; /* write index */
126 struct replay_sqn *rsn[REPLAY_SQN_NUM];
130 } __rte_cache_aligned;
133 ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
134 const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf);
136 /* inbound processing */
139 esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
140 struct rte_crypto_op *cop[], uint16_t num);
143 esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
144 struct rte_mbuf *mb[], uint16_t num);
147 inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
148 struct rte_mbuf *mb[], uint16_t num);
151 esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
152 struct rte_mbuf *mb[], uint16_t num);
155 inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
156 struct rte_mbuf *mb[], uint16_t num);
159 cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
160 struct rte_mbuf *mb[], uint16_t num);
162 /* outbound processing */
165 esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
166 struct rte_crypto_op *cop[], uint16_t num);
169 esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
170 struct rte_crypto_op *cop[], uint16_t num);
173 esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
177 pkt_flag_process(const struct rte_ipsec_session *ss,
178 struct rte_mbuf *mb[], uint16_t num);
181 inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
182 struct rte_mbuf *mb[], uint16_t num);
185 inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
186 struct rte_mbuf *mb[], uint16_t num);
189 inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
190 struct rte_mbuf *mb[], uint16_t num);
193 cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
194 struct rte_mbuf *mb[], uint16_t num);
196 cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
197 struct rte_mbuf *mb[], uint16_t num);