1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
/* each replay-window "bucket" is a single uint64_t, i.e. 2^6 = 64 bits */
8 #define WINDOW_BUCKET_BITS 6 /* uint64_t */
9 #define WINDOW_BUCKET_SIZE (1 << WINDOW_BUCKET_BITS)
/* mask to extract a bit position inside one bucket */
10 #define WINDOW_BIT_LOC_MASK (WINDOW_BUCKET_SIZE - 1)
/* minimum number of buckets, must be a power of 2 */
13 #define WINDOW_BUCKET_MIN 2
14 #define WINDOW_BUCKET_MAX (INT16_MAX + 1)
/* SA uses 64-bit Extended Sequence Numbers iff the full 64-bit mask is set */
16 #define IS_ESN(sa) ((sa)->sqn_mask == UINT64_MAX)
/* non-zero when outbound SQN updates must be atomic — presumably set for
 * SAs shared between threads; confirm against the SA type flags */
18 #define SQN_ATOMIC(sa) ((sa)->type & RTE_IPSEC_SATP_SQN_ATOM)
 * Get the SQN.hi32 bits; SQN is expected in network byte order.
23 static inline rte_be32_t
24 sqn_hi32(rte_be64_t sqn)
26 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
/* NOTE(review): the function body and the little-endian branch are elided
 * in this excerpt — confirm against the full source. */
 * Get the SQN.low32 bits; SQN is expected in network byte order.
36 static inline rte_be32_t
37 sqn_low32(rte_be64_t sqn)
39 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
/* NOTE(review): the function body and the little-endian branch are elided
 * in this excerpt — confirm against the full source. */
 * Get the SQN.low16 bits; SQN is expected in network byte order.
49 static inline rte_be16_t
50 sqn_low16(rte_be64_t sqn)
52 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
/* NOTE(review): the function body and the little-endian branch are elided
 * in this excerpt — confirm against the full source. */
 * According to RFC 4303 Appendix A2.1, determine the high-order bits of the
 * sequence number: given the last seen 64-bit sequence t, the 32 low bits
 * sqn from the packet, and the replay window size w, pick the 2^32
 * subspace that places sqn correctly relative to the window.
 * Uses 32-bit arithmetic internally, returns the full uint64_t sequence.
63 static inline uint64_t
64 reconstruct_esn(uint64_t t, uint32_t sqn, uint32_t w)
/* case A: window is within one sequence number subspace */
/* case B: window spans two sequence number subspaces */
/* NOTE(review): the branch bodies selecting 'th' (the chosen high half)
 * are elided in this excerpt — confirm against the full source. */
/* return the constructed sequence: chosen high half + packet's low 32 bits */
80 return (uint64_t)th << 32 | sqn;
 * Perform the inbound anti-replay check for a reconstructed sequence number.
 * struct rte_ipsec_sa contains the window and window related parameters,
 * such as the window size, bitmask, and the last acknowledged sequence number.
 * Window blocks ("buckets") are 64-bit unsigned integers, one bit per SQN.
93 esn_inb_check_sqn(const struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
/* replay protection not enabled for this SA */
99 if (sa->replay.win_sz == 0)
/* seq is larger than lastseq (new, never-seen sequence) */
/* seq is outside (older than) the window */
107 if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
/* seq is inside the window: locate its bit in the bucket array */
111 bit = sqn & WINDOW_BIT_LOC_MASK;
112 bucket = (sqn >> WINDOW_BUCKET_BITS) & sa->replay.bucket_index_mask;
/* bit already set: duplicate (replayed) packet */
115 if (rsn->window[bucket] & ((uint64_t)1 << bit))
/* NOTE(review): the return statements for each branch are elided in this
 * excerpt — confirm return-value conventions against the full source. */
 * For outbound SA perform the sequence number update: advance the SA's
 * outbound SQN by *num and clamp *num if the SQN space would overflow.
 * NOTE(review): the selection between the atomic and raw paths is elided
 * in this excerpt — presumably guarded by SQN_ATOMIC(sa); confirm.
124 static inline uint64_t
125 esn_outb_update_sqn(struct rte_ipsec_sa *sa, uint32_t *num)
/* atomic path: SQN shared between concurrent writers */
131 sqn = (uint64_t)rte_atomic64_add_return(&sa->sqn.outb.atom, n);
/* non-atomic path: single writer owns the SQN */
133 sqn = sa->sqn.outb.raw + n;
134 sa->sqn.outb.raw = sqn;
/* SQN space exhausted: reduce the number of packets allowed to proceed */
138 if (sqn > sa->sqn_mask) {
139 s = sqn - sa->sqn_mask;
140 *num = (s < n) ? n - s : 0;
 * For inbound SA perform the sequence number and replay window update:
 * reconstruct the full 64-bit ESN, slide the window forward when the new
 * sequence advances past the last seen one, and mark the packet's bit.
149 static inline int32_t
150 esn_inb_update_sqn(struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
153 uint32_t bit, bucket, last_bucket, new_bucket, diff, i;
/* replay protection not enabled for this SA */
156 if (sa->replay.win_sz == 0)
/* reconstruct the 64-bit sequence from the 32 bits seen in the packet */
161 sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
/* seq is outside the window */
164 if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
168 bucket = (sqn >> WINDOW_BUCKET_BITS);
/* seq advances past the last seen one: clear every bucket the window
 * slides over (at most nb_bucket of them) */
171 if (sqn > rsn->sqn) {
172 last_bucket = rsn->sqn >> WINDOW_BUCKET_BITS;
173 diff = bucket - last_bucket;
/* seq is way beyond the window: wipe the whole bucket array */
175 if (diff > sa->replay.nb_bucket)
176 diff = sa->replay.nb_bucket;
178 for (i = 0; i != diff; i++) {
179 new_bucket = (i + last_bucket + 1) &
180 sa->replay.bucket_index_mask;
181 rsn->window[new_bucket] = 0;
186 bucket &= sa->replay.bucket_index_mask;
/* NOTE(review): 'bit' is declared uint32_t above but is assigned a shifted
 * 64-bit mask here, so for bit positions >= 32 the mask truncates to 0 and
 * replay detection silently breaks for the upper half of each bucket.
 * Upstream DPDK stores the bit *index* (sqn & WINDOW_BIT_LOC_MASK) and
 * shifts ((uint64_t)1 << bit) at each point of use — confirm against the
 * full source and fix accordingly. */
187 bit = (uint64_t)1 << (sqn & WINDOW_BIT_LOC_MASK);
/* bit already set: duplicate (replayed) packet */
190 if (rsn->window[bucket] & bit)
/* record the packet's sequence number as seen */
193 rsn->window[bucket] |= bit;
198 * To achieve ability to do multiple readers single writer for
199 * SA replay window information and sequence number (RSN)
200 * basic RCU schema is used:
201 * SA have 2 copies of RSN (one for readers, another for writers).
202 * Each RSN contains a rwlock that has to be grabbed (for read/write)
203 * to avoid races between readers and writer.
204 * The writer is responsible for making a copy of the reader RSN, updating it
205 * and marking the newly updated RSN as the readers' one.
206 * That approach is intended to minimize contention and cache sharing
207 * between writer and readers.
 * Copy replay window and SQN from one RSN copy to another
 * (writer-side step of the RCU-like reader/writer copy scheme above).
214 rsn_copy(const struct rte_ipsec_sa *sa, uint32_t dst, uint32_t src)
217 struct replay_sqn *d;
218 const struct replay_sqn *s;
220 d = sa->sqn.inb.rsn[dst];
221 s = sa->sqn.inb.rsn[src];
/* copy every window bucket; the SQN field copy itself is elided in this
 * excerpt — confirm against the full source */
223 n = sa->replay.nb_bucket;
226 for (i = 0; i != n; i++)
227 d->window[i] = s->window[i];
 * Get RSN for read-only access: grab the current reader copy under its
 * read lock, re-reading rdidx until the trylock succeeds (a writer may
 * switch copies between the index read and the lock attempt).
233 static inline struct replay_sqn *
234 rsn_acquire(struct rte_ipsec_sa *sa)
237 struct replay_sqn *rsn;
239 n = sa->sqn.inb.rdidx;
240 rsn = sa->sqn.inb.rsn[n];
/* a writer currently owns this copy: reload the index and retry */
246 while (rte_rwlock_read_trylock(&rsn->rwl) < 0) {
248 n = sa->sqn.inb.rdidx;
249 rsn = sa->sqn.inb.rsn[n];
/* keep the compiler from hoisting the rdidx/rsn reloads out of the loop */
250 rte_compiler_barrier();
 * Release read-only access for RSN obtained via rsn_acquire().
260 rsn_release(struct rte_ipsec_sa *sa, struct replay_sqn *rsn)
263 rte_rwlock_read_unlock(&rsn->rwl);
/*
 * Start an RSN update: switch the writer index to the spare copy and take
 * its write lock; returns the copy the writer should modify.
 * NOTE(review): the condition guarding the early return below is elided in
 * this excerpt — presumably a fast path for SAs that need no locking;
 * confirm against the full source.
 */
269 static inline struct replay_sqn *
270 rsn_update_start(struct rte_ipsec_sa *sa)
273 struct replay_sqn *rsn;
275 n = sa->sqn.inb.wridx;
/* no active writers: writer and reader indexes must agree on entry */
278 RTE_ASSERT(n == sa->sqn.inb.rdidx);
281 return sa->sqn.inb.rsn[n];
/* move the writer to the other (spare) RSN copy */
283 k = REPLAY_SQN_NEXT(n);
284 sa->sqn.inb.wridx = k;
286 rsn = sa->sqn.inb.rsn[k];
287 rte_rwlock_write_lock(&rsn->rwl);
/*
 * Finish an RSN update: release the write lock and publish the freshly
 * updated copy to readers by switching rdidx to it.
 */
297 rsn_update_finish(struct rte_ipsec_sa *sa, struct replay_sqn *rsn)
304 n = sa->sqn.inb.wridx;
/* a writer must be active and must hold exactly this copy */
305 RTE_ASSERT(n != sa->sqn.inb.rdidx);
306 RTE_ASSERT(rsn == sa->sqn.inb.rsn[n]);
308 rte_rwlock_write_unlock(&rsn->rwl);
/* make the updated copy visible to subsequent readers */
309 sa->sqn.inb.rdidx = n;
313 #endif /* _IPSEC_SQN_H_ */