/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
*/
#ifndef _SA_H_
union sym_op_ofslen cipher;
union sym_op_ofslen auth;
} ctp;
+ /* cpu-crypto offsets */
+ union rte_crypto_sym_ofs cofs;
/* tx_offload template for tunnel mbuf */
struct {
uint64_t msk;
* sqn and replay window
* In case of SA handled by multiple threads *sqn* cacheline
* could be shared by multiple cores.
- * To minimise perfomance impact, we try to locate in a separate
+ * To minimise performance impact, we try to locate in a separate
 * place from other frequently accessed data.
*/
union {
- union {
- rte_atomic64_t atom;
- uint64_t raw;
- } outb;
+ uint64_t outb;
struct {
uint32_t rdidx; /* read index */
uint32_t wridx; /* write index */
inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
/* outbound processing */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
uint16_t num);
+uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
+ struct rte_mbuf *mb[], uint16_t num);
+
#endif /* _SA_H_ */