X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_ipsec%2Fsa.h;h=29cfe7279aef8e29ae2e6411d7f2f6b7a5b64e73;hb=611faa5f46cc67449f272e14450fc6a0a275767d;hp=c3a0d84bcfc7242a5255f7e5398ccc347e3fa168;hpb=51acc16b51313383614ba14f374c9be4f61999a9;p=dpdk.git

diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
index c3a0d84bcf..29cfe7279a 100644
--- a/lib/librte_ipsec/sa.h
+++ b/lib/librte_ipsec/sa.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #ifndef _SA_H_
@@ -10,6 +10,7 @@
 #define IPSEC_MAX_HDR_SIZE 64
 #define IPSEC_MAX_IV_SIZE 16
 #define IPSEC_MAX_IV_QWORD (IPSEC_MAX_IV_SIZE / sizeof(uint64_t))
+#define TUN_HDR_MSK (RTE_IPSEC_SATP_ECN_MASK | RTE_IPSEC_SATP_DSCP_MASK)
 
 /* padding alignment for different algorithms */
 enum {
@@ -87,6 +88,13 @@ struct rte_ipsec_sa {
 		union sym_op_ofslen cipher;
 		union sym_op_ofslen auth;
 	} ctp;
+	/* cpu-crypto offsets */
+	union rte_crypto_sym_ofs cofs;
+	/* tx_offload template for tunnel mbuf */
+	struct {
+		uint64_t msk;
+		uint64_t val;
+	} tx_offload;
 	uint32_t salt;
 	uint8_t algo_type;
 	uint8_t proto; /* next proto */
@@ -98,6 +106,7 @@ struct rte_ipsec_sa {
 	uint8_t iv_ofs; /* offset for algo-specific IV inside crypto op */
 	uint8_t iv_len;
 	uint8_t pad_align;
+	uint8_t tos_mask;
 
 	/* template for tunnel header */
 	uint8_t hdr[IPSEC_MAX_HDR_SIZE];
@@ -106,7 +115,7 @@ struct rte_ipsec_sa {
 	 * sqn and replay window
 	 * In case of SA handled by multiple threads *sqn* cacheline
 	 * could be shared by multiple cores.
-	 * To minimise perfomance impact, we try to locate in a separate
+	 * To minimise performance impact, we try to locate in a separate
	 * place from other frequently accesed data.
 	 */
 	union {
@@ -127,4 +136,67 @@ int
 ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
 	const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf);
 
+/* inbound processing */
+
+uint16_t
+esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+	struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+/* outbound processing */
+
+uint16_t
+esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+	struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+	struct rte_crypto_op *cop[], uint16_t num);
+
+uint16_t
+esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+	uint16_t num);
+
+uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
 #endif /* _SA_H_ */
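
For context only, not part of the patch above: a minimal sketch of how the new cpu-crypto handlers declared in this header are reached from application code, assuming a session configured with RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO and using only the public librte_ipsec entry points. The helper name ipsec_cpu_crypto_burst() and the burst/error-handling policy are illustrative, not part of the library.

#include <rte_ipsec.h>
#include <rte_mbuf.h>

/*
 * Sketch of the synchronous (cpu-crypto) data path:
 * rte_ipsec_pkt_cpu_prepare() dispatches to the cpu_inb_pkt_prepare() /
 * cpu_outb_*_pkt_prepare() handlers selected by ipsec_sa_pkt_func_select(),
 * and rte_ipsec_pkt_process() then finalizes the successfully prepared
 * packets.
 */
static uint16_t
ipsec_cpu_crypto_burst(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint16_t k;

	/* perform crypto synchronously on the current lcore */
	k = rte_ipsec_pkt_cpu_prepare(ss, mb, num);

	/*
	 * complete ESP processing for the first k packets;
	 * packets that failed are grouped past the returned count
	 */
	k = rte_ipsec_pkt_process(ss, mb, k);

	return k;
}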