X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fcnxk%2Fcn9k_ipsec_la_ops.h;h=9a1e217042856ae07035b9393bf84d2f80325865;hb=2f20817c0004886e68ba687d8e257a5c779840d9;hp=b7a88e1b35aaf2ffdb6a380fa300d752826184d9;hpb=67a87e89561cae886616275900f69d8de2f13dcd;p=dpdk.git

diff --git a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
index b7a88e1b35..9a1e217042 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
+++ b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
@@ -6,9 +6,11 @@
 #define __CN9K_IPSEC_LA_OPS_H__
 
 #include <rte_crypto_sym.h>
+#include <rte_esp.h>
 #include <rte_security.h>
 
 #include "cn9k_ipsec.h"
+#include "cnxk_security_ar.h"
 
 static __rte_always_inline int32_t
 ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
@@ -21,16 +23,64 @@ ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
 	return sa->rlens.partial_len + enc_payload_len;
 }
 
+static __rte_always_inline int
+ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
+		       struct rte_mbuf *m)
+{
+	uint32_t esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
+	struct roc_ie_on_common_sa *common_sa;
+	struct roc_ie_on_inb_sa *in_sa;
+	struct roc_ie_on_sa_ctl *ctl;
+	uint64_t seq_in_sa, seq = 0;
+	struct rte_esp_hdr *esp;
+	uint8_t esn;
+	int ret;
+
+	in_sa = &sa->in_sa;
+	common_sa = &in_sa->common_sa;
+	ctl = &common_sa->ctl;
+
+	esn = ctl->esn_en;
+	esn_low = rte_be_to_cpu_32(common_sa->esn_low);
+	esn_hi = rte_be_to_cpu_32(common_sa->esn_hi);
+
+	esp = rte_pktmbuf_mtod_offset(m, void *, sizeof(struct rte_ipv4_hdr));
+	seql = rte_be_to_cpu_32(esp->seq);
+
+	if (!esn) {
+		seq = (uint64_t)seql;
+	} else {
+		seqh = cnxk_on_anti_replay_get_seqh(win_sz, seql, esn_hi,
+						    esn_low);
+		seq = ((uint64_t)seqh << 32) | seql;
+	}
+
+	if (unlikely(seq == 0))
+		return IPSEC_ANTI_REPLAY_FAILED;
+
+	ret = cnxk_on_anti_replay_check(seq, &sa->ar, win_sz);
+	if (esn && !ret) {
+		seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
+		if (seq > seq_in_sa) {
+			common_sa->esn_low = rte_cpu_to_be_32(seql);
+			common_sa->esn_hi = rte_cpu_to_be_32(seqh);
+		}
+	}
+
+	return ret;
+}
+
 static __rte_always_inline int
 process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 		struct cpt_inst_s *inst)
 {
-	const unsigned int hdr_len = sizeof(struct roc_ie_on_outb_hdr);
+	const unsigned int hdr_len = sa->custom_hdr_len;
 	struct rte_crypto_sym_op *sym_op = cop->sym;
 	struct rte_mbuf *m_src = sym_op->m_src;
-	uint32_t dlen, rlen, extend_tail;
 	struct roc_ie_on_outb_sa *out_sa;
 	struct roc_ie_on_outb_hdr *hdr;
+	uint32_t dlen, rlen;
+	int32_t extend_tail;
 
 	out_sa = &sa->out_sa;
 
@@ -39,7 +89,8 @@ process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 
 	extend_tail = rlen - dlen;
 	if (unlikely(extend_tail > rte_pktmbuf_tailroom(m_src))) {
-		plt_dp_err("Not enough tail room");
+		plt_dp_err("Not enough tail room (required: %d, available: %d)",
+			   extend_tail, rte_pktmbuf_tailroom(m_src));
 		return -ENOMEM;
 	}
 
@@ -52,9 +103,15 @@
 		return -ENOMEM;
 	}
 
-	memcpy(&hdr->iv[0],
-	       rte_crypto_op_ctod_offset(cop, uint8_t *, sa->cipher_iv_off),
-	       sa->cipher_iv_len);
+#ifdef LA_IPSEC_DEBUG
+	if (sa->inst.w4 & ROC_IE_ON_PER_PKT_IV) {
+		memcpy(&hdr->iv[0],
+		       rte_crypto_op_ctod_offset(cop, uint8_t *,
+						 sa->cipher_iv_off),
+		       sa->cipher_iv_len);
+	}
+#endif
+
 	hdr->seq = rte_cpu_to_be_32(sa->seq_lo);
 	hdr->ip_id = rte_cpu_to_be_32(sa->ip_id);
 
@@ -78,6 +135,15 @@ process_inb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 {
 	struct rte_crypto_sym_op *sym_op = cop->sym;
 	struct rte_mbuf *m_src = sym_op->m_src;
+	int ret;
+
+	if (sa->replay_win_sz) {
+		ret = ipsec_antireplay_check(sa, sa->replay_win_sz, m_src);
+		if (unlikely(ret)) {
+			plt_dp_err("Anti replay check failed");
+			return ret;
+		}
+	}
 
 	/* Prepare CPT instruction */
 	inst->w4.u64 = sa->inst.w4 | rte_pktmbuf_pkt_len(m_src);
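
Note (not part of the patch): for readers unfamiliar with ESN handling, the sketch below illustrates the standard RFC 4303 Appendix A reconstruction of the high 32 bits of an extended sequence number, which is the kind of logic a helper such as cnxk_on_anti_replay_get_seqh() is expected to perform before the 64-bit sequence is passed to cnxk_on_anti_replay_check(). The function name esn_high_guess() and the exact behaviour of the driver helper are assumptions for illustration, not taken from this diff.

#include <stdint.h>

/*
 * Illustrative sketch only (not the cnxk driver helper): guess the high
 * 32 bits (seqh) of an ESN per RFC 4303 Appendix A, given the received
 * low 32 bits (seql), the SA's last-seen ESN (esn_hi:esn_low) and the
 * anti-replay window size.  The full sequence number is then
 * ((uint64_t)seqh << 32) | seql, as built in ipsec_antireplay_check().
 */
static inline uint32_t
esn_high_guess(uint32_t win_sz, uint32_t seql, uint32_t esn_hi,
	       uint32_t esn_low)
{
	/* Lower edge of the replay window; may wrap around 2^32 on purpose. */
	uint32_t win_low = esn_low - (win_sz - 1);

	if (esn_low >= win_sz - 1)
		/* Window does not straddle a 2^32 boundary. */
		return (seql >= win_low) ? esn_hi : esn_hi + 1;

	/* Window straddles the boundary: win_low has wrapped. */
	return (seql >= win_low) ? esn_hi - 1 : esn_hi;
}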