net: add rte prefix to ESP structure
diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
index 5465198..1cb71ca 100644
--- a/lib/librte_ipsec/sa.c
+++ b/lib/librte_ipsec/sa.c
@@ -6,9 +6,17 @@
 #include <rte_esp.h>
 #include <rte_ip.h>
 #include <rte_errno.h>
+#include <rte_cryptodev.h>
 
 #include "sa.h"
 #include "ipsec_sqn.h"
+#include "crypto.h"
+#include "iph.h"
+#include "misc.h"
+#include "pad.h"
+
+#define MBUF_MAX_L2_LEN                RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
+#define MBUF_MAX_L3_LEN                RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)
 
 /* some helper structures */
 struct crypto_xform {
@@ -75,22 +83,68 @@ rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
        return sa->type;
 }
 
+/**
+ * Based on the number of buckets, calculate the required size for the
+ * structure that holds replay window and sequence number (RSN) information.
+ */
+static size_t
+rsn_size(uint32_t nb_bucket)
+{
+       size_t sz;
+       struct replay_sqn *rsn;
+
+       sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
+       sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
+       return sz;
+}
+
+/*
+ * For a given window size, calculate the required number of buckets.
+ */
+static uint32_t
+replay_num_bucket(uint32_t wsz)
+{
+       uint32_t nb;
+
+       nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
+               WINDOW_BUCKET_SIZE);
+       nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);
+
+       return nb;
+}
+
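/*
 * A worked example of the bucket math above, assuming WINDOW_BUCKET_SIZE == 64
 * (one uint64_t of SQN bits per bucket) and WINDOW_BUCKET_MIN == 2, as
 * defined in ipsec_sqn.h:
 *
 *	wsz = 128: RTE_ALIGN_MUL_CEIL(128, 64) / 64 == 2  -> 2 buckets
 *	wsz = 200: rounds up to 256, 256 / 64 == 4        -> 4 buckets
 *	wsz = 64:  64 / 64 == 1, raised to the minimum    -> 2 buckets
 *
 * The count is kept a power of two so that replay.bucket_index_mask can be
 * computed as nb_bucket - 1.
 */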
 static int32_t
-ipsec_sa_size(uint32_t wsz, uint64_t type, uint32_t *nb_bucket)
+ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
 {
-       uint32_t n, sz;
+       uint32_t n, sz, wsz;
 
+       wsz = *wnd_sz;
        n = 0;
-       if (wsz != 0 && (type & RTE_IPSEC_SATP_DIR_MASK) ==
-                       RTE_IPSEC_SATP_DIR_IB)
-               n = replay_num_bucket(wsz);
+
+       if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
+
+               /*
+                * RFC 4303 recommends 64 as the minimum window size.
+                * There is no point in using ESN mode without an SQN window,
+                * so make sure the window is at least 64 when ESN is enabled.
+                */
+               wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
+                       RTE_IPSEC_SATP_ESN_DISABLE) ?
+                       wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
+               if (wsz != 0)
+                       n = replay_num_bucket(wsz);
+       }
 
        if (n > WINDOW_BUCKET_MAX)
                return -EINVAL;
 
+       *wnd_sz = wsz;
        *nb_bucket = n;
 
        sz = rsn_size(n);
+       if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
+               sz *= REPLAY_SQN_NUM;
+
        sz += sizeof(struct rte_ipsec_sa);
        return sz;
 }
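/*
 * A worked size example under the same assumptions (WINDOW_BUCKET_SIZE == 64,
 * REPLAY_SQN_NUM == 2): for an inbound SA with ESN enabled and wsz == 0, the
 * window is first raised to 64, giving n == 2 buckets; with
 * RTE_IPSEC_SATP_SQN_ATOM set, rsn_size(2) is then doubled, so the total
 * becomes 2 * rsn_size(2) + sizeof(struct rte_ipsec_sa).
 */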
@@ -101,6 +155,9 @@ rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
        memset(sa, 0, sa->size);
 }
 
+/*
+ * Determine expected SA type based on input parameters.
+ */
 static int
 fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
 {
@@ -151,20 +208,38 @@ fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
        } else
                return -EINVAL;
 
+       /* check for ESN flag */
+       if (prm->ipsec_xform.options.esn == 0)
+               tp |= RTE_IPSEC_SATP_ESN_DISABLE;
+       else
+               tp |= RTE_IPSEC_SATP_ESN_ENABLE;
+
+       /* interpret flags */
+       if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
+               tp |= RTE_IPSEC_SATP_SQN_ATOM;
+       else
+               tp |= RTE_IPSEC_SATP_SQN_RAW;
+
        *type = tp;
        return 0;
 }
 
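/*
 * A minimal sketch of checking the resulting type bits, using the
 * RTE_IPSEC_SATP_* masks referenced above (hypothetical helper, for
 * illustration only):
 */
static inline int
sa_is_inb_esn(uint64_t type)
{
	/* inbound direction with ESN enabled? */
	return (type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB &&
		(type & RTE_IPSEC_SATP_ESN_MASK) == RTE_IPSEC_SATP_ESN_ENABLE;
}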
+/*
+ * Init ESP inbound-specific fields.
+ */
 static void
 esp_inb_init(struct rte_ipsec_sa *sa)
 {
        /* these params may differ with new algorithms support */
        sa->ctp.auth.offset = 0;
        sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
-       sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
+       sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
        sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
 }
 
+/*
+ * Init ESP inbound tunnel-specific fields.
+ */
 static void
 esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
 {
@@ -172,35 +247,62 @@ esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
        esp_inb_init(sa);
 }
 
+/*
+ * Init ESP outbound-specific fields.
+ */
 static void
 esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
 {
-       sa->sqn.outb = 1;
+       uint8_t algo_type;
+
+       sa->sqn.outb.raw = 1;
 
        /* these params may differ with new algorithms support */
        sa->ctp.auth.offset = hlen;
-       sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
-       if (sa->aad_len != 0) {
-               sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
+       sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
+               sa->iv_len + sa->sqh_len;
+
+       algo_type = sa->algo_type;
+
+       switch (algo_type) {
+       case ALGO_TYPE_AES_GCM:
+       case ALGO_TYPE_AES_CTR:
+       case ALGO_TYPE_NULL:
+               sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
                        sa->iv_len;
                sa->ctp.cipher.length = 0;
-       } else {
-               sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
+               break;
+       case ALGO_TYPE_AES_CBC:
+       case ALGO_TYPE_3DES_CBC:
+               sa->ctp.cipher.offset = sa->hdr_len +
+                       sizeof(struct rte_esp_hdr);
                sa->ctp.cipher.length = sa->iv_len;
+               break;
        }
 }
 
+/*
+ * Init ESP outbound tunnel-specific fields.
+ */
 static void
 esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
 {
        sa->proto = prm->tun.next_proto;
        sa->hdr_len = prm->tun.hdr_len;
        sa->hdr_l3_off = prm->tun.hdr_l3_off;
+
+       /* update l2_len and l3_len fields for outbound mbuf */
+       sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
+               sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);
+
        memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
 
        esp_outb_init(sa, sa->hdr_len);
 }
 
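/*
 * A minimal sketch of how tx_offload.val and tx_offload.msk are meant to
 * combine at transmit time (as applied in the outbound datapath): the mask
 * clears only the l2_len/l3_len bits, so every other tx_offload field of
 * the mbuf is preserved:
 *
 *	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
 *		sa->tx_offload.val;
 */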
+/*
+ * Helper function: init the SA structure.
+ */
 static int
 esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
        const struct crypto_xform *cxf)
@@ -209,31 +311,65 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
                                RTE_IPSEC_SATP_MODE_MASK;
 
        if (cxf->aead != NULL) {
-               /* RFC 4106 */
-               if (cxf->aead->algo != RTE_CRYPTO_AEAD_AES_GCM)
+               switch (cxf->aead->algo) {
+               case RTE_CRYPTO_AEAD_AES_GCM:
+                       /* RFC 4106 */
+                       sa->aad_len = sizeof(struct aead_gcm_aad);
+                       sa->icv_len = cxf->aead->digest_length;
+                       sa->iv_ofs = cxf->aead->iv.offset;
+                       sa->iv_len = sizeof(uint64_t);
+                       sa->pad_align = IPSEC_PAD_AES_GCM;
+                       sa->algo_type = ALGO_TYPE_AES_GCM;
+                       break;
+               default:
                        return -EINVAL;
-               sa->icv_len = cxf->aead->digest_length;
-               sa->iv_ofs = cxf->aead->iv.offset;
-               sa->iv_len = sizeof(uint64_t);
-               sa->pad_align = IPSEC_PAD_AES_GCM;
+               }
        } else {
                sa->icv_len = cxf->auth->digest_length;
                sa->iv_ofs = cxf->cipher->iv.offset;
                sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
-               if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_NULL) {
+
+               switch (cxf->cipher->algo) {
+               case RTE_CRYPTO_CIPHER_NULL:
                        sa->pad_align = IPSEC_PAD_NULL;
                        sa->iv_len = 0;
-               } else if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+                       sa->algo_type = ALGO_TYPE_NULL;
+                       break;
+
+               case RTE_CRYPTO_CIPHER_AES_CBC:
                        sa->pad_align = IPSEC_PAD_AES_CBC;
                        sa->iv_len = IPSEC_MAX_IV_SIZE;
-               } else
+                       sa->algo_type = ALGO_TYPE_AES_CBC;
+                       break;
+
+               case RTE_CRYPTO_CIPHER_AES_CTR:
+                       /* RFC 3686 */
+                       sa->pad_align = IPSEC_PAD_AES_CTR;
+                       sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
+                       sa->algo_type = ALGO_TYPE_AES_CTR;
+                       break;
+
+               case RTE_CRYPTO_CIPHER_3DES_CBC:
+                       /* RFC 1851 */
+                       sa->pad_align = IPSEC_PAD_3DES_CBC;
+                       sa->iv_len = IPSEC_3DES_IV_SIZE;
+                       sa->algo_type = ALGO_TYPE_3DES_CBC;
+                       break;
+
+               default:
                        return -EINVAL;
+               }
        }
 
        sa->udata = prm->userdata;
        sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
        sa->salt = prm->ipsec_xform.salt;
 
+       /* preserve all values except l2_len and l3_len */
+       sa->tx_offload.msk =
+               ~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
+                               0, 0, 0, 0, 0);
+
        switch (sa->type & msk) {
        case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
        case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
@@ -254,11 +390,26 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
        return 0;
 }
 
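/*
 * Summary of the per-algorithm parameters set by the switches above
 * (restating the cases; the IPSEC_PAD_*/IPSEC_*_IV_SIZE constants are
 * presumably defined in sa.h alongside ALGO_TYPE_*):
 *
 *	algo		pad_align		iv_len
 *	AES-GCM		IPSEC_PAD_AES_GCM	sizeof(uint64_t)
 *	NULL		IPSEC_PAD_NULL		0
 *	AES-CBC		IPSEC_PAD_AES_CBC	IPSEC_MAX_IV_SIZE
 *	AES-CTR		IPSEC_PAD_AES_CTR	IPSEC_AES_CTR_IV_SIZE
 *	3DES-CBC	IPSEC_PAD_3DES_CBC	IPSEC_3DES_IV_SIZE
 */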
+/*
+ * Helper function: init the SA replay structure.
+ */
+static void
+fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
+{
+       sa->replay.win_sz = wnd_sz;
+       sa->replay.nb_bucket = nb_bucket;
+       sa->replay.bucket_index_mask = nb_bucket - 1;
+       sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
+       if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
+               sa->sqn.inb.rsn[1] = (struct replay_sqn *)
+                       ((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
+}
+
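/*
 * Resulting memory layout, as a sketch: rsn_size() rounds each copy up to a
 * cache line, and the second RSN copy exists only when SQN_ATOM is set:
 *
 *	+---------------------+----------------+------------------------+
 *	| struct rte_ipsec_sa | rsn[0]         | rsn[1] (SQN_ATOM only) |
 *	+---------------------+----------------+------------------------+
 */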
 int __rte_experimental
 rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
 {
        uint64_t type;
-       uint32_t nb;
+       uint32_t nb, wsz;
        int32_t rc;
 
        if (prm == NULL)
@@ -270,7 +421,8 @@ rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
                return rc;
 
        /* determine required size */
-       return ipsec_sa_size(prm->replay_win_sz, type, &nb);
+       wsz = prm->replay_win_sz;
+       return ipsec_sa_size(type, &wsz, &nb);
 }
 
 int __rte_experimental
@@ -278,7 +430,7 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
        uint32_t size)
 {
        int32_t rc, sz;
-       uint32_t nb;
+       uint32_t nb, wsz;
        uint64_t type;
        struct crypto_xform cxf;
 
@@ -291,7 +443,8 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
                return rc;
 
        /* determine required size */
-       sz = ipsec_sa_size(prm->replay_win_sz, type, &nb);
+       wsz = prm->replay_win_sz;
+       sz = ipsec_sa_size(type, &wsz, &nb);
        if (sz < 0)
                return sz;
        else if (size < (uint32_t)sz)
@@ -324,28 +477,191 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
                rte_ipsec_sa_fini(sa);
 
        /* fill replay window related fields */
-       if (nb != 0) {
-               sa->replay.win_sz = prm->replay_win_sz;
-               sa->replay.nb_bucket = nb;
-               sa->replay.bucket_index_mask = sa->replay.nb_bucket - 1;
-               sa->sqn.inb = (struct replay_sqn *)(sa + 1);
-       }
+       if (nb != 0)
+               fill_sa_replay(sa, wsz, nb);
 
        return sz;
 }
 
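/*
 * A minimal allocation sketch tying rte_ipsec_sa_size() and
 * rte_ipsec_sa_init() together (hypothetical caller code, assuming
 * rte_malloc.h; error handling abbreviated):
 */
static struct rte_ipsec_sa *
sa_alloc_init(const struct rte_ipsec_sa_prm *prm, int32_t socket)
{
	int32_t sz;
	struct rte_ipsec_sa *sa;

	/* query the required size for these parameters */
	sz = rte_ipsec_sa_size(prm);
	if (sz < 0)
		return NULL;

	sa = rte_zmalloc_socket(NULL, sz, RTE_CACHE_LINE_SIZE, socket);
	if (sa != NULL && rte_ipsec_sa_init(sa, prm, sz) < 0) {
		rte_free(sa);
		sa = NULL;
	}
	return sa;
}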
+/*
+ *  Set up crypto ops for LOOKASIDE_PROTO type of devices.
+ */
+static inline void
+lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
+       struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
+{
+       uint32_t i;
+       struct rte_crypto_sym_op *sop;
+
+       for (i = 0; i != num; i++) {
+               sop = cop[i]->sym;
+               cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+               cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+               cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
+               sop->m_src = mb[i];
+               __rte_security_attach_session(sop, ss->security.ses);
+       }
+}
+
+/*
+ *  Set up packets and crypto ops for LOOKASIDE_PROTO type of devices.
+ *  Note that for LOOKASIDE_PROTO all packet modifications will be
+ *  performed by the PMD/HW; SW only has to prepare the crypto ops.
+ */
+static uint16_t
+lksd_proto_prepare(const struct rte_ipsec_session *ss,
+       struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
+{
+       lksd_proto_cop_prepare(ss, mb, cop, num);
+       return num;
+}
+
+/*
+ * Simplest packet processing routine:
+ * all actual processing is already done by HW/PMD,
+ * so just check the mbuf ol_flags.
+ * Used for:
+ * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
+ * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
+ * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
+ */
+static uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+       uint16_t num)
+{
+       uint32_t i, k;
+       uint32_t dr[num];
+
+       RTE_SET_USED(ss);
+
+       k = 0;
+       for (i = 0; i != num; i++) {
+               if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
+                       k++;
+               else
+                       dr[i - k] = i;
+       }
+
+       /* handle unprocessed mbufs */
+       if (k != num) {
+               rte_errno = EBADMSG;
+               if (k != 0)
+                       move_bad_mbufs(mb, dr, num, num - k);
+       }
+
+       return k;
+}
+
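/*
 * A worked example of the dr[] bookkeeping above: with num == 5 and the
 * failure flag set on mbufs 1 and 3, the loop ends with k == 3 and
 * dr[] == {1, 3}; move_bad_mbufs() (from misc.h) then compacts the three
 * good mbufs to the front of mb[] and moves the two bad ones behind them.
 */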
+/*
+ * Select the packet processing function for a session on a LOOKASIDE_NONE
+ * type of device.
+ */
+static int
+lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
+               struct rte_ipsec_sa_pkt_func *pf)
+{
+       int32_t rc;
+
+       static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+                       RTE_IPSEC_SATP_MODE_MASK;
+
+       rc = 0;
+       switch (sa->type & msk) {
+       case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+       case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+               pf->prepare = esp_inb_pkt_prepare;
+               pf->process = esp_inb_tun_pkt_process;
+               break;
+       case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+               pf->prepare = esp_inb_pkt_prepare;
+               pf->process = esp_inb_trs_pkt_process;
+               break;
+       case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+       case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+               pf->prepare = esp_outb_tun_prepare;
+               pf->process = (sa->sqh_len != 0) ?
+                       esp_outb_sqh_process : pkt_flag_process;
+               break;
+       case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+               pf->prepare = esp_outb_trs_prepare;
+               pf->process = (sa->sqh_len != 0) ?
+                       esp_outb_sqh_process : pkt_flag_process;
+               break;
+       default:
+               rc = -ENOTSUP;
+       }
+
+       return rc;
+}
+
+/*
+ * Select the packet processing function for a session on an INLINE_CRYPTO
+ * type of device.
+ */
+static int
+inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+               struct rte_ipsec_sa_pkt_func *pf)
+{
+       int32_t rc;
+
+       static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+                       RTE_IPSEC_SATP_MODE_MASK;
+
+       rc = 0;
+       switch (sa->type & msk) {
+       case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+       case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+               pf->process = esp_inb_tun_pkt_process;
+               break;
+       case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+               pf->process = esp_inb_trs_pkt_process;
+               break;
+       case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+       case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+               pf->process = inline_outb_tun_pkt_process;
+               break;
+       case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+               pf->process = inline_outb_trs_pkt_process;
+               break;
+       default:
+               rc = -ENOTSUP;
+       }
+
+       return rc;
+}
+
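/*
 * A minimal sketch of how the selected callbacks are consumed through the
 * public API (see rte_ipsec.h; the inline wrappers simply invoke
 * ss->pkt_func.prepare() and ss->pkt_func.process()):
 *
 *	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);
 *	... enqueue cop[0..k) to a cryptodev, dequeue completions ...
 *	k = rte_ipsec_pkt_process(ss, mb, k);
 */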
+/*
+ * Select the packet processing function for a given session based on SA
+ * parameters and the type of device associated with the session.
+ */
 int
 ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
        const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
 {
        int32_t rc;
 
-       RTE_SET_USED(sa);
-
        rc = 0;
        pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };
 
        switch (ss->type) {
+       case RTE_SECURITY_ACTION_TYPE_NONE:
+               rc = lksd_none_pkt_func_select(sa, pf);
+               break;
+       case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+               rc = inline_crypto_pkt_func_select(sa, pf);
+               break;
+       case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+               if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
+                               RTE_IPSEC_SATP_DIR_IB)
+                       pf->process = pkt_flag_process;
+               else
+                       pf->process = inline_proto_outb_pkt_process;
+               break;
+       case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+               pf->prepare = lksd_proto_prepare;
+               pf->process = pkt_flag_process;
+               break;
        default:
                rc = -ENOTSUP;
        }