ipsec: support AES-CTR
author    Fan Zhang <roy.fan.zhang@intel.com>
          Wed, 20 Mar 2019 15:38:34 +0000 (15:38 +0000)
committer Akhil Goyal <akhil.goyal@nxp.com>
          Fri, 22 Mar 2019 14:54:24 +0000 (15:54 +0100)
This patch adds AES-CTR cipher algorithm support to the ipsec
library.
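
For reference, a minimal sketch (illustrative only, not part of this
patch) of the crypto transform chain an application could hand to the
library for an outbound AES-CTR SA. IV_OFFSET, the key buffers and the
16-byte IV length are assumptions of this sketch; the chain would be
supplied through rte_ipsec_sa_prm.crypto_xform when calling
rte_ipsec_sa_init():

#include <rte_crypto.h>

/* assumed per-op IV location: right after the symmetric crypto op */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static uint8_t aes_key[16];   /* AES-128 key (RFC 3686 keying material, minus nonce) */
static uint8_t auth_key[20];  /* HMAC-SHA1 key */

static struct rte_crypto_sym_xform auth_xf = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.key = { .data = auth_key, .length = sizeof(auth_key) },
		.digest_length = 12,   /* 96-bit truncated ICV */
	},
	.next = NULL,
};

/* outbound: cipher transform first, chained to the auth transform */
static struct rte_crypto_sym_xform cipher_xf = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.key = { .data = aes_key, .length = sizeof(aes_key) },
		/* assumption: the PMD consumes the full 16-byte counter block as IV */
		.iv = { .offset = IV_OFFSET, .length = 16 },
	},
	.next = &auth_xf,
};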

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
doc/guides/rel_notes/release_19_05.rst
lib/librte_ipsec/crypto.h
lib/librte_ipsec/sa.c
lib/librte_ipsec/sa.h

diff --git a/doc/guides/rel_notes/release_19_05.rst b/doc/guides/rel_notes/release_19_05.rst
index 04a72b8..d7a5f1b 100644
@@ -95,6 +95,11 @@ New Features
 
   Added support for AES-XTS with 128 and 256 bit AES keys.
 
+* **Updated the IPsec library.**
+
+  The IPsec library has been updated with AES-CTR cipher algorithm
+  support.
+
 * **Updated the testpmd application.**
 
   Improved testpmd application performance on ARM platform. For ``macswap``
diff --git a/lib/librte_ipsec/crypto.h b/lib/librte_ipsec/crypto.h
index b5f2648..4f551e3 100644
  * by ipsec library.
  */
 
+/*
+ * AES-CTR counter block format.
+ */
+
+struct aesctr_cnt_blk {
+       uint32_t nonce;
+       uint64_t iv;
+       uint32_t cnt;
+} __attribute__((packed));
+
  /*
   * AES-GCM devices have some specific requirements for IV and AAD formats.
   * Ideally that would be done by the driver itself.
@@ -41,6 +51,13 @@ struct gcm_esph_iv {
        uint64_t iv;
 } __attribute__((packed));
 
+static inline void
+aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)
+{
+       ctr->nonce = nonce;
+       ctr->iv = iv;
+       ctr->cnt = rte_cpu_to_be_32(1);
+}
 
 static inline void
 aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)
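
For reference, a standalone sketch (not library code) of the 16-byte
RFC 3686 counter block that aes_ctr_cnt_blk_fill() assembles: the 4-byte
nonce (the SA salt, kept as configured), the 8-byte per-packet IV carried
in the ESP payload, and a 4-byte block counter that starts at 1 in
network byte order; htonl() stands in for rte_cpu_to_be_32() here:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct ctr_blk {
	uint32_t nonce;  /* SA salt / nonce */
	uint64_t iv;     /* explicit per-packet IV */
	uint32_t cnt;    /* AES block counter, big-endian, starts at 1 */
} __attribute__((packed));

int main(void)
{
	struct ctr_blk blk;
	uint8_t raw[sizeof(blk)];
	unsigned int i;

	blk.nonce = 0xdeadbeef;           /* illustrative salt value */
	blk.iv = 0x0102030405060708ULL;   /* illustrative IV from the packet */
	blk.cnt = htonl(1);

	memcpy(raw, &blk, sizeof(raw));
	for (i = 0; i < sizeof(raw); i++)
		printf("%02x%s", raw[i], i + 1 == sizeof(raw) ? "\n" : " ");
	return 0;
}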
diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
index 5f55c2a..e34dd32 100644
@@ -219,18 +219,28 @@ esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
 static void
 esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
 {
+       uint8_t algo_type;
+
        sa->sqn.outb.raw = 1;
 
        /* these params may differ with new algorithms support */
        sa->ctp.auth.offset = hlen;
        sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
-       if (sa->aad_len != 0) {
+
+       algo_type = sa->algo_type;
+
+       switch (algo_type) {
+       case ALGO_TYPE_AES_GCM:
+       case ALGO_TYPE_AES_CTR:
+       case ALGO_TYPE_NULL:
                sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
                        sa->iv_len;
                sa->ctp.cipher.length = 0;
-       } else {
+               break;
+       case ALGO_TYPE_AES_CBC:
                sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
                sa->ctp.cipher.length = sa->iv_len;
+               break;
        }
 }
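
A small worked example (illustrative; assuming tunnel mode, where hlen is
the length of the prepended outer IPv4 header and matches sa->hdr_len) of
the offsets the switch above produces: AES-CTR, AES-GCM and NULL start
the ciphered region after the explicit IV and count only the payload,
while AES-CBC starts it right after the ESP header and includes the IV in
the length:

#include <stdio.h>

int main(void)
{
	unsigned int hlen = 20;    /* outer IPv4 header */
	unsigned int esph = 8;     /* sizeof(struct esp_hdr): SPI + SQN */
	unsigned int ctr_iv = 8;   /* explicit AES-CTR/GCM IV */
	unsigned int cbc_iv = 16;  /* AES-CBC IV (IPSEC_MAX_IV_SIZE) */

	/* ALGO_TYPE_AES_GCM / ALGO_TYPE_AES_CTR / ALGO_TYPE_NULL */
	printf("CTR/GCM/NULL: cipher.offset=%u cipher.length=0+plen\n",
		hlen + esph + ctr_iv);

	/* ALGO_TYPE_AES_CBC */
	printf("AES-CBC:      cipher.offset=%u cipher.length=%u+plen\n",
		hlen + esph, cbc_iv);
	return 0;
}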
 
@@ -259,26 +269,47 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
                                RTE_IPSEC_SATP_MODE_MASK;
 
        if (cxf->aead != NULL) {
-               /* RFC 4106 */
-               if (cxf->aead->algo != RTE_CRYPTO_AEAD_AES_GCM)
+               switch (cxf->aead->algo) {
+               case RTE_CRYPTO_AEAD_AES_GCM:
+                       /* RFC 4106 */
+                       sa->aad_len = sizeof(struct aead_gcm_aad);
+                       sa->icv_len = cxf->aead->digest_length;
+                       sa->iv_ofs = cxf->aead->iv.offset;
+                       sa->iv_len = sizeof(uint64_t);
+                       sa->pad_align = IPSEC_PAD_AES_GCM;
+                       sa->algo_type = ALGO_TYPE_AES_GCM;
+                       break;
+               default:
                        return -EINVAL;
-               sa->aad_len = sizeof(struct aead_gcm_aad);
-               sa->icv_len = cxf->aead->digest_length;
-               sa->iv_ofs = cxf->aead->iv.offset;
-               sa->iv_len = sizeof(uint64_t);
-               sa->pad_align = IPSEC_PAD_AES_GCM;
+               }
        } else {
                sa->icv_len = cxf->auth->digest_length;
                sa->iv_ofs = cxf->cipher->iv.offset;
                sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
-               if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_NULL) {
+
+               switch (cxf->cipher->algo) {
+               case RTE_CRYPTO_CIPHER_NULL:
                        sa->pad_align = IPSEC_PAD_NULL;
                        sa->iv_len = 0;
-               } else if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+                       sa->algo_type = ALGO_TYPE_NULL;
+                       break;
+
+               case RTE_CRYPTO_CIPHER_AES_CBC:
                        sa->pad_align = IPSEC_PAD_AES_CBC;
                        sa->iv_len = IPSEC_MAX_IV_SIZE;
-               } else
+                       sa->algo_type = ALGO_TYPE_AES_CBC;
+                       break;
+
+               case RTE_CRYPTO_CIPHER_AES_CTR:
+                       /* RFC 3686 */
+                       sa->pad_align = IPSEC_PAD_AES_CTR;
+                       sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
+                       sa->algo_type = ALGO_TYPE_AES_CTR;
+                       break;
+
+               default:
                        return -EINVAL;
+               }
        }
 
        sa->udata = prm->userdata;
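
A side note on the AES-CTR case above: RFC 3686 defines the keying
material as the AES key followed by a 4-byte nonce, so an application
would typically split, say, 20 bytes of configured key into the 16-byte
cipher key for the xform and the nonce that becomes the SA salt. A hedged
sketch (the helper name is made up, not library API):

#include <stdint.h>
#include <string.h>

static void
split_ctr_keymat(const uint8_t *keymat, size_t keymat_len,
	uint8_t *aes_key, size_t *aes_key_len, uint32_t *salt)
{
	*aes_key_len = keymat_len - 4;           /* e.g. 20 -> 16 for AES-128 */
	memcpy(aes_key, keymat, *aes_key_len);   /* cipher xform key */
	memcpy(salt, keymat + *aes_key_len, 4);  /* nonce, kept in wire order */
}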
@@ -438,12 +469,15 @@ esp_outb_cop_prepare(struct rte_crypto_op *cop,
 {
        struct rte_crypto_sym_op *sop;
        struct aead_gcm_iv *gcm;
+       struct aesctr_cnt_blk *ctr;
+       uint8_t algo_type = sa->algo_type;
 
        /* fill sym op fields */
        sop = cop->sym;
 
-       /* AEAD (AES_GCM) case */
-       if (sa->aad_len != 0) {
+       switch (algo_type) {
+       case ALGO_TYPE_AES_GCM:
+               /* AEAD (AES_GCM) case */
                sop->aead.data.offset = sa->ctp.cipher.offset + hlen;
                sop->aead.data.length = sa->ctp.cipher.length + plen;
                sop->aead.digest.data = icv->va;
@@ -455,14 +489,40 @@ esp_outb_cop_prepare(struct rte_crypto_op *cop,
                gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
                        sa->iv_ofs);
                aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
-       /* CRYPT+AUTH case */
-       } else {
+               break;
+       case ALGO_TYPE_AES_CBC:
+               /* Cipher-Auth (AES-CBC *) case */
+               sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
+               sop->cipher.data.length = sa->ctp.cipher.length + plen;
+               sop->auth.data.offset = sa->ctp.auth.offset + hlen;
+               sop->auth.data.length = sa->ctp.auth.length + plen;
+               sop->auth.digest.data = icv->va;
+               sop->auth.digest.phys_addr = icv->pa;
+               break;
+       case ALGO_TYPE_AES_CTR:
+               /* Cipher-Auth (AES-CTR *) case */
+               sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
+               sop->cipher.data.length = sa->ctp.cipher.length + plen;
+               sop->auth.data.offset = sa->ctp.auth.offset + hlen;
+               sop->auth.data.length = sa->ctp.auth.length + plen;
+               sop->auth.digest.data = icv->va;
+               sop->auth.digest.phys_addr = icv->pa;
+
+               ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
+                       sa->iv_ofs);
+               aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+               break;
+       case ALGO_TYPE_NULL:
+               /* NULL case */
                sop->cipher.data.offset = sa->ctp.cipher.offset + hlen;
                sop->cipher.data.length = sa->ctp.cipher.length + plen;
                sop->auth.data.offset = sa->ctp.auth.offset + hlen;
                sop->auth.data.length = sa->ctp.auth.length + plen;
                sop->auth.digest.data = icv->va;
                sop->auth.digest.phys_addr = icv->pa;
+               break;
+       default:
+               break;
        }
 }
 
@@ -561,6 +621,7 @@ outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
 {
        uint32_t *psqh;
        struct aead_gcm_aad *aad;
+       uint8_t algo_type = sa->algo_type;
 
        /* insert SQN.hi between ESP trailer and ICV */
        if (sa->sqh_len != 0) {
@@ -572,7 +633,7 @@ outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
         * fill IV and AAD fields, if any (aad fields are placed after icv),
         * right now we support only one AEAD algorithm: AES-GCM.
         */
-       if (sa->aad_len != 0) {
+       if (algo_type == ALGO_TYPE_AES_GCM) {
                aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
                aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
        }
@@ -783,8 +844,10 @@ esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
 {
        struct rte_crypto_sym_op *sop;
        struct aead_gcm_iv *gcm;
+       struct aesctr_cnt_blk *ctr;
        uint64_t *ivc, *ivp;
        uint32_t clen;
+       uint8_t algo_type = sa->algo_type;
 
        clen = plen - sa->ctp.cipher.length;
        if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)
@@ -793,8 +856,8 @@ esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
        /* fill sym op fields */
        sop = cop->sym;
 
-       /* AEAD (AES_GCM) case */
-       if (sa->aad_len != 0) {
+       switch (algo_type) {
+       case ALGO_TYPE_AES_GCM:
                sop->aead.data.offset = pofs + sa->ctp.cipher.offset;
                sop->aead.data.length = clen;
                sop->aead.digest.data = icv->va;
@@ -808,8 +871,8 @@ esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
                ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
                        pofs + sizeof(struct esp_hdr));
                aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
-       /* CRYPT+AUTH case */
-       } else {
+               break;
+       case ALGO_TYPE_AES_CBC:
                sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
                sop->cipher.data.length = clen;
                sop->auth.data.offset = pofs + sa->ctp.auth.offset;
@@ -822,7 +885,35 @@ esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,
                ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
                        pofs + sizeof(struct esp_hdr));
                copy_iv(ivc, ivp, sa->iv_len);
+               break;
+       case ALGO_TYPE_AES_CTR:
+               sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
+               sop->cipher.data.length = clen;
+               sop->auth.data.offset = pofs + sa->ctp.auth.offset;
+               sop->auth.data.length = plen - sa->ctp.auth.length;
+               sop->auth.digest.data = icv->va;
+               sop->auth.digest.phys_addr = icv->pa;
+
+               /* copy iv from the input packet to the cop */
+               ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
+                       sa->iv_ofs);
+               ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+                       pofs + sizeof(struct esp_hdr));
+               aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+               break;
+       case ALGO_TYPE_NULL:
+               sop->cipher.data.offset = pofs + sa->ctp.cipher.offset;
+               sop->cipher.data.length = clen;
+               sop->auth.data.offset = pofs + sa->ctp.auth.offset;
+               sop->auth.data.length = plen - sa->ctp.auth.length;
+               sop->auth.digest.data = icv->va;
+               sop->auth.digest.phys_addr = icv->pa;
+               break;
+
+       default:
+               return -EINVAL;
        }
+
        return 0;
 }
 
diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
index 392e8fd..12c061e 100644
 enum {
        IPSEC_PAD_DEFAULT = 4,
        IPSEC_PAD_AES_CBC = IPSEC_MAX_IV_SIZE,
+       IPSEC_PAD_AES_CTR = IPSEC_PAD_DEFAULT,
        IPSEC_PAD_AES_GCM = IPSEC_PAD_DEFAULT,
        IPSEC_PAD_NULL = IPSEC_PAD_DEFAULT,
 };
 
+/* iv sizes for different algorithms */
+enum {
+       IPSEC_IV_SIZE_DEFAULT = IPSEC_MAX_IV_SIZE,
+       IPSEC_AES_CTR_IV_SIZE = sizeof(uint64_t),
+};
+
 /* these definitions probably have to be in rte_crypto_sym.h */
 union sym_op_ofslen {
        uint64_t raw;
@@ -47,7 +54,17 @@ struct replay_sqn {
        __extension__ uint64_t window[0];
 };
 
+/* IPsec SA supported algorithms */
+enum sa_algo_type {
+       ALGO_TYPE_NULL = 0,
+       ALGO_TYPE_AES_CBC,
+       ALGO_TYPE_AES_CTR,
+       ALGO_TYPE_AES_GCM,
+       ALGO_TYPE_MAX
+};
+
 struct rte_ipsec_sa {
+
        uint64_t type;     /* type of given SA */
        uint64_t udata;    /* user defined */
        uint32_t size;     /* size of given sa object */
@@ -65,6 +82,7 @@ struct rte_ipsec_sa {
                union sym_op_ofslen auth;
        } ctp;
        uint32_t salt;
+       uint8_t algo_type;
        uint8_t proto;    /* next proto */
        uint8_t aad_len;
        uint8_t hdr_len;
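
For reference, a hypothetical illustration (not library code) of how
pad_align is consumed when building the ESP trailer: the payload plus the
2-byte pad-length/next-header trailer is rounded up to the algorithm's
alignment, 4 bytes for AES-CTR/GCM/NULL and the 16-byte block size for
AES-CBC:

#include <stdio.h>
#include <stdint.h>

static uint32_t
esp_pad_len(uint32_t plen, uint32_t pad_align)
{
	/* padding needed so that plen + pad + 2 is a multiple of pad_align */
	return ((plen + 2 + pad_align - 1) & ~(pad_align - 1)) - (plen + 2);
}

int main(void)
{
	printf("AES-CTR, 100-byte payload: %u pad bytes\n", esp_pad_len(100, 4));
	printf("AES-CBC, 100-byte payload: %u pad bytes\n", esp_pad_len(100, 16));
	return 0;
}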