* No IPv6 options headers.
* No AH mode.
-* Currently only EAS-CBC, HMAC-SHA1 and NULL.
+* Supported algorithms: AES-CBC, AES-GCM, HMAC-SHA1 and NULL.
* Each SA must be handled by a unique lcore (*1 RX queue per port*).
* No chained mbufs.
The successfully parsed SA rules will be stored in an array.
-All SAs configured with AES-CBC and HMAC-SHA1 share the same values for
-cipher block size and key, and authentication digest size and key.
-
The SA rule syntax is shown as follows:
.. code-block:: console
* *null*: NULL algorithm
* *aes-128-cbc*: AES-CBC 128-bit algorithm
+ * *aes-128-gcm*: AES-GCM 128-bit algorithm
* Syntax: *cipher_algo <your algorithm>*
* *null*: NULL algorithm
* *sha1-hmac*: HMAC SHA1 algorithm
+ * *aes-128-gcm*: AES-GCM 128-bit algorithm
``<auth_key>``
- * Authentication key, NOT available when 'null' algorithm is used
+ * Authentication key, NOT available when 'null' or 'aes-128-gcm' algorithm
+ is used.
* Optional: No, must be followed by <auth_algo> option
src 1111:1111:1111:1111:1111:1111:1111:5555 \
dst 2222:2222:2222:2222:2222:2222:2222:5555
+ sa in 105 cipher_algo aes-128-gcm \
+ cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \
+ auth_algo aes-128-gcm \
+ mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5
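+
+Note that in the *aes-128-gcm* example above the cipher key is 20 bytes
+long: a 16-byte AES-128 key followed by a 4-byte salt, matching the
+RFC 4106 keying material layout.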
Routing rule syntax
^^^^^^^^^^^^^^^^^^^
sa->iv_len;
sym_cop->cipher.data.length = payload_len;
+ struct cnt_blk *icb;
+ uint8_t *aad;
uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
switch (sa->cipher_algo) {
sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.length = sa->iv_len;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
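+ /* Build the initial counter block (RFC 4106): 4-byte salt,
+ * the 8-byte IV from the packet, and a 4-byte block counter
+ * starting at 1, in big endian.
+ */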
+ icb = get_cnt_blk(m);
+ icb->salt = sa->salt;
+ memcpy(&icb->iv, iv, 8);
+ icb->cnt = rte_cpu_to_be_32(1);
+ sym_cop->cipher.iv.data = (uint8_t *)icb;
+ sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
+ sym_cop->cipher.iv.length = 16;
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+ sa->cipher_algo);
+ return -EINVAL;
+ }
+ switch (sa->auth_algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) +
sa->iv_len + payload_len;
break;
+ case RTE_CRYPTO_AUTH_AES_GCM:
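+ /* For AES-GCM the AAD is the 8-byte ESP header (SPI and
+ * sequence number) located just before the IV.
+ */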
+ aad = get_aad(m);
+ memcpy(aad, iv - sizeof(struct esp_hdr), 8);
+ sym_cop->auth.aad.data = aad;
+ sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ aad - rte_pktmbuf_mtod(m, uint8_t *));
+ sym_cop->auth.aad.length = 8;
+ break;
default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
- sa->cipher_algo);
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+ sa->auth_algo);
return -EINVAL;
}
sizeof(struct esp_hdr);
sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
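+ /* Use the sequence number as the per-packet IV; the IV is
+ * carried in clear text, so ciphering starts after it.
+ */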
+ *iv = sa->seq;
+ sym_cop->cipher.data.offset = ip_hdr_len +
+ sizeof(struct esp_hdr) + sa->iv_len;
+ sym_cop->cipher.data.length = pad_payload_len;
+ break;
default:
RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
sa->cipher_algo);
(uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->cipher.iv.length = 16;
- switch (sa->cipher_algo) {
- case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_AES_CBC:
+ uint8_t *aad;
+
+ switch (sa->auth_algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) +
sa->iv_len + pad_payload_len;
break;
+ case RTE_CRYPTO_AUTH_AES_GCM:
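+ /* As on the inbound path, the 8-byte ESP header is the AAD. */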
+ aad = get_aad(m);
+ memcpy(aad, esp, 8);
+ sym_cop->auth.aad.data = aad;
+ sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ aad - rte_pktmbuf_mtod(m, uint8_t *));
+ sym_cop->auth.aad.length = 8;
+ break;
default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
- sa->cipher_algo);
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+ sa->auth_algo);
return -EINVAL;
}
enum rte_crypto_auth_algorithm algo;
uint16_t digest_len;
uint16_t key_len;
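+ /* length of the additional authenticated data (AAD) */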
+ uint8_t aad_len;
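+ /* set when no standalone authentication key is required */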
+ uint8_t key_not_req;
};
const struct supported_cipher_algo cipher_algos[] = {
.iv_len = 16,
.block_size = 16,
.key_len = 16
+ },
+ {
+ .keyword = "aes-128-gcm",
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM,
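+ /* RFC 4106: 8-byte explicit IV; GCM needs no block padding,
+ * so block_size only reflects ESP's 4-byte alignment
+ */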
+ .iv_len = 8,
+ .block_size = 4,
+ .key_len = 16
}
};
.keyword = "null",
.algo = RTE_CRYPTO_AUTH_NULL,
.digest_len = 0,
- .key_len = 0
+ .key_len = 0,
+ .key_not_req = 1
},
{
.keyword = "sha1-hmac",
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.digest_len = 12,
.key_len = 20
+ },
+ {
+ .keyword = "aes-128-gcm",
+ .algo = RTE_CRYPTO_AUTH_AES_GCM,
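+ /* 16-byte GCM ICV; the cipher key is reused, so no
+ * separate authentication key is configured
+ */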
+ .digest_len = 16,
+ .key_len = 16,
+ .aad_len = 8,
+ .key_not_req = 1
}
};
rule->iv_len = algo->iv_len;
rule->cipher_key_len = algo->key_len;
- /* for NULL algorithm, no cipher key should
- * exist */
+ /* for NULL algorithm, no cipher key required */
if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
cipher_algo_p = 1;
continue;
rule->auth_algo = algo->algo;
rule->auth_key_len = algo->key_len;
rule->digest_len = algo->digest_len;
+ rule->aad_len = algo->aad_len;
- /* for NULL algorithm, no auth key should exist */
- if (rule->auth_algo == RTE_CRYPTO_AUTH_NULL) {
+ /* NULL algorithm and combined algos do not
+ * require auth key
+ */
+ if (algo->key_not_req) {
auth_algo_p = 1;
continue;
}
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
- sa_ctx->xf[idx].a.auth.add_auth_data_length = 0;
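+ /* AEAD algorithms need the AAD length in the session xform */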
+ sa_ctx->xf[idx].a.auth.add_auth_data_length =
+ sa->aad_len;
sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
sa_ctx->xf[idx].a.auth.key.length =
sa->auth_key_len;
sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
- sa_ctx->xf[idx].b.auth.add_auth_data_length = 0;
+ sa_ctx->xf[idx].b.auth.add_auth_data_length =
+ sa->aad_len;
sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
sa_ctx->xf[idx].b.auth.key.length =
sa->auth_key_len;