#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
+#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
- union sym_op_data *icv, uint8_t sqh_len);
+ union sym_op_data *icv, uint8_t sqh_len, uint8_t tso);
/*
* helper function to fill crypto_sym op for cipher+auth algorithms.
{
struct rte_crypto_sym_op *sop;
struct aead_gcm_iv *gcm;
+ struct aead_ccm_iv *ccm;
+ struct aead_chacha20_poly1305_iv *chacha20_poly1305;
struct aesctr_cnt_blk *ctr;
uint32_t algo;
/* NULL case */
sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
break;
+ case ALGO_TYPE_AES_GMAC:
+ /* GMAC case */
+ sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
+
+ /* fill AAD IV (located inside crypto op) */
+ gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
+ sa->iv_ofs);
+ aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+ break;
case ALGO_TYPE_AES_GCM:
/* AEAD (AES_GCM) case */
sop_aead_prepare(sop, sa, icv, hlen, plen);
sa->iv_ofs);
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
+ case ALGO_TYPE_AES_CCM:
+ /* AEAD (AES_CCM) case */
+ sop_aead_prepare(sop, sa, icv, hlen, plen);
+
+ /* fill AAD IV (located inside crypto op) */
+ ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
+ sa->iv_ofs);
+ aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_CHACHA20_POLY1305:
+ /* AEAD (CHACHA20_POLY1305) case */
+ sop_aead_prepare(sop, sa, icv, hlen, plen);
+
+ /* fill AAD IV (located inside crypto op) */
+ chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
+ struct aead_chacha20_poly1305_iv *,
+ sa->iv_ofs);
+ aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
+ ivp[0], sa->salt);
+ break;
case ALGO_TYPE_AES_CTR:
/* Cipher-Auth (AES-CTR *) case */
sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
- union sym_op_data *icv, uint8_t sqh_len)
+ union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
struct rte_mbuf *ml;
/* number of bytes to encrypt */
clen = plen + sizeof(*espt);
- clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
- /* pad length + esp tail */
- pdlen = clen - plen;
- tlen = pdlen + sa->icv_len + sqh_len;
+ if (!tso) {
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+ /* pad length + esp tail */
+ pdlen = clen - plen;
+ tlen = pdlen + sa->icv_len + sqh_len;
+ } else {
+ /* We don't need to pad/align the packet or reserve room for the
+ * ICV when using TSO offload
+ */
+ pdlen = clen - plen;
+ tlen = pdlen + sqh_len;
+ }
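/*
 * Illustrative tail-length arithmetic (example numbers only): with
 * plen = 1464, sizeof(*espt) = 2, pad_align = 4, icv_len = 16 and
 * sqh_len = 4:
 *   non-TSO: clen = RTE_ALIGN_CEIL(1466, 4) = 1468, pdlen = 4,
 *            tlen = 4 + 16 + 4 = 24 bytes appended;
 *   TSO:     clen = 1466, pdlen = 2, tlen = 2 + 4 = 6 bytes appended,
 *            since no padding or ICV room is reserved here (see the
 *            comment in the else branch above).
 */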
/* do append and prepend */
ml = rte_pktmbuf_lastseg(mb);
/* copy tunnel pkt header */
rte_memcpy(ph, sa->hdr, sa->hdr_len);
+ /* if UDP encap is enabled, update the dgram_len */
+ if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
+ struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
+ (ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
+ udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
+ sa->hdr_len + sizeof(struct rte_udp_hdr));
+ }
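/*
 * Illustrative dgram_len arithmetic (assumed IPv4 NAT-T header of
 * 14 (L2) + 20 (IP) + 8 (UDP) = 42 bytes = sa->hdr_len): for
 * mb->pkt_len = 1520 and sqh_len = 4, dgram_len = 1520 - 4 - 42 + 8
 * = 1482, i.e. the UDP header plus the whole ESP payload.
 */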
+
/* update original and new ip header fields */
update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));
const union sym_op_data *icv)
{
uint32_t *psqh;
- struct aead_gcm_aad *aad;
+ struct aead_gcm_aad *gaad;
+ struct aead_ccm_aad *caad;
+ struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;
/* insert SQN.hi between ESP trailer and ICV */
if (sa->sqh_len != 0) {
* fill IV and AAD fields, if any (aad fields are placed after icv),
- * right now we support only one AEAD algorithm: AES-GCM .
+ * supported AEAD algorithms: AES-GCM, AES-CCM and CHACHA20-POLY1305.
*/
+ switch (sa->algo_type) {
+ case ALGO_TYPE_AES_GCM:
+ if (sa->aad_len != 0) {
+ gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+ aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
+ }
+ break;
+ case ALGO_TYPE_AES_CCM:
if (sa->aad_len != 0) {
- aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
- aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
+ caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
+ aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
+ }
+ break;
+ case ALGO_TYPE_CHACHA20_POLY1305:
+ if (sa->aad_len != 0) {
+ chacha20_poly1305_aad = (struct aead_chacha20_poly1305_aad *)
+ (icv->va + sa->icv_len);
+ aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
+ sa->spi, sqc, IS_ESN(sa));
+ }
+ break;
+ default:
+ break;
}
}
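/*
 * Sketch of the mbuf tail after outb_*_pkt_prepare() (sizes are
 * examples, not fixed): the appended area holds
 *   | padding | ESP trailer | SQN.hi (sqh_len) | ICV (icv_len) |
 * followed by sa->aad_len bytes of tailroom scratch, so the AAD
 * written above at icv->va + sa->icv_len is consumed by the crypto
 * device but never becomes part of the packet data.
 */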
/* try to update the packet itself */
rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
- sa->sqh_len);
+ sa->sqh_len, 0);
/* success, setup crypto op */
if (rc >= 0) {
outb_pkt_xprepare(sa, sqc, &icv);
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
- union sym_op_data *icv, uint8_t sqh_len)
+ union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
uint8_t np;
uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
/* number of bytes to encrypt */
clen = plen + sizeof(*espt);
- clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
- /* pad length + esp tail */
- pdlen = clen - plen;
- tlen = pdlen + sa->icv_len + sqh_len;
+ if (!tso) {
+ clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+ /* pad length + esp tail */
+ pdlen = clen - plen;
+ tlen = pdlen + sa->icv_len + sqh_len;
+ } else {
+ /* We don't need to pad/align the packet or reserve room for the
+ * ICV when using TSO offload
+ */
+ pdlen = clen - plen;
+ tlen = pdlen + sqh_len;
+ }
/* do append and insert */
ml = rte_pktmbuf_lastseg(mb);
/* try to update the packet itself */
rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
- sa->sqh_len);
+ sa->sqh_len, 0);
/* success, setup crypto op */
if (rc >= 0) {
outb_pkt_xprepare(sa, sqc, &icv);
{
uint64_t *ivp = iv;
struct aead_gcm_iv *gcm;
+ struct aead_ccm_iv *ccm;
+ struct aead_chacha20_poly1305_iv *chacha20_poly1305;
struct aesctr_cnt_blk *ctr;
uint32_t clen;
gcm = iv;
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
break;
+ case ALGO_TYPE_AES_CCM:
+ ccm = iv;
+ aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
+ break;
+ case ALGO_TYPE_CHACHA20_POLY1305:
+ chacha20_poly1305 = iv;
+ aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
+ ivp[0], sa->salt);
+ break;
case ALGO_TYPE_AES_CTR:
ctr = iv;
aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
gen_iv(ivbuf[k], sqc);
/* try to update the packet itself */
- rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);
+ rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);
/* success, proceed with preparations */
if (rc >= 0) {
/*
* process outbound packets for SA with ESN support,
- * for algorithms that require SQN.hibits to be implictly included
+ * for algorithms that require SQN.hibits to be implicitly included
* into digest computation.
* In that case we have to move ICV bytes back to their proper place.
*/
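/*
 * Sketch (illustrative, 16-byte ICV assumed): while the digest is
 * computed the tail looks like | ESP trailer | SQN.hi (4B) | ICV (16B) |;
 * remove_sqh() shifts the ICV down over the SQN.hi word so the final
 * packet ends with | ESP trailer | ICV (16B) |, and pkt_len is reduced
 * by sqh_len accordingly.
 */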
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
uint16_t num)
{
- uint32_t i, k, icv_len, *icv;
+ uint32_t i, k, icv_len, *icv, bytes;
struct rte_mbuf *ml;
struct rte_ipsec_sa *sa;
uint32_t dr[num];
k = 0;
icv_len = sa->icv_len;
+ bytes = 0;
for (i = 0; i != num; i++) {
- if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
+ if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
ml = rte_pktmbuf_lastseg(mb[i]);
/* remove high-order 32 bits of esn from packet len */
mb[i]->pkt_len -= sa->sqh_len;
icv = rte_pktmbuf_mtod_offset(ml, void *,
ml->data_len - icv_len);
remove_sqh(icv, icv_len);
+ bytes += mb[i]->pkt_len;
k++;
} else
dr[i - k] = i;
}
+ sa->statistics.count += k;
+ sa->statistics.bytes += bytes;
/* handle unprocessed mbufs */
if (k != num) {
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
struct rte_mbuf *mb[], uint16_t num)
{
- uint32_t i, ol_flags;
+ uint32_t i, ol_flags, bytes;
ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
+ bytes = 0;
for (i = 0; i != num; i++) {
- mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
+ bytes += mb[i]->pkt_len;
if (ol_flags != 0)
rte_security_set_pkt_metadata(ss->security.ctx,
ss->security.ses, mb[i], NULL);
}
+ ss->sa->statistics.count += num;
+ ss->sa->statistics.bytes += bytes;
+}
+
+static inline int
+esn_outb_nb_segments(struct rte_mbuf *m)
+{
+ if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+ uint16_t pkt_l3len = m->pkt_len - m->l2_len;
+ uint16_t segments =
+ (m->tso_segsz > 0 && pkt_l3len > m->tso_segsz) ?
+ (pkt_l3len + m->tso_segsz - 1) / m->tso_segsz : 1;
+ return segments;
+ }
+ return 1; /* no TSO */
+}
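/*
 * Example (illustrative numbers): pkt_len = 9014 with l2_len = 14 gives
 * pkt_l3len = 9000; with tso_segsz = 1500 this is
 * (9000 + 1499) / 1500 = 6 segments, so the packet will consume six
 * ESP sequence numbers instead of one.
 */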
+
+/* Compute how many packets can be sent before overflow occurs */
+static inline uint16_t
+esn_outb_nb_valid_packets(uint16_t num, uint32_t n_sqn, uint16_t nb_segs[])
+{
+ uint16_t i;
+ uint32_t seg_cnt = 0;
+ for (i = 0; i < num && seg_cnt < n_sqn; i++)
+ seg_cnt += nb_segs[i];
+ return i - 1;
}
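/*
 * Example (illustrative): with nb_segs[] = {1, 4, 2} and n_sqn = 4
 * sequence numbers remaining, the loop stops at i = 2 (1 + 4 exceeds 4)
 * and 1 packet is returned; the 4-segment packet would overrun the
 * available SQN range and is left unprocessed for the caller.
 */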
/*
struct rte_mbuf *mb[], uint16_t num)
{
int32_t rc;
- uint32_t i, k, n;
+ uint32_t i, k, nb_segs_total, n_sqn;
uint64_t sqn;
rte_be64_t sqc;
struct rte_ipsec_sa *sa;
union sym_op_data icv;
uint64_t iv[IPSEC_MAX_IV_QWORD];
uint32_t dr[num];
+ uint16_t nb_segs[num];
sa = ss->sa;
+ nb_segs_total = 0;
+ /* Calculate number of segments */
+ for (i = 0; i != num; i++) {
+ nb_segs[i] = esn_outb_nb_segments(mb[i]);
+ nb_segs_total += nb_segs[i];
+ }
- n = num;
- sqn = esn_outb_update_sqn(sa, &n);
- if (n != num)
+ n_sqn = nb_segs_total;
+ sqn = esn_outb_update_sqn(sa, &n_sqn);
+ if (n_sqn != nb_segs_total) {
rte_errno = EOVERFLOW;
+ /* if there are segmented packets find out how many can be
+ * sent before the sequence number space overflows
+ */
+ if (nb_segs_total > num) /* at least one multi-segment packet */
+ num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
+ else
+ num = n_sqn; /* no segmented packets */
+ }
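/*
 * With per-segment SQN allocation, a burst with nb_segs[] = {1, 6, 1}
 * starting at sequence number s uses s, s + 1 and s + 7 as the
 * per-packet sqc values in the loop below (illustrative numbers),
 * reserving a distinct ESP SQN for every segment the device will emit.
 */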
k = 0;
- for (i = 0; i != n; i++) {
+ for (i = 0; i != num; i++) {
- sqc = rte_cpu_to_be_64(sqn + i);
+ sqc = rte_cpu_to_be_64(sqn);
gen_iv(iv, sqc);
+ sqn += nb_segs[i];
/* try to update the packet itself */
- rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
+ rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
+ (mb[i]->ol_flags &
+ (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);
k += (rc >= 0);
}
/* copy not processed mbufs beyond good ones */
- if (k != n && k != 0)
- move_bad_mbufs(mb, dr, n, n - k);
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
inline_outb_mbuf_prepare(ss, mb, k);
return k;
struct rte_mbuf *mb[], uint16_t num)
{
int32_t rc;
- uint32_t i, k, n;
+ uint32_t i, k, nb_segs_total, n_sqn;
uint64_t sqn;
rte_be64_t sqc;
struct rte_ipsec_sa *sa;
union sym_op_data icv;
uint64_t iv[IPSEC_MAX_IV_QWORD];
uint32_t dr[num];
+ uint16_t nb_segs[num];
sa = ss->sa;
+ nb_segs_total = 0;
+ /* Calculate number of segments */
+ for (i = 0; i != num; i++) {
+ nb_segs[i] = esn_outb_nb_segments(mb[i]);
+ nb_segs_total += nb_segs[i];
+ }
- n = num;
- sqn = esn_outb_update_sqn(sa, &n);
- if (n != num)
+ n_sqn = nb_segs_total;
+ sqn = esn_outb_update_sqn(sa, &n_sqn);
+ if (n_sqn != nb_segs_total) {
rte_errno = EOVERFLOW;
+ /* if there are segmented packets find out how many can be
+ * sent before the sequence number space overflows
+ */
+ if (nb_segs_total > num) /* at least one multi-segment packet */
+ num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
+ else
+ num = n_sqn; /* no segmented packets */
+ }
k = 0;
- for (i = 0; i != n; i++) {
+ for (i = 0; i != num; i++) {
- sqc = rte_cpu_to_be_64(sqn + i);
+ sqc = rte_cpu_to_be_64(sqn);
gen_iv(iv, sqc);
+ sqn += nb_segs[i];
/* try to update the packet itself */
- rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
+ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
+ (mb[i]->ol_flags &
+ (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);
k += (rc >= 0);
}
/* copy not processed mbufs beyond good ones */
- if (k != n && k != 0)
- move_bad_mbufs(mb, dr, n, n - k);
+ if (k != num && k != 0)
+ move_bad_mbufs(mb, dr, num, num - k);
inline_outb_mbuf_prepare(ss, mb, k);
return k;