X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_ipsec%2Fesp_inb.c;h=96eec0131fe3cf6cc21d00325eacbd96af8c736b;hb=ceb599e5409c0909181d9c89fcb0597d099a1715;hp=3e12ca1030f0c8297342cb7dc90b58e6992b78f0;hpb=5ef2546767525c8a4a3c2bc60357b23c3dffcf6b;p=dpdk.git diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c index 3e12ca1030..96eec0131f 100644 --- a/lib/librte_ipsec/esp_inb.c +++ b/lib/librte_ipsec/esp_inb.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018-2020 Intel Corporation */ #include @@ -16,7 +16,8 @@ #include "pad.h" typedef uint16_t (*esp_inb_process_t)(const struct rte_ipsec_sa *sa, - struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num); + struct rte_mbuf *mb[], uint32_t sqn[], uint32_t dr[], uint16_t num, + uint8_t sqh_len); /* * helper function to fill crypto_sym op for cipher+auth algorithms. @@ -104,6 +105,67 @@ inb_cop_prepare(struct rte_crypto_op *cop, } } +static inline uint32_t +inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb, + uint32_t *pofs, uint32_t plen, void *iv) +{ + struct aead_gcm_iv *gcm; + struct aesctr_cnt_blk *ctr; + uint64_t *ivp; + uint32_t clen; + + ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *, + *pofs + sizeof(struct rte_esp_hdr)); + clen = 0; + + switch (sa->algo_type) { + case ALGO_TYPE_AES_GCM: + gcm = (struct aead_gcm_iv *)iv; + aead_gcm_iv_fill(gcm, ivp[0], sa->salt); + break; + case ALGO_TYPE_AES_CBC: + case ALGO_TYPE_3DES_CBC: + copy_iv(iv, ivp, sa->iv_len); + break; + case ALGO_TYPE_AES_CTR: + ctr = (struct aesctr_cnt_blk *)iv; + aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt); + break; + } + + *pofs += sa->ctp.auth.offset; + clen = plen - sa->ctp.auth.length; + return clen; +} + +/* + * Helper function for prepare() to deal with situation when + * ICV is spread by two segments. Tries to move ICV completely into the + * last segment. + */ +static struct rte_mbuf * +move_icv(struct rte_mbuf *ml, uint32_t ofs) +{ + uint32_t n; + struct rte_mbuf *ms; + const void *prev; + void *new; + + ms = ml->next; + n = ml->data_len - ofs; + + prev = rte_pktmbuf_mtod_offset(ml, const void *, ofs); + new = rte_pktmbuf_prepend(ms, n); + if (new == NULL) + return NULL; + + /* move n ICV bytes from ml into ms */ + rte_memcpy(new, prev, n); + ml->data_len -= n; + + return ms; +} + /* * for pure cryptodev (lookaside none) depending on SA settings, * we might have to write some extra data to the packet. @@ -128,17 +190,12 @@ inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc, } } -/* - * setup/update packet data and metadata for ESP inbound tunnel case. 
- */ -static inline int32_t -inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn, - struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv) +static inline int +inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn, + struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc) { int32_t rc; uint64_t sqn; - uint32_t clen, icv_ofs, plen; - struct rte_mbuf *ml; struct rte_esp_hdr *esph; esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen); @@ -150,26 +207,55 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn, sqn = rte_be_to_cpu_32(esph->seq); if (IS_ESN(sa)) sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz); + *sqc = rte_cpu_to_be_64(sqn); + /* check IPsec window */ rc = esn_inb_check_sqn(rsn, sa, sqn); - if (rc != 0) - return rc; - sqn = rte_cpu_to_be_64(sqn); + return rc; +} + +/* prepare packet for upcoming processing */ +static inline int32_t +inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb, + uint32_t hlen, union sym_op_data *icv) +{ + uint32_t clen, icv_len, icv_ofs, plen; + struct rte_mbuf *ml; /* start packet manipulation */ plen = mb->pkt_len; plen = plen - hlen; - ml = rte_pktmbuf_lastseg(mb); - icv_ofs = ml->data_len - sa->icv_len + sa->sqh_len; - /* check that packet has a valid length */ clen = plen - sa->ctp.cipher.length; if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0) return -EBADMSG; - /* we have to allocate space for AAD somewhere, + /* find ICV location */ + icv_len = sa->icv_len; + icv_ofs = mb->pkt_len - icv_len; + + ml = mbuf_get_seg_ofs(mb, &icv_ofs); + + /* + * if ICV is spread by two segments, then try to + * move ICV completely into the last segment. + */ + if (ml->data_len < icv_ofs + icv_len) { + + ml = move_icv(ml, icv_ofs); + if (ml == NULL) + return -ENOSPC; + + /* new ICV location */ + icv_ofs = 0; + } + + icv_ofs += sa->sqh_len; + + /* + * we have to allocate space for AAD somewhere, * right now - just use free trailing space at the last segment. * Would probably be more convenient to reserve space for AAD * inside rte_crypto_op itself @@ -181,10 +267,37 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn, icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs); icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs); - inb_pkt_xprepare(sa, sqn, icv); + /* + * if esn is used then high-order 32 bits are also used in ICV + * calculation but are not transmitted, update packet length + * to be consistent with auth data length and offset, this will + * be subtracted from packet length in post crypto processing + */ + mb->pkt_len += sa->sqh_len; + ml->data_len += sa->sqh_len; + return plen; } +static inline int32_t +inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn, + struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv) +{ + int rc; + rte_be64_t sqn; + + rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn); + if (rc != 0) + return rc; + + rc = inb_prepare(sa, mb, hlen, icv); + if (rc < 0) + return rc; + + inb_pkt_xprepare(sa, sqn, icv); + return rc; +} + /* * setup/update packets and crypto ops for ESP inbound case. 
*/ @@ -213,17 +326,17 @@ esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], lksd_none_cop_prepare(cop[k], cs, mb[i]); inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc); k++; - } else + } else { dr[i - k] = i; + rte_errno = -rc; + } } rsn_release(sa, rsn); /* copy not prepared mbufs beyond good ones */ - if (k != num && k != 0) { + if (k != num && k != 0) move_bad_mbufs(mb, dr, num, num - k); - rte_errno = EBADMSG; - } return k; } @@ -239,36 +352,65 @@ esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], */ static inline void process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml, - struct esp_tail *espt, uint32_t *hlen) + struct rte_esp_tail *espt, uint32_t *hlen, uint32_t *tofs) { - const struct esp_tail *pt; + const struct rte_esp_tail *pt; + uint32_t ofs; - ml[0] = rte_pktmbuf_lastseg(mb); + ofs = mb->pkt_len - tlen; hlen[0] = mb->l2_len + mb->l3_len; - pt = rte_pktmbuf_mtod_offset(ml[0], const struct esp_tail *, - ml[0]->data_len - tlen); + ml[0] = mbuf_get_seg_ofs(mb, &ofs); + pt = rte_pktmbuf_mtod_offset(ml[0], const struct rte_esp_tail *, ofs); + tofs[0] = ofs; espt[0] = pt[0]; } +/* + * Helper function to check pad bytes values. + * Note that pad bytes can be spread across multiple segments. + */ +static inline int +check_pad_bytes(struct rte_mbuf *mb, uint32_t ofs, uint32_t len) +{ + const uint8_t *pd; + uint32_t k, n; + + for (n = 0; n != len; n += k, mb = mb->next) { + k = mb->data_len - ofs; + k = RTE_MIN(k, len - n); + pd = rte_pktmbuf_mtod_offset(mb, const uint8_t *, ofs); + if (memcmp(pd, esp_pad_bytes + n, k) != 0) + break; + ofs = 0; + } + + return len - n; +} + /* * packet checks for transport mode: * - no reported IPsec related failures in ol_flags - * - tail length is valid + * - tail and header lengths are valid * - padding bytes are valid + * apart from checks, function also updates tail offset (and segment) + * by taking into account pad length. 
*/ static inline int32_t -trs_process_check(const struct rte_mbuf *mb, const struct rte_mbuf *ml, - struct esp_tail espt, uint32_t hlen, uint32_t tlen) +trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml, + uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen) { - const uint8_t *pd; - int32_t ofs; + if ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 || + tlen + hlen > mb->pkt_len) + return -EBADMSG; - ofs = ml->data_len - tlen; - pd = rte_pktmbuf_mtod_offset(ml, const uint8_t *, ofs); + /* padding bytes are spread over multiple segments */ + if (tofs[0] < espt.pad_len) { + tofs[0] = mb->pkt_len - tlen; + ml[0] = mbuf_get_seg_ofs(mb, tofs); + } else + tofs[0] -= espt.pad_len; - return ((mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) != 0 || - ofs < 0 || tlen + hlen > mb->pkt_len || - (espt.pad_len != 0 && memcmp(pd, esp_pad_bytes, espt.pad_len))); + return check_pad_bytes(ml[0], tofs[0], espt.pad_len); } /* @@ -277,10 +419,11 @@ trs_process_check(const struct rte_mbuf *mb, const struct rte_mbuf *ml, * - esp tail next proto contains expected for that SA value */ static inline int32_t -tun_process_check(const struct rte_mbuf *mb, struct rte_mbuf *ml, - struct esp_tail espt, uint32_t hlen, const uint32_t tlen, uint8_t proto) +tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml, + uint32_t *tofs, struct rte_esp_tail espt, uint32_t hlen, uint32_t tlen, + uint8_t proto) { - return (trs_process_check(mb, ml, espt, hlen, tlen) || + return (trs_process_check(mb, ml, tofs, espt, hlen, tlen) || espt.next_proto != proto); } @@ -293,7 +436,7 @@ tun_process_check(const struct rte_mbuf *mb, struct rte_mbuf *ml, */ static inline void * tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen, - uint32_t adj, uint32_t tlen, uint32_t *sqn) + uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn) { const struct rte_esp_hdr *ph; @@ -302,8 +445,7 @@ tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen, sqn[0] = ph->seq; /* cut of ICV, ESP tail and padding bytes */ - ml->data_len -= tlen; - mb->pkt_len -= tlen; + mbuf_cut_seg_ofs(mb, ml, tofs, tlen); /* cut of L2/L3 headers, ESP header and IV */ return rte_pktmbuf_adj(mb, adj); @@ -318,7 +460,7 @@ tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen, */ static inline void * trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen, - uint32_t adj, uint32_t tlen, uint32_t *sqn) + uint32_t adj, uint32_t tofs, uint32_t tlen, uint32_t *sqn) { char *np, *op; @@ -326,7 +468,7 @@ trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen, op = rte_pktmbuf_mtod(mb, char *); /* cut off ESP header and IV */ - np = tun_process_step2(mb, ml, hlen, adj, tlen, sqn); + np = tun_process_step2(mb, ml, hlen, adj, tofs, tlen, sqn); /* move header bytes to fill the gap after ESP header removal */ remove_esph(np, op, hlen); @@ -367,20 +509,25 @@ tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val) mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD; } - /* * *process* function for tunnel packets */ static inline uint16_t tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], - uint32_t sqn[], uint32_t dr[], uint16_t num) + uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len) { uint32_t adj, i, k, tl; - uint32_t hl[num]; - struct esp_tail espt[num]; + uint32_t hl[num], to[num]; + struct rte_esp_tail espt[num]; struct rte_mbuf *ml[num]; + const void *outh; + void *inh; - const uint32_t tlen = sa->icv_len + sizeof(espt[0]); + /* + * 
remove icv, esp trailer and high-order + * 32 bits of esn from packet length + */ + const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len; const uint32_t cofs = sa->ctp.cipher.offset; /* @@ -388,7 +535,7 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], * read mbufs metadata and esp tail first. */ for (i = 0; i != num; i++) - process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]); + process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]); k = 0; for (i = 0; i != num; i++) { @@ -397,12 +544,19 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], tl = tlen + espt[i].pad_len; /* check that packet is valid */ - if (tun_process_check(mb[i], ml[i], espt[i], adj, tl, + if (tun_process_check(mb[i], &ml[i], &to[i], espt[i], adj, tl, sa->proto) == 0) { + outh = rte_pktmbuf_mtod_offset(mb[i], uint8_t *, + mb[i]->l2_len); + /* modify packet's layout */ - tun_process_step2(mb[i], ml[i], hl[i], adj, - tl, sqn + k); + inh = tun_process_step2(mb[i], ml[i], hl[i], adj, + to[i], tl, sqn + k); + + /* update inner ip header */ + update_tun_inb_l3hdr(sa, outh, inh); + /* update mbuf's metadata */ tun_process_step3(mb[i], sa->tx_offload.msk, sa->tx_offload.val); @@ -414,21 +568,24 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], return k; } - /* * *process* function for tunnel packets */ static inline uint16_t trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], - uint32_t sqn[], uint32_t dr[], uint16_t num) + uint32_t sqn[], uint32_t dr[], uint16_t num, uint8_t sqh_len) { char *np; uint32_t i, k, l2, tl; - uint32_t hl[num]; - struct esp_tail espt[num]; + uint32_t hl[num], to[num]; + struct rte_esp_tail espt[num]; struct rte_mbuf *ml[num]; - const uint32_t tlen = sa->icv_len + sizeof(espt[0]); + /* + * remove icv, esp trailer and high-order + * 32 bits of esn from packet length + */ + const uint32_t tlen = sa->icv_len + sizeof(espt[0]) + sqh_len; const uint32_t cofs = sa->ctp.cipher.offset; /* @@ -436,7 +593,7 @@ trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], * read mbufs metadata and esp tail first. */ for (i = 0; i != num; i++) - process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i]); + process_step1(mb[i], tlen, &ml[i], &espt[i], &hl[i], &to[i]); k = 0; for (i = 0; i != num; i++) { @@ -445,12 +602,12 @@ trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], l2 = mb[i]->l2_len; /* check that packet is valid */ - if (trs_process_check(mb[i], ml[i], espt[i], hl[i] + cofs, - tl) == 0) { + if (trs_process_check(mb[i], &ml[i], &to[i], espt[i], + hl[i] + cofs, tl) == 0) { /* modify packet's layout */ - np = trs_process_step2(mb[i], ml[i], hl[i], cofs, tl, - sqn + k); + np = trs_process_step2(mb[i], ml[i], hl[i], cofs, + to[i], tl, sqn + k); update_trs_l3hdr(sa, np + l2, mb[i]->pkt_len, l2, hl[i] - l2, espt[i].next_proto); @@ -496,24 +653,21 @@ esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[], * process group of ESP inbound packets. 
*/ static inline uint16_t -esp_inb_pkt_process(const struct rte_ipsec_session *ss, - struct rte_mbuf *mb[], uint16_t num, esp_inb_process_t process) +esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], + uint16_t num, uint8_t sqh_len, esp_inb_process_t process) { uint32_t k, n; - struct rte_ipsec_sa *sa; uint32_t sqn[num]; uint32_t dr[num]; - sa = ss->sa; - /* process packets, extract seq numbers */ - k = process(sa, mb, sqn, dr, num); + k = process(sa, mb, sqn, dr, num, sqh_len); /* handle unprocessed mbufs */ if (k != num && k != 0) move_bad_mbufs(mb, dr, num, num - k); - /* update SQN and replay winow */ + /* update SQN and replay window */ n = esp_inb_rsn_update(sa, sqn, dr, k); /* handle mbufs with wrong SQN */ @@ -526,6 +680,69 @@ esp_inb_pkt_process(const struct rte_ipsec_session *ss, return n; } +/* + * Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO + * (synchronous mode). + */ +uint16_t +cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss, + struct rte_mbuf *mb[], uint16_t num) +{ + int32_t rc; + uint32_t i, k; + struct rte_ipsec_sa *sa; + struct replay_sqn *rsn; + union sym_op_data icv; + void *iv[num]; + void *aad[num]; + void *dgst[num]; + uint32_t dr[num]; + uint32_t l4ofs[num]; + uint32_t clen[num]; + uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD]; + + sa = ss->sa; + + /* grab rsn lock */ + rsn = rsn_acquire(sa); + + /* do preparation for all packets */ + for (i = 0, k = 0; i != num; i++) { + + /* calculate ESP header offset */ + l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len; + + /* prepare ESP packet for processing */ + rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv); + if (rc >= 0) { + /* get encrypted data offset and length */ + clen[k] = inb_cpu_crypto_prepare(sa, mb[i], + l4ofs + k, rc, ivbuf[k]); + + /* fill iv, digest and aad */ + iv[k] = ivbuf[k]; + aad[k] = icv.va + sa->icv_len; + dgst[k++] = icv.va; + } else { + dr[i - k] = i; + rte_errno = -rc; + } + } + + /* release rsn lock */ + rsn_release(sa, rsn); + + /* copy not prepared mbufs beyond good ones */ + if (k != num && k != 0) + move_bad_mbufs(mb, dr, num, num - k); + + /* convert mbufs to iovecs and do actual crypto/auth processing */ + if (k != 0) + cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst, + l4ofs, clen, k); + return k; +} + /* * process group of ESP inbound tunnel packets. */ @@ -533,7 +750,16 @@ uint16_t esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], uint16_t num) { - return esp_inb_pkt_process(ss, mb, num, tun_process); + struct rte_ipsec_sa *sa = ss->sa; + + return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, tun_process); +} + +uint16_t +inline_inb_tun_pkt_process(const struct rte_ipsec_session *ss, + struct rte_mbuf *mb[], uint16_t num) +{ + return esp_inb_pkt_process(ss->sa, mb, num, 0, tun_process); } /* @@ -543,5 +769,14 @@ uint16_t esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], uint16_t num) { - return esp_inb_pkt_process(ss, mb, num, trs_process); + struct rte_ipsec_sa *sa = ss->sa; + + return esp_inb_pkt_process(sa, mb, num, sa->sqh_len, trs_process); +} + +uint16_t +inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss, + struct rte_mbuf *mb[], uint16_t num) +{ + return esp_inb_pkt_process(ss->sa, mb, num, 0, trs_process); }
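
The hunks above add a synchronous (CPU-crypto) inbound prepare routine, cpu_inb_pkt_prepare(), plus inline_inb_tun_pkt_process()/inline_inb_trs_pkt_process() wrappers that run esp_inb_pkt_process() with sqh_len = 0, since for inline crypto the library never appends the high-order ESN word to the packet. The sketch below is not part of the patch: it shows how an application burst might drive the new synchronous path, assuming the rte_ipsec public wrappers of this release (rte_ipsec_pkt_cpu_prepare() and rte_ipsec_pkt_process()) dispatch to the functions added in this file; the burst handling itself is illustrative.

/*
 * Illustrative sketch only: inbound processing for a session created
 * with RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO. mb[] holds received ESP
 * packets with l2_len/l3_len already set.
 */
#include <rte_ipsec.h>
#include <rte_mbuf.h>

static uint16_t
inbound_cpu_crypto_burst(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint16_t k, n;

	/* parse ESP, check the replay window, run crypto+auth synchronously */
	k = rte_ipsec_pkt_cpu_prepare(ss, mb, num);

	/* strip ESP header/IV/tail/padding, update SQN and mbuf metadata */
	n = rte_ipsec_pkt_process(ss, mb, k);

	/*
	 * mb[0..n-1] are decapsulated packets; failed ones were moved
	 * beyond the good ones and should be freed by the caller.
	 */
	return n;
}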