size_t len, uint32_t spi, uint32_t seq)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
- uint32_t hdrlen = sizeof(struct ipv4_hdr) + sizeof(struct esp_hdr);
+ uint32_t hdrlen = sizeof(struct ipv4_hdr) + sizeof(struct rte_esp_hdr);
uint32_t taillen = sizeof(struct esp_tail);
uint32_t t_len = len + hdrlen + taillen;
uint32_t padlen;
- struct esp_hdr esph = {
+ struct rte_esp_hdr esph = {
.spi = rte_cpu_to_be_32(spi),
.seq = rte_cpu_to_be_32(seq)
};
}
payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
- sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
+ sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;
if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
sym_cop->m_src = m;
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
- sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct rte_esp_hdr) +
sa->iv_len;
sym_cop->aead.data.length = payload_len;
struct cnt_blk *icb;
uint8_t *aad;
- uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+ uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct rte_esp_hdr));
icb = get_cnt_blk(m);
icb->salt = sa->salt;
icb->cnt = rte_cpu_to_be_32(1);
aad = get_aad(m);
- memcpy(aad, iv - sizeof(struct esp_hdr), 8);
+ memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
sym_cop->aead.aad.data = aad;
sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
- sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct rte_esp_hdr) +
sa->iv_len;
sym_cop->cipher.data.length = payload_len;
struct cnt_blk *icb;
- uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+ uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct rte_esp_hdr));
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
uint8_t *, IV_OFFSET);
case RTE_CRYPTO_AUTH_SHA1_HMAC:
case RTE_CRYPTO_AUTH_SHA256_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
sa->iv_len + payload_len;
break;
default:
if (unlikely(sa->flags == TRANSPORT)) {
ip = rte_pktmbuf_mtod(m, struct ip *);
ip4 = (struct ip *)rte_pktmbuf_adj(m,
- sizeof(struct esp_hdr) + sa->iv_len);
+ sizeof(struct rte_esp_hdr) + sa->iv_len);
if (likely(ip->ip_v == IPVERSION)) {
memmove(ip4, ip, ip->ip_hl * 4);
ip4->ip_p = *nexthdr;
sizeof(struct ip6_hdr));
}
} else
- ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+ ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);
return 0;
}
{
struct ip *ip4;
struct ip6_hdr *ip6;
- struct esp_hdr *esp = NULL;
+ struct rte_esp_hdr *esp = NULL;
uint8_t *padding = NULL, *new_ip, nlp;
struct rte_crypto_sym_op *sym_cop;
int32_t i;
}
/* Check maximum packet size */
- if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
+ if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
return -EINVAL;
switch (sa->flags) {
case IP4_TUNNEL:
- ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
&sa->src, &sa->dst);
- esp = (struct esp_hdr *)(ip4 + 1);
+ esp = (struct rte_esp_hdr *)(ip4 + 1);
break;
case IP6_TUNNEL:
- ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
&sa->src, &sa->dst);
- esp = (struct esp_hdr *)(ip6 + 1);
+ esp = (struct rte_esp_hdr *)(ip6 + 1);
break;
case TRANSPORT:
new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
- sizeof(struct esp_hdr) + sa->iv_len);
+ sizeof(struct rte_esp_hdr) + sa->iv_len);
memmove(new_ip, ip4, ip_hdr_len);
- esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+ esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
ip4 = (struct ip *)new_ip;
if (likely(ip4->ip_v == IPVERSION)) {
ip4->ip_p = IPPROTO_ESP;
uint8_t *aad;
sym_cop->aead.data.offset = ip_hdr_len +
- sizeof(struct esp_hdr) + sa->iv_len;
+ sizeof(struct rte_esp_hdr) + sa->iv_len;
sym_cop->aead.data.length = pad_payload_len;
/* Fill pad_len using default sequential scheme */
case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
sym_cop->cipher.data.offset = ip_hdr_len +
- sizeof(struct esp_hdr);
+ sizeof(struct rte_esp_hdr);
sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
sym_cop->cipher.data.offset = ip_hdr_len +
- sizeof(struct esp_hdr) + sa->iv_len;
+ sizeof(struct rte_esp_hdr) + sa->iv_len;
sym_cop->cipher.data.length = pad_payload_len;
break;
default:
case RTE_CRYPTO_AUTH_SHA1_HMAC:
case RTE_CRYPTO_AUTH_SHA256_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
sa->iv_len + pad_payload_len;
break;
default:
single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
struct ipsec_sa **sa_ret)
{
- struct esp_hdr *esp;
+ struct rte_esp_hdr *esp;
struct ip *ip;
uint32_t *src4_addr;
uint8_t *src6_addr;
ip = rte_pktmbuf_mtod(pkt, struct ip *);
if (ip->ip_v == IPVERSION)
- esp = (struct esp_hdr *)(ip + 1);
+ esp = (struct rte_esp_hdr *)(ip + 1);
else
- esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);
+ esp = (struct rte_esp_hdr *)(((struct ip6_hdr *)ip) + 1);
if (esp->spi == INVALID_SPI)
return;
* Matches an ESP header.
*/
struct rte_flow_item_esp {
- struct esp_hdr hdr; /**< ESP header definition. */
+ struct rte_esp_hdr hdr; /**< ESP header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ESP. */
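For context, a minimal sketch (not part of this patch, identifiers such as esp_spec/esp_pattern are hypothetical) of how an application could match ESP traffic by SPI through rte_flow once the header struct carries the rte_ prefix:

/* Hypothetical sketch: match ESP packets with SPI 0x1000 via rte_flow,
 * using the renamed rte_esp_hdr embedded in rte_flow_item_esp. */
#include <rte_byteorder.h>
#include <rte_flow.h>

static const struct rte_flow_item_esp esp_spec = {
	.hdr = { .spi = RTE_BE32(0x1000) },
};
static const struct rte_flow_item_esp esp_mask = {
	.hdr = { .spi = RTE_BE32(0xffffffff) },
};
static const struct rte_flow_item esp_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_ESP,
	  .spec = &esp_spec, .mask = &esp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};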
} __attribute__((packed));
struct gcm_esph_iv {
- struct esp_hdr esph;
+ struct rte_esp_hdr esph;
uint64_t iv;
} __attribute__((packed));
/* these params may differ with new algorithms support */
sa->ctp.auth.offset = 0;
sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
- sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
+ sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
}
/* these params may differ with new algorithms support */
sa->ctp.auth.offset = hlen;
- sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
+ sa->ctp.auth.length = sizeof(struct rte_esp_hdr) + sa->iv_len + sa->sqh_len;
if (sa->aad_len != 0) {
- sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
+ sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
sa->iv_len;
sa->ctp.cipher.length = 0;
} else {
- sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
+ sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct rte_esp_hdr);
sa->ctp.cipher.length = sa->iv_len;
}
}
{
uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
struct rte_mbuf *ml;
- struct esp_hdr *esph;
+ struct rte_esp_hdr *esph;
struct esp_tail *espt;
char *ph, *pt;
uint64_t *iv;
sqn_low16(sqc));
/* update spi, seqn and iv */
- esph = (struct esp_hdr *)(ph + sa->hdr_len);
+ esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
iv = (uint64_t *)(esph + 1);
copy_iv(iv, ivp, sa->iv_len);
uint8_t np;
uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
struct rte_mbuf *ml;
- struct esp_hdr *esph;
+ struct rte_esp_hdr *esph;
struct esp_tail *espt;
char *ph, *pt;
uint64_t *iv;
IPPROTO_ESP);
/* update spi, seqn and iv */
- esph = (struct esp_hdr *)(ph + uhlen);
+ esph = (struct rte_esp_hdr *)(ph + uhlen);
iv = (uint64_t *)(esph + 1);
copy_iv(iv, ivp, sa->iv_len);
gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
sa->iv_ofs);
ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
- pofs + sizeof(struct esp_hdr));
+ pofs + sizeof(struct rte_esp_hdr));
aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
/* CRYPT+AUTH case */
} else {
/* copy iv from the input packet to the cop */
ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);
ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
- pofs + sizeof(struct esp_hdr));
+ pofs + sizeof(struct rte_esp_hdr));
copy_iv(ivc, ivp, sa->iv_len);
}
return 0;
uint64_t sqn;
uint32_t icv_ofs, plen;
struct rte_mbuf *ml;
- struct esp_hdr *esph;
+ struct rte_esp_hdr *esph;
- esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
+ esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
/*
* retrieve and reconstruct SQN, then check it, then
uint32_t *sqn)
{
uint32_t hlen, icv_len, tlen;
- struct esp_hdr *esph;
+ struct rte_esp_hdr *esph;
struct esp_tail *espt;
struct rte_mbuf *ml;
char *pd;
/* cut off L2/L3 headers, ESP header and IV */
hlen = mb->l2_len + mb->l3_len;
- esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);
+ esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);
/* retrieve SQN for later check */
uint32_t *sqn)
{
uint32_t hlen, icv_len, l2len, l3len, tlen;
- struct esp_hdr *esph;
+ struct rte_esp_hdr *esph;
struct esp_tail *espt;
struct rte_mbuf *ml;
char *np, *op, *pd;
l3len = mb->l3_len;
hlen = l2len + l3len;
op = rte_pktmbuf_mtod(mb, char *);
- esph = (struct esp_hdr *)(op + hlen);
+ esph = (struct rte_esp_hdr *)(op + hlen);
*sqn = rte_be_to_cpu_32(esph->seq);
/* cut off ESP header and IV, update L3 header */
/**
* ESP Header
*/
-struct esp_hdr {
+struct rte_esp_hdr {
rte_be32_t spi; /**< Security Parameters Index */
rte_be32_t seq; /**< packet sequence number */
} __attribute__((__packed__));
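As a quick usage sketch (not part of the patch; the helper name and the assumption of an option-less IPv4 header are hypothetical), code that previously cast to struct esp_hdr now simply uses the prefixed name to read the SPI and sequence number from a packet:

/* Hypothetical helper: read SPI and sequence number of an ESP packet
 * carried in an mbuf, assuming an IPv4 header without options that
 * starts at offset l3_off. */
#include <rte_byteorder.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

static inline void
read_esp_fields(struct rte_mbuf *m, uint32_t l3_off,
		uint32_t *spi, uint32_t *seq)
{
	const struct rte_esp_hdr *esp;

	/* ESP header sits right behind the (option-less) IPv4 header */
	esp = rte_pktmbuf_mtod_offset(m, const struct rte_esp_hdr *,
			l3_off + sizeof(struct ipv4_hdr));
	*spi = rte_be_to_cpu_32(esp->spi);
	*seq = rte_be_to_cpu_32(esp->seq);
}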