int32_t payload_len, ip_hdr_len;
RTE_ASSERT(sa != NULL);
- if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ if (ipsec_get_action_type(sa) ==
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
return 0;
RTE_ASSERT(m != NULL);
}
payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
- sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
+ sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;
if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
payload_len, sa->block_size);
sym_cop->m_src = m;
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
- sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
- sa->iv_len;
+ sym_cop->aead.data.offset =
+ ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len;
sym_cop->aead.data.length = payload_len;
struct cnt_blk *icb;
uint8_t *aad;
- uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+ uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
+ sizeof(struct rte_esp_hdr));
icb = get_cnt_blk(m);
icb->salt = sa->salt;
icb->cnt = rte_cpu_to_be_32(1);
aad = get_aad(m);
- memcpy(aad, iv - sizeof(struct esp_hdr), 8);
+ memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
sym_cop->aead.aad.data = aad;
sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
- sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sym_cop->cipher.data.offset = ip_hdr_len +
+ sizeof(struct rte_esp_hdr) +
sa->iv_len;
sym_cop->cipher.data.length = payload_len;
struct cnt_blk *icb;
- uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+ uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
+ sizeof(struct rte_esp_hdr));
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
uint8_t *, IV_OFFSET);
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
/* Copy IV at the end of crypto operation */
rte_memcpy(iv_ptr, iv, sa->iv_len);
case RTE_CRYPTO_AUTH_SHA1_HMAC:
case RTE_CRYPTO_AUTH_SHA256_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
sa->iv_len + payload_len;
break;
default:
uint8_t *nexthdr, *pad_len;
uint8_t *padding;
uint16_t i;
+ struct rte_ipsec_session *ips;
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
RTE_ASSERT(cop != NULL);
- if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
- (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
+ ips = ipsec_get_primary_session(sa);
+
+ if ((ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
+ (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
return -1;
}
- if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
- sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
+ if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+ ips->security.ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
nexthdr = &m->inner_esp_next_proto;
} else {
nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
}
}
- if (unlikely(sa->flags == TRANSPORT)) {
+ if (unlikely(IS_TRANSPORT(sa->flags))) {
ip = rte_pktmbuf_mtod(m, struct ip *);
ip4 = (struct ip *)rte_pktmbuf_adj(m,
- sizeof(struct esp_hdr) + sa->iv_len);
+ sizeof(struct rte_esp_hdr) + sa->iv_len);
if (likely(ip->ip_v == IPVERSION)) {
memmove(ip4, ip, ip->ip_hl * 4);
ip4->ip_p = *nexthdr;
sizeof(struct ip6_hdr));
}
} else
- ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+ ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);
return 0;
}
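
The transport/tunnel tests above now go through the flag helpers from the example's ipsec.h instead of comparing sa->flags directly. A minimal sketch of how those macros are assumed to be defined follows; the bit positions are illustrative, only the predicates matter for the hunks in this patch:

/* Sketch of the SA mode flags assumed by the IS_*() and
 * WITHOUT_TRANSPORT_VERSION() helpers used in this patch;
 * exact bit values are an assumption for illustration. */
#define IP4_TUNNEL		(1 << 0)
#define IP6_TUNNEL		(1 << 1)
#define TRANSPORT		(1 << 2)
#define IP4_TRANSPORT		(1 << 3)
#define IP6_TRANSPORT		(1 << 4)

#define IS_TRANSPORT(flags)	((flags) & TRANSPORT)
#define IS_TUNNEL(flags)	((flags) & (IP4_TUNNEL | IP6_TUNNEL))
#define IS_IP4_TUNNEL(flags)	((flags) & IP4_TUNNEL)
#define IS_IP6_TUNNEL(flags)	((flags) & IP6_TUNNEL)
/* Strip the transport-version bits so the outbound switch below can
 * match exactly one of IP4_TUNNEL, IP6_TUNNEL or TRANSPORT. */
#define WITHOUT_TRANSPORT_VERSION(flags) \
	((flags) & (IP4_TUNNEL | IP6_TUNNEL | TRANSPORT))
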
{
struct ip *ip4;
struct ip6_hdr *ip6;
- struct esp_hdr *esp = NULL;
+ struct rte_esp_hdr *esp = NULL;
uint8_t *padding = NULL, *new_ip, nlp;
struct rte_crypto_sym_op *sym_cop;
int32_t i;
uint16_t pad_payload_len, pad_len, ip_hdr_len;
+ struct rte_ipsec_session *ips;
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
+ ips = ipsec_get_primary_session(sa);
ip_hdr_len = 0;
ip4 = rte_pktmbuf_mtod(m, struct ip *);
if (likely(ip4->ip_v == IPVERSION)) {
- if (unlikely(sa->flags == TRANSPORT)) {
+ if (unlikely(IS_TRANSPORT(sa->flags))) {
ip_hdr_len = ip4->ip_hl * 4;
nlp = ip4->ip_p;
} else
nlp = IPPROTO_IPIP;
} else if (ip4->ip_v == IP6_VERSION) {
- if (unlikely(sa->flags == TRANSPORT)) {
+ if (unlikely(IS_TRANSPORT(sa->flags))) {
/* XXX No option headers supported */
ip_hdr_len = sizeof(struct ip6_hdr);
ip6 = (struct ip6_hdr *)ip4;
ip_hdr_len + 2, sa->block_size);
pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
- RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
- sa->flags == TRANSPORT);
+ RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags));
- if (likely(sa->flags == IP4_TUNNEL))
+ if (likely(IS_IP4_TUNNEL(sa->flags)))
ip_hdr_len = sizeof(struct ip);
- else if (sa->flags == IP6_TUNNEL)
+ else if (IS_IP6_TUNNEL(sa->flags))
ip_hdr_len = sizeof(struct ip6_hdr);
- else if (sa->flags != TRANSPORT) {
+ else if (!IS_TRANSPORT(sa->flags)) {
RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
sa->flags);
return -EINVAL;
}
/* Check maximum packet size */
- if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
+ if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
return -EINVAL;
}
/* Add trailer padding if it is not constructed by HW */
- if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
- (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
- !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
+ if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+ !(ips->security.ol_flags &
+ RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
sa->digest_len);
if (unlikely(padding == NULL)) {
rte_prefetch0(padding);
}
- switch (sa->flags) {
+ switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
- ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
&sa->src, &sa->dst);
- esp = (struct esp_hdr *)(ip4 + 1);
+ esp = (struct rte_esp_hdr *)(ip4 + 1);
break;
case IP6_TUNNEL:
- ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
&sa->src, &sa->dst);
- esp = (struct esp_hdr *)(ip6 + 1);
+ esp = (struct rte_esp_hdr *)(ip6 + 1);
break;
case TRANSPORT:
new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
- sizeof(struct esp_hdr) + sa->iv_len);
+ sizeof(struct rte_esp_hdr) + sa->iv_len);
memmove(new_ip, ip4, ip_hdr_len);
- esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+ esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
ip4 = (struct ip *)new_ip;
if (likely(ip4->ip_v == IPVERSION)) {
ip4->ip_p = IPPROTO_ESP;
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
memset(iv, 0, sa->iv_len);
break;
}
}
- if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
- if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
+ if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ if (ips->security.ol_flags &
+ RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
/* Set the inner esp next protocol for HW trailer */
m->inner_esp_next_proto = nlp;
m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
uint8_t *aad;
sym_cop->aead.data.offset = ip_hdr_len +
- sizeof(struct esp_hdr) + sa->iv_len;
+ sizeof(struct rte_esp_hdr) + sa->iv_len;
sym_cop->aead.data.length = pad_payload_len;
/* Fill pad_len using default sequential scheme */
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
sym_cop->cipher.data.offset = ip_hdr_len +
- sizeof(struct esp_hdr);
+ sizeof(struct rte_esp_hdr);
sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
sym_cop->cipher.data.offset = ip_hdr_len +
- sizeof(struct esp_hdr) + sa->iv_len;
+ sizeof(struct rte_esp_hdr) + sa->iv_len;
sym_cop->cipher.data.length = pad_payload_len;
break;
default:
case RTE_CRYPTO_AUTH_SHA1_HMAC:
case RTE_CRYPTO_AUTH_SHA256_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
sa->iv_len + pad_payload_len;
break;
default:
struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
+ enum rte_security_session_action_type type;
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
- if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
- (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
+ type = ipsec_get_action_type(sa);
+
+ if ((type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
+ (type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
m->ol_flags |= PKT_TX_SEC_OFFLOAD;
} else {
RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
+ __func__);
return -1;
}
}
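
For reference, the session lookup helpers used throughout this patch, ipsec_get_primary_session() and ipsec_get_action_type(), come from the example's ipsec.h. Below is a minimal, self-contained sketch of their assumed shape; the trimmed struct ipsec_sa layout and the IPSEC_SESSION_PRIMARY index are assumptions made here for illustration:

#include <rte_ipsec.h>
#include <rte_security.h>

/* Trimmed-down view of the SA: only the per-SA session array matters
 * for these helpers; the real struct ipsec_sa carries far more state. */
#define IPSEC_SESSION_PRIMARY 0	/* assumed index of the primary session */

struct ipsec_sa {
	struct rte_ipsec_session sessions[2];
	/* remaining SA fields elided */
};

static inline struct rte_ipsec_session *
ipsec_get_primary_session(struct ipsec_sa *sa)
{
	return &sa->sessions[IPSEC_SESSION_PRIMARY];
}

static inline enum rte_security_session_action_type
ipsec_get_action_type(struct ipsec_sa *sa)
{
	return ipsec_get_primary_session(sa)->type;
}

The patch then reads the action type and offload flags through the primary session (ips->type, ips->security.ol_flags) rather than fields duplicated on the SA itself.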