#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
+#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
/* copy tunnel pkt header */
rte_memcpy(ph, sa->hdr, sa->hdr_len);
+ /* if UDP encap is enabled update the dgram_len */
+ if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
+ struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
+ (ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
+ udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
+ (sa->hdr_len - sizeof(struct rte_udp_hdr)));
+ }
+
/* update original and new ip header fields */
update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));
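To make the NAT-T length update above concrete, here is a worked example with assumed sizes (the numbers are illustrative, not taken from the patch):

/*
 * Assumed template: Ethernet (14) + outer IPv4 (20) + UDP (8), so
 *   sa->hdr_l3_off = 14 and sa->hdr_len = 42;
 * the UDP header therefore sits at ph + (42 - 8) = ph + 34.
 * With a finished packet of mb->pkt_len = 142 bytes and sqh_len = 4
 * (the high ESN word appended only for ICV computation), the datagram
 * length becomes
 *   dgram_len = 142 - 4 - (42 - 8) = 104
 * i.e. the UDP header plus the ESP header, IV, payload, padding,
 * ESP trailer and ICV that follow it on the wire.
 */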
* - for TUNNEL outer IP version (IPv4/IPv6)
* - are SA SQN operations 'atomic'
* - ESN enabled/disabled
+ * - NAT-T UDP encapsulated (TUNNEL mode only)
* ...
*/
RTE_SATP_LOG2_SQN = RTE_SATP_LOG2_MODE + 2,
RTE_SATP_LOG2_ESN,
RTE_SATP_LOG2_ECN,
- RTE_SATP_LOG2_DSCP
+ RTE_SATP_LOG2_DSCP,
+ RTE_SATP_LOG2_NATT
};
#define RTE_IPSEC_SATP_IPV_MASK (1ULL << RTE_SATP_LOG2_IPV)
#define RTE_IPSEC_SATP_DSCP_DISABLE (0ULL << RTE_SATP_LOG2_DSCP)
#define RTE_IPSEC_SATP_DSCP_ENABLE (1ULL << RTE_SATP_LOG2_DSCP)
+#define RTE_IPSEC_SATP_NATT_MASK (1ULL << RTE_SATP_LOG2_NATT)
+#define RTE_IPSEC_SATP_NATT_DISABLE (0ULL << RTE_SATP_LOG2_NATT)
+#define RTE_IPSEC_SATP_NATT_ENABLE (1ULL << RTE_SATP_LOG2_NATT)
+
/**
* get type of given SA
* @return
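As a usage sketch (not part of the patch), the new NAT-T flag can be tested on the value returned by rte_ipsec_sa_type() just like the other SA type bits; the helper name below is hypothetical:

#include <rte_ipsec.h>

/* return non-zero if the SA uses RFC 3948 UDP encapsulation */
static inline int
sa_is_natt(const struct rte_ipsec_sa *sa)
{
	uint64_t type = rte_ipsec_sa_type(sa);

	return (type & RTE_IPSEC_SATP_NATT_MASK) ==
			RTE_IPSEC_SATP_NATT_ENABLE;
}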
#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
+#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
} else
return -EINVAL;
+ /* check for UDP encapsulation flag */
+ if (prm->ipsec_xform.options.udp_encap == 1)
+ tp |= RTE_IPSEC_SATP_NATT_ENABLE;
+
/* check for ESN flag */
if (prm->ipsec_xform.options.esn == 0)
tp |= RTE_IPSEC_SATP_ESN_DISABLE;
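For context, a minimal sketch of how a caller would request UDP encapsulation through the parameters checked above; only the NAT-T related fields are shown, the crypto xforms, tunnel header and SQN settings are omitted, and 4500 is simply the IANA-registered NAT-T port:

struct rte_ipsec_sa_prm prm = { 0 };

prm.ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
prm.ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
prm.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
prm.ipsec_xform.options.udp_encap = 1;	/* request NAT-T (RFC 3948) */
prm.ipsec_xform.udp.sport = 4500;	/* copied into the SA header template */
prm.ipsec_xform.udp.dport = 4500;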
sa->hdr_len = prm->tun.hdr_len;
sa->hdr_l3_off = prm->tun.hdr_l3_off;
+ memcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len);
+
+ /* insert UDP header if UDP encapsulation is enabled */
+ if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
+ struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
+ &sa->hdr[prm->tun.hdr_len];
+ sa->hdr_len += sizeof(struct rte_udp_hdr);
+ udph->src_port = prm->ipsec_xform.udp.sport;
+ udph->dst_port = prm->ipsec_xform.udp.dport;
+ udph->dgram_cksum = 0;
+ }
+
/* update l2_len and l3_len fields for outbound mbuf */
sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);
- memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
-
esp_outb_init(sa, sa->hdr_len);
}
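The resulting SA header template, sketched for reference (layout only; actual offsets depend on the tunnel header supplied in prm->tun):

/*
 * sa->hdr after esp_outb_tun_init() with NAT-T enabled:
 *
 *   [ optional L2 (hdr_l3_off bytes) | outer IP | UDP header (8 bytes) ]
 *   0                                 hdr_l3_off                hdr_len
 *
 * hdr_len has grown by sizeof(struct rte_udp_hdr), so the l3_len written
 * into sa->tx_offload (hdr_len - hdr_l3_off) now covers outer IP plus UDP.
 * dgram_len is filled in per packet in the outbound path; dgram_cksum
 * stays zero, which RFC 3948 permits for ESP-in-UDP.
 */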
const struct crypto_xform *cxf)
{
static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
- RTE_IPSEC_SATP_MODE_MASK;
+ RTE_IPSEC_SATP_MODE_MASK |
+ RTE_IPSEC_SATP_NATT_MASK;
if (prm->ipsec_xform.options.ecn)
sa->tos_mask |= RTE_IPV4_HDR_ECN_MASK;
case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
esp_inb_init(sa);
break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4 |
+ RTE_IPSEC_SATP_NATT_ENABLE):
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6 |
+ RTE_IPSEC_SATP_NATT_ENABLE):
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
esp_outb_tun_init(sa, prm);
break;
+ case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS |
+ RTE_IPSEC_SATP_NATT_ENABLE):
case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
esp_outb_init(sa, 0);
break;
if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
return -EINVAL;
- if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
- prm->tun.hdr_len > sizeof(sa->hdr))
- return -EINVAL;
+ if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+ uint32_t hlen = prm->tun.hdr_len;
+ if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE)
+ hlen += sizeof(struct rte_udp_hdr);
+ if (hlen > sizeof(sa->hdr))
+ return -EINVAL;
+ }
rc = fill_crypto_xform(&cxf, type, prm);
if (rc != 0)
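Finally, a quick size check with assumed values (not from the patch): with a plain IPv4 outer header and no L2 prepend, the adjusted validation above needs 28 bytes of template space, i.e. callers size prm->tun.hdr for the IP header only and the library reserves room for the UDP header itself:

/* assumed: IPv4 outer header, hdr_l3_off == 0 */
uint32_t hlen = sizeof(struct rte_ipv4_hdr);	/* prm->tun.hdr_len == 20 */

if (prm.ipsec_xform.options.udp_encap)
	hlen += sizeof(struct rte_udp_hdr);	/* 20 + 8 = 28 */
/* hlen must fit into sa->hdr, the SA's fixed-size header template */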