* Added command to display Rx queue used descriptor count.
``show port (port_id) rxq (queue_id) desc used count``
+* **Updated ipsec-secgw sample application.**
+
+ * Updated the ``ipsec-secgw`` sample application with UDP encapsulation
+ support for NAT Traversal.
+
Removed Items
-------------
sa <dir> <spi> <cipher_algo> <cipher_key> <auth_algo> <auth_key>
<mode> <src_ip> <dst_ip> <action_type> <port_id> <fallback>
- <flow-direction> <port_id> <queue_id>
+ <flow-direction> <port_id> <queue_id> <udp-encap>
where each option means:
* *port_id*: Port ID of the NIC for which the SA is configured.
* *queue_id*: Queue ID to which traffic should be redirected.
+ ``<udp-encap>``
+
+ * Option to enable IPsec UDP encapsulation for NAT Traversal.
+ Only *lookaside-protocol-offload* mode is supported at the moment.
+
+ * Optional: Yes, it is disabled by default
+
+ * Syntax:
+
+ * *udp-encap*
+
Example SA rules:
.. code-block:: console
* ``-h`` Show usage.
If <ipsec_mode> is specified, only tests for that mode will be invoked. For the
-list of available modes please refer to run_test.sh.
\ No newline at end of file
+list of available modes please refer to run_test.sh.
/* application wide librte_ipsec/SA parameters */
struct app_sa_prm app_sa_prm = {
.enable = 0,
- .cache_sz = SA_CACHE_SZ
+ .cache_sz = SA_CACHE_SZ,
+ .udp_encap = 0
};
static const char *cfgfile;
const struct rte_ether_hdr *eth;
const struct rte_ipv4_hdr *iph4;
const struct rte_ipv6_hdr *iph6;
+ const struct rte_udp_hdr *udp;
+ uint16_t ip4_hdr_len;
+ uint16_t nat_port;
eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
RTE_ETHER_HDR_LEN);
adjust_ipv4_pktlen(pkt, iph4, 0);
- if (iph4->next_proto_id == IPPROTO_ESP)
+ switch (iph4->next_proto_id) {
+ case IPPROTO_ESP:
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- else {
+ break;
+ case IPPROTO_UDP:
+ /* check for UDP-encapsulated ESP (NAT Traversal, UDP port 4500) */
+ if (app_sa_prm.udp_encap == 1) {
+ ip4_hdr_len = ((iph4->version_ihl &
+ RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, ip4_hdr_len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port) {
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
return;
}
- if (next_proto == IPPROTO_ESP)
+ switch (iph6->proto) {
+ case IPPROTO_ESP:
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- else {
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port) {
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
t->ip6.data[t->ip6.num] = &iph6->proto;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
+/* UDP port reserved for IPsec NAT Traversal (RFC 3948) */
+#define IPSEC_NAT_T_PORT 4500
+/* packet type marking ESP packets encapsulated in UDP */
+#define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP)
+
struct traffic_type {
const uint8_t *data[MAX_PKT_BURST * 2];
struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
ipsec->replay_win_sz = app_sa_prm.window_size;
ipsec->options.esn = app_sa_prm.enable_esn;
+ ipsec->options.udp_encap = sa->udp_encap;
}
int
continue;
}
+ /* drop ESP-in-UDP packets when the SA has UDP encapsulation disabled */
+ if (unlikely((pkts[i]->packet_type &
+ (RTE_PTYPE_TUNNEL_MASK |
+ RTE_PTYPE_L4_MASK)) ==
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP &&
+ sa->udp_encap != 1)) {
+ free_pkts(&pkts[i], 1);
+ continue;
+ }
+
sym_cop = get_sym_cop(&priv->cop);
sym_cop->m_src = pkts[i];
uint32_t window_size; /* replay window size */
uint32_t enable_esn; /* enable/disable ESN support */
uint32_t cache_sz; /* per lcore SA cache size */
+ uint32_t udp_encap; /* enable/disable UDP encapsulation */
uint64_t flags; /* rte_ipsec_sa_prm.flags */
};
struct rte_security_ipsec_xform *sec_xform;
};
enum rte_security_ipsec_sa_direction direction;
+ uint8_t udp_encap;
uint16_t portid;
uint8_t fdir_qid;
uint8_t fdir_flag;
uint32_t portid_p = 0;
uint32_t fallback_p = 0;
int16_t status_p = 0;
+ uint16_t udp_encap_p = 0;
if (strcmp(tokens[0], "in") == 0) {
ri = &nb_sa_in;
}
continue;
}
+ if (strcmp(tokens[ti], "udp-encap") == 0) {
+ APP_CHECK(ips->type ==
+ RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ status, "UDP encapsulation is allowed if the "
+ "session is of type lookaside-protocol-offload "
+ "only.");
+ if (status->status < 0)
+ return;
+ APP_CHECK_PRESENCE(udp_encap_p, tokens[ti], status);
+ if (status->status < 0)
+ return;
+
+ rule->udp_encap = 1;
+ app_sa_prm.udp_encap = 1;
+ udp_encap_p = 1;
+ continue;
+ }
/* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
uint32_t spi, cache_idx;
struct ipsec_sad_cache *cache;
struct ipsec_sa *cached_sa;
+ uint16_t udp_hdr_len = 0;
int is_ipv4;
cache = &RTE_PER_LCORE(sad_cache);
for (i = 0; i < nb_pkts; i++) {
ipv4 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *);
ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);
+ /* for ESP-in-UDP (NAT-T) the ESP header follows the UDP header */
+ if ((pkts[i]->packet_type &
+ (RTE_PTYPE_TUNNEL_MASK | RTE_PTYPE_L4_MASK)) ==
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP)
+ udp_hdr_len = sizeof(struct rte_udp_hdr);
+ else
+ udp_hdr_len = 0;
esp = rte_pktmbuf_mtod_offset(pkts[i], struct rte_esp_hdr *,
- pkts[i]->l3_len);
+ pkts[i]->l3_len + udp_hdr_len);
is_ipv4 = pkts[i]->packet_type & RTE_PTYPE_L3_IPV4;
spi = rte_be_to_cpu_32(esp->spi);