examples/ipsec-secgw: support security offload
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 1d6c81b..6201d85 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -38,6 +38,7 @@
 #include <sys/types.h>
 #include <netinet/in.h>
 #include <netinet/ip.h>
+#include <netinet/ip6.h>
 #include <string.h>
 #include <sys/queue.h>
 #include <stdarg.h>
 #include <rte_mbuf.h>
 #include <rte_acl.h>
 #include <rte_lpm.h>
+#include <rte_lpm6.h>
 #include <rte_hash.h>
 #include <rte_jhash.h>
 #include <rte_cryptodev.h>
 
 #include "ipsec.h"
+#include "parser.h"
 
 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
 
@@ -79,6 +82,7 @@
 
 #define NB_MBUF        (32000)
 
+#define CDEV_QUEUE_DESC 2048
 #define CDEV_MAP_ENTRIES 1024
 #define CDEV_MP_NB_OBJS 2048
 #define CDEV_MP_CACHE_SZ 64
@@ -86,8 +90,6 @@
 
 #define OPTION_CONFIG          "config"
 #define OPTION_SINGLE_SA       "single-sa"
-#define OPTION_EP0             "ep0"
-#define OPTION_EP1             "ep1"
 
 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
@@ -156,18 +158,18 @@ static uint32_t enabled_port_mask;
 static uint32_t unprotected_port_mask;
 static int32_t promiscuous_on = 1;
 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
-static int32_t ep = -1; /**< Endpoint configuration (0 or 1) */
 static uint32_t nb_lcores;
 static uint32_t single_sa;
 static uint32_t single_sa_idx;
+static uint32_t frame_size;
 
 struct lcore_rx_queue {
-       uint8_t port_id;
+       uint16_t port_id;
        uint8_t queue_id;
 } __rte_cache_aligned;
 
 struct lcore_params {
-       uint8_t port_id;
+       uint16_t port_id;
        uint8_t queue_id;
        uint8_t lcore_id;
 } __rte_cache_aligned;
@@ -192,7 +194,8 @@ struct lcore_conf {
        struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
        struct ipsec_ctx inbound;
        struct ipsec_ctx outbound;
-       struct rt_ctx *rt_ctx;
+       struct rt_ctx *rt4_ctx;
+       struct rt_ctx *rt6_ctx;
 } __rte_cache_aligned;
 
 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
@@ -202,11 +205,9 @@ static struct rte_eth_conf port_conf = {
                .mq_mode        = ETH_MQ_RX_RSS,
                .max_rx_pkt_len = ETHER_MAX_LEN,
                .split_hdr_size = 0,
-               .header_split   = 0, /**< Header Split disabled */
-               .hw_ip_checksum = 1, /**< IP checksum offload enabled */
-               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
-               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
-               .hw_strip_crc   = 0, /**< CRC stripped by hardware */
+               .offloads = DEV_RX_OFFLOAD_CHECKSUM |
+                           DEV_RX_OFFLOAD_CRC_STRIP,
+               .ignore_offload_bitfield = 1,
        },
        .rx_adv_conf = {
                .rss_conf = {
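
Review note: this hunk moves from the old per-field rxmode flags to the unified offloads bitmap of the new ethdev offload API; ignore_offload_bitfield = 1 tells ethdev to honour only the bitmap during the migration period. A minimal sketch, assuming the 17.11-era ethdev API, of checking requested RX offloads against what a port actually reports (helper name is ours, not part of the patch):

#include <rte_ethdev.h>

/* Illustrative helper: does the port support the RX offloads we are
 * about to request in rte_eth_conf? */
static int
rx_offloads_supported(uint16_t portid, uint64_t requested)
{
        struct rte_eth_dev_info dev_info;

        rte_eth_dev_info_get(portid, &dev_info);
        return (dev_info.rx_offload_capa & requested) == requested;
}

port_init() later in this patch does the equivalent capability check for DEV_RX_OFFLOAD_SECURITY and DEV_TX_OFFLOAD_SECURITY before enabling them.
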
@@ -230,27 +231,39 @@ struct traffic_type {
 };
 
 struct ipsec_traffic {
-       struct traffic_type ipsec4;
-       struct traffic_type ipv4;
+       struct traffic_type ipsec;
+       struct traffic_type ip4;
+       struct traffic_type ip6;
 };
 
 static inline void
 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 {
        uint8_t *nlp;
+       struct ether_hdr *eth;
 
-       if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
-               rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
-               nlp = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
-                               offsetof(struct ip, ip_p));
+       eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+       if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+               nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+               nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
                if (*nlp == IPPROTO_ESP)
-                       t->ipsec4.pkts[(t->ipsec4.num)++] = pkt;
+                       t->ipsec.pkts[(t->ipsec.num)++] = pkt;
                else {
-                       t->ipv4.data[t->ipv4.num] = nlp;
-                       t->ipv4.pkts[(t->ipv4.num)++] = pkt;
+                       t->ip4.data[t->ip4.num] = nlp;
+                       t->ip4.pkts[(t->ip4.num)++] = pkt;
+               }
+       } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+               nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+               nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
+               if (*nlp == IPPROTO_ESP)
+                       t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+               else {
+                       t->ip6.data[t->ip6.num] = nlp;
+                       t->ip6.pkts[(t->ip6.num)++] = pkt;
                }
        } else {
                /* Unknown/Unsupported type, drop the packet */
+               RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
                rte_pktmbuf_free(pkt);
        }
 }
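
Review note: classification now keys off the EtherType in the frame itself instead of pkt->packet_type, so it no longer depends on the PMD filling in packet-type information. The ether_type field is big-endian on the wire; comparing it against rte_cpu_to_be_16(ETHER_TYPE_IPv4) swaps the constant at compile time rather than swapping every packet. The same idiom in isolation, as a sketch:

#include <rte_byteorder.h>
#include <rte_ether.h>

/* The byte swap happens on the compile-time constant, not per packet. */
static inline int
frame_is_ipv4(const struct ether_hdr *eth)
{
        return eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4);
}

Note that VLAN-tagged frames match neither test and fall into the drop branch.
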
@@ -261,8 +274,9 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
 {
        int32_t i;
 
-       t->ipsec4.num = 0;
-       t->ipv4.num = 0;
+       t->ipsec.num = 0;
+       t->ip4.num = 0;
+       t->ip6.num = 0;
 
        for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
                rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
@@ -275,16 +289,29 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
 }
 
 static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
 {
-       pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
-       pkt->l3_len = sizeof(struct ip);
-       pkt->l2_len = ETHER_HDR_LEN;
+       struct ip *ip;
+       struct ether_hdr *ethhdr;
+
+       ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+       ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
+
+       if (ip->ip_v == IPVERSION) {
+               pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
+               pkt->l3_len = sizeof(struct ip);
+               pkt->l2_len = ETHER_HDR_LEN;
+
+               ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+       } else {
+               pkt->ol_flags |= PKT_TX_IPV6;
+               pkt->l3_len = sizeof(struct ip6_hdr);
+               pkt->l2_len = ETHER_HDR_LEN;
 
-       struct ether_hdr *ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt,
-                       ETHER_HDR_LEN);
+               ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+       }
 
-       ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
        memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
                        sizeof(struct ether_addr));
        memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
@@ -292,13 +319,13 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
 }
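
Review note: prepare_tx_pkt() now prepends the Ethernet header itself and picks TX offload flags by IP version. For PKT_TX_IP_CKSUM the rte_mbuf API contract additionally requires l2_len/l3_len to be set and the IPv4 header checksum field to be zero on entry (ipsec-secgw zeroes it in its ESP output path); IPv6 has no header checksum, so PKT_TX_IPV6 alone suffices. A sketch of the full per-packet contract:

/* Sketch: everything a PMD needs to compute the IPv4 header checksum.
 * The zeroing shown here is done elsewhere in ipsec-secgw. */
static inline void
request_ipv4_cksum(struct rte_mbuf *m, struct ip *ip)
{
        ip->ip_sum = 0;                /* hardware fills this in */
        m->l2_len = ETHER_HDR_LEN;     /* where the IP header starts */
        m->l3_len = sizeof(struct ip); /* how long it is */
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
}
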
 
 static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
 {
        int32_t i;
        const int32_t prefetch_offset = 2;
 
        for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
-               rte_prefetch0(pkts[i + prefetch_offset]->cacheline1);
+               rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
                prepare_tx_pkt(pkts[i], port);
        }
        /* Process left packets */
@@ -308,7 +335,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
 
 /* Send burst of packets on an output interface */
 static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
 {
        struct rte_mbuf **m_table;
        int32_t ret;
@@ -331,7 +358,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
 
 /* Enqueue a single packet, and send burst if queue is filled */
 static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
 {
        uint32_t lcore_id;
        uint16_t len;
@@ -355,94 +382,139 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
 }
 
 static inline void
-process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
-               struct ipsec_traffic *traffic)
+inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
+               uint16_t lim)
 {
        struct rte_mbuf *m;
-       uint16_t idx, nb_pkts_in, i, j;
-       uint32_t sa_idx, res;
+       uint32_t i, j, res, sa_idx;
 
-       nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
-                       traffic->ipsec4.num, MAX_PKT_BURST);
-
-       /* SP/ACL Inbound check ipsec and ipv4 */
-       for (i = 0; i < nb_pkts_in; i++) {
-               idx = traffic->ipv4.num++;
-               m = traffic->ipsec4.pkts[i];
-               traffic->ipv4.pkts[idx] = m;
-               traffic->ipv4.data[idx] = rte_pktmbuf_mtod_offset(m,
-                               uint8_t *, offsetof(struct ip, ip_p));
-       }
+       if (ip->num == 0 || sp == NULL)
+               return;
 
-       rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
-                       traffic->ipv4.data, traffic->ipv4.res,
-                       traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);
+       rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+                       ip->num, DEFAULT_MAX_CATEGORIES);
 
        j = 0;
-       for (i = 0; i < traffic->ipv4.num - nb_pkts_in; i++) {
-               m = traffic->ipv4.pkts[i];
-               res = traffic->ipv4.res[i];
-               if (res & ~BYPASS) {
+       for (i = 0; i < ip->num; i++) {
+               m = ip->pkts[i];
+               res = ip->res[i];
+               if (res & BYPASS) {
+                       ip->pkts[j++] = m;
+                       continue;
+               }
+               if (res & DISCARD || i < lim) {
                        rte_pktmbuf_free(m);
                        continue;
                }
-               traffic->ipv4.pkts[j++] = m;
-       }
-       /* Check return SA SPI matches pkt SPI */
-       for ( ; i < traffic->ipv4.num; i++) {
-               m = traffic->ipv4.pkts[i];
-               sa_idx = traffic->ipv4.res[i] & PROTECT_MASK;
-               if (sa_idx == 0 || !inbound_sa_check(ipsec_ctx->sa_ctx,
-                                       m, sa_idx)) {
+               /* Only check SPI match for processed IPSec packets */
+               sa_idx = ip->res[i] & PROTECT_MASK;
+               if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
                        rte_pktmbuf_free(m);
                        continue;
                }
-               traffic->ipv4.pkts[j++] = m;
+               ip->pkts[j++] = m;
        }
-       traffic->ipv4.num = j;
+       ip->num = j;
 }
 
 static inline void
-process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
+process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
                struct ipsec_traffic *traffic)
 {
        struct rte_mbuf *m;
-       uint16_t idx, nb_pkts_out, i, j;
-       uint32_t sa_idx, res;
+       uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;
 
-       rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
-                       traffic->ipv4.data, traffic->ipv4.res,
-                       traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);
+       nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+                       traffic->ipsec.num, MAX_PKT_BURST);
 
-       /* Drop any IPsec traffic from protected ports */
-       for (i = 0; i < traffic->ipsec4.num; i++)
-               rte_pktmbuf_free(traffic->ipsec4.pkts[i]);
+       n_ip4 = traffic->ip4.num;
+       n_ip6 = traffic->ip6.num;
+
+       /* SP/ACL Inbound check ipsec and ip4 */
+       for (i = 0; i < nb_pkts_in; i++) {
+               m = traffic->ipsec.pkts[i];
+               struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+               if (ip->ip_v == IPVERSION) {
+                       idx = traffic->ip4.num++;
+                       traffic->ip4.pkts[idx] = m;
+                       traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
+                                       uint8_t *, offsetof(struct ip, ip_p));
+               } else if (ip->ip_v == IP6_VERSION) {
+                       idx = traffic->ip6.num++;
+                       traffic->ip6.pkts[idx] = m;
+                       traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
+                                       uint8_t *,
+                                       offsetof(struct ip6_hdr, ip6_nxt));
+               } else
+                       rte_pktmbuf_free(m);
+       }
+
+       inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
+                       n_ip4);
+
+       inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
+                       n_ip6);
+}
 
-       traffic->ipsec4.num = 0;
+static inline void
+outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
+               struct traffic_type *ipsec)
+{
+       struct rte_mbuf *m;
+       uint32_t i, j, sa_idx;
+
+       if (ip->num == 0 || sp == NULL)
+               return;
+
+       rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+                       ip->num, DEFAULT_MAX_CATEGORIES);
 
        j = 0;
-       for (i = 0; i < traffic->ipv4.num; i++) {
-               m = traffic->ipv4.pkts[i];
-               res = traffic->ipv4.res[i];
-               sa_idx = res & PROTECT_MASK;
-               if ((res == 0) || (res & DISCARD))
+       for (i = 0; i < ip->num; i++) {
+               m = ip->pkts[i];
+               sa_idx = ip->res[i] & PROTECT_MASK;
+               if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
                        rte_pktmbuf_free(m);
                else if (sa_idx != 0) {
-                       traffic->ipsec4.res[traffic->ipsec4.num] = sa_idx;
-                       traffic->ipsec4.pkts[traffic->ipsec4.num++] = m;
+                       ipsec->res[ipsec->num] = sa_idx;
+                       ipsec->pkts[ipsec->num++] = m;
                } else /* BYPASS */
-                       traffic->ipv4.pkts[j++] = m;
+                       ip->pkts[j++] = m;
        }
-       traffic->ipv4.num = j;
+       ip->num = j;
+}
+
+static inline void
+process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
+               struct ipsec_traffic *traffic)
+{
+       struct rte_mbuf *m;
+       uint16_t idx, nb_pkts_out, i;
+
+       /* Drop any IPsec traffic from protected ports */
+       for (i = 0; i < traffic->ipsec.num; i++)
+               rte_pktmbuf_free(traffic->ipsec.pkts[i]);
+
+       traffic->ipsec.num = 0;
+
+       outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
+
+       outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
 
-       nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec4.pkts,
-                       traffic->ipsec4.res, traffic->ipsec4.num,
+       nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+                       traffic->ipsec.res, traffic->ipsec.num,
                        MAX_PKT_BURST);
 
        for (i = 0; i < nb_pkts_out; i++) {
-               idx = traffic->ipv4.num++;
-               m = traffic->ipsec4.pkts[i];
-               traffic->ipv4.pkts[idx] = m;
+               m = traffic->ipsec.pkts[i];
+               struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+               if (ip->ip_v == IPVERSION) {
+                       idx = traffic->ip4.num++;
+                       traffic->ip4.pkts[idx] = m;
+               } else {
+                       idx = traffic->ip6.num++;
+                       traffic->ip6.pkts[idx] = m;
+               }
        }
 }
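
Review note: inbound_sp_sa() and outbound_sp() both interpret the 32-bit ACL result word the same way: BYPASS and DISCARD are flag bits, and anything else carries an SA index in the bits under PROTECT_MASK (constants from ipsec.h). A hypothetical helper, not in the patch, spelling that decision out:

enum sp_action { SP_BYPASS, SP_DISCARD, SP_PROTECT };

/* One reasonable decode of an SP/ACL result word; the two call sites
 * differ slightly (e.g. the outbound side treats a zero result as
 * DISCARD, the inbound side also drops plain-IP packets whose SP
 * verdict was PROTECT). */
static inline enum sp_action
decode_sp_result(uint32_t res, uint32_t *sa_idx)
{
        if (res & BYPASS)
                return SP_BYPASS;               /* forward in the clear */
        if ((res & DISCARD) || (res & PROTECT_MASK) == 0)
                return SP_DISCARD;              /* drop */
        *sa_idx = res & PROTECT_MASK;           /* non-zero SA index */
        return SP_PROTECT;
}
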
 
@@ -450,47 +522,72 @@ static inline void
 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
                struct ipsec_traffic *traffic)
 {
-       uint16_t nb_pkts_in, i;
+       struct rte_mbuf *m;
+       uint32_t nb_pkts_in, i, idx;
 
        /* Drop any IPv4 traffic from unprotected ports */
-       for (i = 0; i < traffic->ipv4.num; i++)
-               rte_pktmbuf_free(traffic->ipv4.pkts[i]);
+       for (i = 0; i < traffic->ip4.num; i++)
+               rte_pktmbuf_free(traffic->ip4.pkts[i]);
+
+       traffic->ip4.num = 0;
 
-       traffic->ipv4.num = 0;
+       /* Drop any IPv6 traffic from unprotected ports */
+       for (i = 0; i < traffic->ip6.num; i++)
+               rte_pktmbuf_free(traffic->ip6.pkts[i]);
 
-       nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
-                       traffic->ipsec4.num, MAX_PKT_BURST);
+       traffic->ip6.num = 0;
 
-       for (i = 0; i < nb_pkts_in; i++)
-               traffic->ipv4.pkts[i] = traffic->ipsec4.pkts[i];
+       nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+                       traffic->ipsec.num, MAX_PKT_BURST);
 
-       traffic->ipv4.num = nb_pkts_in;
+       for (i = 0; i < nb_pkts_in; i++) {
+               m = traffic->ipsec.pkts[i];
+               struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+               if (ip->ip_v == IPVERSION) {
+                       idx = traffic->ip4.num++;
+                       traffic->ip4.pkts[idx] = m;
+               } else {
+                       idx = traffic->ip6.num++;
+                       traffic->ip6.pkts[idx] = m;
+               }
+       }
 }
 
 static inline void
 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
                struct ipsec_traffic *traffic)
 {
-       uint16_t nb_pkts_out, i;
+       struct rte_mbuf *m;
+       uint32_t nb_pkts_out, i, n;
+       struct ip *ip;
 
        /* Drop any IPsec traffic from protected ports */
-       for (i = 0; i < traffic->ipsec4.num; i++)
-               rte_pktmbuf_free(traffic->ipsec4.pkts[i]);
+       for (i = 0; i < traffic->ipsec.num; i++)
+               rte_pktmbuf_free(traffic->ipsec.pkts[i]);
 
-       traffic->ipsec4.num = 0;
+       traffic->ipsec.num = 0;
 
-       for (i = 0; i < traffic->ipv4.num; i++)
-               traffic->ipv4.res[i] = single_sa_idx;
+       n = 0;
+
+       for (i = 0; i < traffic->ip4.num; i++) {
+               traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
+               traffic->ipsec.res[n++] = single_sa_idx;
+       }
 
-       nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipv4.pkts,
-                       traffic->ipv4.res, traffic->ipv4.num,
+       for (i = 0; i < traffic->ip6.num; i++) {
+               traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
+               traffic->ipsec.res[n++] = single_sa_idx;
+       }
+
+       traffic->ip4.num = 0;
+       traffic->ip6.num = 0;
+       traffic->ipsec.num = n;
+
+       nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+                       traffic->ipsec.res, traffic->ipsec.num,
                        MAX_PKT_BURST);
 
-       traffic->ipv4.num = nb_pkts_out;
+       if (nb_pkts_out == 0)
+               return;
+
+       /* They all use the same SA (ip4 or ip6 tunnel) */
+       m = traffic->ipsec.pkts[0];
+       ip = rte_pktmbuf_mtod(m, struct ip *);
+       if (ip->ip_v == IPVERSION) {
+               traffic->ip4.num = nb_pkts_out;
+               for (i = 0; i < nb_pkts_out; i++)
+                       traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
+       } else {
+               traffic->ip6.num = nb_pkts_out;
+               for (i = 0; i < nb_pkts_out; i++)
+                       traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
+       }
 }
 
 static inline void
-route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
 {
        uint32_t hop[MAX_PKT_BURST * 2];
        uint32_t dst_ip[MAX_PKT_BURST * 2];
@@ -517,15 +614,44 @@ route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
        }
 }
 
+static inline void
+route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+       int32_t hop[MAX_PKT_BURST * 2];
+       uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+       uint8_t *ip6_dst;
+       uint16_t i, offset;
+
+       if (nb_pkts == 0)
+               return;
+
+       for (i = 0; i < nb_pkts; i++) {
+               offset = offsetof(struct ip6_hdr, ip6_dst);
+               ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset);
+               memcpy(&dst_ip[i][0], ip6_dst, 16);
+       }
+
+       rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip,
+                       hop, nb_pkts);
+
+       for (i = 0; i < nb_pkts; i++) {
+               if (hop[i] == -1) {
+                       rte_pktmbuf_free(pkts[i]);
+                       continue;
+               }
+               send_single_packet(pkts[i], hop[i] & 0xff);
+       }
+}
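
Review note: route6_pkts() mirrors route4_pkts() on top of rte_lpm6; a lookup miss comes back as -1 and the packet is dropped, otherwise the low byte of the next hop is the output port. For orientation, a sketch of how the table behind rt6_ctx might be built; rt_init() elsewhere in the example does the real work, names and sizes here are illustrative, and in this API generation rte_lpm6_add() takes a uint8_t next hop, which is why the lookup result is masked with 0xff:

#include <rte_lpm6.h>

static struct rte_lpm6 *
rt6_example(int32_t socket_id)
{
        struct rte_lpm6_config cfg = {
                .max_rules = 1024,
                .number_tbl8s = 1 << 16,
                .flags = 0,
        };
        /* 2001:db8::/32 as a sample prefix, next hop = port 1 */
        uint8_t dst[16] = { 0x20, 0x01, 0x0d, 0xb8 };
        struct rte_lpm6 *lpm = rte_lpm6_create("rt6_example", socket_id, &cfg);

        if (lpm == NULL || rte_lpm6_add(lpm, dst, 32, 1) < 0)
                return NULL;
        return lpm;
}
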
+
 static inline void
 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
-               uint8_t nb_pkts, uint8_t portid)
+               uint8_t nb_pkts, uint16_t portid)
 {
        struct ipsec_traffic traffic;
 
        prepare_traffic(pkts, &traffic, nb_pkts);
 
-       if (single_sa) {
+       if (unlikely(single_sa)) {
                if (UNPROTECTED_PORT(portid))
                        process_pkts_inbound_nosp(&qconf->inbound, &traffic);
                else
@@ -537,7 +663,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
                        process_pkts_outbound(&qconf->outbound, &traffic);
        }
 
-       route_pkts(qconf->rt_ctx, traffic.ipv4.pkts, traffic.ipv4.num);
+       route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
+       route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
 }
 
 static inline void
@@ -563,7 +690,8 @@ main_loop(__attribute__((unused)) void *dummy)
        uint32_t lcore_id;
        uint64_t prev_tsc, diff_tsc, cur_tsc;
        int32_t i, nb_rx;
-       uint8_t portid, queueid;
+       uint16_t portid;
+       uint8_t queueid;
        struct lcore_conf *qconf;
        int32_t socket_id;
        const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -576,13 +704,18 @@ main_loop(__attribute__((unused)) void *dummy)
        rxql = qconf->rx_queue_list;
        socket_id = rte_lcore_to_socket_id(lcore_id);
 
-       qconf->rt_ctx = socket_ctx[socket_id].rt_ipv4;
-       qconf->inbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_in;
-       qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_in;
+       qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
+       qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
+       qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
+       qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
+       qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
        qconf->inbound.cdev_map = cdev_map_in;
-       qconf->outbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_out;
-       qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_out;
+       qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
+       qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
+       qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
+       qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
        qconf->outbound.cdev_map = cdev_map_out;
+       qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
 
        if (qconf->nb_rx_queue == 0) {
                RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
@@ -595,7 +728,7 @@ main_loop(__attribute__((unused)) void *dummy)
                portid = rxql[i].port_id;
                queueid = rxql[i].queue_id;
                RTE_LOG(INFO, IPSEC,
-                       " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+                       " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
                        lcore_id, portid, queueid);
        }
 
@@ -626,7 +759,8 @@ main_loop(__attribute__((unused)) void *dummy)
 static int32_t
 check_params(void)
 {
-       uint8_t lcore, portid, nb_ports;
+       uint8_t lcore;
+       uint16_t portid, nb_ports;
        uint16_t i;
        int32_t socket_id;
 
@@ -636,8 +770,6 @@ check_params(void)
        }
 
        nb_ports = rte_eth_dev_count();
-       if (nb_ports > RTE_MAX_ETHPORTS)
-               nb_ports = RTE_MAX_ETHPORTS;
 
        for (i = 0; i < nb_lcore_params; ++i) {
                lcore = lcore_params[i].lcore_id;
@@ -666,7 +798,7 @@ check_params(void)
 }
 
 static uint8_t
-get_port_nb_rx_queues(const uint8_t port)
+get_port_nb_rx_queues(const uint16_t port)
 {
        int32_t queue = -1;
        uint16_t i;
@@ -708,16 +840,17 @@ print_usage(const char *prgname)
 {
        printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK"
                "  --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore]"
-               " --single-sa SAIDX --ep0|--ep1\n"
+               " --single-sa SAIDX -f CONFIG_FILE\n"
                "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
                "  -P : enable promiscuous mode\n"
                "  -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
+               "  -j FRAMESIZE: jumbo frame maximum size\n"
                "  --"OPTION_CONFIG": (port,queue,lcore): "
                "rx queues configuration\n"
                "  --single-sa SAIDX: use single SA index for outbound, "
                "bypassing the SP\n"
-               "  --ep0: Configure as Endpoint 0\n"
-               "  --ep1: Configure as Endpoint 1\n", prgname);
+               "  -f CONFIG_FILE: Configuration file path\n",
+               prgname);
 }
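
Review note: with the endpoint flags gone, the endpoint-specific SP/SA/routing rules move into the file given by the now-mandatory -f option. An illustrative invocation (core list, memory, port masks, and file path are placeholders, not defaults):

./build/ipsec-secgw -l 20,21 -n 4 --socket-mem 0,2048 -- \
        -p 0xf -P -u 0x3 -j 9000 \
        --config="(0,0,20),(1,0,20),(2,0,21),(3,0,21)" \
        -f /path/to/ep0.cfg
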
 
 static int32_t
@@ -762,7 +895,7 @@ parse_config(const char *q_arg)
                FLD_LCORE,
                _NUM_FLD
        };
-       int long int_fld[_NUM_FLD];
+       unsigned long int_fld[_NUM_FLD];
        char *str_fld[_NUM_FLD];
        int32_t i;
        uint32_t size;
@@ -830,18 +963,6 @@ parse_args_long_options(struct option *lgopts, int32_t option_index)
                }
        }
 
-       if (__STRNCMP(optname, OPTION_EP0)) {
-               printf("endpoint 0\n");
-               ep = 0;
-               ret = 0;
-       }
-
-       if (__STRNCMP(optname, OPTION_EP1)) {
-               printf("endpoint 1\n");
-               ep = 1;
-               ret = 0;
-       }
-
        return ret;
 }
 #undef __STRNCMP
@@ -856,14 +977,13 @@ parse_args(int32_t argc, char **argv)
        static struct option lgopts[] = {
                {OPTION_CONFIG, 1, 0, 0},
                {OPTION_SINGLE_SA, 1, 0, 0},
-               {OPTION_EP0, 0, 0, 0},
-               {OPTION_EP1, 0, 0, 0},
                {NULL, 0, 0, 0}
        };
+       int32_t f_present = 0;
 
        argvopt = argv;
 
-       while ((opt = getopt_long(argc, argvopt, "p:Pu:",
+       while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:",
                                lgopts, &option_index)) != EOF) {
 
                switch (opt) {
@@ -887,6 +1007,38 @@ parse_args(int32_t argc, char **argv)
                                return -1;
                        }
                        break;
+               case 'f':
+                       if (f_present == 1) {
+                               printf("\"-f\" option present more than "
+                                       "once!\n");
+                               print_usage(prgname);
+                               return -1;
+                       }
+                       if (parse_cfg_file(optarg) < 0) {
+                               printf("parsing file \"%s\" failed\n",
+                                       optarg);
+                               print_usage(prgname);
+                               return -1;
+                       }
+                       f_present = 1;
+                       break;
+               case 'j':
+                       {
+                               int32_t size = parse_decimal(optarg);
+                               if (size <= 1518) {
+                                       printf("Invalid jumbo frame size\n");
+                                       if (size < 0) {
+                                               print_usage(prgname);
+                                               return -1;
+                                       }
+                                       printf("Using default value 9000\n");
+                                       frame_size = 9000;
+                               } else {
+                                       frame_size = size;
+                               }
+                       }
+                       printf("Enabled jumbo frames size %u\n", frame_size);
+                       break;
                case 0:
                        if (parse_args_long_options(lgopts, option_index)) {
                                print_usage(prgname);
@@ -899,11 +1051,16 @@ parse_args(int32_t argc, char **argv)
                }
        }
 
+       if (f_present == 0) {
+               printf("Mandatory option \"-f\" not present\n");
+               return -1;
+       }
+
        if (optind >= 0)
                argv[optind-1] = prgname;
 
        ret = optind-1;
-       optind = 0; /* reset getopt lib */
+       optind = 1; /* reset getopt lib */
        return ret;
 }
 
@@ -917,11 +1074,12 @@ print_ethaddr(const char *name, const struct ether_addr *eth_addr)
 
 /* Check the link status of all ports in up to 9s, and print them finally */
 static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 {
 #define CHECK_INTERVAL 100 /* 100ms */
 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
-       uint8_t portid, count, all_ports_up, print_flag = 0;
+       uint16_t portid;
+       uint8_t count, all_ports_up, print_flag = 0;
        struct rte_eth_link link;
 
        printf("\nChecking link status");
@@ -936,18 +1094,17 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
                        /* print link status if flag set */
                        if (print_flag == 1) {
                                if (link.link_status)
-                                       printf("Port %d Link Up - speed %u "
-                                               "Mbps - %s\n", (uint8_t)portid,
-                                               (uint32_t)link.link_speed,
+                                       printf(
+                                       "Port %d Link Up - speed %u Mbps - %s\n",
+                                               portid, link.link_speed,
                                (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                                        ("full-duplex") : ("half-duplex\n"));
                                else
-                                       printf("Port %d Link Down\n",
-                                               (uint8_t)portid);
+                                       printf("Port %d Link Down\n", portid);
                                continue;
                        }
                        /* clear all_ports_up flag if any link down */
-                       if (link.link_status == 0) {
+                       if (link.link_status == ETH_LINK_DOWN) {
                                all_ports_up = 0;
                                break;
                        }
@@ -975,7 +1132,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
                uint16_t qp, struct lcore_params *params,
                struct ipsec_ctx *ipsec_ctx,
                const struct rte_cryptodev_capabilities *cipher,
-               const struct rte_cryptodev_capabilities *auth)
+               const struct rte_cryptodev_capabilities *auth,
+               const struct rte_cryptodev_capabilities *aead)
 {
        int32_t ret = 0;
        unsigned long i;
@@ -986,6 +1144,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
                key.cipher_algo = cipher->sym.cipher.algo;
        if (auth)
                key.auth_algo = auth->sym.auth.algo;
+       if (aead)
+               key.aead_algo = aead->sym.aead.algo;
 
        ret = rte_hash_lookup(map, &key);
        if (ret != -ENOENT)
@@ -1054,6 +1214,12 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
                if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
                        continue;
 
+               if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+                       ret |= add_mapping(map, str, cdev_id, qp, params,
+                                       ipsec_ctx, NULL, NULL, i);
+                       continue;
+               }
+
                if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
                        continue;
 
@@ -1066,7 +1232,7 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
                                continue;
 
                        ret |= add_mapping(map, str, cdev_id, qp, params,
-                                       ipsec_ctx, i, j);
+                                               ipsec_ctx, i, j, NULL);
                }
        }
 
@@ -1102,6 +1268,13 @@ cryptodevs_init(void)
 
        printf("lcore/cryptodev/qp mappings:\n");
 
+       uint32_t max_sess_sz = 0, sess_sz;
+       for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+               sess_sz = rte_cryptodev_get_private_session_size(cdev_id);
+               if (sess_sz > max_sess_sz)
+                       max_sess_sz = sess_sz;
+       }
+
        idx = 0;
        /* Start from last cdev id to give HW priority */
        for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
@@ -1130,19 +1303,45 @@ cryptodevs_init(void)
 
                dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
                dev_conf.nb_queue_pairs = qp;
-               dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;
-               dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;
+
+               if (!socket_ctx[dev_conf.socket_id].session_pool) {
+                       char mp_name[RTE_MEMPOOL_NAMESIZE];
+                       struct rte_mempool *sess_mp;
+
+                       snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+                                       "sess_mp_%u", dev_conf.socket_id);
+                       sess_mp = rte_mempool_create(mp_name,
+                                       CDEV_MP_NB_OBJS,
+                                       max_sess_sz,
+                                       CDEV_MP_CACHE_SZ,
+                                       0, NULL, NULL, NULL,
+                                       NULL, dev_conf.socket_id,
+                                       0);
+                       if (sess_mp == NULL)
+                               rte_exit(EXIT_FAILURE,
+                                       "Cannot create session pool on socket %d\n",
+                                       dev_conf.socket_id);
+                       else
+                               printf("Allocated session pool on socket %d\n",
+                                       dev_conf.socket_id);
+                       socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
+               }
 
                if (rte_cryptodev_configure(cdev_id, &dev_conf))
-                       rte_panic("Failed to initialize crypodev %u\n",
+                       rte_panic("Failed to initialize cryptodev %u\n",
                                        cdev_id);
 
-               qp_conf.nb_descriptors = CDEV_MP_NB_OBJS;
+               qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
                for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
                        if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
-                                               &qp_conf, dev_conf.socket_id))
+                                       &qp_conf, dev_conf.socket_id,
+                                       socket_ctx[dev_conf.socket_id].session_pool))
                                rte_panic("Failed to setup queue %u for "
                                                "cdev_id %u\n", 0, cdev_id);
+
+               if (rte_cryptodev_start(cdev_id))
+                       rte_panic("Failed to start cryptodev %u\n",
+                                       cdev_id);
        }
 
        printf("\n");
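
Review note: session handling changes shape here. Instead of a per-device session mempool, one pool is created per socket, sized by the largest private session size across all crypto devices, so any device on that socket can initialize its private data in the same object. A sketch of how such a pool is consumed when an SA is bound to a device, assuming the two-step session API from DPDK 17.08+ (helper name is ours; the xform construction is elided):

#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
create_sa_session(uint8_t cdev_id, struct rte_crypto_sym_xform *xf,
                struct rte_mempool *sess_mp)
{
        /* Step 1: allocate a device-independent session header. */
        struct rte_cryptodev_sym_session *s =
                rte_cryptodev_sym_session_create(sess_mp);

        if (s == NULL)
                return NULL;
        /* Step 2: let this device fill in its private session data. */
        if (rte_cryptodev_sym_session_init(cdev_id, s, xf, sess_mp) < 0) {
                rte_cryptodev_sym_session_free(s);
                return NULL;
        }
        return s;
}
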
@@ -1151,7 +1350,7 @@ cryptodevs_init(void)
 }
 
 static void
-port_init(uint8_t portid)
+port_init(uint16_t portid)
 {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf *txconf;
@@ -1186,12 +1385,27 @@ port_init(uint8_t portid)
        printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
                        nb_rx_queue, nb_tx_queue);
 
+       if (frame_size) {
+               port_conf.rxmode.max_rx_pkt_len = frame_size;
+               port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       }
+
+       if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+               port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
+       if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+               port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
+
        ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
                        &port_conf);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Cannot configure device: "
                                "err=%d, port=%d\n", ret, portid);
 
+       ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
+                               "err=%d, port=%d\n", ret, portid);
+
        /* init one TX queue per lcore */
        tx_queueid = 0;
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1245,11 +1459,14 @@ static void
 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
 {
        char s[64];
+       uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
+                       RTE_MBUF_DEFAULT_BUF_SIZE;
 
        snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
        ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
                        MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
-                       RTE_MBUF_DEFAULT_BUF_SIZE,
+                       buff_size,
                        socket_id);
        if (ctx->mbuf_pool == NULL)
                rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
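
Review note: the mbuf data room now scales with the jumbo frame setting. A worked example with the default constants involved:

/* With RTE_PKTMBUF_HEADROOM = 128 (the default):
 *   -j 9000 -> buff_size = 9000 + 128 = 9128 bytes per mbuf
 *   no -j   -> buff_size = RTE_MBUF_DEFAULT_BUF_SIZE
 *                        = 2048 + 128 = 2176 bytes per mbuf
 * NB_MBUF (32000) such buffers are carved out per socket. */
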
@@ -1262,8 +1479,9 @@ int32_t
 main(int32_t argc, char **argv)
 {
        int32_t ret;
-       uint32_t lcore_id, nb_ports;
-       uint8_t portid, socket_id;
+       uint32_t lcore_id;
+       uint8_t socket_id;
+       uint16_t portid, nb_ports;
 
        /* init EAL */
        ret = rte_eal_init(argc, argv);
@@ -1277,17 +1495,12 @@ main(int32_t argc, char **argv)
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid parameters\n");
 
-       if (ep < 0)
-               rte_exit(EXIT_FAILURE, "need to choose either EP0 or EP1\n");
-
        if ((unprotected_port_mask & enabled_port_mask) !=
                        unprotected_port_mask)
                rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
                                unprotected_port_mask);
 
        nb_ports = rte_eth_dev_count();
-       if (nb_ports > RTE_MAX_ETHPORTS)
-               nb_ports = RTE_MAX_ETHPORTS;
 
        if (check_params() < 0)
                rte_exit(EXIT_FAILURE, "check_params failed\n");
@@ -1298,7 +1511,7 @@ main(int32_t argc, char **argv)
 
        nb_lcores = rte_lcore_count();
 
-       /* Replicate each contex per socket */
+       /* Replicate each context per socket */
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                if (rte_lcore_is_enabled(lcore_id) == 0)
                        continue;
@@ -1311,11 +1524,13 @@ main(int32_t argc, char **argv)
                if (socket_ctx[socket_id].mbuf_pool)
                        continue;
 
-               sa_init(&socket_ctx[socket_id], socket_id, ep);
+               sa_init(&socket_ctx[socket_id], socket_id);
+
+               sp4_init(&socket_ctx[socket_id], socket_id);
 
-               sp_init(&socket_ctx[socket_id], socket_id, ep);
+               sp6_init(&socket_ctx[socket_id], socket_id);
 
-               rt_init(&socket_ctx[socket_id], socket_id, ep);
+               rt_init(&socket_ctx[socket_id], socket_id);
 
                pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
        }
@@ -1349,7 +1564,7 @@ main(int32_t argc, char **argv)
                        rte_eth_promiscuous_enable(portid);
        }
 
-       check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+       check_all_ports_link_status(nb_ports, enabled_port_mask);
 
        /* launch per-lcore init on every lcore */
        rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);