#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
+#include <rte_security.h>
+#include <rte_ip.h>
+#include <rte_ip_frag.h>
#include "ipsec.h"
#include "parser.h"
#define NB_MBUF (32000)
#define CDEV_QUEUE_DESC 2048
-#define CDEV_MAP_ENTRIES 1024
-#define CDEV_MP_NB_OBJS 2048
+#define CDEV_MAP_ENTRIES 16384
+#define CDEV_MP_NB_OBJS 1024
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1
#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
- addr.addr_bytes[0], addr.addr_bytes[1], \
- addr.addr_bytes[2], addr.addr_bytes[3], \
- addr.addr_bytes[4], addr.addr_bytes[5], \
+ (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
+ (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
+ (addr)->addr_bytes[4], (addr)->addr_bytes[5], \
0, 0)
+#define FRAG_TBL_BUCKET_ENTRIES 4
+#define FRAG_TTL_MS (10 * MS_PER_S)
+
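+/*
+ * L2 frame length for a given MTU: e.g. for the default 1500B MTU
+ * 1500 + 14 (RTE_ETHER_HDR_LEN) + 4 (RTE_ETHER_CRC_LEN) = 1518B.
+ */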
+#define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+
/* port/source ethernet addr and destination ethernet addr */
struct ethaddr_info {
uint64_t src, dst;
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_SINGLE_SA "single-sa"
#define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
+#define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
+#define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
+#define CMD_LINE_OPT_REASSEMBLE "reassemble"
+#define CMD_LINE_OPT_MTU "mtu"
enum {
/* long options mapped to a short option */
CMD_LINE_OPT_CONFIG_NUM,
CMD_LINE_OPT_SINGLE_SA_NUM,
CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
+ CMD_LINE_OPT_RX_OFFLOAD_NUM,
+ CMD_LINE_OPT_TX_OFFLOAD_NUM,
+ CMD_LINE_OPT_REASSEMBLE_NUM,
+ CMD_LINE_OPT_MTU_NUM,
};
static const struct option lgopts[] = {
{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
+ {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
+ {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
+ {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
+ {CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
{NULL, 0, 0, 0}
};
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
-static uint32_t frame_size;
+
+/*
+ * RX/TX HW offload capabilities to enable/use on ethernet ports.
+ * By default all capabilities are enabled.
+ */
+static uint64_t dev_rx_offload = UINT64_MAX;
+static uint64_t dev_tx_offload = UINT64_MAX;
+
+/*
+ * global values that determine multi-seg policy
+ */
+static uint32_t frag_tbl_sz;
+static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
+static uint32_t mtu_size = RTE_ETHER_MTU;
+
+/* application wide librte_ipsec/SA parameters */
+struct app_sa_prm app_sa_prm = {.enable = 0};
struct lcore_rx_queue {
uint16_t port_id;
struct ipsec_ctx outbound;
struct rt_ctx *rt4_ctx;
struct rt_ctx *rt6_ctx;
+ struct {
+ struct rte_ip_frag_tbl *tbl;
+ struct rte_mempool *pool_dir;
+ struct rte_mempool *pool_indir;
+ struct rte_ip_frag_death_row dr;
+ } frag;
} __rte_cache_aligned;
static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP,
- .ignore_offload_bitfield = 1,
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
- .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS),
},
};
static struct socket_ctx socket_ctx[NB_SOCKETS];
-struct traffic_type {
- const uint8_t *data[MAX_PKT_BURST * 2];
- struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
- uint32_t res[MAX_PKT_BURST * 2];
- uint32_t num;
-};
+/*
+ * Determine if multi-segment support is required:
+ * - either the frame buffer size is smaller than the MTU
+ * - or reassembly support is requested
+ */
+static int
+multi_seg_required(void)
+{
+ return (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >
+ frame_buf_size || frag_tbl_sz != 0);
+}
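+
+/*
+ * Note: with the defaults (mtu_size == RTE_ETHER_MTU, frag_tbl_sz == 0)
+ * and the usual 128B RTE_PKTMBUF_HEADROOM this evaluates to false:
+ * 1518 + 128 = 1646 <= 2176 (RTE_MBUF_DEFAULT_BUF_SIZE).
+ */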
-struct ipsec_traffic {
- struct traffic_type ipsec;
- struct traffic_type ip4;
- struct traffic_type ip6;
-};
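+/*
+ * Trim an mbuf to the length reported by the IP header; e.g. frames
+ * shorter than the 60B ethernet minimum arrive padded, and the padding
+ * must not be treated as payload.
+ */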
+static inline void
+adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
+
+static inline void
+adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
+ uint32_t l2_len)
+{
+ uint32_t plen, trim;
+
+ plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
+ if (plen < m->pkt_len) {
+ trim = m->pkt_len - plen;
+ rte_pktmbuf_trim(m, trim);
+ }
+}
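+
+/*
+ * Classify an input packet as IPv4/IPv6/IPsec and record l2/l3 lengths.
+ * The ethernet header is stripped (rte_pktmbuf_adj), hence l2_len == 0.
+ */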
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
- uint8_t *nlp;
- struct ether_hdr *eth;
-
- eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
- nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
- nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
- if (*nlp == IPPROTO_ESP)
+ const struct rte_ether_hdr *eth;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+
+ eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+
+ iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv4_pktlen(pkt, iph4, 0);
+
+ if (iph4->next_proto_id == IPPROTO_ESP)
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
else {
- t->ip4.data[t->ip4.num] = nlp;
+ t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
- } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
- nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
- nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
- if (*nlp == IPPROTO_ESP)
+ pkt->l2_len = 0;
+ pkt->l3_len = sizeof(*iph4);
+ } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ int next_proto;
+ size_t l3len, ext_len;
+ uint8_t *p;
+
+ /* get protocol type */
+ iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
+ RTE_ETHER_HDR_LEN);
+ adjust_ipv6_pktlen(pkt, iph6, 0);
+
+ next_proto = iph6->proto;
+
+ /* determine l3 header size up to ESP extension */
+ l3len = sizeof(struct ip6_hdr);
+ p = rte_pktmbuf_mtod(pkt, uint8_t *);
+ while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* drop packet when IPv6 header exceeds first segment length */
+ if (unlikely(l3len > pkt->data_len)) {
+ rte_pktmbuf_free(pkt);
+ return;
+ }
+
+ if (next_proto == IPPROTO_ESP)
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
else {
- t->ip6.data[t->ip6.num] = nlp;
+ t->ip6.data[t->ip6.num] = &iph6->proto;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
+ pkt->l2_len = 0;
+ pkt->l3_len = l3len;
} else {
/* Unknown/Unsupported type, drop the packet */
- RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
+ RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
+ rte_be_to_cpu_16(eth->ether_type));
rte_pktmbuf_free(pkt);
}
}
static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
+ const struct lcore_conf *qconf)
{
struct ip *ip;
- struct ether_hdr *ethhdr;
+ struct rte_ether_hdr *ethhdr;
ip = rte_pktmbuf_mtod(pkt, struct ip *);
- ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
+ ethhdr = (struct rte_ether_hdr *)
+ rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
+ pkt->ol_flags |= qconf->outbound.ipv4_offloads;
pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = ETHER_HDR_LEN;
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
- ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->ip_sum = 0;
+
+ /* calculate IPv4 cksum in SW */
+ if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
+ ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
+
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
} else {
- pkt->ol_flags |= PKT_TX_IPV6;
+ pkt->ol_flags |= qconf->outbound.ipv6_offloads;
pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = ETHER_HDR_LEN;
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
- ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
}
memcpy(ðhdr->s_addr, ðaddr_tbl[port].src,
- sizeof(struct ether_addr));
+ sizeof(struct rte_ether_addr));
memcpy(ðhdr->d_addr, ðaddr_tbl[port].dst,
- sizeof(struct ether_addr));
+ sizeof(struct rte_ether_addr));
}
static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
+ const struct lcore_conf *qconf)
{
int32_t i;
const int32_t prefetch_offset = 2;
for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port);
+ prepare_tx_pkt(pkts[i], port, qconf);
}
/* Process left packets */
for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port);
+ prepare_tx_pkt(pkts[i], port, qconf);
}
/* Send burst of packets on an output interface */
queueid = qconf->tx_queue_id[port];
m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
- prepare_tx_burst(m_table, n, port);
+ prepare_tx_burst(m_table, n, port, qconf);
ret = rte_eth_tx_burst(port, queueid, m_table, n);
if (unlikely(ret < n)) {
return 0;
}
+/*
+ * Helper function to fragment and queue for TX one packet.
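+ * Direct mbufs from pool_dir hold the newly built IP headers, while
+ * indirect mbufs from pool_indir reference the original payload, so
+ * fragmentation does not copy the packet data.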
+ */
+static inline uint32_t
+send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
+ uint16_t port, uint8_t proto)
+{
+ struct buffer *tbl;
+ uint32_t len, n;
+ int32_t rc;
+
+ tbl = qconf->tx_mbufs + port;
+ len = tbl->len;
+
+ /* free space for new fragments */
+ if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
+ send_burst(qconf, len, port);
+ len = 0;
+ }
+
+ n = RTE_DIM(tbl->m_table) - len;
+
+ if (proto == IPPROTO_IP)
+ rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, qconf->frag.pool_dir,
+ qconf->frag.pool_indir);
+ else
+ rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
+ n, mtu_size, qconf->frag.pool_dir,
+ qconf->frag.pool_indir);
+
+ if (rc >= 0)
+ len += rc;
+ else
+ RTE_LOG(ERR, IPSEC,
+ "%s: failed to fragment packet with size %u, "
+ "error code: %d\n",
+ __func__, m->pkt_len, rte_errno);
+
+ rte_pktmbuf_free(m);
+ return len;
+}
+
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint16_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
{
uint32_t lcore_id;
uint16_t len;
qconf = &lcore_conf[lcore_id];
len = qconf->tx_mbufs[port].len;
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
+
+ if (m->pkt_len <= mtu_size) {
+ qconf->tx_mbufs[port].m_table[len] = m;
+ len++;
+
+ /* need to fragment the packet */
+ } else
+ len = send_fragment_packet(qconf, m, port, proto);
/* enough pkts to be sent */
if (unlikely(len == MAX_PKT_BURST)) {
for (i = 0; i < ip->num; i++) {
m = ip->pkts[i];
res = ip->res[i];
- if (res & BYPASS) {
+ if (res == BYPASS) {
ip->pkts[j++] = m;
continue;
}
- if (res & DISCARD) {
+ if (res == DISCARD) {
rte_pktmbuf_free(m);
continue;
}
continue;
}
- sa_idx = ip->res[i] & PROTECT_MASK;
- if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
- !inbound_sa_check(sa, m, sa_idx)) {
+ sa_idx = SPI2IDX(res);
+ if (!inbound_sa_check(sa, m, sa_idx)) {
rte_pktmbuf_free(m);
continue;
}
ip->num = j;
}
-static inline void
-process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
- struct ipsec_traffic *traffic)
+static void
+split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
+ uint32_t i, n4, n6;
+ struct ip *ip;
struct rte_mbuf *m;
- uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
- traffic->ipsec.num, MAX_PKT_BURST);
+ n4 = trf->ip4.num;
+ n6 = trf->ip6.num;
- n_ip4 = traffic->ip4.num;
- n_ip6 = traffic->ip6.num;
+ for (i = 0; i < num; i++) {
+
+ m = mb[i];
+ ip = rte_pktmbuf_mtod(m, struct ip *);
- /* SP/ACL Inbound check ipsec and ip4 */
- for (i = 0; i < nb_pkts_in; i++) {
- m = traffic->ipsec.pkts[i];
- struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
if (ip->ip_v == IPVERSION) {
- idx = traffic->ip4.num++;
- traffic->ip4.pkts[idx] = m;
- traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
+ trf->ip4.pkts[n4] = m;
+ trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
uint8_t *, offsetof(struct ip, ip_p));
+ n4++;
} else if (ip->ip_v == IP6_VERSION) {
- idx = traffic->ip6.num++;
- traffic->ip6.pkts[idx] = m;
- traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
+ trf->ip6.pkts[n6] = m;
+ trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
uint8_t *,
offsetof(struct ip6_hdr, ip6_nxt));
+ n6++;
} else
rte_pktmbuf_free(m);
}
+ trf->ip4.num = n4;
+ trf->ip6.num = n6;
+}
+
+
+static inline void
+process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
+ struct ipsec_traffic *traffic)
+{
+ uint16_t nb_pkts_in, n_ip4, n_ip6;
+
+ n_ip4 = traffic->ip4.num;
+ n_ip6 = traffic->ip6.num;
+
+ if (app_sa_prm.enable == 0) {
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
+ split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
+ } else {
+ inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.saptr, traffic->ipsec.num);
+ ipsec_process(ipsec_ctx, traffic);
+ }
+
inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
n_ip4);
j = 0;
for (i = 0; i < ip->num; i++) {
m = ip->pkts[i];
- sa_idx = ip->res[i] & PROTECT_MASK;
- if (ip->res[i] & DISCARD)
+ sa_idx = SPI2IDX(ip->res[i]);
+ if (ip->res[i] == DISCARD)
rte_pktmbuf_free(m);
- else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
+ else if (ip->res[i] == BYPASS)
+ ip->pkts[j++] = m;
+ else {
ipsec->res[ipsec->num] = sa_idx;
ipsec->pkts[ipsec->num++] = m;
- } else /* BYPASS */
- ip->pkts[j++] = m;
+ }
}
ip->num = j;
}
outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
- traffic->ipsec.res, traffic->ipsec.num,
- MAX_PKT_BURST);
-
- for (i = 0; i < nb_pkts_out; i++) {
- m = traffic->ipsec.pkts[i];
- struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
- if (ip->ip_v == IPVERSION) {
- idx = traffic->ip4.num++;
- traffic->ip4.pkts[idx] = m;
- } else {
- idx = traffic->ip6.num++;
- traffic->ip6.pkts[idx] = m;
+ if (app_sa_prm.enable == 0) {
+
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.res, traffic->ipsec.num,
+ MAX_PKT_BURST);
+
+ for (i = 0; i < nb_pkts_out; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
}
+ } else {
+ outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
+ traffic->ipsec.saptr, traffic->ipsec.num);
+ ipsec_process(ipsec_ctx, traffic);
}
}
traffic->ip6.num = 0;
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
- traffic->ipsec.num, MAX_PKT_BURST);
+ if (app_sa_prm.enable == 0) {
- for (i = 0; i < nb_pkts_in; i++) {
- m = traffic->ipsec.pkts[i];
- struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
- if (ip->ip_v == IPVERSION) {
- idx = traffic->ip4.num++;
- traffic->ip4.pkts[idx] = m;
- } else {
- idx = traffic->ip6.num++;
- traffic->ip6.pkts[idx] = m;
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
+
+ for (i = 0; i < nb_pkts_in; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
}
+ } else {
+ inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.saptr, traffic->ipsec.num);
+ ipsec_process(ipsec_ctx, traffic);
}
}
struct ipsec_traffic *traffic)
{
struct rte_mbuf *m;
- uint32_t nb_pkts_out, i;
+ uint32_t nb_pkts_out, i, n;
struct ip *ip;
/* Drop any IPsec traffic from protected ports */
for (i = 0; i < traffic->ipsec.num; i++)
rte_pktmbuf_free(traffic->ipsec.pkts[i]);
- traffic->ipsec.num = 0;
+ n = 0;
- for (i = 0; i < traffic->ip4.num; i++)
- traffic->ip4.res[i] = single_sa_idx;
+ for (i = 0; i < traffic->ip4.num; i++) {
+ traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
+ traffic->ipsec.res[n++] = single_sa_idx;
+ }
- for (i = 0; i < traffic->ip6.num; i++)
- traffic->ip6.res[i] = single_sa_idx;
+ for (i = 0; i < traffic->ip6.num; i++) {
+ traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
+ traffic->ipsec.res[n++] = single_sa_idx;
+ }
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
- traffic->ip4.res, traffic->ip4.num,
- MAX_PKT_BURST);
+ traffic->ip4.num = 0;
+ traffic->ip6.num = 0;
+ traffic->ipsec.num = n;
- /* They all sue the same SA (ip4 or ip6 tunnel) */
- m = traffic->ipsec.pkts[i];
- ip = rte_pktmbuf_mtod(m, struct ip *);
- if (ip->ip_v == IPVERSION)
- traffic->ip4.num = nb_pkts_out;
- else
- traffic->ip6.num = nb_pkts_out;
+ if (app_sa_prm.enable == 0) {
+
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.res, traffic->ipsec.num,
+ MAX_PKT_BURST);
+
+ /* They all use the same SA (ip4 or ip6 tunnel) */
+ m = traffic->ipsec.pkts[0];
+ ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ traffic->ip4.num = nb_pkts_out;
+ for (i = 0; i < nb_pkts_out; i++)
+ traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
+ } else {
+ traffic->ip6.num = nb_pkts_out;
+ for (i = 0; i < nb_pkts_out; i++)
+ traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
+ }
+ } else {
+ outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
+ traffic->ipsec.saptr, traffic->ipsec.num);
+ ipsec_process(ipsec_ctx, traffic);
+ }
}
static inline int32_t
rte_pktmbuf_free(pkts[i]);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff);
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
}
}
rte_pktmbuf_free(pkts[i]);
continue;
}
- send_single_packet(pkts[i], pkt_hop & 0xff);
+ send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
}
}
}
static inline void
-drain_buffers(struct lcore_conf *qconf)
+drain_tx_buffers(struct lcore_conf *qconf)
{
struct buffer *buf;
uint32_t portid;
}
}
+static inline void
+drain_crypto_buffers(struct lcore_conf *qconf)
+{
+ uint32_t i;
+ struct ipsec_ctx *ctx;
+
+ /* drain inbound buffers */
+ ctx = &qconf->inbound;
+ for (i = 0; i != ctx->nb_qps; i++) {
+ if (ctx->tbl[i].len != 0)
+ enqueue_cop_burst(ctx->tbl + i);
+ }
+
+ /* drain outbound buffers */
+ ctx = &qconf->outbound;
+ for (i = 0; i != ctx->nb_qps; i++) {
+ if (ctx->tbl[i].len != 0)
+ enqueue_cop_burst(ctx->tbl + i);
+ }
+}
+
+static void
+drain_inbound_crypto_queues(const struct lcore_conf *qconf,
+ struct ipsec_ctx *ctx)
+{
+ uint32_t n;
+ struct ipsec_traffic trf;
+
+ if (app_sa_prm.enable == 0) {
+
+ /* dequeue packets from crypto-queue */
+ n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+ RTE_DIM(trf.ipsec.pkts));
+
+ trf.ip4.num = 0;
+ trf.ip6.num = 0;
+
+ /* split traffic by ipv4-ipv6 */
+ split46_traffic(&trf, trf.ipsec.pkts, n);
+ } else
+ ipsec_cqp_process(ctx, &trf);
+
+ /* process ipv4 packets */
+ if (trf.ip4.num != 0) {
+ inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ }
+
+ /* process ipv6 packets */
+ if (trf.ip6.num != 0) {
+ inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
+ route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+ }
+}
+
+static void
+drain_outbound_crypto_queues(const struct lcore_conf *qconf,
+ struct ipsec_ctx *ctx)
+{
+ uint32_t n;
+ struct ipsec_traffic trf;
+
+ if (app_sa_prm.enable == 0) {
+
+ /* dequeue packets from crypto-queue */
+ n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+ RTE_DIM(trf.ipsec.pkts));
+
+ trf.ip4.num = 0;
+ trf.ip6.num = 0;
+
+ /* split traffic by ipv4-ipv6 */
+ split46_traffic(&trf, trf.ipsec.pkts, n);
+ } else
+ ipsec_cqp_process(ctx, &trf);
+
+ /* process ipv4 packets */
+ if (trf.ip4.num != 0)
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+
+ /* process ipv6 packets */
+ if (trf.ip6.num != 0)
+ route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+}
+
/* main processing loop */
static int32_t
main_loop(__attribute__((unused)) void *dummy)
qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
qconf->inbound.cdev_map = cdev_map_in;
qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
+ qconf->inbound.session_priv_pool =
+ socket_ctx[socket_id].session_priv_pool;
qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
qconf->outbound.cdev_map = cdev_map_out;
qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
+ qconf->outbound.session_priv_pool =
+ socket_ctx[socket_id].session_priv_pool;
+ qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
+ qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
if (qconf->nb_rx_queue == 0) {
- RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
+ RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+ lcore_id);
return 0;
}
diff_tsc = cur_tsc - prev_tsc;
if (unlikely(diff_tsc > drain_tsc)) {
- drain_buffers(qconf);
+ drain_tx_buffers(qconf);
+ drain_crypto_buffers(qconf);
prev_tsc = cur_tsc;
}
- /* Read packet from RX queues */
for (i = 0; i < qconf->nb_rx_queue; ++i) {
+
+ /* Read packets from RX queues */
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
nb_rx = rte_eth_rx_burst(portid, queueid,
if (nb_rx > 0)
process_pkts(qconf, pkts, nb_rx, portid);
+
+ /* dequeue and process completed crypto-ops */
+ if (UNPROTECTED_PORT(portid))
+ drain_inbound_crypto_queues(qconf,
+ &qconf->inbound);
+ else
+ drain_outbound_crypto_queues(qconf,
+ &qconf->outbound);
}
}
}
" [-P]"
" [-u PORTMASK]"
" [-j FRAMESIZE]"
+ " [-l]"
+ " [-w REPLAY_WINDOW_SIZE]"
+ " [-e]"
+ " [-a]"
" -f CONFIG_FILE"
" --config (port,queue,lcore)[,(port,queue,lcore)]"
" [--single-sa SAIDX]"
" [--cryptodev_mask MASK]"
+ " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
+ " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
+ " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
+ " [--" CMD_LINE_OPT_MTU " MTU]"
"\n\n"
" -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
" -P : Enable promiscuous mode\n"
" -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
- " -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n"
- " packet size\n"
+ " -j FRAMESIZE: Data buffer size, minimum (and default)\n"
+ " value: RTE_MBUF_DEFAULT_BUF_SIZE\n"
+ " -l enables code-path that uses librte_ipsec\n"
+ " -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
+ " size for each SA\n"
+ " -e enables ESN\n"
+ " -a enables SA SQN atomic behaviour\n"
" -f CONFIG_FILE: Configuration file\n"
" --config (port,queue,lcore): Rx queue configuration\n"
" --single-sa SAIDX: Use single SA index for outbound traffic,\n"
" bypassing the SP\n"
" --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
" devices to configure\n"
+ " --" CMD_LINE_OPT_RX_OFFLOAD
+ ": bitmask of the RX HW offload capabilities to enable/use\n"
+ " (DEV_RX_OFFLOAD_*)\n"
+ " --" CMD_LINE_OPT_TX_OFFLOAD
+ ": bitmask of the TX HW offload capabilities to enable/use\n"
+ " (DEV_TX_OFFLOAD_*)\n"
+ " --" CMD_LINE_OPT_REASSEMBLE " NUM"
+ ": max number of entries in reassemble(fragment) table\n"
+ " (zero (default value) disables reassembly)\n"
+ " --" CMD_LINE_OPT_MTU " MTU"
+ ": MTU value on all ports (default value: 1500)\n"
+ " outgoing packets with bigger size will be fragmented\n"
+ " incoming packets with bigger size will be discarded\n"
"\n",
prgname);
}
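+/* parse a decimal or hexadecimal (0x-prefixed) string into a 64-bit mask */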
+static int
+parse_mask(const char *str, uint64_t *val)
+{
+ char *end;
+ unsigned long t;
+
+ errno = 0;
+ t = strtoul(str, &end, 0);
+ if (errno != 0 || end[0] != 0)
+ return -EINVAL;
+
+ *val = t;
+ return 0;
+}
+
static int32_t
parse_portmask(const char *portmask)
{
return 0;
}
+static void
+print_app_sa_prm(const struct app_sa_prm *prm)
+{
+ printf("librte_ipsec usage: %s\n",
+ (prm->enable == 0) ? "disabled" : "enabled");
+
+ if (prm->enable == 0)
+ return;
+
+ printf("replay window size: %u\n", prm->window_size);
+ printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
+ printf("SA flags: %#" PRIx64 "\n", prm->flags);
+}
+
static int32_t
parse_args(int32_t argc, char **argv)
{
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:",
+ while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:",
lgopts, &option_index)) != EOF) {
switch (opt) {
f_present = 1;
break;
case 'j':
- {
- int32_t size = parse_decimal(optarg);
- if (size <= 1518) {
- printf("Invalid jumbo frame size\n");
- if (size < 0) {
- print_usage(prgname);
- return -1;
- }
- printf("Using default value 9000\n");
- frame_size = 9000;
- } else {
- frame_size = size;
- }
+ ret = parse_decimal(optarg);
+ if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
+ ret > UINT16_MAX) {
+ printf("Invalid frame buffer size value: %s\n",
+ optarg);
+ print_usage(prgname);
+ return -1;
}
- printf("Enabled jumbo frames size %u\n", frame_size);
+ frame_buf_size = ret;
+ printf("Custom frame buffer size %u\n", frame_buf_size);
+ break;
+ case 'l':
+ app_sa_prm.enable = 1;
+ break;
+ case 'w':
+ app_sa_prm.enable = 1;
+ app_sa_prm.window_size = parse_decimal(optarg);
+ break;
+ case 'e':
+ app_sa_prm.enable = 1;
+ app_sa_prm.enable_esn = 1;
+ break;
+ case 'a':
+ app_sa_prm.enable = 1;
+ app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
break;
case CMD_LINE_OPT_CONFIG_NUM:
ret = parse_config(optarg);
/* else */
enabled_cryptodev_mask = ret;
break;
+ case CMD_LINE_OPT_RX_OFFLOAD_NUM:
+ ret = parse_mask(optarg, &dev_rx_offload);
+ if (ret != 0) {
+ printf("Invalid argument for \'%s\': %s\n",
+ CMD_LINE_OPT_RX_OFFLOAD, optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ break;
+ case CMD_LINE_OPT_TX_OFFLOAD_NUM:
+ ret = parse_mask(optarg, &dev_tx_offload);
+ if (ret != 0) {
+ printf("Invalid argument for \'%s\': %s\n",
+ CMD_LINE_OPT_TX_OFFLOAD, optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ break;
+ case CMD_LINE_OPT_REASSEMBLE_NUM:
+ ret = parse_decimal(optarg);
+ if (ret < 0) {
+ printf("Invalid argument for \'%s\': %s\n",
+ CMD_LINE_OPT_REASSEMBLE, optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ frag_tbl_sz = ret;
+ break;
+ case CMD_LINE_OPT_MTU_NUM:
+ ret = parse_decimal(optarg);
+ if (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {
+ printf("Invalid argument for \'%s\': %s\n",
+ CMD_LINE_OPT_MTU, optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ mtu_size = ret;
+ break;
default:
print_usage(prgname);
return -1;
return -1;
}
+ /* check if we need to enable multi-seg support */
+ if (multi_seg_required()) {
+ /* legacy mode doesn't support multi-seg */
+ app_sa_prm.enable = 1;
+ printf("frame buf size: %u, mtu: %u, "
+ "number of reassemble entries: %u\n"
+ "multi-segment support is required\n",
+ frame_buf_size, mtu_size, frag_tbl_sz);
+ }
+
+ print_app_sa_prm(&app_sa_prm);
+
if (optind >= 0)
argv[optind-1] = prgname;
}
static void
-print_ethaddr(const char *name, const struct ether_addr *eth_addr)
+print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
- char buf[ETHER_ADDR_FMT_SIZE];
- ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
printf("%s%s", name, buf);
}
+/*
+ * Update destination ethaddr for the port.
+ */
+int
+add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
+{
+ if (port >= RTE_DIM(ethaddr_tbl))
+ return -EINVAL;
+
+ ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
+ return 0;
+}
+
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
int16_t cdev_id, port_id;
struct rte_hash_parameters params = { 0 };
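+ /*
+ * When multi-seg support is needed, each crypto device must also be
+ * able to handle chained (scatter-gather) mbufs in place.
+ */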
+ const uint64_t mseg_flag = multi_seg_required() ?
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
+
params.entries = CDEV_MAP_ENTRIES;
params.key_len = sizeof(struct cdev_key);
params.hash_func = rte_jhash;
uint32_t max_sess_sz = 0, sess_sz;
for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
- sess_sz = rte_cryptodev_get_private_session_size(cdev_id);
+ void *sec_ctx;
+
+ /* Get crypto priv session size */
+ sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+
+ /*
+ * If crypto device is security capable, need to check the
+ * size of security session as well.
+ */
+
+ /* Get security context of the crypto device */
+ sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ /* Get size of security session */
+ sess_sz = rte_security_session_get_size(sec_ctx);
if (sess_sz > max_sess_sz)
max_sess_sz = sess_sz;
}
rte_cryptodev_info_get(cdev_id, &cdev_info);
+ if ((mseg_flag & cdev_info.feature_flags) != mseg_flag)
+ rte_exit(EXIT_FAILURE,
+ "Device %hd does not support \'%s\' feature\n",
+ cdev_id,
+ rte_cryptodev_get_feature_name(mseg_flag));
+
if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
max_nb_qps = cdev_info.max_nb_queue_pairs;
else
dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
dev_conf.nb_queue_pairs = qp;
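+ /* the app uses only symmetric crypto, so asymmetric device
+ * capabilities can be disabled */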
+ dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
+
+ uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
+ if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
+ rte_exit(EXIT_FAILURE,
+ "Device does not support at least %u "
+ "sessions", CDEV_MP_NB_OBJS);
if (!socket_ctx[dev_conf.socket_id].session_pool) {
char mp_name[RTE_MEMPOOL_NAMESIZE];
snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"sess_mp_%u", dev_conf.socket_id);
+ sess_mp = rte_cryptodev_sym_session_pool_create(
+ mp_name, CDEV_MP_NB_OBJS,
+ 0, CDEV_MP_CACHE_SZ, 0,
+ dev_conf.socket_id);
+ socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
+ }
+
+ if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_priv_%u", dev_conf.socket_id);
sess_mp = rte_mempool_create(mp_name,
CDEV_MP_NB_OBJS,
max_sess_sz,
0, NULL, NULL, NULL,
NULL, dev_conf.socket_id,
0);
- if (sess_mp == NULL)
- rte_exit(EXIT_FAILURE,
- "Cannot create session pool on socket %d\n",
- dev_conf.socket_id);
- else
- printf("Allocated session pool on socket %d\n",
- dev_conf.socket_id);
- socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
+ socket_ctx[dev_conf.socket_id].session_priv_pool =
+ sess_mp;
}
+ if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
+ !socket_ctx[dev_conf.socket_id].session_pool)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create session pool on socket %d\n",
+ dev_conf.socket_id);
+ else
+ printf("Allocated session pool on socket %d\n",
+ dev_conf.socket_id);
+
if (rte_cryptodev_configure(cdev_id, &dev_conf))
rte_panic("Failed to initialize cryptodev %u\n",
cdev_id);
qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
+ qp_conf.mp_session =
+ socket_ctx[dev_conf.socket_id].session_pool;
+ qp_conf.mp_session_private =
+ socket_ctx[dev_conf.socket_id].session_priv_pool;
for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
- &qp_conf, dev_conf.socket_id,
- socket_ctx[dev_conf.socket_id].session_pool))
+ &qp_conf, dev_conf.socket_id))
rte_panic("Failed to setup queue %u for "
"cdev_id %u\n", 0, cdev_id);
rte_eth_dev_get_sec_ctx(port_id)) {
int socket_id = rte_eth_dev_socket_id(port_id);
- if (!socket_ctx[socket_id].session_pool) {
+ if (!socket_ctx[socket_id].session_priv_pool) {
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"sess_mp_%u", socket_id);
sess_mp = rte_mempool_create(mp_name,
- CDEV_MP_NB_OBJS,
+ (CDEV_MP_NB_OBJS * 2),
max_sess_sz,
CDEV_MP_CACHE_SZ,
0, NULL, NULL, NULL,
else
printf("Allocated session pool "
"on socket %d\n", socket_id);
- socket_ctx[socket_id].session_pool = sess_mp;
+ socket_ctx[socket_id].session_priv_pool =
+ sess_mp;
}
}
}
}
static void
-port_init(uint16_t portid)
+port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
+ uint32_t frame_size;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
uint16_t nb_tx_queue, nb_rx_queue;
uint16_t tx_queueid, rx_queueid, queue, lcore_id;
int32_t ret, socket_id;
struct lcore_conf *qconf;
- struct ether_addr ethaddr;
+ struct rte_ether_addr ethaddr;
struct rte_eth_conf local_port_conf = port_conf;
rte_eth_dev_info_get(portid, &dev_info);
+ /* limit allowed HW offloads, as requested by the user */
+ dev_info.rx_offload_capa &= dev_rx_offload;
+ dev_info.tx_offload_capa &= dev_tx_offload;
+
printf("Configuring device port %u:\n", portid);
rte_eth_macaddr_get(portid, ðaddr);
- ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
+ ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ðaddr);
print_ethaddr("Address: ", ðaddr);
printf("\n");
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
- if (frame_size) {
- local_port_conf.rxmode.max_rx_pkt_len = frame_size;
+ frame_size = MTU_TO_FRAMELEN(mtu_size);
+ if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ local_port_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ if (multi_seg_required()) {
+ local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
+ local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
}
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
+ local_port_conf.rxmode.offloads |= req_rx_offloads;
+ local_port_conf.txmode.offloads |= req_tx_offloads;
+
+ /* Check that all required capabilities are supported */
+ if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+ local_port_conf.rxmode.offloads)
+ rte_exit(EXIT_FAILURE,
+ "Error: port %u required RX offloads: 0x%" PRIx64
+ ", avaialbe RX offloads: 0x%" PRIx64 "\n",
+ portid, local_port_conf.rxmode.offloads,
+ dev_info.rx_offload_capa);
+
+ if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+ local_port_conf.txmode.offloads)
+ rte_exit(EXIT_FAILURE,
+ "Error: port %u required TX offloads: 0x%" PRIx64
+ ", avaialbe TX offloads: 0x%" PRIx64 "\n",
+ portid, local_port_conf.txmode.offloads,
+ dev_info.tx_offload_capa);
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+
+ printf("port %u configurng rx_offloads=0x%" PRIx64
+ ", tx_offloads=0x%" PRIx64 "\n",
+ portid, local_port_conf.rxmode.offloads,
+ local_port_conf.txmode.offloads);
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
&local_port_conf);
if (ret < 0)
printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
qconf = &lcore_conf[lcore_id];
qconf->tx_queue_id[portid] = tx_queueid;
+
+ /* Pre-populate pkt offloads based on capabilities */
+ qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
+ qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
+ if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
+
tx_queueid++;
/* init RX queues */
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
char s[64];
- uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
- RTE_MBUF_DEFAULT_BUF_SIZE;
-
+ int32_t ms;
snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
- buff_size,
- socket_id);
- if (ctx->mbuf_pool == NULL)
+ frame_buf_size, socket_id);
+
+ /*
+ * if multi-segment support is enabled, then create a pool
+ * for indirect mbufs.
+ */
+ ms = multi_seg_required();
+ if (ms != 0) {
+ snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
+ ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
+ MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
+ }
+
+ if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
socket_id);
else
printf("Allocated mbuf pool on socket %d\n", socket_id);
}
+static inline int
+inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
+{
+ struct ipsec_sa *sa;
+
+ /* For inline protocol processing, the metadata in the event will
+ * uniquely identify the security session which raised the event.
+ * Application would then need the userdata it had registered with the
+ * security session to process the event.
+ */
+
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
+
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return -1;
+ }
+
+ /* Sequence number overflow. The SA needs to be re-established */
+ RTE_SET_USED(sa);
+ return 0;
+}
+
+static int
+inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ void *param, void *ret_param)
+{
+ uint64_t md;
+ struct rte_eth_event_ipsec_desc *event_desc = NULL;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(port_id);
+
+ RTE_SET_USED(param);
+
+ if (type != RTE_ETH_EVENT_IPSEC)
+ return -1;
+
+ event_desc = ret_param;
+ if (event_desc == NULL) {
+ printf("Event descriptor not set\n");
+ return -1;
+ }
+
+ md = event_desc->metadata;
+
+ if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
+ return inline_ipsec_event_esn_overflow(ctx, md);
+ else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
+ printf("Invalid IPsec event reported\n");
+ return -1;
+ }
+
+ return -1;
+}
+
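+/*
+ * RX callback: reassemble fragmented IPv4/IPv6 packets before they reach
+ * the main processing path. Returns the number of packets left in pkt[];
+ * mbufs consumed by still-incomplete packets stay in the fragment table.
+ */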
+static uint16_t
+rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, void *user_param)
+{
+ uint64_t tm;
+ uint32_t i, k;
+ struct lcore_conf *lc;
+ struct rte_mbuf *mb;
+ struct rte_ether_hdr *eth;
+
+ lc = user_param;
+ k = 0;
+ tm = 0;
+
+ for (i = 0; i != nb_pkts; i++) {
+
+ mb = pkt[i];
+ eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+
+ struct rte_ipv4_hdr *iph;
+
+ iph = (struct rte_ipv4_hdr *)(eth + 1);
+ if (rte_ipv4_frag_pkt_is_fragmented(iph)) {
+
+ mb->l2_len = sizeof(*eth);
+ mb->l3_len = sizeof(*iph);
+ tm = (tm != 0) ? tm : rte_rdtsc();
+ mb = rte_ipv4_frag_reassemble_packet(
+ lc->frag.tbl, &lc->frag.dr,
+ mb, tm, iph);
+
+ if (mb != NULL) {
+ /* fix ip cksum after reassembly */
+ iph = rte_pktmbuf_mtod_offset(mb,
+ struct rte_ipv4_hdr *,
+ mb->l2_len);
+ iph->hdr_checksum = 0;
+ iph->hdr_checksum = rte_ipv4_cksum(iph);
+ }
+ }
+ } else if (eth->ether_type ==
+ rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+
+ struct rte_ipv6_hdr *iph;
+ struct ipv6_extension_fragment *fh;
+
+ iph = (struct rte_ipv6_hdr *)(eth + 1);
+ fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
+ if (fh != NULL) {
+ mb->l2_len = sizeof(*eth);
+ mb->l3_len = (uintptr_t)fh - (uintptr_t)iph +
+ sizeof(*fh);
+ tm = (tm != 0) ? tm : rte_rdtsc();
+ mb = rte_ipv6_frag_reassemble_packet(
+ lc->frag.tbl, &lc->frag.dr,
+ mb, tm, iph, fh);
+ if (mb != NULL)
+ /* fix l3_len after reassembly */
+ mb->l3_len = mb->l3_len - sizeof(*fh);
+ }
+ }
+
+ pkt[k] = mb;
+ k += (mb != NULL);
+ }
+
+ /* some fragments were encountered, drain death row */
+ if (tm != 0)
+ rte_ip_frag_free_death_row(&lc->frag.dr, 0);
+
+ return k;
+}
+
+
+static int
+reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)
+{
+ int32_t sid;
+ uint32_t i;
+ uint64_t frag_cycles;
+ const struct lcore_rx_queue *rxq;
+ const struct rte_eth_rxtx_callback *cb;
+
+ /* create fragment table */
+ sid = rte_lcore_to_socket_id(cid);
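+ /*
+ * TTL in TSC cycles: (cycles per ms) * FRAG_TTL_MS, e.g. roughly
+ * 2 * 10^10 cycles for a 2GHz TSC and the 10s FRAG_TTL_MS default.
+ */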
+ frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
+ MS_PER_S * FRAG_TTL_MS;
+
+ lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
+ FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
+ if (lc->frag.tbl == NULL) {
+ printf("%s(%u): failed to create fragment table of size: %u, "
+ "error code: %d\n",
+ __func__, cid, frag_tbl_sz, rte_errno);
+ return -ENOMEM;
+ }
+
+ /* setup reassemble RX callbacks for all queues */
+ for (i = 0; i != lc->nb_rx_queue; i++) {
+
+ rxq = lc->rx_queue_list + i;
+ cb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,
+ rx_callback, lc);
+ if (cb == NULL) {
+ printf("%s(%u): failed to install RX callback for "
+ "portid=%u, queueid=%u, error code: %d\n",
+ __func__, cid,
+ rxq->port_id, rxq->queue_id, rte_errno);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int
+reassemble_init(void)
+{
+ int32_t rc;
+ uint32_t i, lc;
+
+ rc = 0;
+ for (i = 0; i != nb_lcore_params; i++) {
+ lc = lcore_params[i].lcore_id;
+ rc = reassemble_lcore_init(lcore_conf + lc, lc);
+ if (rc != 0)
+ break;
+ }
+
+ return rc;
+}
+
int32_t
main(int32_t argc, char **argv)
{
uint32_t lcore_id;
uint8_t socket_id;
uint16_t portid;
+ uint64_t req_rx_offloads, req_tx_offloads;
/* init EAL */
ret = rte_eal_init(argc, argv);
if (socket_ctx[socket_id].mbuf_pool)
continue;
- sa_init(&socket_ctx[socket_id], socket_id);
-
+ /* initialize SPD */
sp4_init(&socket_ctx[socket_id], socket_id);
sp6_init(&socket_ctx[socket_id], socket_id);
+ /* initialize SAD */
+ sa_init(&socket_ctx[socket_id], socket_id);
+
rt_init(&socket_ctx[socket_id], socket_id);
pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- port_init(portid);
+ sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);
+ port_init(portid, req_rx_offloads, req_tx_offloads);
}
cryptodevs_init();
*/
if (promiscuous_on)
rte_eth_promiscuous_enable(portid);
+
+ rte_eth_dev_callback_register(portid,
+ RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
+ }
+
+ /* fragment reassembly is enabled */
+ if (frag_tbl_sz != 0) {
+ ret = reassemble_init();
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE, "failed at reassemble init");
}
check_all_ports_link_status(enabled_port_mask);