#include <rte_log.h>
#include <rte_eal.h>
#include <rte_launch.h>
-#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_alarm.h>
+#include <rte_telemetry.h>
#include "event_helper.h"
#include "flow.h"
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-/* Configure how many packets ahead to prefetch, when reading packets */
-#define PREFETCH_OFFSET 3
-
-#define MAX_RX_QUEUE_PER_LCORE 16
-
#define MAX_LCORE_PARAMS 1024
/*
#define CMD_LINE_OPT_REASSEMBLE "reassemble"
#define CMD_LINE_OPT_MTU "mtu"
#define CMD_LINE_OPT_FRAG_TTL "frag-ttl"
+#define CMD_LINE_OPT_EVENT_VECTOR "event-vector"
+#define CMD_LINE_OPT_VECTOR_SIZE "vector-size"
+#define CMD_LINE_OPT_VECTOR_TIMEOUT "vector-tmo"
+#define CMD_LINE_OPT_VECTOR_POOL_SZ "vector-pool-sz"
+#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
#define CMD_LINE_ARG_EVENT "event"
#define CMD_LINE_ARG_POLL "poll"
CMD_LINE_OPT_REASSEMBLE_NUM,
CMD_LINE_OPT_MTU_NUM,
CMD_LINE_OPT_FRAG_TTL_NUM,
+ CMD_LINE_OPT_EVENT_VECTOR_NUM,
+ CMD_LINE_OPT_VECTOR_SIZE_NUM,
+ CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
+ CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
+ CMD_LINE_OPT_PER_PORT_POOL_NUM,
};
static const struct option lgopts[] = {
{CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
{CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
{CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
+ {CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
+ {CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
+ {CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
+ {CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
+ {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
{NULL, 0, 0, 0}
};
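/*
 * Illustrative invocation exercising the new long options (core list, port
 * masks, config file name and option values are example values only, and the
 * binary name depends on how the example is built; event vectorization
 * additionally requires the event transfer mode):
 *
 *   dpdk-ipsec-secgw -l 1-3 -- -p 0x3 -P -u 0x1 -f ep0.cfg \
 *       --transfer-mode event --event-vector --vector-size 32 \
 *       --vector-tmo 200000 --vector-pool-sz 2048 --per-port-pool -t 5
 */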
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
-static uint32_t nb_bufs_in_pool;
+uint32_t nb_bufs_in_pool;
/*
* RX/TX HW offload capabilities to enable/use on ethernet ports.
/*
* global values that determine multi-seg policy
*/
-static uint32_t frag_tbl_sz;
+uint32_t frag_tbl_sz;
static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
-static uint32_t mtu_size = RTE_ETHER_MTU;
+uint32_t mtu_size = RTE_ETHER_MTU;
static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
+static uint32_t stats_interval;
/* application wide librte_ipsec/SA parameters */
struct app_sa_prm app_sa_prm = {
};
static const char *cfgfile;
-struct lcore_rx_queue {
- uint16_t port_id;
- uint8_t queue_id;
-} __rte_cache_aligned;
-
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;
-struct buffer {
- uint16_t len;
- struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
-};
-
-struct lcore_conf {
- uint16_t nb_rx_queue;
- struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
- uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
- struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
- struct ipsec_ctx inbound;
- struct ipsec_ctx outbound;
- struct rt_ctx *rt4_ctx;
- struct rt_ctx *rt6_ctx;
- struct {
- struct rte_ip_frag_tbl *tbl;
- struct rte_mempool *pool_dir;
- struct rte_mempool *pool_indir;
- struct rte_ip_frag_death_row dr;
- } frag;
-} __rte_cache_aligned;
-
-static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
- ETH_RSS_TCP | ETH_RSS_SCTP,
+ .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
struct socket_ctx socket_ctx[NB_SOCKETS];
+bool per_port_pool;
+
/*
 * Determine if multi-segment support is required:
 * - either frame buffer size is smaller than mtu
- * - or reassmeble support is requested
+ * - or reassemble support is requested
*/
static int
multi_seg_required(void)
frame_buf_size || frag_tbl_sz != 0);
}
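/*
 * For example, with the default frame_buf_size of RTE_MBUF_DEFAULT_BUF_SIZE
 * (2048 bytes of data room plus headroom), a jumbo MTU such as --mtu 9000
 * makes the computed frame length exceed the mbuf data room, so the SCATTER
 * and MULTI_SEGS offloads and the indirect mbuf pool created in pool_init()
 * become mandatory; the same happens as soon as --reassemble is given.
 */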
-static inline void
-adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-
-static inline void
-adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
- uint32_t l2_len)
-{
- uint32_t plen, trim;
-
- plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
- if (plen < m->pkt_len) {
- trim = m->pkt_len - plen;
- rte_pktmbuf_trim(m, trim);
- }
-}
-#if (STATS_INTERVAL > 0)
+struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
/* Print out statistics on packet distribution */
static void
total_packets_dropped);
printf("\n====================================================\n");
- rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL);
-}
-#endif /* STATS_INTERVAL */
-
-static inline void
-prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
-{
- const struct rte_ether_hdr *eth;
- const struct rte_ipv4_hdr *iph4;
- const struct rte_ipv6_hdr *iph6;
- const struct rte_udp_hdr *udp;
- uint16_t ip4_hdr_len;
- uint16_t nat_port;
-
- eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
-
- iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv4_pktlen(pkt, iph4, 0);
-
- switch (iph4->next_proto_id) {
- case IPPROTO_ESP:
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- ip4_hdr_len = ((iph4->version_ihl &
- RTE_IPV4_HDR_IHL_MASK) *
- RTE_IPV4_IHL_MULTIPLIER);
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, ip4_hdr_len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
- t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
- t->ip4.pkts[(t->ip4.num)++] = pkt;
- }
- pkt->l2_len = 0;
- pkt->l3_len = sizeof(*iph4);
- pkt->packet_type |= RTE_PTYPE_L3_IPV4;
- } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
- int next_proto;
- size_t l3len, ext_len;
- uint8_t *p;
-
- /* get protocol type */
- iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
- RTE_ETHER_HDR_LEN);
- adjust_ipv6_pktlen(pkt, iph6, 0);
-
- next_proto = iph6->proto;
-
- /* determine l3 header size up to ESP extension */
- l3len = sizeof(struct ip6_hdr);
- p = rte_pktmbuf_mtod(pkt, uint8_t *);
- while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
- (next_proto = rte_ipv6_get_next_ext(p + l3len,
- next_proto, &ext_len)) >= 0)
- l3len += ext_len;
-
- /* drop packet when IPv6 header exceeds first segment length */
- if (unlikely(l3len > pkt->data_len)) {
- free_pkts(&pkt, 1);
- return;
- }
-
- switch (next_proto) {
- case IPPROTO_ESP:
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- break;
- case IPPROTO_UDP:
- if (app_sa_prm.udp_encap == 1) {
- udp = rte_pktmbuf_mtod_offset(pkt,
- struct rte_udp_hdr *, l3len);
- nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
- if (udp->src_port == nat_port ||
- udp->dst_port == nat_port){
- t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- pkt->packet_type |=
- MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
- break;
- }
- }
- /* Fall through */
- default:
- t->ip6.data[t->ip6.num] = &iph6->proto;
- t->ip6.pkts[(t->ip6.num)++] = pkt;
- }
- pkt->l2_len = 0;
- pkt->l3_len = l3len;
- pkt->packet_type |= RTE_PTYPE_L3_IPV6;
- } else {
- /* Unknown/Unsupported type, drop the packet */
- RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
- rte_be_to_cpu_16(eth->ether_type));
- free_pkts(&pkt, 1);
- return;
- }
-
- /* Check if the packet has been processed inline. For inline protocol
- * processed packets, the metadata in the mbuf can be used to identify
- * the security processing done on the packet. The metadata will be
- * used to retrieve the application registered userdata associated
- * with the security session.
- */
-
- if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD &&
- rte_security_dynfield_is_registered()) {
- struct ipsec_sa *sa;
- struct ipsec_mbuf_metadata *priv;
- struct rte_security_ctx *ctx = (struct rte_security_ctx *)
- rte_eth_dev_get_sec_ctx(
- pkt->port);
-
- /* Retrieve the userdata registered. Here, the userdata
- * registered is the SA pointer.
- */
- sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
- *rte_security_dynfield(pkt));
- if (sa == NULL) {
- /* userdata could not be retrieved */
- return;
- }
-
- /* Save SA as priv member in mbuf. This will be used in the
- * IPsec selector(SP-SA) check.
- */
-
- priv = get_priv(pkt);
- priv->sa = sa;
- }
-}
-
-static inline void
-prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
- uint16_t nb_pkts)
-{
- int32_t i;
-
- t->ipsec.num = 0;
- t->ip4.num = 0;
- t->ip6.num = 0;
-
- for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
- void *));
- prepare_one_packet(pkts[i], t);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_one_packet(pkts[i], t);
-}
-
-static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
- const struct lcore_conf *qconf)
-{
- struct ip *ip;
- struct rte_ether_hdr *ethhdr;
-
- ip = rte_pktmbuf_mtod(pkt, struct ip *);
-
- ethhdr = (struct rte_ether_hdr *)
- rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
-
- if (ip->ip_v == IPVERSION) {
- pkt->ol_flags |= qconf->outbound.ipv4_offloads;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ip->ip_sum = 0;
-
- /* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
- ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- } else {
- pkt->ol_flags |= qconf->outbound.ipv6_offloads;
- pkt->l3_len = sizeof(struct ip6_hdr);
- pkt->l2_len = RTE_ETHER_HDR_LEN;
-
- ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- }
-
-	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
-		sizeof(struct rte_ether_addr));
-	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
-		sizeof(struct rte_ether_addr));
-}
-
-static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
- const struct lcore_conf *qconf)
-{
- int32_t i;
- const int32_t prefetch_offset = 2;
-
- for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
- prepare_tx_pkt(pkts[i], port, qconf);
- }
- /* Process left packets */
- for (; i < nb_pkts; i++)
- prepare_tx_pkt(pkts[i], port, qconf);
-}
-
-/* Send burst of packets on an output interface */
-static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
-{
- struct rte_mbuf **m_table;
- int32_t ret;
- uint16_t queueid;
-
- queueid = qconf->tx_queue_id[port];
- m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
-
- prepare_tx_burst(m_table, n, port, qconf);
-
- ret = rte_eth_tx_burst(port, queueid, m_table, n);
-
- core_stats_update_tx(ret);
-
- if (unlikely(ret < n)) {
- do {
- free_pkts(&m_table[ret], 1);
- } while (++ret < n);
- }
-
- return 0;
-}
-
-/*
- * Helper function to fragment and queue for TX one packet.
- */
-static inline uint32_t
-send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
- uint16_t port, uint8_t proto)
-{
- struct buffer *tbl;
- uint32_t len, n;
- int32_t rc;
-
- tbl = qconf->tx_mbufs + port;
- len = tbl->len;
-
- /* free space for new fragments */
- if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
- send_burst(qconf, len, port);
- len = 0;
- }
-
- n = RTE_DIM(tbl->m_table) - len;
-
- if (proto == IPPROTO_IP)
- rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, qconf->frag.pool_dir,
- qconf->frag.pool_indir);
- else
- rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, qconf->frag.pool_dir,
- qconf->frag.pool_indir);
-
- if (rc >= 0)
- len += rc;
- else
- RTE_LOG(ERR, IPSEC,
- "%s: failed to fragment packet with size %u, "
- "error code: %d\n",
- __func__, m->pkt_len, rte_errno);
-
- free_pkts(&m, 1);
- return len;
-}
-
-/* Enqueue a single packet, and send burst if queue is filled */
-static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
-{
- uint32_t lcore_id;
- uint16_t len;
- struct lcore_conf *qconf;
-
- lcore_id = rte_lcore_id();
-
- qconf = &lcore_conf[lcore_id];
- len = qconf->tx_mbufs[port].len;
-
- if (m->pkt_len <= mtu_size) {
- qconf->tx_mbufs[port].m_table[len] = m;
- len++;
-
- /* need to fragment the packet */
- } else if (frag_tbl_sz > 0)
- len = send_fragment_packet(qconf, m, port, proto);
- else
- free_pkts(&m, 1);
-
- /* enough pkts to be sent */
- if (unlikely(len == MAX_PKT_BURST)) {
- send_burst(qconf, MAX_PKT_BURST, port);
- len = 0;
- }
-
- qconf->tx_mbufs[port].len = len;
- return 0;
-}
-
-static inline void
-inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
- uint16_t lim)
-{
- struct rte_mbuf *m;
- uint32_t i, j, res, sa_idx;
-
- if (ip->num == 0 || sp == NULL)
- return;
-
- rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
- ip->num, DEFAULT_MAX_CATEGORIES);
-
- j = 0;
- for (i = 0; i < ip->num; i++) {
- m = ip->pkts[i];
- res = ip->res[i];
- if (res == BYPASS) {
- ip->pkts[j++] = m;
- continue;
- }
- if (res == DISCARD) {
- free_pkts(&m, 1);
- continue;
- }
-
- /* Only check SPI match for processed IPSec packets */
- if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
- free_pkts(&m, 1);
- continue;
- }
-
- sa_idx = res - 1;
- if (!inbound_sa_check(sa, m, sa_idx)) {
- free_pkts(&m, 1);
- continue;
- }
- ip->pkts[j++] = m;
- }
- ip->num = j;
+ rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
}
static void
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
+ unsigned int lcoreid = rte_lcore_id();
uint16_t nb_pkts_in, n_ip4, n_ip6;
n_ip4 = traffic->ip4.num;
ipsec_process(ipsec_ctx, traffic);
}
- inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
- n_ip4);
+ inbound_sp_sa(ipsec_ctx->sp4_ctx,
+ ipsec_ctx->sa_ctx, &traffic->ip4, n_ip4,
+ &core_statistics[lcoreid].inbound.spd4);
- inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
- n_ip6);
+ inbound_sp_sa(ipsec_ctx->sp6_ctx,
+ ipsec_ctx->sa_ctx, &traffic->ip6, n_ip6,
+ &core_statistics[lcoreid].inbound.spd6);
}
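/*
 * Minimal sketch of the per-core statistics layout assumed by the
 * core_statistics[] array and the new stats arguments above. The
 * authoritative definitions live in the application's headers; the names
 * below carry a _sketch suffix to mark them as illustrative, and only the
 * fields touched in this file are shown.
 */
struct ipsec_spd_stats_sketch {
	uint64_t protect;   /* packets matched against a protect SPD rule */
	uint64_t bypass;    /* packets passed through in clear */
	uint64_t discard;   /* packets dropped by the SPD lookup */
};

struct ipsec_core_statistics_sketch {
	uint64_t rx, tx, dropped;                /* per-core packet counters */
	struct {
		struct ipsec_spd_stats_sketch spd4, spd6;
		struct { uint64_t miss; } sad;   /* SAD lookup misses */
	} inbound, outbound;
	struct { uint64_t miss; } lpm4, lpm6;    /* routing lookup misses */
};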
static inline void
-outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
- struct traffic_type *ipsec)
+outbound_spd_lookup(struct sp_ctx *sp,
+ struct traffic_type *ip,
+ struct traffic_type *ipsec,
+ struct ipsec_spd_stats *stats)
{
struct rte_mbuf *m;
uint32_t i, j, sa_idx;
rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
ip->num, DEFAULT_MAX_CATEGORIES);
- j = 0;
- for (i = 0; i < ip->num; i++) {
+ for (i = 0, j = 0; i < ip->num; i++) {
m = ip->pkts[i];
sa_idx = ip->res[i] - 1;
- if (ip->res[i] == DISCARD)
+
+ if (unlikely(ip->res[i] == DISCARD)) {
free_pkts(&m, 1);
- else if (ip->res[i] == BYPASS)
+
+ stats->discard++;
+ } else if (unlikely(ip->res[i] == BYPASS)) {
ip->pkts[j++] = m;
- else {
+
+ stats->bypass++;
+ } else {
ipsec->res[ipsec->num] = sa_idx;
ipsec->pkts[ipsec->num++] = m;
+
+ stats->protect++;
}
}
ip->num = j;
{
struct rte_mbuf *m;
uint16_t idx, nb_pkts_out, i;
+ unsigned int lcoreid = rte_lcore_id();
/* Drop any IPsec traffic from protected ports */
free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
traffic->ipsec.num = 0;
- outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
+ outbound_spd_lookup(ipsec_ctx->sp4_ctx,
+ &traffic->ip4, &traffic->ipsec,
+ &core_statistics[lcoreid].outbound.spd4);
- outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
+ outbound_spd_lookup(ipsec_ctx->sp6_ctx,
+ &traffic->ip6, &traffic->ipsec,
+ &core_statistics[lcoreid].outbound.spd6);
if (app_sa_prm.enable == 0) {
struct rte_mbuf *m;
uint32_t nb_pkts_in, i, idx;
- /* Drop any IPv4 traffic from unprotected ports */
- free_pkts(traffic->ip4.pkts, traffic->ip4.num);
-
- traffic->ip4.num = 0;
-
- /* Drop any IPv6 traffic from unprotected ports */
- free_pkts(traffic->ip6.pkts, traffic->ip6.num);
-
- traffic->ip6.num = 0;
-
if (app_sa_prm.enable == 0) {
nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
}
}
-static inline int32_t
-get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
-{
- struct ipsec_mbuf_metadata *priv;
- struct ipsec_sa *sa;
-
- priv = get_priv(pkt);
-
- sa = priv->sa;
- if (unlikely(sa == NULL)) {
- RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
- goto fail;
- }
-
- if (is_ipv6)
- return sa->portid;
-
- /* else */
- return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
-
-fail:
- if (is_ipv6)
- return -1;
-
- /* else */
- return 0;
-}
-
-static inline void
-route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- uint32_t hop[MAX_PKT_BURST * 2];
- uint32_t dst_ip[MAX_PKT_BURST * 2];
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip, ip_dst);
- dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
- uint32_t *, offset);
- dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
- lpm_pkts++;
- }
- }
-
- rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
- }
-}
-
-static inline void
-route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
-{
- int32_t hop[MAX_PKT_BURST * 2];
- uint8_t dst_ip[MAX_PKT_BURST * 2][16];
- uint8_t *ip6_dst;
- int32_t pkt_hop = 0;
- uint16_t i, offset;
- uint16_t lpm_pkts = 0;
-
- if (nb_pkts == 0)
- return;
-
- /* Need to do an LPM lookup for non-inline packets. Inline packets will
- * have port ID in the SA
- */
-
- for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
- /* Security offload not enabled. So an LPM lookup is
- * required to get the hop
- */
- offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
- offset);
- memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
- lpm_pkts++;
- }
- }
-
- rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
- lpm_pkts);
-
- lpm_pkts = 0;
-
- for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
- /* Read hop from the SA */
- pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
- } else {
- /* Need to use hop returned by lookup */
- pkt_hop = hop[lpm_pkts++];
- }
-
- if (pkt_hop == -1) {
- free_pkts(&pkts[i], 1);
- continue;
- }
- send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
- }
-}
-
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
uint8_t nb_pkts, uint16_t portid)
route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
-static inline void
-drain_tx_buffers(struct lcore_conf *qconf)
-{
- struct buffer *buf;
- uint32_t portid;
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- buf = &qconf->tx_mbufs[portid];
- if (buf->len == 0)
- continue;
- send_burst(qconf, buf->len, portid);
- buf->len = 0;
- }
-}
-
static inline void
drain_crypto_buffers(struct lcore_conf *qconf)
{
{
uint32_t n;
struct ipsec_traffic trf;
+ unsigned int lcoreid = rte_lcore_id();
if (app_sa_prm.enable == 0) {
/* process ipv4 packets */
if (trf.ip4.num != 0) {
- inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
+ inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
+ &core_statistics[lcoreid].inbound.spd4);
route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
}
/* process ipv6 packets */
if (trf.ip6.num != 0) {
- inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
+ inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0,
+ &core_statistics[lcoreid].inbound.spd6);
route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
}
}
qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
qconf->outbound.session_priv_pool =
socket_ctx[socket_id].session_priv_pool;
- qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
" [-e]"
" [-a]"
" [-c]"
+ " [-t STATS_INTERVAL]"
" [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
" -f CONFIG_FILE"
" --config (port,queue,lcore)[,(port,queue,lcore)]"
" [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
" [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
" [--" CMD_LINE_OPT_MTU " MTU]"
+ " [--event-vector]"
+ " [--vector-size SIZE]"
+ " [--vector-tmo TIMEOUT in ns]"
"\n\n"
" -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
" -P : Enable promiscuous mode\n"
" -a enables SA SQN atomic behaviour\n"
" -c specifies inbound SAD cache size,\n"
" zero value disables the cache (default value: 128)\n"
+ " -t specifies statistics screen update interval,\n"
+ " zero disables statistics screen (default value: 0)\n"
" -s number of mbufs in packet pool, if not specified number\n"
" of mbufs will be calculated based on number of cores,\n"
" ports and crypto queues\n"
" \"parallel\" : Parallel\n"
" --" CMD_LINE_OPT_RX_OFFLOAD
": bitmask of the RX HW offload capabilities to enable/use\n"
- " (DEV_RX_OFFLOAD_*)\n"
+ " (RTE_ETH_RX_OFFLOAD_*)\n"
" --" CMD_LINE_OPT_TX_OFFLOAD
": bitmask of the TX HW offload capabilities to enable/use\n"
- " (DEV_TX_OFFLOAD_*)\n"
+ " (RTE_ETH_TX_OFFLOAD_*)\n"
" --" CMD_LINE_OPT_REASSEMBLE " NUM"
": max number of entries in reassemble(fragment) table\n"
" (zero (default value) disables reassembly)\n"
" --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
": fragments lifetime in nanoseconds, default\n"
" and maximum value is 10.000.000.000 ns (10 s)\n"
+ " --event-vector enables event vectorization\n"
+ " --vector-size Max vector size (default value: 16)\n"
+ " --vector-tmo Max vector timeout in nanoseconds"
+ " (default value: 102400)\n"
+ " --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
+ " --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
+ " (default value is based on mbuf count)\n"
"\n",
prgname);
}
int32_t option_index;
char *prgname = argv[0];
int32_t f_present = 0;
+ struct eventmode_conf *em_conf = NULL;
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:s:",
+ while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:t:s:",
lgopts, &option_index)) != EOF) {
switch (opt) {
}
app_sa_prm.cache_sz = ret;
break;
+ case 't':
+ ret = parse_decimal(optarg);
+ if (ret < 0) {
+ printf("Invalid interval value: %s\n", optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ stats_interval = ret;
+ break;
case CMD_LINE_OPT_CONFIG_NUM:
ret = parse_config(optarg);
if (ret) {
}
frag_ttl_ns = ret;
break;
+ case CMD_LINE_OPT_EVENT_VECTOR_NUM:
+ em_conf = eh_conf->mode_params;
+ em_conf->ext_params.event_vector = 1;
+ break;
+ case CMD_LINE_OPT_VECTOR_SIZE_NUM:
+ ret = parse_decimal(optarg);
+
+ if (ret < 0 || ret > MAX_PKT_BURST) {
+ printf("Invalid argument for \'%s\': %s\n",
+ CMD_LINE_OPT_VECTOR_SIZE, optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ em_conf = eh_conf->mode_params;
+ em_conf->ext_params.vector_size = ret;
+ break;
+ case CMD_LINE_OPT_VECTOR_TIMEOUT_NUM:
+ ret = parse_decimal(optarg);
+
+ em_conf = eh_conf->mode_params;
+ em_conf->vector_tmo_ns = ret;
+ break;
+ case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
+ ret = parse_decimal(optarg);
+
+ em_conf = eh_conf->mode_params;
+ em_conf->vector_pool_sz = ret;
+ break;
+ case CMD_LINE_OPT_PER_PORT_POOL_NUM:
+ per_port_pool = 1;
+ break;
default:
print_usage(prgname);
return -1;
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
ret = rte_hash_add_key_data(map, &key, (void *)i);
if (ret < 0) {
- printf("Faled to insert cdev mapping for (lcore %u, "
+ printf("Failed to insert cdev mapping for (lcore %u, "
"cdev %u, qp %u), errno %d\n",
key.lcore_id, ipsec_ctx->tbl[i].id,
ipsec_ctx->tbl[i].qp, ret);
str = "Inbound";
}
- /* Required cryptodevs with operation chainning */
+ /* Required cryptodevs with operation chaining */
if (!(dev_info->feature_flags &
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
return ret;
return total_nb_qps;
}
+static int
+check_ptype(int portid)
+{
+ int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0;
+ int i, nb_ptypes;
+ uint32_t mask;
+
+ mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
+ RTE_PTYPE_TUNNEL_MASK);
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0);
+ if (nb_ptypes <= 0)
+ return 0;
+
+ uint32_t ptypes[nb_ptypes];
+
+ nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes);
+ for (i = 0; i < nb_ptypes; ++i) {
+ if (RTE_ETH_IS_IPV4_HDR(ptypes[i]))
+ l3_ipv4 = 1;
+ if (RTE_ETH_IS_IPV6_HDR(ptypes[i]))
+ l3_ipv6 = 1;
+ if ((ptypes[i] & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
+ tunnel_esp = 1;
+ if ((ptypes[i] & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+ l4_udp = 1;
+ }
+
+ if (l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+ if (l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+
+ if (l4_udp == 0)
+ printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid);
+
+ if (tunnel_esp == 0)
+ printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid);
+
+ if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp)
+ return 1;
+
+ return 0;
+}
+
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ const struct rte_ipv4_hdr *iph4;
+ const struct rte_ipv6_hdr *iph6;
+ const struct rte_ether_hdr *eth;
+ const struct rte_udp_hdr *udp;
+ uint16_t nat_port, ether_type;
+ int next_proto = 0;
+ size_t ext_len = 0;
+ const uint8_t *p;
+ uint32_t l3len;
+
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ ether_type = eth->ether_type;
+
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+ iph4 = (const struct rte_ipv4_hdr *)(eth + 1);
+ l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+
+ if (l3len == sizeof(struct rte_ipv4_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV4;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+ next_proto = iph4->next_proto_id;
+ p = (const uint8_t *)iph4;
+ } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ iph6 = (const struct rte_ipv6_hdr *)(eth + 1);
+ l3len = sizeof(struct ip6_hdr);
+
+ /* determine l3 header size up to ESP extension */
+ next_proto = iph6->proto;
+ p = (const uint8_t *)iph6;
+ while (next_proto != IPPROTO_ESP && l3len < m->data_len &&
+ (next_proto = rte_ipv6_get_next_ext(p + l3len,
+ next_proto, &ext_len)) >= 0)
+ l3len += ext_len;
+
+ /* Skip parsing if the IPv6 header exceeds the first segment length */
+ if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len))
+ goto exit;
+
+ if (l3len == sizeof(struct ip6_hdr))
+ packet_type |= RTE_PTYPE_L3_IPV6;
+ else
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+ }
+
+ switch (next_proto) {
+ case IPPROTO_ESP:
+ packet_type |= RTE_PTYPE_TUNNEL_ESP;
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = (const struct rte_udp_hdr *)(p + l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port)
+ packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ }
+ break;
+ default:
+ break;
+ }
+exit:
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ uint32_t i;
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct rte_ether_hdr *));
+ for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
+ rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
+ struct rte_ether_hdr *));
+ parse_ptype(pkts[i]);
+ }
+ parse_ptype(pkts[i]);
+
+ return nb_pkts;
+}
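/*
 * Sketch of how later classification can key off the ptype filled in above
 * instead of re-parsing headers; the helper name is hypothetical and the
 * actual consumption in prepare_one_packet() is outside this excerpt.
 * MBUF_PTYPE_TUNNEL_ESP_IN_UDP is the application-defined value ORed into
 * packet_type above for NAT-T traffic.
 */
static inline int
pkt_is_ipsec_sketch(const struct rte_mbuf *m)
{
	uint32_t pt = m->packet_type;

	/* plain ESP tunnel, or all bits of the UDP-encapsulated ESP value set */
	return (pt & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP ||
		(pt & MBUF_PTYPE_TUNNEL_ESP_IN_UDP) ==
				MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
}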
+
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
- uint32_t frame_size;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
uint16_t nb_tx_queue, nb_rx_queue;
struct lcore_conf *qconf;
struct rte_ether_addr ethaddr;
struct rte_eth_conf local_port_conf = port_conf;
+ int ptype_supported;
ret = rte_eth_dev_info_get(portid, &dev_info);
if (ret != 0)
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- /* limit allowed HW offloafs, as user requested */
+ /* limit allowed HW offloads, as user requested */
dev_info.rx_offload_capa &= dev_rx_offload;
dev_info.tx_offload_capa &= dev_tx_offload;
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
- frame_size = MTU_TO_FRAMELEN(mtu_size);
- if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- local_port_conf.rxmode.max_rx_pkt_len = frame_size;
+ local_port_conf.rxmode.mtu = mtu_size;
if (multi_seg_required()) {
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+ local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
local_port_conf.rxmode.offloads |= req_rx_offloads;
local_port_conf.rxmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required RX offloads: 0x%" PRIx64
- ", avaialbe RX offloads: 0x%" PRIx64 "\n",
+ ", available RX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
dev_info.rx_offload_capa);
local_port_conf.txmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required TX offloads: 0x%" PRIx64
- ", avaialbe TX offloads: 0x%" PRIx64 "\n",
+ ", available TX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.txmode.offloads,
dev_info.tx_offload_capa);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
-
- printf("port %u configurng rx_offloads=0x%" PRIx64
+ printf("port %u configuring rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
local_port_conf.txmode.offloads);
rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
"err=%d, port=%d\n", ret, portid);
+ /* Check if required ptypes are supported */
+ ptype_supported = check_ptype(portid);
+ if (!ptype_supported)
+ printf("Port %d: packet types will be parsed in software\n", portid);
+
/* init one TX queue per lcore */
tx_queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
qconf->tx_queue_id[portid] = tx_queueid;
/* Pre-populate pkt offloads based on capabilities */
- qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
- qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
- if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
- qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
+ qconf->outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4;
+ qconf->outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6;
+ if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ qconf->outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM;
tx_queueid++;
/* init RX queues */
for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
struct rte_eth_rxconf rxq_conf;
+ struct rte_mempool *pool;
if (portid != qconf->rx_queue_list[queue].port_id)
continue;
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = local_port_conf.rxmode.offloads;
+
+ if (per_port_pool)
+ pool = socket_ctx[socket_id].mbuf_pool[portid];
+ else
+ pool = socket_ctx[socket_id].mbuf_pool[0];
+
ret = rte_eth_rx_queue_setup(portid, rx_queueid,
- nb_rxd, socket_id, &rxq_conf,
- socket_ctx[socket_id].mbuf_pool);
+ nb_rxd, socket_id, &rxq_conf, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
+
+ /* Register Rx callback if ptypes are not supported */
+ if (!ptype_supported &&
+ !rte_eth_add_rx_callback(portid, queue,
+ parse_ptype_cb, NULL)) {
+ printf("Failed to add rx callback: port=%d, "
+ "queue=%d\n", portid, queue);
+ }
}
}
printf("\n");
}
static void
-pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
+pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
+ uint32_t nb_mbuf)
{
char s[64];
int32_t ms;
- snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
- ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
- MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
- frame_buf_size, socket_id);
+
+ /* Skip if the mbuf_pool for this port has already been initialised */
+ if (socket_ctx[socket_id].mbuf_pool[portid])
+ return;
+
+ snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
+ ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
+ MEMPOOL_CACHE_SIZE,
+ ipsec_metadata_size(),
+ frame_buf_size,
+ socket_id);
/*
* if multi-segment support is enabled, then create a pool
- * for indirect mbufs.
+ * for indirect mbufs. This is not per-port but global.
*/
ms = multi_seg_required();
- if (ms != 0) {
+ if (ms != 0 && !ctx->mbuf_pool_indir) {
snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
}
- if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
+ if (ctx->mbuf_pool[portid] == NULL ||
+ (ms != 0 && ctx->mbuf_pool_indir == NULL))
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
socket_id);
else
return -1;
}
+static int
+ethdev_reset_event_callback(uint16_t port_id,
+ enum rte_eth_event_type type,
+ void *param __rte_unused, void *ret_param __rte_unused)
+{
+ printf("Reset Event on port id %d type %d\n", port_id, type);
+ printf("Force quit application\n");
+ force_quit = true;
+ return 0;
+}
+
static uint16_t
rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
struct rte_mbuf *pkt[], uint16_t nb_pkts,
rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
struct rte_ipv6_hdr *iph;
- struct ipv6_extension_fragment *fh;
+ struct rte_ipv6_fragment_ext *fh;
iph = (struct rte_ipv6_hdr *)(eth + 1);
fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
struct rte_flow *flow;
int ret;
- if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
return;
/* Add the default rte_flow to enable SECURITY for all ESP packets */
8192U);
}
+
+static int
+handle_telemetry_cmd_ipsec_secgw_stats(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ uint64_t total_pkts_dropped = 0, total_pkts_tx = 0, total_pkts_rx = 0;
+ unsigned int coreid;
+
+ rte_tel_data_start_dict(data);
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0)
+ return -EINVAL;
+
+ total_pkts_dropped = core_statistics[coreid].dropped;
+ total_pkts_tx = core_statistics[coreid].tx;
+ total_pkts_rx = core_statistics[coreid].rx;
+
+ } else {
+ for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
+
+ /* skip disabled cores */
+ if (rte_lcore_is_enabled(coreid) == 0)
+ continue;
+
+ total_pkts_dropped += core_statistics[coreid].dropped;
+ total_pkts_tx += core_statistics[coreid].tx;
+ total_pkts_rx += core_statistics[coreid].rx;
+ }
+ }
+
+ /* add telemetry key/value pairs */
+ rte_tel_data_add_dict_u64(data, "packets received",
+ total_pkts_rx);
+
+ rte_tel_data_add_dict_u64(data, "packets transmitted",
+ total_pkts_tx);
+
+ rte_tel_data_add_dict_u64(data, "packets dropped",
+ total_pkts_dropped);
+
+ return 0;
+}
+
+static void
+update_lcore_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
+{
+ struct ipsec_core_statistics *lcore_stats;
+
+ /* skip disabled cores */
+ if (rte_lcore_is_enabled(coreid) == 0)
+ return;
+
+ lcore_stats = &core_statistics[coreid];
+
+ total->rx += lcore_stats->rx;
+ total->dropped += lcore_stats->dropped;
+ total->tx += lcore_stats->tx;
+
+ /* outbound stats */
+ total->outbound.spd6.protect += lcore_stats->outbound.spd6.protect;
+ total->outbound.spd6.bypass += lcore_stats->outbound.spd6.bypass;
+ total->outbound.spd6.discard += lcore_stats->outbound.spd6.discard;
+
+ total->outbound.spd4.protect += lcore_stats->outbound.spd4.protect;
+ total->outbound.spd4.bypass += lcore_stats->outbound.spd4.bypass;
+ total->outbound.spd4.discard += lcore_stats->outbound.spd4.discard;
+
+ total->outbound.sad.miss += lcore_stats->outbound.sad.miss;
+
+ /* inbound stats */
+ total->inbound.spd6.protect += lcore_stats->inbound.spd6.protect;
+ total->inbound.spd6.bypass += lcore_stats->inbound.spd6.bypass;
+ total->inbound.spd6.discard += lcore_stats->inbound.spd6.discard;
+
+ total->inbound.spd4.protect += lcore_stats->inbound.spd4.protect;
+ total->inbound.spd4.bypass += lcore_stats->inbound.spd4.bypass;
+ total->inbound.spd4.discard += lcore_stats->inbound.spd4.discard;
+
+ total->inbound.sad.miss += lcore_stats->inbound.sad.miss;
+
+ /* routing stats */
+ total->lpm4.miss += lcore_stats->lpm4.miss;
+ total->lpm6.miss += lcore_stats->lpm6.miss;
+}
+
+static void
+update_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
+{
+ memset(total, 0, sizeof(*total));
+
+ if (coreid != UINT32_MAX) {
+ update_lcore_statistics(total, coreid);
+ } else {
+ for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++)
+ update_lcore_statistics(total, coreid);
+ }
+}
+
+static int
+handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ struct ipsec_core_statistics total_stats;
+
+ struct rte_tel_data *spd4_data = rte_tel_data_alloc();
+ struct rte_tel_data *spd6_data = rte_tel_data_alloc();
+ struct rte_tel_data *sad_data = rte_tel_data_alloc();
+ unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!spd4_data || !spd6_data || !sad_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize telemetry data structs as dicts */
+ rte_tel_data_start_dict(data);
+
+ rte_tel_data_start_dict(spd4_data);
+ rte_tel_data_start_dict(spd6_data);
+ rte_tel_data_start_dict(sad_data);
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ update_statistics(&total_stats, coreid);
+
+ /* add spd 4 telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(spd4_data, "protect",
+ total_stats.outbound.spd4.protect);
+ rte_tel_data_add_dict_u64(spd4_data, "bypass",
+ total_stats.outbound.spd4.bypass);
+ rte_tel_data_add_dict_u64(spd4_data, "discard",
+ total_stats.outbound.spd4.discard);
+
+ rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
+
+ /* add spd 6 telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(spd6_data, "protect",
+ total_stats.outbound.spd6.protect);
+ rte_tel_data_add_dict_u64(spd6_data, "bypass",
+ total_stats.outbound.spd6.bypass);
+ rte_tel_data_add_dict_u64(spd6_data, "discard",
+ total_stats.outbound.spd6.discard);
+
+ rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
+
+ /* add sad telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(sad_data, "miss",
+ total_stats.outbound.sad.miss);
+
+ rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
+
+exit:
+ if (rc) {
+ rte_tel_data_free(spd4_data);
+ rte_tel_data_free(spd6_data);
+ rte_tel_data_free(sad_data);
+ }
+ return rc;
+}
+
+static int
+handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ struct ipsec_core_statistics total_stats;
+
+ struct rte_tel_data *spd4_data = rte_tel_data_alloc();
+ struct rte_tel_data *spd6_data = rte_tel_data_alloc();
+ struct rte_tel_data *sad_data = rte_tel_data_alloc();
+ unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!spd4_data || !spd6_data || !sad_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize telemetry data structs as dicts */
+ rte_tel_data_start_dict(data);
+ rte_tel_data_start_dict(spd4_data);
+ rte_tel_data_start_dict(spd6_data);
+ rte_tel_data_start_dict(sad_data);
+
+ /* add children dicts to parent dict */
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ update_statistics(&total_stats, coreid);
+
+ /* add sad telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(sad_data, "miss",
+ total_stats.inbound.sad.miss);
+
+ rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
+
+ /* add spd 4 telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(spd4_data, "protect",
+ total_stats.inbound.spd4.protect);
+ rte_tel_data_add_dict_u64(spd4_data, "bypass",
+ total_stats.inbound.spd4.bypass);
+ rte_tel_data_add_dict_u64(spd4_data, "discard",
+ total_stats.inbound.spd4.discard);
+
+ rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
+
+ /* add spd 6 telemetry key/value pairs */
+
+ rte_tel_data_add_dict_u64(spd6_data, "protect",
+ total_stats.inbound.spd6.protect);
+ rte_tel_data_add_dict_u64(spd6_data, "bypass",
+ total_stats.inbound.spd6.bypass);
+ rte_tel_data_add_dict_u64(spd6_data, "discard",
+ total_stats.inbound.spd6.discard);
+
+ rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
+
+exit:
+ if (rc) {
+ rte_tel_data_free(spd4_data);
+ rte_tel_data_free(spd6_data);
+ rte_tel_data_free(sad_data);
+ }
+ return rc;
+}
+
+static int
+handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ struct ipsec_core_statistics total_stats;
+
+ struct rte_tel_data *lpm4_data = rte_tel_data_alloc();
+ struct rte_tel_data *lpm6_data = rte_tel_data_alloc();
+ unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!lpm4_data || !lpm6_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize telemetry data structs as dicts */
+ rte_tel_data_start_dict(data);
+ rte_tel_data_start_dict(lpm4_data);
+ rte_tel_data_start_dict(lpm6_data);
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ update_statistics(&total_stats, coreid);
+
+ /* add lpm 4 telemetry key/value pairs */
+ rte_tel_data_add_dict_u64(lpm4_data, "miss",
+ total_stats.lpm4.miss);
+
+ rte_tel_data_add_dict_container(data, "IPv4 LPM", lpm4_data, 0);
+
+ /* add lpm 6 telemetry key/value pairs */
+ rte_tel_data_add_dict_u64(lpm6_data, "miss",
+ total_stats.lpm6.miss);
+
+ rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0);
+
+exit:
+ if (rc) {
+ rte_tel_data_free(lpm4_data);
+ rte_tel_data_free(lpm6_data);
+ }
+ return rc;
+}
+
+static void
+ipsec_secgw_telemetry_init(void)
+{
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats",
+ handle_telemetry_cmd_ipsec_secgw_stats,
+ "Returns global stats. "
+ "Optional Parameters: int <logical core id>");
+
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/outbound",
+ handle_telemetry_cmd_ipsec_secgw_stats_outbound,
+ "Returns outbound global stats. "
+ "Optional Parameters: int <logical core id>");
+
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/inbound",
+ handle_telemetry_cmd_ipsec_secgw_stats_inbound,
+ "Returns inbound global stats. "
+ "Optional Parameters: int <logical core id>");
+
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/routing",
+ handle_telemetry_cmd_ipsec_secgw_stats_routing,
+ "Returns routing stats. "
+ "Optional Parameters: int <logical core id>");
+}
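/*
 * Example session with the standard telemetry client against the endpoints
 * registered above (output shape only; the counter values shown are
 * placeholders, and the optional parameter after the comma selects a single
 * lcore):
 *
 *   $ ./usertools/dpdk-telemetry.py
 *   --> /examples/ipsec-secgw/stats
 *   {"/examples/ipsec-secgw/stats": {"packets received": 0,
 *       "packets transmitted": 0, "packets dropped": 0}}
 *   --> /examples/ipsec-secgw/stats/outbound,2
 *   {"/examples/ipsec-secgw/stats/outbound": {"spd4": {"protect": 0,
 *       "bypass": 0, "discard": 0}, "spd6": {...}, "sad": {"miss": 0}}}
 */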
+
+
int32_t
main(int32_t argc, char **argv)
{
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid parameters\n");
+ ipsec_secgw_telemetry_init();
+
/* parse configuration file */
if (parse_cfg_file(cfgfile) < 0) {
printf("parsing file \"%s\" failed\n",
rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
unprotected_port_mask);
+ if (unprotected_port_mask && !nb_sa_in)
+ rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask when no inbound SAs are configured\n");
+
if (check_poll_mode_params(eh_conf) < 0)
rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");
else
socket_id = 0;
- /* mbuf_pool is initialised by the pool_init() function*/
- if (socket_ctx[socket_id].mbuf_pool)
+ if (per_port_pool) {
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ pool_init(&socket_ctx[socket_id], socket_id,
+ portid, nb_bufs_in_pool);
+ }
+ } else {
+ pool_init(&socket_ctx[socket_id], socket_id, 0,
+ nb_bufs_in_pool);
+ }
+
+ if (socket_ctx[socket_id].session_pool)
continue;
- pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool);
session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
session_priv_pool_init(&socket_ctx[socket_id], socket_id,
sess_sz);
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- /* Create flow before starting the device */
- create_default_ipsec_flow(portid, req_rx_offloads[portid]);
-
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
"err=%d, port=%d\n", ret, portid);
+
+ /* Create flow after starting the device */
+ create_default_ipsec_flow(portid, req_rx_offloads[portid]);
+
/*
* If enabled, put device in promiscuous mode.
* This allows IO forwarding mode to forward packets
rte_strerror(-ret), portid);
}
+ rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_RESET,
+ ethdev_reset_event_callback, NULL);
+
rte_eth_dev_callback_register(portid,
RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
}
/* Replicate each context per socket */
for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
socket_id = rte_socket_id_by_idx(i);
- if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
+ if ((socket_ctx[socket_id].session_pool != NULL) &&
(socket_ctx[socket_id].sa_in == NULL) &&
(socket_ctx[socket_id].sa_out == NULL)) {
sa_init(&socket_ctx[socket_id], socket_id);
check_all_ports_link_status(enabled_port_mask);
-#if (STATS_INTERVAL > 0)
- rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL);
-#else
- RTE_LOG(INFO, IPSEC, "Stats display disabled\n");
-#endif /* STATS_INTERVAL */
+ if (stats_interval > 0)
+ rte_eal_alarm_set(stats_interval * US_PER_S,
+ print_stats_cb, NULL);
+ else
+ RTE_LOG(INFO, IPSEC, "Stats display disabled\n");
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);