* Copyright(c) 2016 Intel Corporation
*/
+#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
+#include <signal.h>
#include <getopt.h>
#include <rte_common.h>
+#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_launch.h>
-#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
#include <rte_security.h>
+#include <rte_eventdev.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
+#include <rte_alarm.h>
+#include <rte_telemetry.h>
+#include "event_helper.h"
+#include "flow.h"
#include "ipsec.h"
+#include "ipsec_worker.h"
#include "parser.h"
+#include "sad.h"
-#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
+volatile bool force_quit;
#define MAX_JUMBO_PKT_LEN 9600
#define MEMPOOL_CACHE_SIZE 256
-#define NB_MBUF (32000)
-
#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 16384
-#define CDEV_MP_NB_OBJS 1024
#define CDEV_MP_CACHE_SZ 64
+#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
#define MAX_QUEUE_PAIRS 1
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-#define NB_SOCKETS 4
-
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
#define MAX_LCORE_PARAMS 1024
-#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid))
-
/*
* Configurable number of RX/TX ring descriptors
*/
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
-#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
- (((uint64_t)((a) & 0xff) << 56) | \
- ((uint64_t)((b) & 0xff) << 48) | \
- ((uint64_t)((c) & 0xff) << 40) | \
- ((uint64_t)((d) & 0xff) << 32) | \
- ((uint64_t)((e) & 0xff) << 24) | \
- ((uint64_t)((f) & 0xff) << 16) | \
- ((uint64_t)((g) & 0xff) << 8) | \
- ((uint64_t)(h) & 0xff))
-#else
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
- (((uint64_t)((h) & 0xff) << 56) | \
- ((uint64_t)((g) & 0xff) << 48) | \
- ((uint64_t)((f) & 0xff) << 40) | \
- ((uint64_t)((e) & 0xff) << 32) | \
- ((uint64_t)((d) & 0xff) << 24) | \
- ((uint64_t)((c) & 0xff) << 16) | \
- ((uint64_t)((b) & 0xff) << 8) | \
- ((uint64_t)(a) & 0xff))
-#endif
-#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
-
#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
		(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
		(addr)->addr_bytes[4], (addr)->addr_bytes[5], \
		0, 0)
#define FRAG_TBL_BUCKET_ENTRIES 4
-#define FRAG_TTL_MS (10 * MS_PER_S)
+#define MAX_FRAG_TTL_NS (10LL * NS_PER_S)
#define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
-/* port/source ethernet addr and destination ethernet addr */
-struct ethaddr_info {
- uint64_t src, dst;
-};
-
struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
+struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
+
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_SINGLE_SA "single-sa"
#define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
+#define CMD_LINE_OPT_TRANSFER_MODE "transfer-mode"
+#define CMD_LINE_OPT_SCHEDULE_TYPE "event-schedule-type"
#define CMD_LINE_OPT_RX_OFFLOAD "rxoffload"
#define CMD_LINE_OPT_TX_OFFLOAD "txoffload"
#define CMD_LINE_OPT_REASSEMBLE "reassemble"
#define CMD_LINE_OPT_MTU "mtu"
+#define CMD_LINE_OPT_FRAG_TTL "frag-ttl"
+#define CMD_LINE_OPT_EVENT_VECTOR "event-vector"
+#define CMD_LINE_OPT_VECTOR_SIZE "vector-size"
+#define CMD_LINE_OPT_VECTOR_TIMEOUT "vector-tmo"
+#define CMD_LINE_OPT_VECTOR_POOL_SZ "vector-pool-sz"
+#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
+
+#define CMD_LINE_ARG_EVENT "event"
+#define CMD_LINE_ARG_POLL "poll"
+#define CMD_LINE_ARG_ORDERED "ordered"
+#define CMD_LINE_ARG_ATOMIC "atomic"
+#define CMD_LINE_ARG_PARALLEL "parallel"
enum {
/* long options mapped to a short option */
CMD_LINE_OPT_CONFIG_NUM,
CMD_LINE_OPT_SINGLE_SA_NUM,
CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
+ CMD_LINE_OPT_TRANSFER_MODE_NUM,
+ CMD_LINE_OPT_SCHEDULE_TYPE_NUM,
CMD_LINE_OPT_RX_OFFLOAD_NUM,
CMD_LINE_OPT_TX_OFFLOAD_NUM,
CMD_LINE_OPT_REASSEMBLE_NUM,
CMD_LINE_OPT_MTU_NUM,
+ CMD_LINE_OPT_FRAG_TTL_NUM,
+ CMD_LINE_OPT_EVENT_VECTOR_NUM,
+ CMD_LINE_OPT_VECTOR_SIZE_NUM,
+ CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
+ CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
+ CMD_LINE_OPT_PER_PORT_POOL_NUM,
};
static const struct option lgopts[] = {
{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
+ {CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM},
+ {CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM},
{CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
{CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
{CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
{CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
+ {CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
+ {CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
+ {CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
+ {CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
+ {CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
+ {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
{NULL, 0, 0, 0}
};
+uint32_t unprotected_port_mask;
+uint32_t single_sa_idx;
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint64_t enabled_cryptodev_mask = UINT64_MAX;
-static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
-static uint32_t single_sa_idx;
+uint32_t nb_bufs_in_pool;
/*
* RX/TX HW offload capabilities to enable/use on ethernet ports.
static uint32_t frag_tbl_sz;
static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;
static uint32_t mtu_size = RTE_ETHER_MTU;
+static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
+static uint32_t stats_interval;
/* application wide librte_ipsec/SA parameters */
-struct app_sa_prm app_sa_prm = {.enable = 0};
+struct app_sa_prm app_sa_prm = {
+ .enable = 0,
+ .cache_sz = SA_CACHE_SZ,
+ .udp_encap = 0
+ };
+static const char *cfgfile;
struct lcore_rx_queue {
uint16_t port_id;
struct rt_ctx *rt6_ctx;
struct {
struct rte_ip_frag_tbl *tbl;
- struct rte_mempool *pool_dir;
struct rte_mempool *pool_indir;
struct rte_ip_frag_death_row dr;
} frag;
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
- ETH_RSS_TCP | ETH_RSS_SCTP,
+ .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
-static struct socket_ctx socket_ctx[NB_SOCKETS];
+struct socket_ctx socket_ctx[NB_SOCKETS];
+
+bool per_port_pool;
/*
* Determine is multi-segment support required:
* - either frame buffer size is smaller then mtu
- * - or reassmeble support is requested
+ * - or reassemble support is requested
*/
static int
multi_seg_required(void)
}
}
+
+struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
+
+/* Print out statistics on packet distribution */
+static void
+print_stats_cb(__rte_unused void *param)
+{
+ uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+ float burst_percent, rx_per_call, tx_per_call;
+ unsigned int coreid;
+
+ total_packets_dropped = 0;
+ total_packets_tx = 0;
+ total_packets_rx = 0;
+
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("\nCore statistics ====================================");
+
+ for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
+ /* skip disabled cores */
+ if (rte_lcore_is_enabled(coreid) == 0)
+ continue;
+ burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
+ core_statistics[coreid].rx;
+ rx_per_call = (float)(core_statistics[coreid].rx)/
+ core_statistics[coreid].rx_call;
+ tx_per_call = (float)(core_statistics[coreid].tx)/
+ core_statistics[coreid].tx_call;
+ printf("\nStatistics for core %u ------------------------------"
+ "\nPackets received: %20"PRIu64
+ "\nPackets sent: %24"PRIu64
+ "\nPackets dropped: %21"PRIu64
+ "\nBurst percent: %23.2f"
+ "\nPackets per Rx call: %17.2f"
+ "\nPackets per Tx call: %17.2f",
+ coreid,
+ core_statistics[coreid].rx,
+ core_statistics[coreid].tx,
+ core_statistics[coreid].dropped,
+ burst_percent,
+ rx_per_call,
+ tx_per_call);
+
+ total_packets_dropped += core_statistics[coreid].dropped;
+ total_packets_tx += core_statistics[coreid].tx;
+ total_packets_rx += core_statistics[coreid].rx;
+ }
+ printf("\nAggregate statistics ==============================="
+ "\nTotal packets received: %14"PRIu64
+ "\nTotal packets sent: %18"PRIu64
+ "\nTotal packets dropped: %15"PRIu64,
+ total_packets_rx,
+ total_packets_tx,
+ total_packets_dropped);
+ printf("\n====================================================\n");
+
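+	/* re-arm the alarm so the screen refreshes every stats_interval seconds */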
+ rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
+}
+
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
const struct rte_ether_hdr *eth;
const struct rte_ipv4_hdr *iph4;
const struct rte_ipv6_hdr *iph6;
+ const struct rte_udp_hdr *udp;
+ uint16_t ip4_hdr_len;
+ uint16_t nat_port;
eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
RTE_ETHER_HDR_LEN);
adjust_ipv4_pktlen(pkt, iph4, 0);
- if (iph4->next_proto_id == IPPROTO_ESP)
+ switch (iph4->next_proto_id) {
+ case IPPROTO_ESP:
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- else {
+ break;
+ case IPPROTO_UDP:
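+		/*
+		 * NAT-T (RFC 3948): with UDP encapsulation enabled, ESP may
+		 * arrive in UDP on IPSEC_NAT_T_PORT.
+		 */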
+ if (app_sa_prm.udp_encap == 1) {
+ ip4_hdr_len = ((iph4->version_ihl &
+ RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, ip4_hdr_len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port){
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
pkt->l2_len = 0;
pkt->l3_len = sizeof(*iph4);
+ pkt->packet_type |= RTE_PTYPE_L3_IPV4;
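+		/*
+		 * Record the L4 header length for recognised TCP/UDP packets;
+		 * TX offloads such as checksum/TSO rely on it.
+		 */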
+ if (pkt->packet_type & RTE_PTYPE_L4_TCP)
+ pkt->l4_len = sizeof(struct rte_tcp_hdr);
+ else if (pkt->packet_type & RTE_PTYPE_L4_UDP)
+ pkt->l4_len = sizeof(struct rte_udp_hdr);
} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
int next_proto;
size_t l3len, ext_len;
/* drop packet when IPv6 header exceeds first segment length */
if (unlikely(l3len > pkt->data_len)) {
- rte_pktmbuf_free(pkt);
+ free_pkts(&pkt, 1);
return;
}
- if (next_proto == IPPROTO_ESP)
+ switch (next_proto) {
+ case IPPROTO_ESP:
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- else {
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port){
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
t->ip6.data[t->ip6.num] = &iph6->proto;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
pkt->l2_len = 0;
pkt->l3_len = l3len;
+ pkt->packet_type |= RTE_PTYPE_L3_IPV6;
} else {
/* Unknown/Unsupported type, drop the packet */
RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
rte_be_to_cpu_16(eth->ether_type));
- rte_pktmbuf_free(pkt);
+ free_pkts(&pkt, 1);
return;
}
* with the security session.
*/
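+	/*
+	 * The userdata lookup below goes through the rte_security dynamic
+	 * mbuf field, so make sure a PMD actually registered it.
+	 */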
- if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD &&
+ rte_security_dynfield_is_registered()) {
struct ipsec_sa *sa;
struct ipsec_mbuf_metadata *priv;
struct rte_security_ctx *ctx = (struct rte_security_ctx *)
/* Retrieve the userdata registered. Here, the userdata
* registered is the SA pointer.
*/
-
- sa = (struct ipsec_sa *)
- rte_security_get_userdata(ctx, pkt->udata64);
-
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
+ *rte_security_dynfield(pkt));
if (sa == NULL) {
/* userdata could not be retrieved */
return;
ip->ip_sum = 0;
/* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
}
- memcpy(ðhdr->s_addr, ðaddr_tbl[port].src,
+ memcpy(ðhdr->src_addr, ðaddr_tbl[port].src,
sizeof(struct rte_ether_addr));
- memcpy(ðhdr->d_addr, ðaddr_tbl[port].dst,
+ memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst,
sizeof(struct rte_ether_addr));
}
prepare_tx_burst(m_table, n, port, qconf);
ret = rte_eth_tx_burst(port, queueid, m_table, n);
+
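+	/* count only the packets the TX queue actually accepted */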
+ core_stats_update_tx(ret);
+
if (unlikely(ret < n)) {
do {
- rte_pktmbuf_free(m_table[ret]);
+ free_pkts(&m_table[ret], 1);
} while (++ret < n);
}
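+	/*
+	 * Direct fragments are allocated from the source mbuf's own pool so
+	 * per-port pools stay consistent; indirect segments still come from
+	 * the shared indirect pool.
+	 */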
if (proto == IPPROTO_IP)
rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, qconf->frag.pool_dir,
- qconf->frag.pool_indir);
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
else
rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, qconf->frag.pool_dir,
- qconf->frag.pool_indir);
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
if (rc >= 0)
len += rc;
"error code: %d\n",
__func__, m->pkt_len, rte_errno);
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
return len;
}
} else if (frag_tbl_sz > 0)
len = send_fragment_packet(qconf, m, port, proto);
else
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
/* enough pkts to be sent */
if (unlikely(len == MAX_PKT_BURST)) {
static inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
- uint16_t lim)
+ uint16_t lim, struct ipsec_spd_stats *stats)
{
struct rte_mbuf *m;
uint32_t i, j, res, sa_idx;
res = ip->res[i];
if (res == BYPASS) {
ip->pkts[j++] = m;
+ stats->bypass++;
continue;
}
if (res == DISCARD) {
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
+ stats->discard++;
continue;
}
/* Only check SPI match for processed IPSec packets */
- if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
- rte_pktmbuf_free(m);
+ if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
+ stats->discard++;
+ free_pkts(&m, 1);
continue;
}
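+		/*
+		 * rte_acl reserves userdata 0 for "no match", so the SP result
+		 * stores the SA index plus one.
+		 */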
- sa_idx = SPI2IDX(res);
+ sa_idx = res - 1;
if (!inbound_sa_check(sa, m, sa_idx)) {
- rte_pktmbuf_free(m);
+ stats->discard++;
+ free_pkts(&m, 1);
continue;
}
ip->pkts[j++] = m;
+ stats->protect++;
}
ip->num = j;
}
offsetof(struct ip6_hdr, ip6_nxt));
n6++;
} else
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
}
trf->ip4.num = n4;
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
+ unsigned int lcoreid = rte_lcore_id();
uint16_t nb_pkts_in, n_ip4, n_ip6;
n_ip4 = traffic->ip4.num;
ipsec_process(ipsec_ctx, traffic);
}
- inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
- n_ip4);
+ inbound_sp_sa(ipsec_ctx->sp4_ctx,
+ ipsec_ctx->sa_ctx, &traffic->ip4, n_ip4,
+ &core_statistics[lcoreid].inbound.spd4);
- inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
- n_ip6);
+ inbound_sp_sa(ipsec_ctx->sp6_ctx,
+ ipsec_ctx->sa_ctx, &traffic->ip6, n_ip6,
+ &core_statistics[lcoreid].inbound.spd6);
}
static inline void
-outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
- struct traffic_type *ipsec)
+outbound_spd_lookup(struct sp_ctx *sp,
+ struct traffic_type *ip,
+ struct traffic_type *ipsec,
+ struct ipsec_spd_stats *stats)
{
struct rte_mbuf *m;
uint32_t i, j, sa_idx;
rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
ip->num, DEFAULT_MAX_CATEGORIES);
- j = 0;
- for (i = 0; i < ip->num; i++) {
+ for (i = 0, j = 0; i < ip->num; i++) {
m = ip->pkts[i];
- sa_idx = SPI2IDX(ip->res[i]);
- if (ip->res[i] == DISCARD)
- rte_pktmbuf_free(m);
- else if (ip->res[i] == BYPASS)
+ sa_idx = ip->res[i] - 1;
+
+ if (unlikely(ip->res[i] == DISCARD)) {
+ free_pkts(&m, 1);
+
+ stats->discard++;
+ } else if (unlikely(ip->res[i] == BYPASS)) {
ip->pkts[j++] = m;
- else {
+
+ stats->bypass++;
+ } else {
ipsec->res[ipsec->num] = sa_idx;
ipsec->pkts[ipsec->num++] = m;
+
+ stats->protect++;
}
}
ip->num = j;
{
struct rte_mbuf *m;
uint16_t idx, nb_pkts_out, i;
+ unsigned int lcoreid = rte_lcore_id();
/* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec.num; i++)
- rte_pktmbuf_free(traffic->ipsec.pkts[i]);
+ free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
traffic->ipsec.num = 0;
- outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
+ outbound_spd_lookup(ipsec_ctx->sp4_ctx,
+ &traffic->ip4, &traffic->ipsec,
+ &core_statistics[lcoreid].outbound.spd4);
- outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
+ outbound_spd_lookup(ipsec_ctx->sp6_ctx,
+ &traffic->ip6, &traffic->ipsec,
+ &core_statistics[lcoreid].outbound.spd6);
if (app_sa_prm.enable == 0) {
struct rte_mbuf *m;
uint32_t nb_pkts_in, i, idx;
- /* Drop any IPv4 traffic from unprotected ports */
- for (i = 0; i < traffic->ip4.num; i++)
- rte_pktmbuf_free(traffic->ip4.pkts[i]);
-
- traffic->ip4.num = 0;
-
- /* Drop any IPv6 traffic from unprotected ports */
- for (i = 0; i < traffic->ip6.num; i++)
- rte_pktmbuf_free(traffic->ip6.pkts[i]);
-
- traffic->ip6.num = 0;
-
if (app_sa_prm.enable == 0) {
nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
struct ip *ip;
/* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec.num; i++)
- rte_pktmbuf_free(traffic->ipsec.pkts[i]);
+ free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
n = 0;
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
if (nb_pkts == 0)
return;
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
} else {
}
if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
- rte_pktmbuf_free(pkts[i]);
+ core_statistics[lcoreid].lpm4.miss++;
+ free_pkts(&pkts[i], 1);
continue;
}
send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
int32_t pkt_hop = 0;
uint16_t i, offset;
uint16_t lpm_pkts = 0;
+ unsigned int lcoreid = rte_lcore_id();
if (nb_pkts == 0)
return;
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
} else {
}
if (pkt_hop == -1) {
- rte_pktmbuf_free(pkts[i]);
+ core_statistics[lcoreid].lpm6.miss++;
+ free_pkts(&pkts[i], 1);
continue;
}
send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
prepare_traffic(pkts, &traffic, nb_pkts);
if (unlikely(single_sa)) {
- if (UNPROTECTED_PORT(portid))
+ if (is_unprotected_port(portid))
process_pkts_inbound_nosp(&qconf->inbound, &traffic);
else
process_pkts_outbound_nosp(&qconf->outbound, &traffic);
} else {
- if (UNPROTECTED_PORT(portid))
+ if (is_unprotected_port(portid))
process_pkts_inbound(&qconf->inbound, &traffic);
else
process_pkts_outbound(&qconf->outbound, &traffic);
{
uint32_t n;
struct ipsec_traffic trf;
+ unsigned int lcoreid = rte_lcore_id();
if (app_sa_prm.enable == 0) {
/* process ipv4 packets */
if (trf.ip4.num != 0) {
- inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
+ inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0,
+ &core_statistics[lcoreid].inbound.spd4);
route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
}
/* process ipv6 packets */
if (trf.ip6.num != 0) {
- inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
+ inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0,
+ &core_statistics[lcoreid].inbound.spd6);
route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
}
}
}
/* main processing loop */
-static int32_t
-main_loop(__attribute__((unused)) void *dummy)
+void
+ipsec_poll_mode_worker(void)
{
struct rte_mbuf *pkts[MAX_PKT_BURST];
uint32_t lcore_id;
uint16_t portid;
uint8_t queueid;
struct lcore_conf *qconf;
- int32_t socket_id;
+ int32_t rc, socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
/ US_PER_S * BURST_TX_DRAIN_US;
struct lcore_rx_queue *rxql;
qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
qconf->outbound.session_priv_pool =
socket_ctx[socket_id].session_priv_pool;
- qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+ rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
+ if (rc != 0) {
+ RTE_LOG(ERR, IPSEC,
+ "SAD cache init on lcore %u, failed with code: %d\n",
+ lcore_id, rc);
+ return;
+ }
+
if (qconf->nb_rx_queue == 0) {
RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
lcore_id);
- return 0;
+ return;
}
RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
lcore_id, portid, queueid);
}
- while (1) {
+ while (!force_quit) {
cur_tsc = rte_rdtsc();
/* TX queue buffer drain */
nb_rx = rte_eth_rx_burst(portid, queueid,
pkts, MAX_PKT_BURST);
- if (nb_rx > 0)
+ if (nb_rx > 0) {
+ core_stats_update_rx(nb_rx);
process_pkts(qconf, pkts, nb_rx, portid);
+ }
/* dequeue and process completed crypto-ops */
- if (UNPROTECTED_PORT(portid))
+ if (is_unprotected_port(portid))
drain_inbound_crypto_queues(qconf,
&qconf->inbound);
else
}
}
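+/*
+ * Verify that the given flow director port/queue pair is one of the RX
+ * queues configured via --config, i.e. that some lcore will service it.
+ */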
+int
+check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
+{
+ uint16_t i;
+ uint16_t portid;
+ uint8_t queueid;
+
+ for (i = 0; i < nb_lcore_params; ++i) {
+ portid = lcore_params_array[i].port_id;
+ if (portid == fdir_portid) {
+ queueid = lcore_params_array[i].queue_id;
+ if (queueid == fdir_qid)
+ break;
+ }
+
+ if (i == nb_lcore_params - 1)
+ return -1;
+ }
+
+ return 1;
+}
+
static int32_t
-check_params(void)
+check_poll_mode_params(struct eh_conf *eh_conf)
{
uint8_t lcore;
uint16_t portid;
uint16_t i;
int32_t socket_id;
+ if (!eh_conf)
+ return -EINVAL;
+
+ if (eh_conf->mode != EH_PKT_TRANSFER_MODE_POLL)
+ return 0;
+
if (lcore_params == NULL) {
printf("Error: No port/queue/core mappings\n");
return -1;
" [-w REPLAY_WINDOW_SIZE]"
" [-e]"
" [-a]"
+ " [-c]"
+ " [-t STATS_INTERVAL]"
+ " [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
" -f CONFIG_FILE"
" --config (port,queue,lcore)[,(port,queue,lcore)]"
" [--single-sa SAIDX]"
" [--cryptodev_mask MASK]"
+ " [--transfer-mode MODE]"
+ " [--event-schedule-type TYPE]"
" [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
" [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
" [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
" [--" CMD_LINE_OPT_MTU " MTU]"
+ " [--event-vector]"
+ " [--vector-size SIZE]"
+ " [--vector-tmo TIMEOUT in ns]"
"\n\n"
" -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
" -P : Enable promiscuous mode\n"
" size for each SA\n"
" -e enables ESN\n"
" -a enables SA SQN atomic behaviour\n"
+ " -c specifies inbound SAD cache size,\n"
+ " zero value disables the cache (default value: 128)\n"
+ " -t specifies statistics screen update interval,\n"
+ " zero disables statistics screen (default value: 0)\n"
+ " -s number of mbufs in packet pool, if not specified number\n"
+ " of mbufs will be calculated based on number of cores,\n"
+ " ports and crypto queues\n"
" -f CONFIG_FILE: Configuration file\n"
- " --config (port,queue,lcore): Rx queue configuration\n"
- " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
- " bypassing the SP\n"
+ " --config (port,queue,lcore): Rx queue configuration. In poll\n"
+ " mode determines which queues from\n"
+ " which ports are mapped to which cores.\n"
+ " In event mode this option is not used\n"
+ " as packets are dynamically scheduled\n"
+ " to cores by HW.\n"
+ " --single-sa SAIDX: In poll mode use single SA index for\n"
+ " outbound traffic, bypassing the SP\n"
+ " In event mode selects driver submode,\n"
+ " SA index value is ignored\n"
" --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
" devices to configure\n"
+ " --transfer-mode MODE\n"
+ " \"poll\" : Packet transfer via polling (default)\n"
+ " \"event\" : Packet transfer via event device\n"
+ " --event-schedule-type TYPE queue schedule type, used only when\n"
+ " transfer mode is set to event\n"
+ " \"ordered\" : Ordered (default)\n"
+ " \"atomic\" : Atomic\n"
+ " \"parallel\" : Parallel\n"
" --" CMD_LINE_OPT_RX_OFFLOAD
": bitmask of the RX HW offload capabilities to enable/use\n"
- " (DEV_RX_OFFLOAD_*)\n"
+ " (RTE_ETH_RX_OFFLOAD_*)\n"
" --" CMD_LINE_OPT_TX_OFFLOAD
": bitmask of the TX HW offload capabilities to enable/use\n"
- " (DEV_TX_OFFLOAD_*)\n"
+ " (RTE_ETH_TX_OFFLOAD_*)\n"
" --" CMD_LINE_OPT_REASSEMBLE " NUM"
": max number of entries in reassemble(fragment) table\n"
" (zero (default value) disables reassembly)\n"
": MTU value on all ports (default value: 1500)\n"
" outgoing packets with bigger size will be fragmented\n"
" incoming packets with bigger size will be discarded\n"
+ " --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
+ ": fragments lifetime in nanoseconds, default\n"
+ " and maximum value is 10.000.000.000 ns (10 s)\n"
+ " --event-vector enables event vectorization\n"
+ " --vector-size Max vector size (default value: 16)\n"
+ " --vector-tmo Max vector timeout in nanoseconds"
+ " (default value: 102400)\n"
+ " --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
+ " --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
+ " (default value is based on mbuf count)\n"
"\n",
prgname);
}
char *end = NULL;
unsigned long pm;
+ errno = 0;
+
/* parse hexadecimal string */
pm = strtoul(portmask, &end, 16);
if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
return pm;
}
-static int32_t
+static int64_t
parse_decimal(const char *str)
{
char *end = NULL;
- unsigned long num;
+ uint64_t num;
- num = strtoul(str, &end, 10);
- if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
+ num = strtoull(str, &end, 10);
+ if ((str[0] == '\0') || (end == NULL) || (*end != '\0')
+ || num > INT64_MAX)
return -1;
return num;
printf("librte_ipsec usage: %s\n",
(prm->enable == 0) ? "disabled" : "enabled");
- if (prm->enable == 0)
- return;
-
printf("replay window size: %u\n", prm->window_size);
printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
printf("SA flags: %#" PRIx64 "\n", prm->flags);
+ printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns);
+}
+
+static int
+parse_transfer_mode(struct eh_conf *conf, const char *optarg)
+{
+ if (!strcmp(CMD_LINE_ARG_POLL, optarg))
+ conf->mode = EH_PKT_TRANSFER_MODE_POLL;
+ else if (!strcmp(CMD_LINE_ARG_EVENT, optarg))
+ conf->mode = EH_PKT_TRANSFER_MODE_EVENT;
+ else {
+ printf("Unsupported packet transfer mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+parse_schedule_type(struct eh_conf *conf, const char *optarg)
+{
+ struct eventmode_conf *em_conf = NULL;
+
+ /* Get eventmode conf */
+ em_conf = conf->mode_params;
+
+ if (!strcmp(CMD_LINE_ARG_ORDERED, optarg))
+ em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
+ else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg))
+ em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg))
+ em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL;
+ else {
+ printf("Unsupported queue schedule type\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int32_t
-parse_args(int32_t argc, char **argv)
+parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
{
- int32_t opt, ret;
+ int opt;
+ int64_t ret;
char **argvopt;
int32_t option_index;
char *prgname = argv[0];
int32_t f_present = 0;
+ struct eventmode_conf *em_conf = NULL;
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:",
+ while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:t:s:",
lgopts, &option_index)) != EOF) {
switch (opt) {
print_usage(prgname);
return -1;
}
- if (parse_cfg_file(optarg) < 0) {
- printf("parsing file \"%s\" failed\n",
- optarg);
+ cfgfile = optarg;
+ f_present = 1;
+ break;
+
+ case 's':
+ ret = parse_decimal(optarg);
+ if (ret < 0) {
+ printf("Invalid number of buffers in a pool: "
+ "%s\n", optarg);
print_usage(prgname);
return -1;
}
- f_present = 1;
+
+ nb_bufs_in_pool = ret;
break;
+
case 'j':
ret = parse_decimal(optarg);
if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
app_sa_prm.enable = 1;
break;
case 'w':
- app_sa_prm.enable = 1;
app_sa_prm.window_size = parse_decimal(optarg);
break;
case 'e':
- app_sa_prm.enable = 1;
app_sa_prm.enable_esn = 1;
break;
case 'a':
app_sa_prm.enable = 1;
app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
break;
+ case 'c':
+ ret = parse_decimal(optarg);
+ if (ret < 0) {
+ printf("Invalid SA cache size: %s\n", optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ app_sa_prm.cache_sz = ret;
+ break;
+ case 't':
+ ret = parse_decimal(optarg);
+ if (ret < 0) {
+ printf("Invalid interval value: %s\n", optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ stats_interval = ret;
+ break;
case CMD_LINE_OPT_CONFIG_NUM:
ret = parse_config(optarg);
if (ret) {
break;
case CMD_LINE_OPT_SINGLE_SA_NUM:
ret = parse_decimal(optarg);
- if (ret == -1) {
+ if (ret == -1 || ret > UINT32_MAX) {
printf("Invalid argument[sa_idx]\n");
print_usage(prgname);
return -1;
/* else */
single_sa = 1;
single_sa_idx = ret;
+ eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
printf("Configured with single SA index %u\n",
single_sa_idx);
break;
/* else */
enabled_cryptodev_mask = ret;
break;
+
+ case CMD_LINE_OPT_TRANSFER_MODE_NUM:
+ ret = parse_transfer_mode(eh_conf, optarg);
+ if (ret < 0) {
+ printf("Invalid packet transfer mode\n");
+ print_usage(prgname);
+ return -1;
+ }
+ break;
+
+ case CMD_LINE_OPT_SCHEDULE_TYPE_NUM:
+ ret = parse_schedule_type(eh_conf, optarg);
+ if (ret < 0) {
+ printf("Invalid queue schedule type\n");
+ print_usage(prgname);
+ return -1;
+ }
+ break;
+
case CMD_LINE_OPT_RX_OFFLOAD_NUM:
ret = parse_mask(optarg, &dev_rx_offload);
if (ret != 0) {
break;
case CMD_LINE_OPT_REASSEMBLE_NUM:
ret = parse_decimal(optarg);
- if (ret < 0) {
+ if (ret < 0 || ret > UINT32_MAX) {
printf("Invalid argument for \'%s\': %s\n",
CMD_LINE_OPT_REASSEMBLE, optarg);
print_usage(prgname);
}
mtu_size = ret;
break;
+ case CMD_LINE_OPT_FRAG_TTL_NUM:
+ ret = parse_decimal(optarg);
+ if (ret < 0 || ret > MAX_FRAG_TTL_NS) {
+ printf("Invalid argument for \'%s\': %s\n",
+				CMD_LINE_OPT_FRAG_TTL, optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ frag_ttl_ns = ret;
+ break;
+ case CMD_LINE_OPT_EVENT_VECTOR_NUM:
+ em_conf = eh_conf->mode_params;
+ em_conf->ext_params.event_vector = 1;
+ break;
+ case CMD_LINE_OPT_VECTOR_SIZE_NUM:
+ ret = parse_decimal(optarg);
+
+			if (ret < 0 || ret > MAX_PKT_BURST) {
+ printf("Invalid argument for \'%s\': %s\n",
+ CMD_LINE_OPT_VECTOR_SIZE, optarg);
+ print_usage(prgname);
+ return -1;
+ }
+ em_conf = eh_conf->mode_params;
+ em_conf->ext_params.vector_size = ret;
+ break;
+ case CMD_LINE_OPT_VECTOR_TIMEOUT_NUM:
+ ret = parse_decimal(optarg);
+
+ em_conf = eh_conf->mode_params;
+ em_conf->vector_tmo_ns = ret;
+ break;
+ case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
+ ret = parse_decimal(optarg);
+
+ em_conf = eh_conf->mode_params;
+ em_conf->vector_pool_sz = ret;
+ break;
+ case CMD_LINE_OPT_PER_PORT_POOL_NUM:
+ per_port_pool = 1;
+ break;
default:
print_usage(prgname);
return -1;
uint16_t portid;
uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
+ int ret;
+ char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
printf("\nChecking link status");
fflush(stdout);
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
- rte_eth_link_get_nowait(portid, &link);
+ ret = rte_eth_link_get_nowait(portid, &link);
+ if (ret < 0) {
+ all_ports_up = 0;
+ if (print_flag == 1)
+ printf("Port %u link get failed: %s\n",
+ portid, rte_strerror(-ret));
+ continue;
+ }
/* print link status if flag set */
if (print_flag == 1) {
- if (link.link_status)
- printf(
- "Port%d Link Up - speed %u Mbps -%s\n",
- portid, link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex\n"));
- else
- printf("Port %d Link Down\n", portid);
+ rte_eth_link_to_str(link_status_text,
+ sizeof(link_status_text), &link);
+ printf("Port %d %s\n", portid,
+ link_status_text);
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
ret = rte_hash_add_key_data(map, &key, (void *)i);
if (ret < 0) {
- printf("Faled to insert cdev mapping for (lcore %u, "
+ printf("Failed to insert cdev mapping for (lcore %u, "
"cdev %u, qp %u), errno %d\n",
key.lcore_id, ipsec_ctx->tbl[i].id,
ipsec_ctx->tbl[i].qp, ret);
str = "Inbound";
}
- /* Required cryptodevs with operation chainning */
+ /* Required cryptodevs with operation chaining */
if (!(dev_info->feature_flags &
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
return ret;
return -1;
}
-static int32_t
-cryptodevs_init(void)
+static uint16_t
+cryptodevs_init(uint16_t req_queue_num)
{
struct rte_cryptodev_config dev_conf;
struct rte_cryptodev_qp_conf qp_conf;
- uint16_t idx, max_nb_qps, qp, i;
+ uint16_t idx, max_nb_qps, qp, total_nb_qps, i;
int16_t cdev_id;
struct rte_hash_parameters params = { 0 };
printf("lcore/cryptodev/qp mappings:\n");
idx = 0;
+ total_nb_qps = 0;
for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
struct rte_cryptodev_info cdev_info;
i++;
}
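+		/*
+		 * Reserve at least req_queue_num queue pairs (event mode asks
+		 * for one per ethdev), capped at the device maximum.
+		 */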
+ qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
if (qp == 0)
continue;
+ total_nb_qps += qp;
dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
dev_conf.nb_queue_pairs = qp;
dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
- if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
+ if (dev_max_sess != 0 &&
+ dev_max_sess < get_nb_crypto_sessions())
rte_exit(EXIT_FAILURE,
"Device does not support at least %u "
- "sessions", CDEV_MP_NB_OBJS);
+ "sessions", get_nb_crypto_sessions());
if (rte_cryptodev_configure(cdev_id, &dev_conf))
rte_panic("Failed to initialize cryptodev %u\n",
printf("\n");
- return 0;
+ return total_nb_qps;
}
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
- uint32_t frame_size;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
uint16_t nb_tx_queue, nb_rx_queue;
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- /* limit allowed HW offloafs, as user requested */
+ /* limit allowed HW offloads, as user requested */
dev_info.rx_offload_capa &= dev_rx_offload;
dev_info.tx_offload_capa &= dev_tx_offload;
printf("Configuring device port %u:\n", portid);
- rte_eth_macaddr_get(portid, ðaddr);
+ ret = rte_eth_macaddr_get(portid, ðaddr);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error getting MAC address (port %u): %s\n",
+ portid, rte_strerror(-ret));
+
ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ðaddr);
print_ethaddr("Address: ", ðaddr);
printf("\n");
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
- frame_size = MTU_TO_FRAMELEN(mtu_size);
- if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- local_port_conf.rxmode.max_rx_pkt_len = frame_size;
+ local_port_conf.rxmode.mtu = mtu_size;
if (multi_seg_required()) {
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+ local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
local_port_conf.rxmode.offloads |= req_rx_offloads;
local_port_conf.rxmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required RX offloads: 0x%" PRIx64
- ", avaialbe RX offloads: 0x%" PRIx64 "\n",
+ ", available RX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
dev_info.rx_offload_capa);
local_port_conf.txmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required TX offloads: 0x%" PRIx64
- ", avaialbe TX offloads: 0x%" PRIx64 "\n",
+ ", available TX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.txmode.offloads,
dev_info.tx_offload_capa);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
- printf("port %u configurng rx_offloads=0x%" PRIx64
+ printf("port %u configuring rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
local_port_conf.txmode.offloads);
qconf->tx_queue_id[portid] = tx_queueid;
/* Pre-populate pkt offloads based on capabilities */
- qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
- qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
- if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
- qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
+ qconf->outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4;
+ qconf->outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6;
+ if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ qconf->outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM;
tx_queueid++;
/* init RX queues */
for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
struct rte_eth_rxconf rxq_conf;
+ struct rte_mempool *pool;
if (portid != qconf->rx_queue_list[queue].port_id)
continue;
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = local_port_conf.rxmode.offloads;
+
+ if (per_port_pool)
+ pool = socket_ctx[socket_id].mbuf_pool[portid];
+ else
+ pool = socket_ctx[socket_id].mbuf_pool[0];
+
ret = rte_eth_rx_queue_setup(portid, rx_queueid,
- nb_rxd, socket_id, &rxq_conf,
- socket_ctx[socket_id].mbuf_pool);
+ nb_rxd, socket_id, &rxq_conf, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
{
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
+ uint32_t nb_sess;
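+	/*
+	 * Size the pool for every SA session plus per-lcore cache headroom;
+	 * the 1.5 multiplier mirrors the mempool cache flush threshold.
+	 */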
snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"sess_mp_%u", socket_id);
+ nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
+ rte_lcore_count());
+ nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
+ CDEV_MP_CACHE_MULTIPLIER);
sess_mp = rte_cryptodev_sym_session_pool_create(
- mp_name, CDEV_MP_NB_OBJS,
- sess_sz, CDEV_MP_CACHE_SZ, 0,
+ mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
socket_id);
ctx->session_pool = sess_mp;
{
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
+ uint32_t nb_sess;
snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"sess_mp_priv_%u", socket_id);
+ nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
+ rte_lcore_count());
+ nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
+ CDEV_MP_CACHE_MULTIPLIER);
sess_mp = rte_mempool_create(mp_name,
- CDEV_MP_NB_OBJS,
+ nb_sess,
sess_sz,
CDEV_MP_CACHE_SZ,
0, NULL, NULL, NULL,
}
static void
-pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
+pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
+ uint32_t nb_mbuf)
{
char s[64];
int32_t ms;
- snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
- ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
- MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
- frame_buf_size, socket_id);
+
+	/* mbuf_pool is initialised by the pool_init() function */
+ if (socket_ctx[socket_id].mbuf_pool[portid])
+ return;
+
+ snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
+ ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
+ MEMPOOL_CACHE_SIZE,
+ ipsec_metadata_size(),
+ frame_buf_size,
+ socket_id);
/*
* if multi-segment support is enabled, then create a pool
- * for indirect mbufs.
+ * for indirect mbufs. This is not per-port but global.
*/
ms = multi_seg_required();
- if (ms != 0) {
+ if (ms != 0 && !ctx->mbuf_pool_indir) {
snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
}
- if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
+ if (ctx->mbuf_pool[portid] == NULL ||
+ (ms != 0 && ctx->mbuf_pool_indir == NULL))
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
socket_id);
else
return -1;
}
+static int
+ethdev_reset_event_callback(uint16_t port_id,
+ enum rte_eth_event_type type,
+ void *param __rte_unused, void *ret_param __rte_unused)
+{
+ printf("Reset Event on port id %d type %d\n", port_id, type);
+	printf("Force quit application\n");
+ force_quit = true;
+ return 0;
+}
+
static uint16_t
rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
struct rte_mbuf *pkt[], uint16_t nb_pkts,
rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
struct rte_ipv6_hdr *iph;
- struct ipv6_extension_fragment *fh;
+ struct rte_ipv6_fragment_ext *fh;
iph = (struct rte_ipv6_hdr *)(eth + 1);
fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
/* create fragment table */
sid = rte_lcore_to_socket_id(cid);
- frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
- MS_PER_S * FRAG_TTL_MS;
+ frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) /
+ NS_PER_S * frag_ttl_ns;
lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,
FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);
return rc;
}
+static void
+create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads)
+{
+ struct rte_flow_action action[2];
+ struct rte_flow_item pattern[2];
+ struct rte_flow_attr attr = {0};
+ struct rte_flow_error err;
+ struct rte_flow *flow;
+ int ret;
+
+ if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
+ return;
+
+ /* Add the default rte_flow to enable SECURITY for all ESP packets */
+
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
+ pattern[0].spec = NULL;
+ pattern[0].mask = NULL;
+ pattern[0].last = NULL;
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_END;
+
+ action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+ action[0].conf = NULL;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ action[1].conf = NULL;
+
+ attr.ingress = 1;
+
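+	/*
+	 * Validate first; ports whose PMD cannot back this catch-all rule
+	 * are skipped silently.
+	 */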
+ ret = rte_flow_validate(port_id, &attr, pattern, action, &err);
+ if (ret)
+ return;
+
+ flow = rte_flow_create(port_id, &attr, pattern, action, &err);
+ if (flow == NULL)
+ return;
+
+ flow_info_tbl[port_id].rx_def_flow = flow;
+ RTE_LOG(INFO, IPSEC,
+ "Created default flow enabling SECURITY for all ESP traffic on port %d\n",
+ port_id);
+}
+
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ signum);
+ force_quit = true;
+ }
+}
+
+static void
+ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
+{
+ struct rte_ipsec_session *ips;
+ int32_t i;
+
+ if (!sa || !nb_sa)
+ return;
+
+ for (i = 0; i < nb_sa; i++) {
+ ips = ipsec_get_primary_session(&sa[i]);
+ if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ rte_exit(EXIT_FAILURE, "Event mode supports only "
+ "inline protocol sessions\n");
+ }
+
+}
+
+static int32_t
+check_event_mode_params(struct eh_conf *eh_conf)
+{
+ struct eventmode_conf *em_conf = NULL;
+ struct lcore_params *params;
+ uint16_t portid;
+
+ if (!eh_conf || !eh_conf->mode_params)
+ return -EINVAL;
+
+ /* Get eventmode conf */
+ em_conf = eh_conf->mode_params;
+
+ if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL &&
+ em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) {
+ printf("error: option --event-schedule-type applies only to "
+ "event mode\n");
+ return -EINVAL;
+ }
+
+ if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT)
+ return 0;
+
+ /* Set schedule type to ORDERED if it wasn't explicitly set by user */
+ if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET)
+ em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
+
+ /*
+ * Event mode currently supports only inline protocol sessions.
+ * If there are other types of sessions configured then exit with
+ * error.
+ */
+ ev_mode_sess_verify(sa_in, nb_sa_in);
+ ev_mode_sess_verify(sa_out, nb_sa_out);
+
+
+ /* Option --config does not apply to event mode */
+ if (nb_lcore_params > 0) {
+ printf("error: option --config applies only to poll mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * In order to use the same port_init routine for both poll and event
+ * modes initialize lcore_params with one queue for each eth port
+ */
+ lcore_params = lcore_params_array;
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ params = &lcore_params[nb_lcore_params++];
+ params->port_id = portid;
+ params->queue_id = 0;
+ params->lcore_id = rte_get_next_lcore(0, 0, 1);
+ }
+
+ return 0;
+}
+
+static void
+inline_sessions_free(struct sa_ctx *sa_ctx)
+{
+ struct rte_ipsec_session *ips;
+ struct ipsec_sa *sa;
+ int32_t ret;
+ uint32_t i;
+
+ if (!sa_ctx)
+ return;
+
+ for (i = 0; i < sa_ctx->nb_sa; i++) {
+
+ sa = &sa_ctx->sa[i];
+ if (!sa->spi)
+ continue;
+
+ ips = ipsec_get_primary_session(sa);
+ if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
+ ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ continue;
+
+ if (!rte_eth_dev_is_valid_port(sa->portid))
+ continue;
+
+ ret = rte_security_session_destroy(
+ rte_eth_dev_get_sec_ctx(sa->portid),
+ ips->security.ses);
+ if (ret)
+ RTE_LOG(ERR, IPSEC, "Failed to destroy security "
+ "session type %d, spi %d\n",
+ ips->type, sa->spi);
+ }
+}
+
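+/*
+ * Rough mbuf budget: RX descriptors, one burst in flight per port per lcore,
+ * TX descriptors, per-lcore mempool caches, crypto queue descriptors and the
+ * fragment table, with a floor of 8192 mbufs.
+ */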
+static uint32_t
+calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
+ uint32_t nb_txq)
+{
+ return RTE_MAX((nb_rxq * nb_rxd +
+ nb_ports * nb_lcores * MAX_PKT_BURST +
+ nb_ports * nb_txq * nb_txd +
+ nb_lcores * MEMPOOL_CACHE_SIZE +
+ nb_crypto_qp * CDEV_QUEUE_DESC +
+ nb_lcores * frag_tbl_sz *
+ FRAG_TBL_BUCKET_ENTRIES),
+ 8192U);
+}
+
+
+static int
+handle_telemetry_cmd_ipsec_secgw_stats(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ uint64_t total_pkts_dropped = 0, total_pkts_tx = 0, total_pkts_rx = 0;
+ unsigned int coreid;
+
+ rte_tel_data_start_dict(data);
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0)
+ return -EINVAL;
+
+ total_pkts_dropped = core_statistics[coreid].dropped;
+ total_pkts_tx = core_statistics[coreid].tx;
+ total_pkts_rx = core_statistics[coreid].rx;
+
+ } else {
+ for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
+
+ /* skip disabled cores */
+ if (rte_lcore_is_enabled(coreid) == 0)
+ continue;
+
+ total_pkts_dropped += core_statistics[coreid].dropped;
+ total_pkts_tx += core_statistics[coreid].tx;
+ total_pkts_rx += core_statistics[coreid].rx;
+ }
+ }
+
+ /* add telemetry key/values pairs */
+ rte_tel_data_add_dict_u64(data, "packets received",
+ total_pkts_rx);
+
+ rte_tel_data_add_dict_u64(data, "packets transmitted",
+ total_pkts_tx);
+
+ rte_tel_data_add_dict_u64(data, "packets dropped",
+ total_pkts_dropped);
+
+
+ return 0;
+}
+
+static void
+update_lcore_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
+{
+ struct ipsec_core_statistics *lcore_stats;
+
+ /* skip disabled cores */
+ if (rte_lcore_is_enabled(coreid) == 0)
+ return;
+
+ lcore_stats = &core_statistics[coreid];
+
+	total->rx += lcore_stats->rx;
+	total->dropped += lcore_stats->dropped;
+	total->tx += lcore_stats->tx;
+
+ /* outbound stats */
+ total->outbound.spd6.protect += lcore_stats->outbound.spd6.protect;
+ total->outbound.spd6.bypass += lcore_stats->outbound.spd6.bypass;
+ total->outbound.spd6.discard += lcore_stats->outbound.spd6.discard;
+
+ total->outbound.spd4.protect += lcore_stats->outbound.spd4.protect;
+ total->outbound.spd4.bypass += lcore_stats->outbound.spd4.bypass;
+ total->outbound.spd4.discard += lcore_stats->outbound.spd4.discard;
+
+ total->outbound.sad.miss += lcore_stats->outbound.sad.miss;
+
+ /* inbound stats */
+ total->inbound.spd6.protect += lcore_stats->inbound.spd6.protect;
+ total->inbound.spd6.bypass += lcore_stats->inbound.spd6.bypass;
+ total->inbound.spd6.discard += lcore_stats->inbound.spd6.discard;
+
+ total->inbound.spd4.protect += lcore_stats->inbound.spd4.protect;
+ total->inbound.spd4.bypass += lcore_stats->inbound.spd4.bypass;
+ total->inbound.spd4.discard += lcore_stats->inbound.spd4.discard;
+
+ total->inbound.sad.miss += lcore_stats->inbound.sad.miss;
+
+
+ /* routing stats */
+ total->lpm4.miss += lcore_stats->lpm4.miss;
+ total->lpm6.miss += lcore_stats->lpm6.miss;
+}
+
+static void
+update_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
+{
+ memset(total, 0, sizeof(*total));
+
+ if (coreid != UINT32_MAX) {
+ update_lcore_statistics(total, coreid);
+ } else {
+ for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++)
+ update_lcore_statistics(total, coreid);
+ }
+}
+
+static int
+handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ struct ipsec_core_statistics total_stats;
+
+ struct rte_tel_data *spd4_data = rte_tel_data_alloc();
+ struct rte_tel_data *spd6_data = rte_tel_data_alloc();
+ struct rte_tel_data *sad_data = rte_tel_data_alloc();
+ unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!spd4_data || !spd6_data || !sad_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize telemetry data structs as dicts */
+ rte_tel_data_start_dict(data);
+
+ rte_tel_data_start_dict(spd4_data);
+ rte_tel_data_start_dict(spd6_data);
+ rte_tel_data_start_dict(sad_data);
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ update_statistics(&total_stats, coreid);
+
+ /* add spd 4 telemetry key/values pairs */
+
+ rte_tel_data_add_dict_u64(spd4_data, "protect",
+ total_stats.outbound.spd4.protect);
+ rte_tel_data_add_dict_u64(spd4_data, "bypass",
+ total_stats.outbound.spd4.bypass);
+ rte_tel_data_add_dict_u64(spd4_data, "discard",
+ total_stats.outbound.spd4.discard);
+
+ rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
+
+ /* add spd 6 telemetry key/values pairs */
+
+ rte_tel_data_add_dict_u64(spd6_data, "protect",
+ total_stats.outbound.spd6.protect);
+ rte_tel_data_add_dict_u64(spd6_data, "bypass",
+ total_stats.outbound.spd6.bypass);
+ rte_tel_data_add_dict_u64(spd6_data, "discard",
+ total_stats.outbound.spd6.discard);
+
+ rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
+
+ /* add sad telemetry key/values pairs */
+
+ rte_tel_data_add_dict_u64(sad_data, "miss",
+ total_stats.outbound.sad.miss);
+
+ rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
+
+exit:
+ if (rc) {
+ rte_tel_data_free(spd4_data);
+ rte_tel_data_free(spd6_data);
+ rte_tel_data_free(sad_data);
+ }
+ return rc;
+}
+
+static int
+handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ struct ipsec_core_statistics total_stats;
+
+ struct rte_tel_data *spd4_data = rte_tel_data_alloc();
+ struct rte_tel_data *spd6_data = rte_tel_data_alloc();
+ struct rte_tel_data *sad_data = rte_tel_data_alloc();
+ unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!spd4_data || !spd6_data || !sad_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize telemetry data structs as dicts */
+ rte_tel_data_start_dict(data);
+ rte_tel_data_start_dict(spd4_data);
+ rte_tel_data_start_dict(spd6_data);
+ rte_tel_data_start_dict(sad_data);
+
+ /* add children dicts to parent dict */
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ update_statistics(&total_stats, coreid);
+
+ /* add sad telemetry key/values pairs */
+
+ rte_tel_data_add_dict_u64(sad_data, "miss",
+ total_stats.inbound.sad.miss);
+
+ rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
+
+ /* add spd 4 telemetry key/values pairs */
+
+ rte_tel_data_add_dict_u64(spd4_data, "protect",
+ total_stats.inbound.spd4.protect);
+ rte_tel_data_add_dict_u64(spd4_data, "bypass",
+ total_stats.inbound.spd4.bypass);
+ rte_tel_data_add_dict_u64(spd4_data, "discard",
+ total_stats.inbound.spd4.discard);
+
+ rte_tel_data_add_dict_container(data, "spd4", spd4_data, 0);
+
+ /* add spd 6 telemetry key/values pairs */
+
+ rte_tel_data_add_dict_u64(spd6_data, "protect",
+ total_stats.inbound.spd6.protect);
+ rte_tel_data_add_dict_u64(spd6_data, "bypass",
+ total_stats.inbound.spd6.bypass);
+ rte_tel_data_add_dict_u64(spd6_data, "discard",
+ total_stats.inbound.spd6.discard);
+
+ rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
+
+exit:
+ if (rc) {
+ rte_tel_data_free(spd4_data);
+ rte_tel_data_free(spd6_data);
+ rte_tel_data_free(sad_data);
+ }
+ return rc;
+}
+
+static int
+handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
+ const char *params, struct rte_tel_data *data)
+{
+ struct ipsec_core_statistics total_stats;
+
+ struct rte_tel_data *lpm4_data = rte_tel_data_alloc();
+ struct rte_tel_data *lpm6_data = rte_tel_data_alloc();
+ unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!lpm4_data || !lpm6_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize telemetry data structs as dicts */
+ rte_tel_data_start_dict(data);
+ rte_tel_data_start_dict(lpm4_data);
+ rte_tel_data_start_dict(lpm6_data);
+
+
+ if (params) {
+ coreid = (uint32_t)atoi(params);
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+
+ update_statistics(&total_stats, coreid);
+
+ /* add lpm 4 telemetry key/values pairs */
+ rte_tel_data_add_dict_u64(lpm4_data, "miss",
+ total_stats.lpm4.miss);
+
+ rte_tel_data_add_dict_container(data, "IPv4 LPM", lpm4_data, 0);
+
+ /* add lpm 6 telemetry key/values pairs */
+ rte_tel_data_add_dict_u64(lpm6_data, "miss",
+ total_stats.lpm6.miss);
+
+ rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0);
+
+exit:
+ if (rc) {
+ rte_tel_data_free(lpm4_data);
+ rte_tel_data_free(lpm6_data);
+ }
+ return rc;
+}
+
+static void
+ipsec_secgw_telemetry_init(void)
+{
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats",
+ handle_telemetry_cmd_ipsec_secgw_stats,
+ "Returns global stats. "
+ "Optional Parameters: int <logical core id>");
+
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/outbound",
+ handle_telemetry_cmd_ipsec_secgw_stats_outbound,
+ "Returns outbound global stats. "
+ "Optional Parameters: int <logical core id>");
+
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/inbound",
+ handle_telemetry_cmd_ipsec_secgw_stats_inbound,
+ "Returns inbound global stats. "
+ "Optional Parameters: int <logical core id>");
+
+ rte_telemetry_register_cmd("/examples/ipsec-secgw/stats/routing",
+ handle_telemetry_cmd_ipsec_secgw_stats_routing,
+ "Returns routing stats. "
+ "Optional Parameters: int <logical core id>");
+}
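+
+/*
+ * Example query, a sketch assuming the stock dpdk-telemetry.py client and an
+ * enabled lcore 2:
+ *   --> /examples/ipsec-secgw/stats,2
+ * returns the rx/tx/dropped counters of that lcore; omitting the parameter
+ * aggregates the counters over all enabled lcores.
+ */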
+
+
int32_t
main(int32_t argc, char **argv)
{
int32_t ret;
- uint32_t lcore_id;
+ uint32_t lcore_id, nb_txq, nb_rxq = 0;
+ uint32_t cdev_id;
uint32_t i;
uint8_t socket_id;
- uint16_t portid;
- uint64_t req_rx_offloads, req_tx_offloads;
+ uint16_t portid, nb_crypto_qp, nb_ports = 0;
+ uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
+ uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
+ struct eh_conf *eh_conf = NULL;
size_t sess_sz;
+ nb_bufs_in_pool = 0;
+
/* init EAL */
ret = rte_eal_init(argc, argv);
if (ret < 0)
argc -= ret;
argv += ret;
+ force_quit = false;
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ /* initialize event helper configuration */
+ eh_conf = eh_conf_init();
+ if (eh_conf == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to init event helper config");
+
/* parse application arguments (after the EAL ones) */
- ret = parse_args(argc, argv);
+ ret = parse_args(argc, argv, eh_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid parameters\n");
+ ipsec_secgw_telemetry_init();
+
+ /* parse configuration file */
+ if (parse_cfg_file(cfgfile) < 0) {
+ printf("parsing file \"%s\" failed\n",
+			cfgfile);
+ print_usage(argv[0]);
+ return -1;
+ }
+
if ((unprotected_port_mask & enabled_port_mask) !=
unprotected_port_mask)
rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
unprotected_port_mask);
- if (check_params() < 0)
- rte_exit(EXIT_FAILURE, "check_params failed\n");
+ if (unprotected_port_mask && !nb_sa_in)
+ rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask without configured SA inbound\n");
+
+ if (check_poll_mode_params(eh_conf) < 0)
+ rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");
+
+ if (check_event_mode_params(eh_conf) < 0)
+ rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n");
ret = init_lcore_rx_queues();
if (ret < 0)
sess_sz = max_session_size();
+ /*
+ * In event mode request minimum number of crypto queues
+ * to be reserved equal to number of ports.
+ */
+ if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
+ nb_crypto_qp = rte_eth_dev_count_avail();
+ else
+ nb_crypto_qp = 0;
+
+ nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
+
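+	/*
+	 * If the user did not pass -s, derive the mbuf count from the
+	 * port, queue and crypto topology.
+	 */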
+ if (nb_bufs_in_pool == 0) {
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ nb_ports++;
+ nb_rxq += get_port_nb_rx_queues(portid);
+ }
+
+ nb_txq = nb_lcores;
+
+ nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp,
+ nb_rxq, nb_txq);
+ }
+
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
else
socket_id = 0;
- /* mbuf_pool is initialised by the pool_init() function*/
- if (socket_ctx[socket_id].mbuf_pool)
+ if (per_port_pool) {
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ pool_init(&socket_ctx[socket_id], socket_id,
+ portid, nb_bufs_in_pool);
+ }
+ } else {
+ pool_init(&socket_ctx[socket_id], socket_id, 0,
+ nb_bufs_in_pool);
+ }
+
+ if (socket_ctx[socket_id].session_pool)
continue;
- pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
session_priv_pool_init(&socket_ctx[socket_id], socket_id,
sess_sz);
}
+	printf("Number of mbufs in packet pool %u\n", nb_bufs_in_pool);
RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);
- port_init(portid, req_rx_offloads, req_tx_offloads);
+ sa_check_offloads(portid, &req_rx_offloads[portid],
+ &req_tx_offloads[portid]);
+ port_init(portid, req_rx_offloads[portid],
+ req_tx_offloads[portid]);
}
- cryptodevs_init();
+ /*
+ * Set the enabled port mask in helper config for use by helper
+ * sub-system. This will be used while initializing devices using
+ * helper sub-system.
+ */
+ eh_conf->eth_portmask = enabled_port_mask;
+
+ /* Initialize eventmode components */
+ ret = eh_devs_init(eh_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret);
/* start ports */
RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- /*
- * Start device
- * note: device must be started before a flow rule
- * can be installed.
- */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
"err=%d, port=%d\n", ret, portid);
+
+ /* Create flow after starting the device */
+ create_default_ipsec_flow(portid, req_rx_offloads[portid]);
+
/*
* If enabled, put device in promiscuous mode.
* This allows IO forwarding mode to forward packets
rte_strerror(-ret), portid);
}
+ rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_INTR_RESET,
+ ethdev_reset_event_callback, NULL);
+
rte_eth_dev_callback_register(portid,
RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
}
/* Replicate each context per socket */
for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
socket_id = rte_socket_id_by_idx(i);
- if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
+ if ((socket_ctx[socket_id].session_pool != NULL) &&
(socket_ctx[socket_id].sa_in == NULL) &&
(socket_ctx[socket_id].sa_out == NULL)) {
sa_init(&socket_ctx[socket_id], socket_id);
}
}
+ flow_init();
+
check_all_ports_link_status(enabled_port_mask);
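+	/* start the periodic stats screen only when requested via -t */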
+ if (stats_interval > 0)
+ rte_eal_alarm_set(stats_interval * US_PER_S,
+ print_stats_cb, NULL);
+ else
+ RTE_LOG(INFO, IPSEC, "Stats display disabled\n");
+
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
+ /* Uninitialize eventmode components */
+ ret = eh_devs_uninit(eh_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret);
+
+ /* Free eventmode configuration memory */
+ eh_conf_uninit(eh_conf);
+
+ /* Destroy inline inbound and outbound sessions */
+ for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
+ socket_id = rte_socket_id_by_idx(i);
+ inline_sessions_free(socket_ctx[socket_id].sa_in);
+ inline_sessions_free(socket_ctx[socket_id].sa_out);
+ }
+
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ printf("Closing cryptodev %d...", cdev_id);
+ rte_cryptodev_stop(cdev_id);
+ rte_cryptodev_close(cdev_id);
+ printf(" Done\n");
+ }
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ printf("Closing port %d...", portid);
+ if (flow_info_tbl[portid].rx_def_flow) {
+ struct rte_flow_error err;
+
+ ret = rte_flow_destroy(portid,
+ flow_info_tbl[portid].rx_def_flow, &err);
+ if (ret)
+ RTE_LOG(ERR, IPSEC, "Failed to destroy flow "
+					"for port %u, err msg: %s\n", portid,
+ err.message);
+ }
+ ret = rte_eth_dev_stop(portid);
+ if (ret != 0)
+ RTE_LOG(ERR, IPSEC,
+ "rte_eth_dev_stop: err=%s, port=%u\n",
+ rte_strerror(-ret), portid);
+
+ rte_eth_dev_close(portid);
+ printf(" Done\n");
+ }
+
+ /* clean up the EAL */
+ rte_eal_cleanup();
+ printf("Bye...\n");
+
return 0;
}