X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fipsec-secgw.c;h=2219148285de59453a12fe61656ad5bdb3915a44;hb=cb056611a8ed9ab9024f3b91bf26e97255194514;hp=5ed41312eb5c7d4d3668508e874e2130e74e69bd;hpb=03ad0e5c25d89f956b74ebbdfb2999af3779f585;p=dpdk.git diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 5ed41312eb..2219148285 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -2,6 +2,7 @@ * Copyright(c) 2016 Intel Corporation */ +#include #include #include #include @@ -14,9 +15,11 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -41,30 +44,31 @@ #include #include #include +#include #include #include +#include +#include "event_helper.h" +#include "flow.h" #include "ipsec.h" +#include "ipsec_worker.h" #include "parser.h" +#include "sad.h" -#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 +volatile bool force_quit; #define MAX_JUMBO_PKT_LEN 9600 #define MEMPOOL_CACHE_SIZE 256 -#define NB_MBUF (32000) - #define CDEV_QUEUE_DESC 2048 #define CDEV_MAP_ENTRIES 16384 -#define CDEV_MP_NB_OBJS 1024 #define CDEV_MP_CACHE_SZ 64 #define MAX_QUEUE_PAIRS 1 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ -#define NB_SOCKETS 4 - /* Configure how many packets ahead to prefetch, when reading packets */ #define PREFETCH_OFFSET 3 @@ -72,8 +76,6 @@ #define MAX_LCORE_PARAMS 1024 -#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) - /* * Configurable number of RX/TX ring descriptors */ @@ -82,29 +84,6 @@ static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT; static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; -#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ - (((uint64_t)((a) & 0xff) << 56) | \ - ((uint64_t)((b) & 0xff) << 48) | \ - ((uint64_t)((c) & 0xff) << 40) | \ - ((uint64_t)((d) & 0xff) << 32) | \ - ((uint64_t)((e) & 0xff) << 24) | \ - ((uint64_t)((f) & 0xff) << 16) | \ - ((uint64_t)((g) & 0xff) << 8) | \ - ((uint64_t)(h) & 0xff)) -#else -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ - (((uint64_t)((h) & 0xff) << 56) | \ - ((uint64_t)((g) & 0xff) << 48) | \ - ((uint64_t)((f) & 0xff) << 40) | \ - ((uint64_t)((e) & 0xff) << 32) | \ - ((uint64_t)((d) & 0xff) << 24) | \ - ((uint64_t)((c) & 0xff) << 16) | \ - ((uint64_t)((b) & 0xff) << 8) | \ - ((uint64_t)(a) & 0xff)) -#endif -#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) - #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ (addr)->addr_bytes[0], (addr)->addr_bytes[1], \ (addr)->addr_bytes[2], (addr)->addr_bytes[3], \ @@ -112,15 +91,10 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; 0, 0) #define FRAG_TBL_BUCKET_ENTRIES 4 -#define FRAG_TTL_MS (10 * MS_PER_S) +#define MAX_FRAG_TTL_NS (10LL * NS_PER_S) #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) -/* port/source ethernet addr and destination ethernet addr */ -struct ethaddr_info { - uint64_t src, dst; -}; - struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, @@ -128,13 +102,24 @@ struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } }; +struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; + #define CMD_LINE_OPT_CONFIG "config" #define CMD_LINE_OPT_SINGLE_SA "single-sa" #define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask" +#define CMD_LINE_OPT_TRANSFER_MODE 
"transfer-mode" +#define CMD_LINE_OPT_SCHEDULE_TYPE "event-schedule-type" #define CMD_LINE_OPT_RX_OFFLOAD "rxoffload" #define CMD_LINE_OPT_TX_OFFLOAD "txoffload" #define CMD_LINE_OPT_REASSEMBLE "reassemble" #define CMD_LINE_OPT_MTU "mtu" +#define CMD_LINE_OPT_FRAG_TTL "frag-ttl" + +#define CMD_LINE_ARG_EVENT "event" +#define CMD_LINE_ARG_POLL "poll" +#define CMD_LINE_ARG_ORDERED "ordered" +#define CMD_LINE_ARG_ATOMIC "atomic" +#define CMD_LINE_ARG_PARALLEL "parallel" enum { /* long options mapped to a short option */ @@ -146,32 +131,39 @@ enum { CMD_LINE_OPT_CONFIG_NUM, CMD_LINE_OPT_SINGLE_SA_NUM, CMD_LINE_OPT_CRYPTODEV_MASK_NUM, + CMD_LINE_OPT_TRANSFER_MODE_NUM, + CMD_LINE_OPT_SCHEDULE_TYPE_NUM, CMD_LINE_OPT_RX_OFFLOAD_NUM, CMD_LINE_OPT_TX_OFFLOAD_NUM, CMD_LINE_OPT_REASSEMBLE_NUM, CMD_LINE_OPT_MTU_NUM, + CMD_LINE_OPT_FRAG_TTL_NUM, }; static const struct option lgopts[] = { {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM}, {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM}, {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM}, + {CMD_LINE_OPT_TRANSFER_MODE, 1, 0, CMD_LINE_OPT_TRANSFER_MODE_NUM}, + {CMD_LINE_OPT_SCHEDULE_TYPE, 1, 0, CMD_LINE_OPT_SCHEDULE_TYPE_NUM}, {CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM}, {CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM}, {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM}, {CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM}, + {CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM}, {NULL, 0, 0, 0} }; +uint32_t unprotected_port_mask; +uint32_t single_sa_idx; /* mask of enabled ports */ static uint32_t enabled_port_mask; static uint64_t enabled_cryptodev_mask = UINT64_MAX; -static uint32_t unprotected_port_mask; static int32_t promiscuous_on = 1; static int32_t numa_on = 1; /**< NUMA is enabled by default. */ static uint32_t nb_lcores; static uint32_t single_sa; -static uint32_t single_sa_idx; +static uint32_t nb_bufs_in_pool; /* * RX/TX HW offload capabilities to enable/use on ethernet ports. 
@@ -186,9 +178,14 @@ static uint64_t dev_tx_offload = UINT64_MAX; static uint32_t frag_tbl_sz; static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE; static uint32_t mtu_size = RTE_ETHER_MTU; +static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS; /* application wide librte_ipsec/SA parameters */ -struct app_sa_prm app_sa_prm = {.enable = 0}; +struct app_sa_prm app_sa_prm = { + .enable = 0, + .cache_sz = SA_CACHE_SZ + }; +static const char *cfgfile; struct lcore_rx_queue { uint16_t port_id; @@ -252,7 +249,7 @@ static struct rte_eth_conf port_conf = { }, }; -static struct socket_ctx socket_ctx[NB_SOCKETS]; +struct socket_ctx socket_ctx[NB_SOCKETS]; /* * Determine is multi-segment support required: @@ -292,6 +289,70 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph, } } +#if (STATS_INTERVAL > 0) + +/* Print out statistics on packet distribution */ +static void +print_stats_cb(__rte_unused void *param) +{ + uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; + float burst_percent, rx_per_call, tx_per_call; + unsigned int coreid; + + total_packets_dropped = 0; + total_packets_tx = 0; + total_packets_rx = 0; + + const char clr[] = { 27, '[', '2', 'J', '\0' }; + const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' }; + + /* Clear screen and move to top left */ + printf("%s%s", clr, topLeft); + + printf("\nCore statistics ===================================="); + + for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) { + /* skip disabled cores */ + if (rte_lcore_is_enabled(coreid) == 0) + continue; + burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/ + core_statistics[coreid].rx; + rx_per_call = (float)(core_statistics[coreid].rx)/ + core_statistics[coreid].rx_call; + tx_per_call = (float)(core_statistics[coreid].tx)/ + core_statistics[coreid].tx_call; + printf("\nStatistics for core %u ------------------------------" + "\nPackets received: %20"PRIu64 + "\nPackets sent: %24"PRIu64 + "\nPackets dropped: %21"PRIu64 + "\nBurst percent: %23.2f" + "\nPackets per Rx call: %17.2f" + "\nPackets per Tx call: %17.2f", + coreid, + core_statistics[coreid].rx, + core_statistics[coreid].tx, + core_statistics[coreid].dropped, + burst_percent, + rx_per_call, + tx_per_call); + + total_packets_dropped += core_statistics[coreid].dropped; + total_packets_tx += core_statistics[coreid].tx; + total_packets_rx += core_statistics[coreid].rx; + } + printf("\nAggregate statistics ===============================" + "\nTotal packets received: %14"PRIu64 + "\nTotal packets sent: %18"PRIu64 + "\nTotal packets dropped: %15"PRIu64, + total_packets_rx, + total_packets_tx, + total_packets_dropped); + printf("\n====================================================\n"); + + rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL); +} +#endif /* STATS_INTERVAL */ + static inline void prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) { @@ -314,6 +375,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) } pkt->l2_len = 0; pkt->l3_len = sizeof(*iph4); + pkt->packet_type |= RTE_PTYPE_L3_IPV4; } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { int next_proto; size_t l3len, ext_len; @@ -336,7 +398,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) /* drop packet when IPv6 header exceeds first segment length */ if (unlikely(l3len > pkt->data_len)) { - rte_pktmbuf_free(pkt); + free_pkts(&pkt, 1); return; } @@ -348,11 +410,12 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) } pkt->l2_len 
= 0; pkt->l3_len = l3len; + pkt->packet_type |= RTE_PTYPE_L3_IPV6; } else { /* Unknown/Unsupported type, drop the packet */ RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n", rte_be_to_cpu_16(eth->ether_type)); - rte_pktmbuf_free(pkt); + free_pkts(&pkt, 1); return; } @@ -479,9 +542,12 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) prepare_tx_burst(m_table, n, port, qconf); ret = rte_eth_tx_burst(port, queueid, m_table, n); + + core_stats_update_tx(ret); + if (unlikely(ret < n)) { do { - rte_pktmbuf_free(m_table[ret]); + free_pkts(&m_table[ret], 1); } while (++ret < n); } @@ -527,7 +593,7 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m, "error code: %d\n", __func__, m->pkt_len, rte_errno); - rte_pktmbuf_free(m); + free_pkts(&m, 1); return len; } @@ -552,7 +618,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto) } else if (frag_tbl_sz > 0) len = send_fragment_packet(qconf, m, port, proto); else - rte_pktmbuf_free(m); + free_pkts(&m, 1); /* enough pkts to be sent */ if (unlikely(len == MAX_PKT_BURST)) { @@ -586,19 +652,19 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip, continue; } if (res == DISCARD) { - rte_pktmbuf_free(m); + free_pkts(&m, 1); continue; } /* Only check SPI match for processed IPSec packets */ if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) { - rte_pktmbuf_free(m); + free_pkts(&m, 1); continue; } - sa_idx = SPI2IDX(res); + sa_idx = res - 1; if (!inbound_sa_check(sa, m, sa_idx)) { - rte_pktmbuf_free(m); + free_pkts(&m, 1); continue; } ip->pkts[j++] = m; @@ -633,7 +699,7 @@ split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num) offsetof(struct ip6_hdr, ip6_nxt)); n6++; } else - rte_pktmbuf_free(m); + free_pkts(&m, 1); } trf->ip4.num = n4; @@ -683,9 +749,9 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip, j = 0; for (i = 0; i < ip->num; i++) { m = ip->pkts[i]; - sa_idx = SPI2IDX(ip->res[i]); + sa_idx = ip->res[i] - 1; if (ip->res[i] == DISCARD) - rte_pktmbuf_free(m); + free_pkts(&m, 1); else if (ip->res[i] == BYPASS) ip->pkts[j++] = m; else { @@ -704,8 +770,7 @@ process_pkts_outbound(struct ipsec_ctx *ipsec_ctx, uint16_t idx, nb_pkts_out, i; /* Drop any IPsec traffic from protected ports */ - for (i = 0; i < traffic->ipsec.num; i++) - rte_pktmbuf_free(traffic->ipsec.pkts[i]); + free_pkts(traffic->ipsec.pkts, traffic->ipsec.num); traffic->ipsec.num = 0; @@ -745,14 +810,12 @@ process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx, uint32_t nb_pkts_in, i, idx; /* Drop any IPv4 traffic from unprotected ports */ - for (i = 0; i < traffic->ip4.num; i++) - rte_pktmbuf_free(traffic->ip4.pkts[i]); + free_pkts(traffic->ip4.pkts, traffic->ip4.num); traffic->ip4.num = 0; /* Drop any IPv6 traffic from unprotected ports */ - for (i = 0; i < traffic->ip6.num; i++) - rte_pktmbuf_free(traffic->ip6.pkts[i]); + free_pkts(traffic->ip6.pkts, traffic->ip6.num); traffic->ip6.num = 0; @@ -788,8 +851,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx, struct ip *ip; /* Drop any IPsec traffic from protected ports */ - for (i = 0; i < traffic->ipsec.num; i++) - rte_pktmbuf_free(traffic->ipsec.pkts[i]); + free_pkts(traffic->ipsec.pkts, traffic->ipsec.num); n = 0; @@ -903,7 +965,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) } if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) { - rte_pktmbuf_free(pkts[i]); + free_pkts(&pkts[i], 1); continue; } send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP); @@ -955,7 +1017,7 @@ 
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) } if (pkt_hop == -1) { - rte_pktmbuf_free(pkts[i]); + free_pkts(&pkts[i], 1); continue; } send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6); @@ -971,12 +1033,12 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, prepare_traffic(pkts, &traffic, nb_pkts); if (unlikely(single_sa)) { - if (UNPROTECTED_PORT(portid)) + if (is_unprotected_port(portid)) process_pkts_inbound_nosp(&qconf->inbound, &traffic); else process_pkts_outbound_nosp(&qconf->outbound, &traffic); } else { - if (UNPROTECTED_PORT(portid)) + if (is_unprotected_port(portid)) process_pkts_inbound(&qconf->inbound, &traffic); else process_pkts_outbound(&qconf->outbound, &traffic); @@ -1087,8 +1149,8 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf, } /* main processing loop */ -static int32_t -main_loop(__attribute__((unused)) void *dummy) +void +ipsec_poll_mode_worker(void) { struct rte_mbuf *pkts[MAX_PKT_BURST]; uint32_t lcore_id; @@ -1097,7 +1159,7 @@ main_loop(__attribute__((unused)) void *dummy) uint16_t portid; uint8_t queueid; struct lcore_conf *qconf; - int32_t socket_id; + int32_t rc, socket_id; const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; struct lcore_rx_queue *rxql; @@ -1127,10 +1189,18 @@ main_loop(__attribute__((unused)) void *dummy) qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool; qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir; + rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz); + if (rc != 0) { + RTE_LOG(ERR, IPSEC, + "SAD cache init on lcore %u, failed with code: %d\n", + lcore_id, rc); + return; + } + if (qconf->nb_rx_queue == 0) { RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n", lcore_id); - return 0; + return; } RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id); @@ -1143,7 +1213,7 @@ main_loop(__attribute__((unused)) void *dummy) lcore_id, portid, queueid); } - while (1) { + while (!force_quit) { cur_tsc = rte_rdtsc(); /* TX queue buffer drain */ @@ -1163,11 +1233,13 @@ main_loop(__attribute__((unused)) void *dummy) nb_rx = rte_eth_rx_burst(portid, queueid, pkts, MAX_PKT_BURST); - if (nb_rx > 0) + if (nb_rx > 0) { + core_stats_update_rx(nb_rx); process_pkts(qconf, pkts, nb_rx, portid); + } /* dequeue and process completed crypto-ops */ - if (UNPROTECTED_PORT(portid)) + if (is_unprotected_port(portid)) drain_inbound_crypto_queues(qconf, &qconf->inbound); else @@ -1177,14 +1249,42 @@ main_loop(__attribute__((unused)) void *dummy) } } +int +check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) +{ + uint16_t i; + uint16_t portid; + uint8_t queueid; + + for (i = 0; i < nb_lcore_params; ++i) { + portid = lcore_params_array[i].port_id; + if (portid == fdir_portid) { + queueid = lcore_params_array[i].queue_id; + if (queueid == fdir_qid) + break; + } + + if (i == nb_lcore_params - 1) + return -1; + } + + return 1; +} + static int32_t -check_params(void) +check_poll_mode_params(struct eh_conf *eh_conf) { uint8_t lcore; uint16_t portid; uint16_t i; int32_t socket_id; + if (!eh_conf) + return -EINVAL; + + if (eh_conf->mode != EH_PKT_TRANSFER_MODE_POLL) + return 0; + if (lcore_params == NULL) { printf("Error: No port/queue/core mappings\n"); return -1; @@ -1266,10 +1366,14 @@ print_usage(const char *prgname) " [-w REPLAY_WINDOW_SIZE]" " [-e]" " [-a]" + " [-c]" + " [-s NUMBER_OF_MBUFS_IN_PKT_POOL]" " -f CONFIG_FILE" " --config (port,queue,lcore)[,(port,queue,lcore)]" " [--single-sa SAIDX]" " [--cryptodev_mask MASK]" + 
" [--transfer-mode MODE]" + " [--event-schedule-type TYPE]" " [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]" " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]" " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]" @@ -1285,12 +1389,32 @@ print_usage(const char *prgname) " size for each SA\n" " -e enables ESN\n" " -a enables SA SQN atomic behaviour\n" + " -c specifies inbound SAD cache size,\n" + " zero value disables the cache (default value: 128)\n" + " -s number of mbufs in packet pool, if not specified number\n" + " of mbufs will be calculated based on number of cores,\n" + " ports and crypto queues\n" " -f CONFIG_FILE: Configuration file\n" - " --config (port,queue,lcore): Rx queue configuration\n" - " --single-sa SAIDX: Use single SA index for outbound traffic,\n" - " bypassing the SP\n" + " --config (port,queue,lcore): Rx queue configuration. In poll\n" + " mode determines which queues from\n" + " which ports are mapped to which cores.\n" + " In event mode this option is not used\n" + " as packets are dynamically scheduled\n" + " to cores by HW.\n" + " --single-sa SAIDX: In poll mode use single SA index for\n" + " outbound traffic, bypassing the SP\n" + " In event mode selects driver submode,\n" + " SA index value is ignored\n" " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n" " devices to configure\n" + " --transfer-mode MODE\n" + " \"poll\" : Packet transfer via polling (default)\n" + " \"event\" : Packet transfer via event device\n" + " --event-schedule-type TYPE queue schedule type, used only when\n" + " transfer mode is set to event\n" + " \"ordered\" : Ordered (default)\n" + " \"atomic\" : Atomic\n" + " \"parallel\" : Parallel\n" " --" CMD_LINE_OPT_RX_OFFLOAD ": bitmask of the RX HW offload capabilities to enable/use\n" " (DEV_RX_OFFLOAD_*)\n" @@ -1304,6 +1428,9 @@ print_usage(const char *prgname) ": MTU value on all ports (default value: 1500)\n" " outgoing packets with bigger size will be fragmented\n" " incoming packets with bigger size will be discarded\n" + " --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS" + ": fragments lifetime in nanoseconds, default\n" + " and maximum value is 10.000.000.000 ns (10 s)\n" "\n", prgname); } @@ -1340,14 +1467,15 @@ parse_portmask(const char *portmask) return pm; } -static int32_t +static int64_t parse_decimal(const char *str) { char *end = NULL; - unsigned long num; + uint64_t num; - num = strtoul(str, &end, 10); - if ((str[0] == '\0') || (end == NULL) || (*end != '\0')) + num = strtoull(str, &end, 10); + if ((str[0] == '\0') || (end == NULL) || (*end != '\0') + || num > INT64_MAX) return -1; return num; @@ -1415,18 +1543,54 @@ print_app_sa_prm(const struct app_sa_prm *prm) printf("librte_ipsec usage: %s\n", (prm->enable == 0) ? "disabled" : "enabled"); - if (prm->enable == 0) - return; - printf("replay window size: %u\n", prm->window_size); printf("ESN: %s\n", (prm->enable_esn == 0) ? 
"disabled" : "enabled"); printf("SA flags: %#" PRIx64 "\n", prm->flags); + printf("Frag TTL: %" PRIu64 " ns\n", frag_ttl_ns); +} + +static int +parse_transfer_mode(struct eh_conf *conf, const char *optarg) +{ + if (!strcmp(CMD_LINE_ARG_POLL, optarg)) + conf->mode = EH_PKT_TRANSFER_MODE_POLL; + else if (!strcmp(CMD_LINE_ARG_EVENT, optarg)) + conf->mode = EH_PKT_TRANSFER_MODE_EVENT; + else { + printf("Unsupported packet transfer mode\n"); + return -EINVAL; + } + + return 0; +} + +static int +parse_schedule_type(struct eh_conf *conf, const char *optarg) +{ + struct eventmode_conf *em_conf = NULL; + + /* Get eventmode conf */ + em_conf = conf->mode_params; + + if (!strcmp(CMD_LINE_ARG_ORDERED, optarg)) + em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED; + else if (!strcmp(CMD_LINE_ARG_ATOMIC, optarg)) + em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ATOMIC; + else if (!strcmp(CMD_LINE_ARG_PARALLEL, optarg)) + em_conf->ext_params.sched_type = RTE_SCHED_TYPE_PARALLEL; + else { + printf("Unsupported queue schedule type\n"); + return -EINVAL; + } + + return 0; } static int32_t -parse_args(int32_t argc, char **argv) +parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf) { - int32_t opt, ret; + int opt; + int64_t ret; char **argvopt; int32_t option_index; char *prgname = argv[0]; @@ -1434,7 +1598,7 @@ parse_args(int32_t argc, char **argv) argvopt = argv; - while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:", + while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:s:", lgopts, &option_index)) != EOF) { switch (opt) { @@ -1465,14 +1629,22 @@ parse_args(int32_t argc, char **argv) print_usage(prgname); return -1; } - if (parse_cfg_file(optarg) < 0) { - printf("parsing file \"%s\" failed\n", - optarg); + cfgfile = optarg; + f_present = 1; + break; + + case 's': + ret = parse_decimal(optarg); + if (ret < 0) { + printf("Invalid number of buffers in a pool: " + "%s\n", optarg); print_usage(prgname); return -1; } - f_present = 1; + + nb_bufs_in_pool = ret; break; + case 'j': ret = parse_decimal(optarg); if (ret < RTE_MBUF_DEFAULT_BUF_SIZE || @@ -1489,17 +1661,24 @@ parse_args(int32_t argc, char **argv) app_sa_prm.enable = 1; break; case 'w': - app_sa_prm.enable = 1; app_sa_prm.window_size = parse_decimal(optarg); break; case 'e': - app_sa_prm.enable = 1; app_sa_prm.enable_esn = 1; break; case 'a': app_sa_prm.enable = 1; app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM; break; + case 'c': + ret = parse_decimal(optarg); + if (ret < 0) { + printf("Invalid SA cache size: %s\n", optarg); + print_usage(prgname); + return -1; + } + app_sa_prm.cache_sz = ret; + break; case CMD_LINE_OPT_CONFIG_NUM: ret = parse_config(optarg); if (ret) { @@ -1510,7 +1689,7 @@ parse_args(int32_t argc, char **argv) break; case CMD_LINE_OPT_SINGLE_SA_NUM: ret = parse_decimal(optarg); - if (ret == -1) { + if (ret == -1 || ret > UINT32_MAX) { printf("Invalid argument[sa_idx]\n"); print_usage(prgname); return -1; @@ -1519,6 +1698,7 @@ parse_args(int32_t argc, char **argv) /* else */ single_sa = 1; single_sa_idx = ret; + eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER; printf("Configured with single SA index %u\n", single_sa_idx); break; @@ -1533,6 +1713,25 @@ parse_args(int32_t argc, char **argv) /* else */ enabled_cryptodev_mask = ret; break; + + case CMD_LINE_OPT_TRANSFER_MODE_NUM: + ret = parse_transfer_mode(eh_conf, optarg); + if (ret < 0) { + printf("Invalid packet transfer mode\n"); + print_usage(prgname); + return -1; + } + break; + + case CMD_LINE_OPT_SCHEDULE_TYPE_NUM: + ret = parse_schedule_type(eh_conf, 
optarg); + if (ret < 0) { + printf("Invalid queue schedule type\n"); + print_usage(prgname); + return -1; + } + break; + case CMD_LINE_OPT_RX_OFFLOAD_NUM: ret = parse_mask(optarg, &dev_rx_offload); if (ret != 0) { @@ -1553,7 +1752,7 @@ parse_args(int32_t argc, char **argv) break; case CMD_LINE_OPT_REASSEMBLE_NUM: ret = parse_decimal(optarg); - if (ret < 0) { + if (ret < 0 || ret > UINT32_MAX) { printf("Invalid argument for \'%s\': %s\n", CMD_LINE_OPT_REASSEMBLE, optarg); print_usage(prgname); @@ -1571,6 +1770,16 @@ parse_args(int32_t argc, char **argv) } mtu_size = ret; break; + case CMD_LINE_OPT_FRAG_TTL_NUM: + ret = parse_decimal(optarg); + if (ret < 0 || ret > MAX_FRAG_TTL_NS) { + printf("Invalid argument for \'%s\': %s\n", + CMD_LINE_OPT_MTU, optarg); + print_usage(prgname); + return -1; + } + frag_ttl_ns = ret; + break; default: print_usage(prgname); return -1; @@ -1632,6 +1841,8 @@ check_all_ports_link_status(uint32_t port_mask) uint16_t portid; uint8_t count, all_ports_up, print_flag = 0; struct rte_eth_link link; + int ret; + char link_status_text[RTE_ETH_LINK_MAX_STR_LEN]; printf("\nChecking link status"); fflush(stdout); @@ -1641,17 +1852,20 @@ check_all_ports_link_status(uint32_t port_mask) if ((port_mask & (1 << portid)) == 0) continue; memset(&link, 0, sizeof(link)); - rte_eth_link_get_nowait(portid, &link); + ret = rte_eth_link_get_nowait(portid, &link); + if (ret < 0) { + all_ports_up = 0; + if (print_flag == 1) + printf("Port %u link get failed: %s\n", + portid, rte_strerror(-ret)); + continue; + } /* print link status if flag set */ if (print_flag == 1) { - if (link.link_status) - printf( - "Port%d Link Up - speed %u Mbps -%s\n", - portid, link.link_speed, - (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? - ("full-duplex") : ("half-duplex\n")); - else - printf("Port %d Link Down\n", portid); + rte_eth_link_to_str(link_status_text, + sizeof(link_status_text), &link); + printf("Port %d %s\n", portid, + link_status_text); continue; } /* clear all_ports_up flag if any link down */ @@ -1800,12 +2014,12 @@ check_cryptodev_mask(uint8_t cdev_id) return -1; } -static int32_t -cryptodevs_init(void) +static uint16_t +cryptodevs_init(uint16_t req_queue_num) { struct rte_cryptodev_config dev_conf; struct rte_cryptodev_qp_conf qp_conf; - uint16_t idx, max_nb_qps, qp, i; + uint16_t idx, max_nb_qps, qp, total_nb_qps, i; int16_t cdev_id; struct rte_hash_parameters params = { 0 }; @@ -1833,6 +2047,7 @@ cryptodevs_init(void) printf("lcore/cryptodev/qp mappings:\n"); idx = 0; + total_nb_qps = 0; for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) { struct rte_cryptodev_info cdev_info; @@ -1863,18 +2078,21 @@ cryptodevs_init(void) i++; } + qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp)); if (qp == 0) continue; + total_nb_qps += qp; dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id); dev_conf.nb_queue_pairs = qp; dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO; uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions; - if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS) + if (dev_max_sess != 0 && + dev_max_sess < get_nb_crypto_sessions()) rte_exit(EXIT_FAILURE, "Device does not support at least %u " - "sessions", CDEV_MP_NB_OBJS); + "sessions", get_nb_crypto_sessions()); if (rte_cryptodev_configure(cdev_id, &dev_conf)) rte_panic("Failed to initialize cryptodev %u\n", @@ -1898,7 +2116,7 @@ cryptodevs_init(void) printf("\n"); - return 0; + return total_nb_qps; } static void @@ -1926,7 +2144,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t 
req_tx_offloads) printf("Configuring device port %u:\n", portid); - rte_eth_macaddr_get(portid, ðaddr); + ret = rte_eth_macaddr_get(portid, ðaddr); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "Error getting MAC address (port %u): %s\n", + portid, rte_strerror(-ret)); + ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ðaddr); print_ethaddr("Address: ", ðaddr); printf("\n"); @@ -2121,12 +2344,14 @@ session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz) { char mp_name[RTE_MEMPOOL_NAMESIZE]; struct rte_mempool *sess_mp; + uint32_t nb_sess; snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "sess_mp_%u", socket_id); + nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ * + rte_lcore_count()); sess_mp = rte_cryptodev_sym_session_pool_create( - mp_name, CDEV_MP_NB_OBJS, - sess_sz, CDEV_MP_CACHE_SZ, 0, + mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0, socket_id); ctx->session_pool = sess_mp; @@ -2143,11 +2368,14 @@ session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id, { char mp_name[RTE_MEMPOOL_NAMESIZE]; struct rte_mempool *sess_mp; + uint32_t nb_sess; snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "sess_mp_priv_%u", socket_id); + nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ * + rte_lcore_count()); sess_mp = rte_mempool_create(mp_name, - CDEV_MP_NB_OBJS, + nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0, NULL, NULL, NULL, @@ -2335,8 +2563,8 @@ reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid) /* create fragment table */ sid = rte_lcore_to_socket_id(cid); - frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / - MS_PER_S * FRAG_TTL_MS; + frag_cycles = (rte_get_tsc_hz() + NS_PER_S - 1) / + NS_PER_S * frag_ttl_ns; lc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz, FRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid); @@ -2382,17 +2610,201 @@ reassemble_init(void) return rc; } +static void +create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads) +{ + struct rte_flow_action action[2]; + struct rte_flow_item pattern[2]; + struct rte_flow_attr attr = {0}; + struct rte_flow_error err; + struct rte_flow *flow; + int ret; + + if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY)) + return; + + /* Add the default rte_flow to enable SECURITY for all ESP packets */ + + pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP; + pattern[0].spec = NULL; + pattern[0].mask = NULL; + pattern[0].last = NULL; + pattern[1].type = RTE_FLOW_ITEM_TYPE_END; + + action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY; + action[0].conf = NULL; + action[1].type = RTE_FLOW_ACTION_TYPE_END; + action[1].conf = NULL; + + attr.ingress = 1; + + ret = rte_flow_validate(port_id, &attr, pattern, action, &err); + if (ret) + return; + + flow = rte_flow_create(port_id, &attr, pattern, action, &err); + if (flow == NULL) + return; + + flow_info_tbl[port_id].rx_def_flow = flow; + RTE_LOG(INFO, IPSEC, + "Created default flow enabling SECURITY for all ESP traffic on port %d\n", + port_id); +} + +static void +signal_handler(int signum) +{ + if (signum == SIGINT || signum == SIGTERM) { + printf("\n\nSignal %d received, preparing to exit...\n", + signum); + force_quit = true; + } +} + +static void +ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa) +{ + struct rte_ipsec_session *ips; + int32_t i; + + if (!sa || !nb_sa) + return; + + for (i = 0; i < nb_sa; i++) { + ips = ipsec_get_primary_session(&sa[i]); + if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) + rte_exit(EXIT_FAILURE, "Event mode supports only " + "inline protocol sessions\n"); + } + +} + +static int32_t +check_event_mode_params(struct eh_conf *eh_conf) +{ + struct 
eventmode_conf *em_conf = NULL; + struct lcore_params *params; + uint16_t portid; + + if (!eh_conf || !eh_conf->mode_params) + return -EINVAL; + + /* Get eventmode conf */ + em_conf = eh_conf->mode_params; + + if (eh_conf->mode == EH_PKT_TRANSFER_MODE_POLL && + em_conf->ext_params.sched_type != SCHED_TYPE_NOT_SET) { + printf("error: option --event-schedule-type applies only to " + "event mode\n"); + return -EINVAL; + } + + if (eh_conf->mode != EH_PKT_TRANSFER_MODE_EVENT) + return 0; + + /* Set schedule type to ORDERED if it wasn't explicitly set by user */ + if (em_conf->ext_params.sched_type == SCHED_TYPE_NOT_SET) + em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED; + + /* + * Event mode currently supports only inline protocol sessions. + * If there are other types of sessions configured then exit with + * error. + */ + ev_mode_sess_verify(sa_in, nb_sa_in); + ev_mode_sess_verify(sa_out, nb_sa_out); + + + /* Option --config does not apply to event mode */ + if (nb_lcore_params > 0) { + printf("error: option --config applies only to poll mode\n"); + return -EINVAL; + } + + /* + * In order to use the same port_init routine for both poll and event + * modes initialize lcore_params with one queue for each eth port + */ + lcore_params = lcore_params_array; + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + params = &lcore_params[nb_lcore_params++]; + params->port_id = portid; + params->queue_id = 0; + params->lcore_id = rte_get_next_lcore(0, 0, 1); + } + + return 0; +} + +static void +inline_sessions_free(struct sa_ctx *sa_ctx) +{ + struct rte_ipsec_session *ips; + struct ipsec_sa *sa; + int32_t ret; + uint32_t i; + + if (!sa_ctx) + return; + + for (i = 0; i < sa_ctx->nb_sa; i++) { + + sa = &sa_ctx->sa[i]; + if (!sa->spi) + continue; + + ips = ipsec_get_primary_session(sa); + if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL && + ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) + continue; + + if (!rte_eth_dev_is_valid_port(sa->portid)) + continue; + + ret = rte_security_session_destroy( + rte_eth_dev_get_sec_ctx(sa->portid), + ips->security.ses); + if (ret) + RTE_LOG(ERR, IPSEC, "Failed to destroy security " + "session type %d, spi %d\n", + ips->type, sa->spi); + } +} + +static uint32_t +calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq, + uint32_t nb_txq) +{ + return RTE_MAX((nb_rxq * nb_rxd + + nb_ports * nb_lcores * MAX_PKT_BURST + + nb_ports * nb_txq * nb_txd + + nb_lcores * MEMPOOL_CACHE_SIZE + + nb_crypto_qp * CDEV_QUEUE_DESC + + nb_lcores * frag_tbl_sz * + FRAG_TBL_BUCKET_ENTRIES), + 8192U); +} + int32_t main(int32_t argc, char **argv) { int32_t ret; - uint32_t lcore_id; + uint32_t lcore_id, nb_txq, nb_rxq = 0; + uint32_t cdev_id; uint32_t i; uint8_t socket_id; - uint16_t portid; - uint64_t req_rx_offloads, req_tx_offloads; + uint16_t portid, nb_crypto_qp, nb_ports = 0; + uint64_t req_rx_offloads[RTE_MAX_ETHPORTS]; + uint64_t req_tx_offloads[RTE_MAX_ETHPORTS]; + struct eh_conf *eh_conf = NULL; size_t sess_sz; + nb_bufs_in_pool = 0; + /* init EAL */ ret = rte_eal_init(argc, argv); if (ret < 0) @@ -2400,18 +2812,38 @@ main(int32_t argc, char **argv) argc -= ret; argv += ret; + force_quit = false; + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + /* initialize event helper configuration */ + eh_conf = eh_conf_init(); + if (eh_conf == NULL) + rte_exit(EXIT_FAILURE, "Failed to init event helper config"); + /* parse application arguments (after the EAL ones) */ - ret = 
parse_args(argc, argv); + ret = parse_args(argc, argv, eh_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid parameters\n"); + /* parse configuration file */ + if (parse_cfg_file(cfgfile) < 0) { + printf("parsing file \"%s\" failed\n", + optarg); + print_usage(argv[0]); + return -1; + } + if ((unprotected_port_mask & enabled_port_mask) != unprotected_port_mask) rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n", unprotected_port_mask); - if (check_params() < 0) - rte_exit(EXIT_FAILURE, "check_params failed\n"); + if (check_poll_mode_params(eh_conf) < 0) + rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n"); + + if (check_event_mode_params(eh_conf) < 0) + rte_exit(EXIT_FAILURE, "check_event_mode_params failed\n"); ret = init_lcore_rx_queues(); if (ret < 0) @@ -2421,6 +2853,31 @@ main(int32_t argc, char **argv) sess_sz = max_session_size(); + /* + * In event mode request minimum number of crypto queues + * to be reserved equal to number of ports. + */ + if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT) + nb_crypto_qp = rte_eth_dev_count_avail(); + else + nb_crypto_qp = 0; + + nb_crypto_qp = cryptodevs_init(nb_crypto_qp); + + if (nb_bufs_in_pool == 0) { + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + nb_ports++; + nb_rxq += get_port_nb_rx_queues(portid); + } + + nb_txq = nb_lcores; + + nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp, + nb_rxq, nb_txq); + } + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { if (rte_lcore_is_enabled(lcore_id) == 0) continue; @@ -2434,32 +2891,43 @@ main(int32_t argc, char **argv) if (socket_ctx[socket_id].mbuf_pool) continue; - pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF); + pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool); session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); session_priv_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); } + printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool); RTE_ETH_FOREACH_DEV(portid) { if ((enabled_port_mask & (1 << portid)) == 0) continue; - sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads); - port_init(portid, req_rx_offloads, req_tx_offloads); + sa_check_offloads(portid, &req_rx_offloads[portid], + &req_tx_offloads[portid]); + port_init(portid, req_rx_offloads[portid], + req_tx_offloads[portid]); } - cryptodevs_init(); + /* + * Set the enabled port mask in helper config for use by helper + * sub-system. This will be used while initializing devices using + * helper sub-system. + */ + eh_conf->eth_portmask = enabled_port_mask; + + /* Initialize eventmode components */ + ret = eh_devs_init(eh_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "eh_devs_init failed, err=%d\n", ret); /* start ports */ RTE_ETH_FOREACH_DEV(portid) { if ((enabled_port_mask & (1 << portid)) == 0) continue; - /* - * Start device - * note: device must be started before a flow rule - * can be installed. - */ + /* Create flow before starting the device */ + create_default_ipsec_flow(portid, req_rx_offloads[portid]); + ret = rte_eth_dev_start(portid); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " @@ -2470,8 +2938,13 @@ main(int32_t argc, char **argv) * to itself through 2 cross-connected ports of the * target machine. 
*/ - if (promiscuous_on) - rte_eth_promiscuous_enable(portid); + if (promiscuous_on) { + ret = rte_eth_promiscuous_enable(portid); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "rte_eth_promiscuous_enable: err=%s, port=%d\n", + rte_strerror(-ret), portid); + } rte_eth_dev_callback_register(portid, RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL); @@ -2497,14 +2970,70 @@ main(int32_t argc, char **argv) } } + flow_init(); + check_all_ports_link_status(enabled_port_mask); +#if (STATS_INTERVAL > 0) + rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL); +#else + RTE_LOG(INFO, IPSEC, "Stats display disabled\n"); +#endif /* STATS_INTERVAL */ + /* launch per-lcore init on every lcore */ - rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); - RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN); + RTE_LCORE_FOREACH_WORKER(lcore_id) { if (rte_eal_wait_lcore(lcore_id) < 0) return -1; } + /* Uninitialize eventmode components */ + ret = eh_devs_uninit(eh_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "eh_devs_uninit failed, err=%d\n", ret); + + /* Free eventmode configuration memory */ + eh_conf_uninit(eh_conf); + + /* Destroy inline inbound and outbound sessions */ + for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) { + socket_id = rte_socket_id_by_idx(i); + inline_sessions_free(socket_ctx[socket_id].sa_in); + inline_sessions_free(socket_ctx[socket_id].sa_out); + } + + for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) { + printf("Closing cryptodev %d...", cdev_id); + rte_cryptodev_stop(cdev_id); + rte_cryptodev_close(cdev_id); + printf(" Done\n"); + } + + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + + printf("Closing port %d...", portid); + if (flow_info_tbl[portid].rx_def_flow) { + struct rte_flow_error err; + + ret = rte_flow_destroy(portid, + flow_info_tbl[portid].rx_def_flow, &err); + if (ret) + RTE_LOG(ERR, IPSEC, "Failed to destroy flow " + " for port %u, err msg: %s\n", portid, + err.message); + } + ret = rte_eth_dev_stop(portid); + if (ret != 0) + RTE_LOG(ERR, IPSEC, + "rte_eth_dev_stop: err=%s, port=%u\n", + rte_strerror(-ret), portid); + + rte_eth_dev_close(portid); + printf(" Done\n"); + } + printf("Bye...\n"); + return 0; }
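
Below is a minimal, standalone sketch of the packet-pool sizing that the new calculate_nb_mbufs() helper performs when -s is not given. The formula is the one added by this patch; the port/queue/lcore/descriptor counts and the MAX_PKT_BURST value are made-up example inputs for illustration, not values taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed constants for illustration; MAX_PKT_BURST is defined elsewhere
 * in the application, the others appear in the patch. */
#define MAX_PKT_BURST           32
#define MEMPOOL_CACHE_SIZE      256
#define CDEV_QUEUE_DESC         2048
#define FRAG_TBL_BUCKET_ENTRIES 4

/* Same arithmetic as calculate_nb_mbufs() in the patch, with the globals
 * (nb_lcores, nb_rxd, nb_txd, frag_tbl_sz) turned into parameters. */
static uint32_t
calc_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
	      uint32_t nb_txq, uint32_t nb_lcores, uint16_t nb_rxd,
	      uint16_t nb_txd, uint32_t frag_tbl_sz)
{
	uint32_t n = nb_rxq * nb_rxd +
		nb_ports * nb_lcores * MAX_PKT_BURST +
		nb_ports * nb_txq * nb_txd +
		nb_lcores * MEMPOOL_CACHE_SIZE +
		nb_crypto_qp * CDEV_QUEUE_DESC +
		nb_lcores * frag_tbl_sz * FRAG_TBL_BUCKET_ENTRIES;

	return n > 8192U ? n : 8192U;	/* RTE_MAX(..., 8192U) */
}

int main(void)
{
	/* Hypothetical run: 2 ports, 2 crypto QPs, 4 RX queues, 4 lcores
	 * (one TX queue per lcore), 1024 RX/TX descriptors, no reassembly:
	 * 4096 + 256 + 8192 + 1024 + 4096 + 0 = 17664 mbufs. */
	printf("%u mbufs\n", calc_nb_mbufs(2, 2, 4, 4, 4, 1024, 1024, 0));
	return 0;
}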
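
The new --frag-ttl option replaces the FRAG_TTL_MS constant with frag_ttl_ns (default and maximum 10 s expressed in nanoseconds), and reassemble_lcore_init() now converts that value to TSC cycles before calling rte_ip_frag_table_create(). A self-contained sketch of the conversion, assuming a 2.0 GHz TSC purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define NS_PER_S        1000000000ULL
#define MAX_FRAG_TTL_NS (10ULL * NS_PER_S)	/* 10 s default, as in the patch */

int main(void)
{
	uint64_t tsc_hz = 2000000000ULL; /* rte_get_tsc_hz() at run time; assumed here */
	uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;

	/* Same expression as reassemble_lcore_init(): round the TSC rate up
	 * to whole cycles per nanosecond, then scale by the TTL in ns. */
	uint64_t frag_cycles = (tsc_hz + NS_PER_S - 1) / NS_PER_S * frag_ttl_ns;

	printf("%llu cycles (~%llu s at the assumed 2 GHz)\n",
	       (unsigned long long)frag_cycles,
	       (unsigned long long)(frag_cycles / tsc_hz));
	return 0;
}

Because the cycles-per-nanosecond factor is rounded up to an integer, the effective lifetime can come out somewhat longer than requested on TSCs that are not an exact multiple of 1 GHz (roughly 13 s instead of 10 s at 2.3 GHz, for example).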
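
The fixed CDEV_MP_NB_OBJS session-pool size is likewise dropped: session_pool_init() and session_priv_pool_init() now size the pools from get_nb_crypto_sessions() plus per-lcore mempool-cache slack. A small sketch of that sizing, with made-up session and lcore counts (get_nb_crypto_sessions() itself is an application helper not shown in this diff):

#include <stdint.h>
#include <stdio.h>

#define CDEV_MP_CACHE_SZ 64	/* per-lcore mempool cache size, from the patch */

/* nb_sess = get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ * rte_lcore_count() */
static uint32_t
session_pool_size(uint32_t nb_crypto_sessions, uint32_t nb_lcores)
{
	return nb_crypto_sessions + CDEV_MP_CACHE_SZ * nb_lcores;
}

int main(void)
{
	/* e.g. 512 required sessions on a 4-lcore run -> 768 pool objects */
	printf("%u\n", session_pool_size(512, 4));
	return 0;
}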