X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fipsec-secgw.c;h=4bdf99b62bbc3e4f9b96cb8ff5542e0e6396c748;hb=daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f;hp=2e55614ae7b583f6a9feaa35bb24b9523bb68e5c;hpb=9ad50c29d01d0d3e7a1f420178d795a92ca4b682;p=dpdk.git diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 2e55614ae7..4bdf99b62b 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -47,8 +46,10 @@ #include #include #include +#include #include "event_helper.h" +#include "flow.h" #include "ipsec.h" #include "ipsec_worker.h" #include "parser.h" @@ -60,12 +61,10 @@ volatile bool force_quit; #define MEMPOOL_CACHE_SIZE 256 -#define NB_MBUF (32000) - #define CDEV_QUEUE_DESC 2048 #define CDEV_MAP_ENTRIES 16384 -#define CDEV_MP_NB_OBJS 1024 #define CDEV_MP_CACHE_SZ 64 +#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */ #define MAX_QUEUE_PAIRS 1 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ @@ -164,6 +163,7 @@ static int32_t promiscuous_on = 1; static int32_t numa_on = 1; /**< NUMA is enabled by default. */ static uint32_t nb_lcores; static uint32_t single_sa; +static uint32_t nb_bufs_in_pool; /* * RX/TX HW offload capabilities to enable/use on ethernet ports. @@ -183,7 +183,8 @@ static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS; /* application wide librte_ipsec/SA parameters */ struct app_sa_prm app_sa_prm = { .enable = 0, - .cache_sz = SA_CACHE_SZ + .cache_sz = SA_CACHE_SZ, + .udp_encap = 0 }; static const char *cfgfile; @@ -232,20 +233,19 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; static struct rte_eth_conf port_conf = { .rxmode = { - .mq_mode = ETH_MQ_RX_RSS, - .max_rx_pkt_len = RTE_ETHER_MAX_LEN, + .mq_mode = RTE_ETH_MQ_RX_RSS, .split_hdr_size = 0, - .offloads = DEV_RX_OFFLOAD_CHECKSUM, + .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM, }, .rx_adv_conf = { .rss_conf = { .rss_key = NULL, - .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | - ETH_RSS_TCP | ETH_RSS_SCTP, + .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | + RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP, }, }, .txmode = { - .mq_mode = ETH_MQ_TX_NONE, + .mq_mode = RTE_ETH_MQ_TX_NONE, }, }; @@ -289,12 +289,79 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph, } } +#if (STATS_INTERVAL > 0) + +/* Print out statistics on packet distribution */ +static void +print_stats_cb(__rte_unused void *param) +{ + uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; + float burst_percent, rx_per_call, tx_per_call; + unsigned int coreid; + + total_packets_dropped = 0; + total_packets_tx = 0; + total_packets_rx = 0; + + const char clr[] = { 27, '[', '2', 'J', '\0' }; + const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' }; + + /* Clear screen and move to top left */ + printf("%s%s", clr, topLeft); + + printf("\nCore statistics ===================================="); + + for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) { + /* skip disabled cores */ + if (rte_lcore_is_enabled(coreid) == 0) + continue; + burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/ + core_statistics[coreid].rx; + rx_per_call = (float)(core_statistics[coreid].rx)/ + core_statistics[coreid].rx_call; + tx_per_call = (float)(core_statistics[coreid].tx)/ + core_statistics[coreid].tx_call; + printf("\nStatistics for core %u ------------------------------" + "\nPackets received: %20"PRIu64 + "\nPackets sent: %24"PRIu64 + "\nPackets dropped: %21"PRIu64 
+ "\nBurst percent: %23.2f" + "\nPackets per Rx call: %17.2f" + "\nPackets per Tx call: %17.2f", + coreid, + core_statistics[coreid].rx, + core_statistics[coreid].tx, + core_statistics[coreid].dropped, + burst_percent, + rx_per_call, + tx_per_call); + + total_packets_dropped += core_statistics[coreid].dropped; + total_packets_tx += core_statistics[coreid].tx; + total_packets_rx += core_statistics[coreid].rx; + } + printf("\nAggregate statistics ===============================" + "\nTotal packets received: %14"PRIu64 + "\nTotal packets sent: %18"PRIu64 + "\nTotal packets dropped: %15"PRIu64, + total_packets_rx, + total_packets_tx, + total_packets_dropped); + printf("\n====================================================\n"); + + rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL); +} +#endif /* STATS_INTERVAL */ + static inline void prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) { const struct rte_ether_hdr *eth; const struct rte_ipv4_hdr *iph4; const struct rte_ipv6_hdr *iph6; + const struct rte_udp_hdr *udp; + uint16_t ip4_hdr_len; + uint16_t nat_port; eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *); if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { @@ -303,9 +370,28 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) RTE_ETHER_HDR_LEN); adjust_ipv4_pktlen(pkt, iph4, 0); - if (iph4->next_proto_id == IPPROTO_ESP) + switch (iph4->next_proto_id) { + case IPPROTO_ESP: t->ipsec.pkts[(t->ipsec.num)++] = pkt; - else { + break; + case IPPROTO_UDP: + if (app_sa_prm.udp_encap == 1) { + ip4_hdr_len = ((iph4->version_ihl & + RTE_IPV4_HDR_IHL_MASK) * + RTE_IPV4_IHL_MULTIPLIER); + udp = rte_pktmbuf_mtod_offset(pkt, + struct rte_udp_hdr *, ip4_hdr_len); + nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT); + if (udp->src_port == nat_port || + udp->dst_port == nat_port){ + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + pkt->packet_type |= + MBUF_PTYPE_TUNNEL_ESP_IN_UDP; + break; + } + } + /* Fall through */ + default: t->ip4.data[t->ip4.num] = &iph4->next_proto_id; t->ip4.pkts[(t->ip4.num)++] = pkt; } @@ -334,13 +420,29 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) /* drop packet when IPv6 header exceeds first segment length */ if (unlikely(l3len > pkt->data_len)) { - rte_pktmbuf_free(pkt); + free_pkts(&pkt, 1); return; } - if (next_proto == IPPROTO_ESP) + switch (next_proto) { + case IPPROTO_ESP: t->ipsec.pkts[(t->ipsec.num)++] = pkt; - else { + break; + case IPPROTO_UDP: + if (app_sa_prm.udp_encap == 1) { + udp = rte_pktmbuf_mtod_offset(pkt, + struct rte_udp_hdr *, l3len); + nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT); + if (udp->src_port == nat_port || + udp->dst_port == nat_port){ + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + pkt->packet_type |= + MBUF_PTYPE_TUNNEL_ESP_IN_UDP; + break; + } + } + /* Fall through */ + default: t->ip6.data[t->ip6.num] = &iph6->proto; t->ip6.pkts[(t->ip6.num)++] = pkt; } @@ -351,7 +453,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) /* Unknown/Unsupported type, drop the packet */ RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n", rte_be_to_cpu_16(eth->ether_type)); - rte_pktmbuf_free(pkt); + free_pkts(&pkt, 1); return; } @@ -362,7 +464,8 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) * with the security session. 
*/ - if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) { + if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD && + rte_security_dynfield_is_registered()) { struct ipsec_sa *sa; struct ipsec_mbuf_metadata *priv; struct rte_security_ctx *ctx = (struct rte_security_ctx *) @@ -372,10 +475,8 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) /* Retrieve the userdata registered. Here, the userdata * registered is the SA pointer. */ - - sa = (struct ipsec_sa *) - rte_security_get_userdata(ctx, pkt->udata64); - + sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, + *rte_security_dynfield(pkt)); if (sa == NULL) { /* userdata could not be retrieved */ return; @@ -430,7 +531,7 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port, ip->ip_sum = 0; /* calculate IPv4 cksum in SW */ - if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0) + if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0) ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip); ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); @@ -442,9 +543,9 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port, ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); } - memcpy(ðhdr->s_addr, ðaddr_tbl[port].src, + memcpy(ðhdr->src_addr, ðaddr_tbl[port].src, sizeof(struct rte_ether_addr)); - memcpy(ðhdr->d_addr, ðaddr_tbl[port].dst, + memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst, sizeof(struct rte_ether_addr)); } @@ -478,9 +579,12 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) prepare_tx_burst(m_table, n, port, qconf); ret = rte_eth_tx_burst(port, queueid, m_table, n); + + core_stats_update_tx(ret); + if (unlikely(ret < n)) { do { - rte_pktmbuf_free(m_table[ret]); + free_pkts(&m_table[ret], 1); } while (++ret < n); } @@ -526,7 +630,7 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m, "error code: %d\n", __func__, m->pkt_len, rte_errno); - rte_pktmbuf_free(m); + free_pkts(&m, 1); return len; } @@ -551,7 +655,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto) } else if (frag_tbl_sz > 0) len = send_fragment_packet(qconf, m, port, proto); else - rte_pktmbuf_free(m); + free_pkts(&m, 1); /* enough pkts to be sent */ if (unlikely(len == MAX_PKT_BURST)) { @@ -585,19 +689,19 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip, continue; } if (res == DISCARD) { - rte_pktmbuf_free(m); + free_pkts(&m, 1); continue; } /* Only check SPI match for processed IPSec packets */ - if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) { - rte_pktmbuf_free(m); + if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) { + free_pkts(&m, 1); continue; } sa_idx = res - 1; if (!inbound_sa_check(sa, m, sa_idx)) { - rte_pktmbuf_free(m); + free_pkts(&m, 1); continue; } ip->pkts[j++] = m; @@ -632,7 +736,7 @@ split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num) offsetof(struct ip6_hdr, ip6_nxt)); n6++; } else - rte_pktmbuf_free(m); + free_pkts(&m, 1); } trf->ip4.num = n4; @@ -684,7 +788,7 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip, m = ip->pkts[i]; sa_idx = ip->res[i] - 1; if (ip->res[i] == DISCARD) - rte_pktmbuf_free(m); + free_pkts(&m, 1); else if (ip->res[i] == BYPASS) ip->pkts[j++] = m; else { @@ -703,8 +807,7 @@ process_pkts_outbound(struct ipsec_ctx *ipsec_ctx, uint16_t idx, nb_pkts_out, i; /* Drop any IPsec traffic from protected ports */ - for (i = 0; i < traffic->ipsec.num; i++) - rte_pktmbuf_free(traffic->ipsec.pkts[i]); + free_pkts(traffic->ipsec.pkts, traffic->ipsec.num); traffic->ipsec.num = 0; @@ -743,18 +846,6 @@ 
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *m; uint32_t nb_pkts_in, i, idx; - /* Drop any IPv4 traffic from unprotected ports */ - for (i = 0; i < traffic->ip4.num; i++) - rte_pktmbuf_free(traffic->ip4.pkts[i]); - - traffic->ip4.num = 0; - - /* Drop any IPv6 traffic from unprotected ports */ - for (i = 0; i < traffic->ip6.num; i++) - rte_pktmbuf_free(traffic->ip6.pkts[i]); - - traffic->ip6.num = 0; - if (app_sa_prm.enable == 0) { nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts, @@ -787,8 +878,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx, struct ip *ip; /* Drop any IPsec traffic from protected ports */ - for (i = 0; i < traffic->ipsec.num; i++) - rte_pktmbuf_free(traffic->ipsec.pkts[i]); + free_pkts(traffic->ipsec.pkts, traffic->ipsec.num); n = 0; @@ -876,7 +966,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) */ for (i = 0; i < nb_pkts; i++) { - if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) { + if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) { /* Security offload not enabled. So an LPM lookup is * required to get the hop */ @@ -893,7 +983,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) lpm_pkts = 0; for (i = 0; i < nb_pkts; i++) { - if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) { + if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { /* Read hop from the SA */ pkt_hop = get_hop_for_offload_pkt(pkts[i], 0); } else { @@ -902,7 +992,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) } if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) { - rte_pktmbuf_free(pkts[i]); + free_pkts(&pkts[i], 1); continue; } send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP); @@ -927,7 +1017,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) */ for (i = 0; i < nb_pkts; i++) { - if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) { + if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) { /* Security offload not enabled. 
So an LPM lookup is * required to get the hop */ @@ -945,7 +1035,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) lpm_pkts = 0; for (i = 0; i < nb_pkts; i++) { - if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) { + if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { /* Read hop from the SA */ pkt_hop = get_hop_for_offload_pkt(pkts[i], 1); } else { @@ -954,7 +1044,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) } if (pkt_hop == -1) { - rte_pktmbuf_free(pkts[i]); + free_pkts(&pkts[i], 1); continue; } send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6); @@ -1170,8 +1260,10 @@ ipsec_poll_mode_worker(void) nb_rx = rte_eth_rx_burst(portid, queueid, pkts, MAX_PKT_BURST); - if (nb_rx > 0) + if (nb_rx > 0) { + core_stats_update_rx(nb_rx); process_pkts(qconf, pkts, nb_rx, portid); + } /* dequeue and process completed crypto-ops */ if (is_unprotected_port(portid)) @@ -1184,6 +1276,28 @@ ipsec_poll_mode_worker(void) } } +int +check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) +{ + uint16_t i; + uint16_t portid; + uint8_t queueid; + + for (i = 0; i < nb_lcore_params; ++i) { + portid = lcore_params_array[i].port_id; + if (portid == fdir_portid) { + queueid = lcore_params_array[i].queue_id; + if (queueid == fdir_qid) + break; + } + + if (i == nb_lcore_params - 1) + return -1; + } + + return 1; +} + static int32_t check_poll_mode_params(struct eh_conf *eh_conf) { @@ -1280,6 +1394,7 @@ print_usage(const char *prgname) " [-e]" " [-a]" " [-c]" + " [-s NUMBER_OF_MBUFS_IN_PKT_POOL]" " -f CONFIG_FILE" " --config (port,queue,lcore)[,(port,queue,lcore)]" " [--single-sa SAIDX]" @@ -1303,6 +1418,9 @@ print_usage(const char *prgname) " -a enables SA SQN atomic behaviour\n" " -c specifies inbound SAD cache size,\n" " zero value disables the cache (default value: 128)\n" + " -s number of mbufs in packet pool, if not specified number\n" + " of mbufs will be calculated based on number of cores,\n" + " ports and crypto queues\n" " -f CONFIG_FILE: Configuration file\n" " --config (port,queue,lcore): Rx queue configuration. 
In poll\n" " mode determines which queues from\n" @@ -1326,10 +1444,10 @@ print_usage(const char *prgname) " \"parallel\" : Parallel\n" " --" CMD_LINE_OPT_RX_OFFLOAD ": bitmask of the RX HW offload capabilities to enable/use\n" - " (DEV_RX_OFFLOAD_*)\n" + " (RTE_ETH_RX_OFFLOAD_*)\n" " --" CMD_LINE_OPT_TX_OFFLOAD ": bitmask of the TX HW offload capabilities to enable/use\n" - " (DEV_TX_OFFLOAD_*)\n" + " (RTE_ETH_TX_OFFLOAD_*)\n" " --" CMD_LINE_OPT_REASSEMBLE " NUM" ": max number of entries in reassemble(fragment) table\n" " (zero (default value) disables reassembly)\n" @@ -1365,6 +1483,8 @@ parse_portmask(const char *portmask) char *end = NULL; unsigned long pm; + errno = 0; + /* parse hexadecimal string */ pm = strtoul(portmask, &end, 16); if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) @@ -1507,7 +1627,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf) argvopt = argv; - while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:", + while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:s:", lgopts, &option_index)) != EOF) { switch (opt) { @@ -1541,6 +1661,19 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf) cfgfile = optarg; f_present = 1; break; + + case 's': + ret = parse_decimal(optarg); + if (ret < 0) { + printf("Invalid number of buffers in a pool: " + "%s\n", optarg); + print_usage(prgname); + return -1; + } + + nb_bufs_in_pool = ret; + break; + case 'j': ret = parse_decimal(optarg); if (ret < RTE_MBUF_DEFAULT_BUF_SIZE || @@ -1738,6 +1871,7 @@ check_all_ports_link_status(uint32_t port_mask) uint8_t count, all_ports_up, print_flag = 0; struct rte_eth_link link; int ret; + char link_status_text[RTE_ETH_LINK_MAX_STR_LEN]; printf("\nChecking link status"); fflush(stdout); @@ -1757,18 +1891,14 @@ check_all_ports_link_status(uint32_t port_mask) } /* print link status if flag set */ if (print_flag == 1) { - if (link.link_status) - printf( - "Port%d Link Up - speed %u Mbps -%s\n", - portid, link.link_speed, - (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
- ("full-duplex") : ("half-duplex\n")); - else - printf("Port %d Link Down\n", portid); + rte_eth_link_to_str(link_status_text, + sizeof(link_status_text), &link); + printf("Port %d %s\n", portid, + link_status_text); continue; } /* clear all_ports_up flag if any link down */ - if (link.link_status == ETH_LINK_DOWN) { + if (link.link_status == RTE_ETH_LINK_DOWN) { all_ports_up = 0; break; } @@ -1913,12 +2043,12 @@ check_cryptodev_mask(uint8_t cdev_id) return -1; } -static int32_t -cryptodevs_init(void) +static uint16_t +cryptodevs_init(uint16_t req_queue_num) { struct rte_cryptodev_config dev_conf; struct rte_cryptodev_qp_conf qp_conf; - uint16_t idx, max_nb_qps, qp, i; + uint16_t idx, max_nb_qps, qp, total_nb_qps, i; int16_t cdev_id; struct rte_hash_parameters params = { 0 }; @@ -1946,6 +2076,7 @@ cryptodevs_init(void) printf("lcore/cryptodev/qp mappings:\n"); idx = 0; + total_nb_qps = 0; for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) { struct rte_cryptodev_info cdev_info; @@ -1976,18 +2107,21 @@ cryptodevs_init(void) i++; } + qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp)); if (qp == 0) continue; + total_nb_qps += qp; dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id); dev_conf.nb_queue_pairs = qp; dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO; uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions; - if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS) + if (dev_max_sess != 0 && + dev_max_sess < get_nb_crypto_sessions()) rte_exit(EXIT_FAILURE, "Device does not support at least %u " - "sessions", CDEV_MP_NB_OBJS); + "sessions", get_nb_crypto_sessions()); if (rte_cryptodev_configure(cdev_id, &dev_conf)) rte_panic("Failed to initialize cryptodev %u\n", @@ -2011,13 +2145,12 @@ cryptodevs_init(void) printf("\n"); - return 0; + return total_nb_qps; } static void port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) { - uint32_t frame_size; struct rte_eth_dev_info dev_info; struct rte_eth_txconf *txconf; uint16_t nb_tx_queue, nb_rx_queue; @@ -2065,14 +2198,11 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n", nb_rx_queue, nb_tx_queue); - frame_size = MTU_TO_FRAMELEN(mtu_size); - if (frame_size > local_port_conf.rxmode.max_rx_pkt_len) - local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; - local_port_conf.rxmode.max_rx_pkt_len = frame_size; + local_port_conf.rxmode.mtu = mtu_size; if (multi_seg_required()) { - local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER; - local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; + local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; } local_port_conf.rxmode.offloads |= req_rx_offloads; @@ -2095,12 +2225,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) portid, local_port_conf.txmode.offloads, dev_info.tx_offload_capa); - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) + if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) local_port_conf.txmode.offloads |= - DEV_TX_OFFLOAD_MBUF_FAST_FREE; + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) - local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; + if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) + local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; printf("port %u configurng rx_offloads=0x%" PRIx64 ", 
tx_offloads=0x%" PRIx64 "\n", @@ -2156,10 +2286,10 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) qconf->tx_queue_id[portid] = tx_queueid; /* Pre-populate pkt offloads based on capabilities */ - qconf->outbound.ipv4_offloads = PKT_TX_IPV4; - qconf->outbound.ipv6_offloads = PKT_TX_IPV6; - if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) - qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM; + qconf->outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4; + qconf->outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6; + if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) + qconf->outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM; tx_queueid++; @@ -2239,12 +2369,16 @@ session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz) { char mp_name[RTE_MEMPOOL_NAMESIZE]; struct rte_mempool *sess_mp; + uint32_t nb_sess; snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "sess_mp_%u", socket_id); + nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ * + rte_lcore_count()); + nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ * + CDEV_MP_CACHE_MULTIPLIER); sess_mp = rte_cryptodev_sym_session_pool_create( - mp_name, CDEV_MP_NB_OBJS, - sess_sz, CDEV_MP_CACHE_SZ, 0, + mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0, socket_id); ctx->session_pool = sess_mp; @@ -2261,11 +2395,16 @@ session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id, { char mp_name[RTE_MEMPOOL_NAMESIZE]; struct rte_mempool *sess_mp; + uint32_t nb_sess; snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "sess_mp_priv_%u", socket_id); + nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ * + rte_lcore_count()); + nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ * + CDEV_MP_CACHE_MULTIPLIER); sess_mp = rte_mempool_create(mp_name, - CDEV_MP_NB_OBJS, + nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0, NULL, NULL, NULL, @@ -2510,7 +2649,7 @@ create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads) struct rte_flow *flow; int ret; - if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY)) + if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)) return; /* Add the default rte_flow to enable SECURITY for all ESP packets */ @@ -2665,20 +2804,36 @@ inline_sessions_free(struct sa_ctx *sa_ctx) } } +static uint32_t +calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq, + uint32_t nb_txq) +{ + return RTE_MAX((nb_rxq * nb_rxd + + nb_ports * nb_lcores * MAX_PKT_BURST + + nb_ports * nb_txq * nb_txd + + nb_lcores * MEMPOOL_CACHE_SIZE + + nb_crypto_qp * CDEV_QUEUE_DESC + + nb_lcores * frag_tbl_sz * + FRAG_TBL_BUCKET_ENTRIES), + 8192U); +} + int32_t main(int32_t argc, char **argv) { int32_t ret; - uint32_t lcore_id; + uint32_t lcore_id, nb_txq, nb_rxq = 0; uint32_t cdev_id; uint32_t i; uint8_t socket_id; - uint16_t portid; + uint16_t portid, nb_crypto_qp, nb_ports = 0; uint64_t req_rx_offloads[RTE_MAX_ETHPORTS]; uint64_t req_tx_offloads[RTE_MAX_ETHPORTS]; struct eh_conf *eh_conf = NULL; size_t sess_sz; + nb_bufs_in_pool = 0; + /* init EAL */ ret = rte_eal_init(argc, argv); if (ret < 0) @@ -2727,6 +2882,31 @@ main(int32_t argc, char **argv) sess_sz = max_session_size(); + /* + * In event mode request minimum number of crypto queues + * to be reserved equal to number of ports. 
+ */ + if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT) + nb_crypto_qp = rte_eth_dev_count_avail(); + else + nb_crypto_qp = 0; + + nb_crypto_qp = cryptodevs_init(nb_crypto_qp); + + if (nb_bufs_in_pool == 0) { + RTE_ETH_FOREACH_DEV(portid) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + nb_ports++; + nb_rxq += get_port_nb_rx_queues(portid); + } + + nb_txq = nb_lcores; + + nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp, + nb_rxq, nb_txq); + } + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { if (rte_lcore_is_enabled(lcore_id) == 0) continue; @@ -2740,11 +2920,12 @@ main(int32_t argc, char **argv) if (socket_ctx[socket_id].mbuf_pool) continue; - pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF); + pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool); session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); session_priv_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); } + printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool); RTE_ETH_FOREACH_DEV(portid) { if ((enabled_port_mask & (1 << portid)) == 0) @@ -2756,8 +2937,6 @@ main(int32_t argc, char **argv) req_tx_offloads[portid]); } - cryptodevs_init(); - /* * Set the enabled port mask in helper config for use by helper * sub-system. This will be used while initializing devices using @@ -2820,11 +2999,19 @@ main(int32_t argc, char **argv) } } + flow_init(); + check_all_ports_link_status(enabled_port_mask); +#if (STATS_INTERVAL > 0) + rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL); +#else + RTE_LOG(INFO, IPSEC, "Stats display disabled\n"); +#endif /* STATS_INTERVAL */ + /* launch per-lcore init on every lcore */ - rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER); - RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN); + RTE_LCORE_FOREACH_WORKER(lcore_id) { if (rte_eal_wait_lcore(lcore_id) < 0) return -1; } @@ -2866,10 +3053,18 @@ main(int32_t argc, char **argv) " for port %u, err msg: %s\n", portid, err.message); } - rte_eth_dev_stop(portid); + ret = rte_eth_dev_stop(portid); + if (ret != 0) + RTE_LOG(ERR, IPSEC, + "rte_eth_dev_stop: err=%s, port=%u\n", + rte_strerror(-ret), portid); + rte_eth_dev_close(portid); printf(" Done\n"); } + + /* clean up the EAL */ + rte_eal_cleanup(); printf("Bye...\n"); return 0;