#include <rte_eventdev.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
+#include <rte_alarm.h>
#include "event_helper.h"
#include "ipsec.h"
#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 16384
-#define CDEV_MP_NB_OBJS 1024
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1
}
}
+#if (STATS_INTERVAL > 0)
+
+/*
+ * Print out per-core and aggregate Rx/Tx/drop statistics, then re-arm
+ * the EAL alarm so the report repeats every STATS_INTERVAL seconds.
+ * Executed in the EAL alarm (interrupt) thread context.
+ */
+static void
+print_stats_cb(__rte_unused void *param)
+{
+	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+	float burst_percent, rx_per_call, tx_per_call;
+	unsigned int coreid;
+
+	total_packets_dropped = 0;
+	total_packets_tx = 0;
+	total_packets_rx = 0;
+
+	/* ANSI escape sequences: clear screen, move cursor to top left */
+	const char clr[] = { 27, '[', '2', 'J', '\0' };
+	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+
+	/* Clear screen and move to top left */
+	printf("%s%s", clr, topLeft);
+
+	printf("\nCore statistics ====================================");
+
+	for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
+		/* skip disabled cores */
+		if (rte_lcore_is_enabled(coreid) == 0)
+			continue;
+		/*
+		 * Guard the divisions: on an idle core rx/rx_call/tx_call
+		 * are 0 and the unguarded 0/0 would print nan/inf.
+		 */
+		burst_percent = core_statistics[coreid].rx ?
+			(float)(core_statistics[coreid].burst_rx * 100)/
+			core_statistics[coreid].rx : 0;
+		rx_per_call = core_statistics[coreid].rx_call ?
+			(float)(core_statistics[coreid].rx)/
+			core_statistics[coreid].rx_call : 0;
+		tx_per_call = core_statistics[coreid].tx_call ?
+			(float)(core_statistics[coreid].tx)/
+			core_statistics[coreid].tx_call : 0;
+		printf("\nStatistics for core %u ------------------------------"
+			   "\nPackets received: %20"PRIu64
+			   "\nPackets sent: %24"PRIu64
+			   "\nPackets dropped: %21"PRIu64
+			   "\nBurst percent: %23.2f"
+			   "\nPackets per Rx call: %17.2f"
+			   "\nPackets per Tx call: %17.2f",
+			   coreid,
+			   core_statistics[coreid].rx,
+			   core_statistics[coreid].tx,
+			   core_statistics[coreid].dropped,
+			   burst_percent,
+			   rx_per_call,
+			   tx_per_call);
+
+		total_packets_dropped += core_statistics[coreid].dropped;
+		total_packets_tx += core_statistics[coreid].tx;
+		total_packets_rx += core_statistics[coreid].rx;
+	}
+	printf("\nAggregate statistics ==============================="
+		   "\nTotal packets received: %14"PRIu64
+		   "\nTotal packets sent: %18"PRIu64
+		   "\nTotal packets dropped: %15"PRIu64,
+		   total_packets_rx,
+		   total_packets_tx,
+		   total_packets_dropped);
+	printf("\n====================================================\n");
+
+	/* Re-arm the periodic alarm so stats keep refreshing. */
+	rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL);
+}
+#endif /* STATS_INTERVAL */
+
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
/* drop packet when IPv6 header exceeds first segment length */
if (unlikely(l3len > pkt->data_len)) {
- rte_pktmbuf_free(pkt);
+ free_pkts(&pkt, 1);
return;
}
/* Unknown/Unsupported type, drop the packet */
RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
rte_be_to_cpu_16(eth->ether_type));
- rte_pktmbuf_free(pkt);
+ free_pkts(&pkt, 1);
return;
}
prepare_tx_burst(m_table, n, port, qconf);
ret = rte_eth_tx_burst(port, queueid, m_table, n);
+
+ core_stats_update_tx(ret);
+
if (unlikely(ret < n)) {
do {
- rte_pktmbuf_free(m_table[ret]);
+ free_pkts(&m_table[ret], 1);
} while (++ret < n);
}
"error code: %d\n",
__func__, m->pkt_len, rte_errno);
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
return len;
}
} else if (frag_tbl_sz > 0)
len = send_fragment_packet(qconf, m, port, proto);
else
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
/* enough pkts to be sent */
if (unlikely(len == MAX_PKT_BURST)) {
continue;
}
if (res == DISCARD) {
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
continue;
}
/* Only check SPI match for processed IPSec packets */
if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
continue;
}
sa_idx = res - 1;
if (!inbound_sa_check(sa, m, sa_idx)) {
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
continue;
}
ip->pkts[j++] = m;
offsetof(struct ip6_hdr, ip6_nxt));
n6++;
} else
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
}
trf->ip4.num = n4;
m = ip->pkts[i];
sa_idx = ip->res[i] - 1;
if (ip->res[i] == DISCARD)
- rte_pktmbuf_free(m);
+ free_pkts(&m, 1);
else if (ip->res[i] == BYPASS)
ip->pkts[j++] = m;
else {
uint16_t idx, nb_pkts_out, i;
/* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec.num; i++)
- rte_pktmbuf_free(traffic->ipsec.pkts[i]);
+ free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
traffic->ipsec.num = 0;
uint32_t nb_pkts_in, i, idx;
/* Drop any IPv4 traffic from unprotected ports */
- for (i = 0; i < traffic->ip4.num; i++)
- rte_pktmbuf_free(traffic->ip4.pkts[i]);
+ free_pkts(traffic->ip4.pkts, traffic->ip4.num);
traffic->ip4.num = 0;
/* Drop any IPv6 traffic from unprotected ports */
- for (i = 0; i < traffic->ip6.num; i++)
- rte_pktmbuf_free(traffic->ip6.pkts[i]);
+ free_pkts(traffic->ip6.pkts, traffic->ip6.num);
traffic->ip6.num = 0;
struct ip *ip;
/* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec.num; i++)
- rte_pktmbuf_free(traffic->ipsec.pkts[i]);
+ free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
n = 0;
}
if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
- rte_pktmbuf_free(pkts[i]);
+ free_pkts(&pkts[i], 1);
continue;
}
send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
}
if (pkt_hop == -1) {
- rte_pktmbuf_free(pkts[i]);
+ free_pkts(&pkts[i], 1);
continue;
}
send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
nb_rx = rte_eth_rx_burst(portid, queueid,
pkts, MAX_PKT_BURST);
- if (nb_rx > 0)
+ if (nb_rx > 0) {
+ core_stats_update_rx(nb_rx);
process_pkts(qconf, pkts, nb_rx, portid);
+ }
/* dequeue and process completed crypto-ops */
if (is_unprotected_port(portid))
}
}
+/*
+ * Verify that the (port, queue) pair requested for the flow director
+ * rule is actually serviced by one of the configured lcore mappings.
+ * Returns 1 when a matching mapping exists, -1 when none is found.
+ */
+int
+check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
+{
+	uint16_t i;
+
+	for (i = 0; i < nb_lcore_params; ++i) {
+		if (lcore_params_array[i].port_id == fdir_portid &&
+		    lcore_params_array[i].queue_id == fdir_qid)
+			return 1;
+	}
+
+	/*
+	 * NOTE(review): as in the original, an empty lcore-params table
+	 * passes the check (returns 1) — confirm this is intended.
+	 */
+	return (nb_lcore_params == 0) ? 1 : -1;
+}
+
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
"Port%d Link Up - speed %u Mbps -%s\n",
portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex\n"));
+ ("full-duplex") : ("half-duplex"));
else
printf("Port %d Link Down\n", portid);
continue;
}
static uint16_t
-cryptodevs_init(void)
+cryptodevs_init(uint16_t req_queue_num)
{
struct rte_cryptodev_config dev_conf;
struct rte_cryptodev_qp_conf qp_conf;
i++;
}
+ qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
if (qp == 0)
continue;
dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
- if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
+ if (dev_max_sess != 0 &&
+ dev_max_sess < get_nb_crypto_sessions())
rte_exit(EXIT_FAILURE,
"Device does not support at least %u "
- "sessions", CDEV_MP_NB_OBJS);
+ "sessions", get_nb_crypto_sessions());
if (rte_cryptodev_configure(cdev_id, &dev_conf))
rte_panic("Failed to initialize cryptodev %u\n",
{
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
+ uint32_t nb_sess;
snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"sess_mp_%u", socket_id);
+	/*
+	 * Doubled because rte_security_session_create() draws two objects
+	 * from this mempool: one for the session itself and one for the
+	 * session private data.
+	 */
+ nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
+ rte_lcore_count()) * 2;
sess_mp = rte_cryptodev_sym_session_pool_create(
- mp_name, CDEV_MP_NB_OBJS,
- sess_sz, CDEV_MP_CACHE_SZ, 0,
+ mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
socket_id);
ctx->session_pool = sess_mp;
{
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
+ uint32_t nb_sess;
snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"sess_mp_priv_%u", socket_id);
+	/*
+	 * Doubled because rte_security_session_create() draws two objects
+	 * from this mempool: one for the session itself and one for the
+	 * session private data.
+	 */
+ nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
+ rte_lcore_count()) * 2;
sess_mp = rte_mempool_create(mp_name,
- CDEV_MP_NB_OBJS,
+ nb_sess,
sess_sz,
CDEV_MP_CACHE_SZ,
0, NULL, NULL, NULL,
sess_sz = max_session_size();
- nb_crypto_qp = cryptodevs_init();
+ /*
+ * In event mode request minimum number of crypto queues
+ * to be reserved equal to number of ports.
+ */
+ if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
+ nb_crypto_qp = rte_eth_dev_count_avail();
+ else
+ nb_crypto_qp = 0;
+
+ nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
if (nb_bufs_in_pool == 0) {
RTE_ETH_FOREACH_DEV(portid) {
check_all_ports_link_status(enabled_port_mask);
+#if (STATS_INTERVAL > 0)
+ rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL);
+#else
+ RTE_LOG(INFO, IPSEC, "Stats display disabled\n");
+#endif /* STATS_INTERVAL */
+
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);
RTE_LCORE_FOREACH_SLAVE(lcore_id) {