#include "event_helper.h"
#include "ipsec.h"
+#include "ipsec_worker.h"
#include "parser.h"
#include "sad.h"
volatile bool force_quit;
-#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
-
#define MAX_JUMBO_PKT_LEN 9600
#define MEMPOOL_CACHE_SIZE 256
-#define NB_MBUF (32000)
-
#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 16384
#define CDEV_MP_NB_OBJS 1024
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-#define NB_SOCKETS 4
-
/* Configure how many packets ahead to prefetch when reading packets */
#define PREFETCH_OFFSET 3
#define MAX_LCORE_PARAMS 1024
-#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid))
-
/*
* Configurable number of RX/TX ring descriptors
*/
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
-#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
- (((uint64_t)((a) & 0xff) << 56) | \
- ((uint64_t)((b) & 0xff) << 48) | \
- ((uint64_t)((c) & 0xff) << 40) | \
- ((uint64_t)((d) & 0xff) << 32) | \
- ((uint64_t)((e) & 0xff) << 24) | \
- ((uint64_t)((f) & 0xff) << 16) | \
- ((uint64_t)((g) & 0xff) << 8) | \
- ((uint64_t)(h) & 0xff))
-#else
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
- (((uint64_t)((h) & 0xff) << 56) | \
- ((uint64_t)((g) & 0xff) << 48) | \
- ((uint64_t)((f) & 0xff) << 40) | \
- ((uint64_t)((e) & 0xff) << 32) | \
- ((uint64_t)((d) & 0xff) << 24) | \
- ((uint64_t)((c) & 0xff) << 16) | \
- ((uint64_t)((b) & 0xff) << 8) | \
- ((uint64_t)(a) & 0xff))
-#endif
-#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
-
#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
(addr)->addr_bytes[4], (addr)->addr_bytes[5], \
0, 0)
#define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
-/* port/source ethernet addr and destination ethernet addr */
-struct ethaddr_info {
- uint64_t src, dst;
-};
-
struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
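+/* mask of unprotected ports and single SA index, shared with worker code */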
+uint32_t unprotected_port_mask;
+uint32_t single_sa_idx;
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint64_t enabled_cryptodev_mask = UINT64_MAX;
-static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
-static uint32_t single_sa_idx;
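+/* number of mbufs in packet pool, 0 means it is computed at init time */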
+static uint32_t nb_bufs_in_pool;
/*
* RX/TX HW offload capabilities to enable/use on ethernet ports.
},
};
-static struct socket_ctx socket_ctx[NB_SOCKETS];
+struct socket_ctx socket_ctx[NB_SOCKETS];
/*
 * Determine if multi-segment support is required:
prepare_traffic(pkts, &traffic, nb_pkts);
if (unlikely(single_sa)) {
- if (UNPROTECTED_PORT(portid))
+ if (is_unprotected_port(portid))
process_pkts_inbound_nosp(&qconf->inbound, &traffic);
else
process_pkts_outbound_nosp(&qconf->outbound, &traffic);
} else {
- if (UNPROTECTED_PORT(portid))
+ if (is_unprotected_port(portid))
process_pkts_inbound(&qconf->inbound, &traffic);
else
process_pkts_outbound(&qconf->outbound, &traffic);
}
/* main processing loop */
-static int32_t
-main_loop(__attribute__((unused)) void *dummy)
+void
+ipsec_poll_mode_worker(void)
{
struct rte_mbuf *pkts[MAX_PKT_BURST];
uint32_t lcore_id;
RTE_LOG(ERR, IPSEC,
"SAD cache init on lcore %u, failed with code: %d\n",
lcore_id, rc);
- return rc;
+ return;
}
if (qconf->nb_rx_queue == 0) {
RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
lcore_id);
- return 0;
+ return;
}
RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
lcore_id, portid, queueid);
}
- while (1) {
+ while (!force_quit) {
cur_tsc = rte_rdtsc();
/* TX queue buffer drain */
process_pkts(qconf, pkts, nb_rx, portid);
/* dequeue and process completed crypto-ops */
- if (UNPROTECTED_PORT(portid))
+ if (is_unprotected_port(portid))
drain_inbound_crypto_queues(qconf,
&qconf->inbound);
else
}
}
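+/*
+ * Verify that the flow director port/queue pair matches one of the
+ * RX queues assigned to an lcore; return 1 if found, -1 otherwise.
+ */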
+int
+check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
+{
+ uint16_t i;
+ uint16_t portid;
+ uint8_t queueid;
+
+ for (i = 0; i < nb_lcore_params; ++i) {
+ portid = lcore_params_array[i].port_id;
+ if (portid == fdir_portid) {
+ queueid = lcore_params_array[i].queue_id;
+ if (queueid == fdir_qid)
+ break;
+ }
+
+ if (i == nb_lcore_params - 1)
+ return -1;
+ }
+
+ return 1;
+}
+
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
" [-e]"
" [-a]"
" [-c]"
+ " [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
" -f CONFIG_FILE"
" --config (port,queue,lcore)[,(port,queue,lcore)]"
" [--single-sa SAIDX]"
" -a enables SA SQN atomic behaviour\n"
" -c specifies inbound SAD cache size,\n"
" zero value disables the cache (default value: 128)\n"
+ " -s number of mbufs in packet pool, if not specified number\n"
+ " of mbufs will be calculated based on number of cores,\n"
+ " ports and crypto queues\n"
" -f CONFIG_FILE: Configuration file\n"
" --config (port,queue,lcore): Rx queue configuration. In poll\n"
" mode determines which queues from\n"
" In event mode this option is not used\n"
" as packets are dynamically scheduled\n"
" to cores by HW.\n"
- " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
- " bypassing the SP\n"
+ " --single-sa SAIDX: In poll mode use single SA index for\n"
+ " outbound traffic, bypassing the SP\n"
+ " In event mode selects driver submode,\n"
+ " SA index value is ignored\n"
" --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
" devices to configure\n"
" --transfer-mode MODE\n"
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:",
+ while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:s:",
lgopts, &option_index)) != EOF) {
switch (opt) {
cfgfile = optarg;
f_present = 1;
break;
+
+ case 's':
+ ret = parse_decimal(optarg);
+ if (ret < 0) {
+ printf("Invalid number of buffers in a pool: "
+ "%s\n", optarg);
+ print_usage(prgname);
+ return -1;
+ }
+
+ nb_bufs_in_pool = ret;
+ break;
+
case 'j':
ret = parse_decimal(optarg);
if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
return -1;
}
-static int32_t
-cryptodevs_init(void)
+static uint16_t
+cryptodevs_init(uint16_t req_queue_num)
{
struct rte_cryptodev_config dev_conf;
struct rte_cryptodev_qp_conf qp_conf;
- uint16_t idx, max_nb_qps, qp, i;
+ uint16_t idx, max_nb_qps, qp, total_nb_qps, i;
int16_t cdev_id;
struct rte_hash_parameters params = { 0 };
printf("lcore/cryptodev/qp mappings:\n");
idx = 0;
+ total_nb_qps = 0;
for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
struct rte_cryptodev_info cdev_info;
i++;
}
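+ /* reserve at least req_queue_num qps per device, capped at its maximum */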
+ qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
if (qp == 0)
continue;
+ total_nb_qps += qp;
dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
dev_conf.nb_queue_pairs = qp;
dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
printf("\n");
- return 0;
+ return total_nb_qps;
}
static void
}
}
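+/*
+ * Estimate the mempool size: all RX descriptors, one in-flight TX burst
+ * per port per lcore, all TX descriptors, per-lcore mempool caches,
+ * crypto queue descriptors and fragmentation table entries, with a
+ * lower bound of 8192 mbufs.
+ */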
+static uint32_t
+calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
+ uint32_t nb_txq)
+{
+ return RTE_MAX((nb_rxq * nb_rxd +
+ nb_ports * nb_lcores * MAX_PKT_BURST +
+ nb_ports * nb_txq * nb_txd +
+ nb_lcores * MEMPOOL_CACHE_SIZE +
+ nb_crypto_qp * CDEV_QUEUE_DESC +
+ nb_lcores * frag_tbl_sz *
+ FRAG_TBL_BUCKET_ENTRIES),
+ 8192U);
+}
+
int32_t
main(int32_t argc, char **argv)
{
int32_t ret;
- uint32_t lcore_id;
+ uint32_t lcore_id, nb_txq, nb_rxq = 0;
uint32_t cdev_id;
uint32_t i;
uint8_t socket_id;
- uint16_t portid;
+ uint16_t portid, nb_crypto_qp, nb_ports = 0;
uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
struct eh_conf *eh_conf = NULL;
size_t sess_sz;
+ nb_bufs_in_pool = 0;
+
/* init EAL */
ret = rte_eal_init(argc, argv);
if (ret < 0)
sess_sz = max_session_size();
+ /*
+ * In event mode, request that at least as many crypto queues be
+ * reserved as there are available ethernet ports.
+ */
+ if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
+ nb_crypto_qp = rte_eth_dev_count_avail();
+ else
+ nb_crypto_qp = 0;
+
+ nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
+
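+ /* if -s was not used, estimate a sufficient number of mbufs */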
+ if (nb_bufs_in_pool == 0) {
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ nb_ports++;
+ nb_rxq += get_port_nb_rx_queues(portid);
+ }
+
+ nb_txq = nb_lcores;
+
+ nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp,
+ nb_rxq, nb_txq);
+ }
+
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
if (socket_ctx[socket_id].mbuf_pool)
continue;
- pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
+ pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool);
session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
session_priv_pool_init(&socket_ctx[socket_id], socket_id,
sess_sz);
}
+ printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool);
RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
req_tx_offloads[portid]);
}
- cryptodevs_init();
-
/*
* Set the enabled port mask in helper config for use by helper
* sub-system. This will be used while initializing devices using
check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+ rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;