#define OPTION_CONFIG "config"
#define OPTION_SINGLE_SA "single-sa"
+#define OPTION_CRYPTODEV_MASK "cryptodev_mask"
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/*
* Configurable number of RX/TX ring descriptors
*/
-#define IPSEC_SECGW_RX_DESC_DEFAULT 128
-#define IPSEC_SECGW_TX_DESC_DEFAULT 512
+#define IPSEC_SECGW_RX_DESC_DEFAULT 1024
+#define IPSEC_SECGW_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
/* mask of enabled ports */
static uint32_t enabled_port_mask;
+static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
}
sa_idx = ip->res[i] & PROTECT_MASK;
- if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
+ if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
+ !inbound_sa_check(sa, m, sa_idx)) {
rte_pktmbuf_free(m);
continue;
}
for (i = 0; i < ip->num; i++) {
m = ip->pkts[i];
sa_idx = ip->res[i] & PROTECT_MASK;
- if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
+ if (ip->res[i] & DISCARD)
rte_pktmbuf_free(m);
- else if (sa_idx != 0) {
+ else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
ipsec->res[ipsec->num] = sa_idx;
ipsec->pkts[ipsec->num++] = m;
} else /* BYPASS */
"rx queues configuration\n"
" --single-sa SAIDX: use single SA index for outbound, "
"bypassing the SP\n"
+ " --cryptodev_mask MASK: hexadecimal bitmask of the "
+ "crypto devices to configure\n"
" -f CONFIG_FILE: Configuration file path\n",
prgname);
}
}
}
+ if (__STRNCMP(optname, OPTION_CRYPTODEV_MASK)) {
+ ret = parse_portmask(optarg);
+ if (ret != -1) {
+ enabled_cryptodev_mask = ret;
+ ret = 0;
+ }
+ }
+
return ret;
}
#undef __STRNCMP
static struct option lgopts[] = {
{OPTION_CONFIG, 1, 0, 0},
{OPTION_SINGLE_SA, 1, 0, 0},
+ {OPTION_CRYPTODEV_MASK, 1, 0, 0},
{NULL, 0, 0, 0}
};
int32_t f_present = 0;
return ret;
}
+/* Check if the device is enabled by cryptodev_mask.
+ * Returns 0 if the crypto device's bit is set in the mask, -1 otherwise.
+ */
+static int
+check_cryptodev_mask(uint8_t cdev_id)
+{
+	/* The mask is uint64_t: shift a 64-bit one, not an int one.
+	 * "1 << cdev_id" is an int shift — undefined behavior for
+	 * cdev_id >= 31 and it can never test mask bits 32-63.
+	 * Ids beyond the mask width are treated as disabled.
+	 */
+	if (cdev_id < 64 &&
+			(enabled_cryptodev_mask & (UINT64_C(1) << cdev_id)))
+		return 0;
+
+	return -1;
+}
+
static int32_t
cryptodevs_init(void)
{
struct rte_cryptodev_config dev_conf;
struct rte_cryptodev_qp_conf qp_conf;
uint16_t idx, max_nb_qps, qp, i;
- int16_t cdev_id;
+ int16_t cdev_id, port_id;
struct rte_hash_parameters params = { 0 };
params.entries = CDEV_MAP_ENTRIES;
if (sess_sz > max_sess_sz)
max_sess_sz = sess_sz;
}
+ for (port_id = 0; port_id < rte_eth_dev_count(); port_id++) {
+ void *sec_ctx;
+
+ if ((enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ sess_sz = rte_security_session_get_size(sec_ctx);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+ }
idx = 0;
- /* Start from last cdev id to give HW priority */
- for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
struct rte_cryptodev_info cdev_info;
+ if (check_cryptodev_mask((uint8_t)cdev_id))
+ continue;
+
rte_cryptodev_info_get(cdev_id, &cdev_info);
if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
cdev_id);
}
+ /* create session pools for eth devices that implement security */
+ for (port_id = 0; port_id < rte_eth_dev_count(); port_id++) {
+ if ((enabled_port_mask & (1 << port_id)) &&
+ rte_eth_dev_get_sec_ctx(port_id)) {
+ int socket_id = rte_eth_dev_socket_id(port_id);
+
+ if (!socket_ctx[socket_id].session_pool) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ max_sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+ if (sess_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create session pool "
+ "on socket %d\n", socket_id);
+ else
+ printf("Allocated session pool "
+ "on socket %d\n", socket_id);
+ socket_ctx[socket_id].session_pool = sess_mp;
+ }
+ }
+ }
+
+
printf("\n");
return 0;