}
}
+/*
+ * Verify that the flow-director (port, queue) pair is actually serviced
+ * by some lcore in the poll-mode configuration.
+ *
+ * @param fdir_portid  port id the fdir rule steers traffic to
+ * @param fdir_qid     rx queue id the fdir rule steers traffic to
+ *
+ * @return 1 if an lcore_params entry matches both port and queue,
+ *         -1 otherwise (including an empty lcore_params table, where
+ *         no queue can possibly receive the redirected traffic).
+ */
+int
+check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
+{
+	uint16_t i;
+
+	for (i = 0; i < nb_lcore_params; ++i) {
+		if (lcore_params_array[i].port_id == fdir_portid &&
+		    lcore_params_array[i].queue_id == fdir_qid)
+			return 1;
+	}
+
+	/*
+	 * No lcore polls this (port, queue) pair. Note: the previous
+	 * last-iteration check silently reported success when
+	 * nb_lcore_params was 0; an unserviced queue is always an error.
+	 */
+	return -1;
+}
+
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
}
static uint16_t
-cryptodevs_init(void)
+cryptodevs_init(uint16_t req_queue_num)
{
struct rte_cryptodev_config dev_conf;
struct rte_cryptodev_qp_conf qp_conf;
i++;
}
+ qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
if (qp == 0)
continue;
sess_sz = max_session_size();
- nb_crypto_qp = cryptodevs_init();
+ /*
+ * In event mode request minimum number of crypto queues
+ * to be reserved equal to number of ports.
+ */
+ if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
+ nb_crypto_qp = rte_eth_dev_count_avail();
+ else
+ nb_crypto_qp = 0;
+
+ nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
if (nb_bufs_in_pool == 0) {
RTE_ETH_FOREACH_DEV(portid) {