examples/ipsec-secgw: reserve crypto queues in event mode
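In event mode, request that at least as many crypto queue pairs be configured
as there are available ethernet ports. cryptodevs_init() now takes the
requested queue count and returns the total number of queue pairs it actually
set up across all crypto devices. That total, together with the number of
ports, Rx/Tx queues and lcores, replaces the fixed NB_MBUF (32000) when sizing
the packet mbuf pool; the new -s option can still set the pool size explicitly.
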
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 1eb7667..5fde4f7 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
 
 #include "event_helper.h"
 #include "ipsec.h"
+#include "ipsec_worker.h"
 #include "parser.h"
 #include "sad.h"
 
 volatile bool force_quit;
 
-#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
-
 #define MAX_JUMBO_PKT_LEN  9600
 
 #define MEMPOOL_CACHE_SIZE 256
 
-#define NB_MBUF        (32000)
-
 #define CDEV_QUEUE_DESC 2048
 #define CDEV_MAP_ENTRIES 16384
 #define CDEV_MP_NB_OBJS 1024
@@ -86,29 +83,6 @@ volatile bool force_quit;
 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 
-#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
-       (((uint64_t)((a) & 0xff) << 56) | \
-       ((uint64_t)((b) & 0xff) << 48) | \
-       ((uint64_t)((c) & 0xff) << 40) | \
-       ((uint64_t)((d) & 0xff) << 32) | \
-       ((uint64_t)((e) & 0xff) << 24) | \
-       ((uint64_t)((f) & 0xff) << 16) | \
-       ((uint64_t)((g) & 0xff) << 8)  | \
-       ((uint64_t)(h) & 0xff))
-#else
-#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
-       (((uint64_t)((h) & 0xff) << 56) | \
-       ((uint64_t)((g) & 0xff) << 48) | \
-       ((uint64_t)((f) & 0xff) << 40) | \
-       ((uint64_t)((e) & 0xff) << 32) | \
-       ((uint64_t)((d) & 0xff) << 24) | \
-       ((uint64_t)((c) & 0xff) << 16) | \
-       ((uint64_t)((b) & 0xff) << 8) | \
-       ((uint64_t)(a) & 0xff))
-#endif
-#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
-
 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
                (addr)->addr_bytes[0], (addr)->addr_bytes[1], \
                (addr)->addr_bytes[2], (addr)->addr_bytes[3], \
@@ -120,11 +94,6 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 
 #define MTU_TO_FRAMELEN(x)     ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
 
-/* port/source ethernet addr and destination ethernet addr */
-struct ethaddr_info {
-       uint64_t src, dst;
-};
-
 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
        { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
        { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
@@ -193,6 +162,7 @@ static int32_t promiscuous_on = 1;
 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
 static uint32_t nb_lcores;
 static uint32_t single_sa;
+static uint32_t nb_bufs_in_pool;
 
 /*
  * RX/TX HW offload capabilities to enable/use on ethernet ports.
@@ -1309,6 +1279,7 @@ print_usage(const char *prgname)
                " [-e]"
                " [-a]"
                " [-c]"
+               " [-s NUMBER_OF_MBUFS_IN_PKT_POOL]"
                " -f CONFIG_FILE"
                " --config (port,queue,lcore)[,(port,queue,lcore)]"
                " [--single-sa SAIDX]"
@@ -1332,6 +1303,9 @@ print_usage(const char *prgname)
                "  -a enables SA SQN atomic behaviour\n"
                "  -c specifies inbound SAD cache size,\n"
                "     zero value disables the cache (default value: 128)\n"
+               "  -s number of mbufs in packet pool, if not specified number\n"
+               "     of mbufs will be calculated based on number of cores,\n"
+               "     ports and crypto queues\n"
                "  -f CONFIG_FILE: Configuration file\n"
                "  --config (port,queue,lcore): Rx queue configuration. In poll\n"
                "                               mode determines which queues from\n"
@@ -1536,7 +1510,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
 
        argvopt = argv;
 
-       while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:",
+       while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:s:",
                                lgopts, &option_index)) != EOF) {
 
                switch (opt) {
@@ -1570,6 +1544,19 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
                        cfgfile = optarg;
                        f_present = 1;
                        break;
+
+               case 's':
+                       ret = parse_decimal(optarg);
+                       if (ret < 0) {
+                               printf("Invalid number of buffers in a pool: "
+                                       "%s\n", optarg);
+                               print_usage(prgname);
+                               return -1;
+                       }
+
+                       nb_bufs_in_pool = ret;
+                       break;
+
                case 'j':
                        ret = parse_decimal(optarg);
                        if (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||
@@ -1942,12 +1929,12 @@ check_cryptodev_mask(uint8_t cdev_id)
        return -1;
 }
 
-static int32_t
-cryptodevs_init(void)
+static uint16_t
+cryptodevs_init(uint16_t req_queue_num)
 {
        struct rte_cryptodev_config dev_conf;
        struct rte_cryptodev_qp_conf qp_conf;
-       uint16_t idx, max_nb_qps, qp, i;
+       uint16_t idx, max_nb_qps, qp, total_nb_qps, i;
        int16_t cdev_id;
        struct rte_hash_parameters params = { 0 };
 
@@ -1975,6 +1962,7 @@ cryptodevs_init(void)
        printf("lcore/cryptodev/qp mappings:\n");
 
        idx = 0;
+       total_nb_qps = 0;
        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
                struct rte_cryptodev_info cdev_info;
 
@@ -2005,9 +1993,11 @@ cryptodevs_init(void)
                        i++;
                }
 
+               qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
                if (qp == 0)
                        continue;
 
+               total_nb_qps += qp;
                dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
                dev_conf.nb_queue_pairs = qp;
                dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
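
The RTE_MIN/RTE_MAX line added above is the heart of the reservation: each
crypto device keeps the queue pairs already mapped to lcores, is raised to the
requested minimum (the number of available ethernet ports when running in
event mode) and is capped at what the device supports. A minimal standalone
sketch of that clamp, using purely hypothetical numbers:

	#include <stdint.h>
	#include <stdio.h>
	#include <rte_common.h>	/* RTE_MIN()/RTE_MAX() */

	int
	main(void)
	{
		uint16_t max_nb_qps = 8;	/* assumed device limit */
		uint16_t qp = 2;		/* queue pairs already mapped to lcores */
		uint16_t req_queue_num = 4;	/* e.g. one per ethdev port in event mode */

		/* reserve at least the requested number, never more than the device supports */
		qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
		printf("queue pairs to configure: %u\n", (unsigned int)qp);	/* prints 4 */

		return 0;
	}
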
@@ -2040,7 +2030,7 @@ cryptodevs_init(void)
 
        printf("\n");
 
-       return 0;
+       return total_nb_qps;
 }
 
 static void
@@ -2694,20 +2684,36 @@ inline_sessions_free(struct sa_ctx *sa_ctx)
        }
 }
 
+static uint32_t
+calculate_nb_mbufs(uint16_t nb_ports, uint16_t nb_crypto_qp, uint32_t nb_rxq,
+               uint32_t nb_txq)
+{
+       return RTE_MAX((nb_rxq * nb_rxd +
+                       nb_ports * nb_lcores * MAX_PKT_BURST +
+                       nb_ports * nb_txq * nb_txd +
+                       nb_lcores * MEMPOOL_CACHE_SIZE +
+                       nb_crypto_qp * CDEV_QUEUE_DESC +
+                       nb_lcores * frag_tbl_sz *
+                       FRAG_TBL_BUCKET_ENTRIES),
+                      8192U);
+}
+
 int32_t
 main(int32_t argc, char **argv)
 {
        int32_t ret;
-       uint32_t lcore_id;
+       uint32_t lcore_id, nb_txq, nb_rxq = 0;
        uint32_t cdev_id;
        uint32_t i;
        uint8_t socket_id;
-       uint16_t portid;
+       uint16_t portid, nb_crypto_qp, nb_ports = 0;
        uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
        uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
        struct eh_conf *eh_conf = NULL;
        size_t sess_sz;
 
+       nb_bufs_in_pool = 0;
+
        /* init EAL */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
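
As a rough worked example of the calculate_nb_mbufs() helper added above
(every input value below is an assumption chosen only to show the arithmetic;
the real values come from the runtime configuration, e.g. the descriptor
counts, MAX_PKT_BURST, FRAG_TBL_BUCKET_ENTRIES and the fragmentation table
size):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		/* hypothetical configuration: 2 ports, 2 crypto qps, 4 lcores */
		uint32_t nb_ports = 2, nb_crypto_qp = 2, nb_rxq = 2, nb_txq = 4;
		uint32_t nb_lcores = 4, frag_tbl_sz = 0;
		uint32_t nb_rxd = 1024, nb_txd = 1024;	/* assumed descriptor defaults */
		uint32_t max_pkt_burst = 32;		/* assumed MAX_PKT_BURST */
		uint32_t cache_sz = 256;		/* MEMPOOL_CACHE_SIZE */
		uint32_t cdev_queue_desc = 2048;	/* CDEV_QUEUE_DESC */
		uint32_t frag_bucket_entries = 4;	/* assumed FRAG_TBL_BUCKET_ENTRIES */

		uint32_t nb_mbuf = nb_rxq * nb_rxd +
				nb_ports * nb_lcores * max_pkt_burst +
				nb_ports * nb_txq * nb_txd +
				nb_lcores * cache_sz +
				nb_crypto_qp * cdev_queue_desc +
				nb_lcores * frag_tbl_sz * frag_bucket_entries;

		if (nb_mbuf < 8192)	/* same floor as the helper */
			nb_mbuf = 8192;

		/* 2048 + 256 + 8192 + 1024 + 4096 + 0 = 15616 mbufs */
		printf("pool size: %u mbufs\n", (unsigned int)nb_mbuf);
		return 0;
	}
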
@@ -2756,6 +2762,31 @@ main(int32_t argc, char **argv)
 
        sess_sz = max_session_size();
 
+       /*
+        * In event mode request minimum number of crypto queues
+        * to be reserved equal to number of ports.
+        */
+       if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
+               nb_crypto_qp = rte_eth_dev_count_avail();
+       else
+               nb_crypto_qp = 0;
+
+       nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
+
+       if (nb_bufs_in_pool == 0) {
+               RTE_ETH_FOREACH_DEV(portid) {
+                       if ((enabled_port_mask & (1 << portid)) == 0)
+                               continue;
+                       nb_ports++;
+                       nb_rxq += get_port_nb_rx_queues(portid);
+               }
+
+               nb_txq = nb_lcores;
+
+               nb_bufs_in_pool = calculate_nb_mbufs(nb_ports, nb_crypto_qp,
+                                               nb_rxq, nb_txq);
+       }
+
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                if (rte_lcore_is_enabled(lcore_id) == 0)
                        continue;
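
Note the ordering in this hunk: cryptodevs_init() now runs before the mbuf
pools are created, so the number of crypto queue pairs it actually configured
can be folded into the pool size, and the calculation is skipped entirely when
-s has already set nb_bufs_in_pool on the command line.
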
@@ -2769,11 +2800,12 @@ main(int32_t argc, char **argv)
                if (socket_ctx[socket_id].mbuf_pool)
                        continue;
 
-               pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
+               pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool);
                session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
                session_priv_pool_init(&socket_ctx[socket_id], socket_id,
                        sess_sz);
        }
+       printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool);
 
        RTE_ETH_FOREACH_DEV(portid) {
                if ((enabled_port_mask & (1 << portid)) == 0)
@@ -2785,8 +2817,6 @@ main(int32_t argc, char **argv)
                                req_tx_offloads[portid]);
        }
 
-       cryptodevs_init();
-
        /*
         * Set the enabled port mask in helper config for use by helper
         * sub-system. This will be used while initializing devices using