git.droids-corp.org - dpdk.git/commitdiff
examples/ipsec-secgw: add pool size parameters
author Nithin Dabilpuram <ndabilpuram@marvell.com>
Wed, 23 Feb 2022 09:53:53 +0000 (15:23 +0530)
committer Akhil Goyal <gakhil@marvell.com>
Wed, 23 Feb 2022 10:43:14 +0000 (11:43 +0100)
Add support to enable a per-port packet pool and to override the
vector pool size from command-line arguments. This is useful on
some hardware to tune performance based on the use case.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
doc/guides/sample_app_ug/ipsec_secgw.rst
examples/ipsec-secgw/event_helper.c
examples/ipsec-secgw/event_helper.h
examples/ipsec-secgw/ipsec-secgw.c
examples/ipsec-secgw/ipsec-secgw.h
examples/ipsec-secgw/ipsec.h

index c53ee7c386843393b2615990d8e74f3eb2626303..d93acf06676b368639f00f038e5b97e3d4eb502b 100644 (file)
@@ -249,6 +249,13 @@ Where:
     Should be lower for low number of reassembly buckets.
     Valid values: from 1 ns to 10 s. Default value: 10000000 (10 s).
 
+*   ``--per-port-pool``: Enable per ethdev port pktmbuf pool.
+    By default, one packet mbuf pool per socket is created and configured
+    via Rx queue setup.
+
+*   ``--vector-pool-sz``: Number of buffers in vector pool.
+    By default, vector pool size depends on packet pool size
+    and size of each vector.
 
 The mapping of lcores to port/queues is similar to other l3fwd applications.
 
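For reference, the default vector-pool sizing implemented in the event_helper.c hunk below can be reproduced in isolation. This is only a sketch of the arithmetic: nb_bufs_in_pool, vector_size, nb_ports and per_port_pool mirror the names used by the patch, 16 is the application's default vector size, and 8192 mbufs is merely an illustrative pool size.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Default vector pool sizing when --vector-pool-sz is not given:
 * one event vector per 'vector_size' packet mbufs (plus one), scaled
 * by the number of enabled ports when --per-port-pool is in use.
 * When --vector-pool-sz is given, that value is used as-is instead.
 */
static uint32_t
default_vector_pool_size(uint32_t nb_bufs_in_pool, uint16_t vector_size,
			 uint16_t nb_ports, bool per_port_pool)
{
	uint32_t nb_elem = nb_bufs_in_pool / vector_size + 1;

	if (per_port_pool)
		nb_elem *= nb_ports;

	return nb_elem;
}

int
main(void)
{
	/* Illustrative numbers: 8192 mbufs per pool, vectors of 16, 2 ports. */
	printf("shared pool:    %u vectors\n",
	       (unsigned int)default_vector_pool_size(8192, 16, 2, false));
	printf("per-port pools: %u vectors\n",
	       (unsigned int)default_vector_pool_size(8192, 16, 2, true));
	return 0;
}
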
index 8947e418030a501dd75cc7a1f5525959c354ac8c..172ab8e716e43d9b97b18291ed77b4f11dd2ad0e 100644 (file)
@@ -792,8 +792,8 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,
        uint32_t service_id, socket_id, nb_elem;
        struct rte_mempool *vector_pool = NULL;
        uint32_t lcore_id = rte_lcore_id();
+       int ret, portid, nb_ports = 0;
        uint8_t eventdev_id;
-       int ret;
        int j;
 
        /* Get event dev ID */
@@ -806,10 +806,21 @@ eh_rx_adapter_configure(struct eventmode_conf *em_conf,
                return ret;
        }
 
+       RTE_ETH_FOREACH_DEV(portid)
+               if ((em_conf->eth_portmask & (1 << portid)))
+                       nb_ports++;
+
        if (em_conf->ext_params.event_vector) {
                socket_id = rte_lcore_to_socket_id(lcore_id);
-               nb_elem = (nb_bufs_in_pool / em_conf->ext_params.vector_size)
-                         + 1;
+
+               if (em_conf->vector_pool_sz) {
+                       nb_elem = em_conf->vector_pool_sz;
+               } else {
+                       nb_elem = (nb_bufs_in_pool /
+                                  em_conf->ext_params.vector_size) + 1;
+                       if (per_port_pool)
+                               nb_elem = nb_ports * nb_elem;
+               }
 
                vector_pool = rte_event_vector_pool_create(
                        "vector_pool", nb_elem, 0,
index 5be6c620cd868ac8d0d9fa17c78972a4e8775cd7..f3cbe57cb33d36edd57e920f92d5d81e62df5ac3 100644 (file)
@@ -183,6 +183,8 @@ struct eventmode_conf {
                /**< 64 bit field to specify extended params */
        uint64_t vector_tmo_ns;
                /**< Max vector timeout in nanoseconds */
+       uint64_t vector_pool_sz;
+               /**< Vector pool size */
 };
 
 /**
index 9de1c6d85c74413a65f9a930ae0042d85c201e26..42b5081840bcc885fc58853f6fc93569111596a6 100644 (file)
@@ -118,6 +118,8 @@ struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
 #define CMD_LINE_OPT_EVENT_VECTOR      "event-vector"
 #define CMD_LINE_OPT_VECTOR_SIZE       "vector-size"
 #define CMD_LINE_OPT_VECTOR_TIMEOUT    "vector-tmo"
+#define CMD_LINE_OPT_VECTOR_POOL_SZ    "vector-pool-sz"
+#define CMD_LINE_OPT_PER_PORT_POOL     "per-port-pool"
 
 #define CMD_LINE_ARG_EVENT     "event"
 #define CMD_LINE_ARG_POLL      "poll"
@@ -145,6 +147,8 @@ enum {
        CMD_LINE_OPT_EVENT_VECTOR_NUM,
        CMD_LINE_OPT_VECTOR_SIZE_NUM,
        CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
+       CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
+       CMD_LINE_OPT_PER_PORT_POOL_NUM,
 };
 
 static const struct option lgopts[] = {
@@ -161,6 +165,8 @@ static const struct option lgopts[] = {
        {CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
        {CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
        {CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
+       {CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
+       {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
        {NULL, 0, 0, 0}
 };
 
@@ -234,7 +240,6 @@ struct lcore_conf {
        struct rt_ctx *rt6_ctx;
        struct {
                struct rte_ip_frag_tbl *tbl;
-               struct rte_mempool *pool_dir;
                struct rte_mempool *pool_indir;
                struct rte_ip_frag_death_row dr;
        } frag;
@@ -262,6 +267,8 @@ static struct rte_eth_conf port_conf = {
 
 struct socket_ctx socket_ctx[NB_SOCKETS];
 
+bool per_port_pool;
+
 /*
  * Determine if multi-segment support is required:
  *  - either frame buffer size is smaller than mtu
@@ -630,12 +637,10 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
 
        if (proto == IPPROTO_IP)
                rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
-                       n, mtu_size, qconf->frag.pool_dir,
-                       qconf->frag.pool_indir);
+                       n, mtu_size, m->pool, qconf->frag.pool_indir);
        else
                rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
-                       n, mtu_size, qconf->frag.pool_dir,
-                       qconf->frag.pool_indir);
+                       n, mtu_size, m->pool, qconf->frag.pool_indir);
 
        if (rc >= 0)
                len += rc;
@@ -1256,7 +1261,6 @@ ipsec_poll_mode_worker(void)
        qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
        qconf->outbound.session_priv_pool =
                        socket_ctx[socket_id].session_priv_pool;
-       qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
        qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
 
        rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
@@ -1511,6 +1515,9 @@ print_usage(const char *prgname)
                "  --vector-size Max vector size (default value: 16)\n"
                "  --vector-tmo Max vector timeout in nanoseconds"
                "    (default value: 102400)\n"
+               "  --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
+               "  --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
+               "                    (default value is based on mbuf count)\n"
                "\n",
                prgname);
 }
@@ -1894,6 +1901,15 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
                        em_conf = eh_conf->mode_params;
                        em_conf->vector_tmo_ns = ret;
                        break;
+               case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
+                       ret = parse_decimal(optarg);
+
+                       em_conf = eh_conf->mode_params;
+                       em_conf->vector_pool_sz = ret;
+                       break;
+               case CMD_LINE_OPT_PER_PORT_POOL_NUM:
+                       per_port_pool = 1;
+                       break;
                default:
                        print_usage(prgname);
                        return -1;
@@ -2381,6 +2397,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
                /* init RX queues */
                for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
                        struct rte_eth_rxconf rxq_conf;
+                       struct rte_mempool *pool;
 
                        if (portid != qconf->rx_queue_list[queue].port_id)
                                continue;
@@ -2392,9 +2409,14 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 
                        rxq_conf = dev_info.default_rxconf;
                        rxq_conf.offloads = local_port_conf.rxmode.offloads;
+
+                       if (per_port_pool)
+                               pool = socket_ctx[socket_id].mbuf_pool[portid];
+                       else
+                               pool = socket_ctx[socket_id].mbuf_pool[0];
+
                        ret = rte_eth_rx_queue_setup(portid, rx_queueid,
-                                       nb_rxd, socket_id, &rxq_conf,
-                                       socket_ctx[socket_id].mbuf_pool);
+                                       nb_rxd, socket_id, &rxq_conf, pool);
                        if (ret < 0)
                                rte_exit(EXIT_FAILURE,
                                        "rte_eth_rx_queue_setup: err=%d, "
@@ -2507,28 +2529,37 @@ session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
 }
 
 static void
-pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
+pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
+         uint32_t nb_mbuf)
 {
        char s[64];
        int32_t ms;
 
-       snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
-       ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
-                       MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
-                       frame_buf_size, socket_id);
+
+       /* Skip if mbuf_pool for this port is already initialised */
+       if (socket_ctx[socket_id].mbuf_pool[portid])
+               return;
+
+       snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
+       ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
+                                                        MEMPOOL_CACHE_SIZE,
+                                                        ipsec_metadata_size(),
+                                                        frame_buf_size,
+                                                        socket_id);
 
        /*
         * if multi-segment support is enabled, then create a pool
-        * for indirect mbufs.
+        * for indirect mbufs. This is not per-port but global.
         */
        ms = multi_seg_required();
-       if (ms != 0) {
+       if (ms != 0 && !ctx->mbuf_pool_indir) {
                snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
                ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
                        MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
        }
 
-       if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
+       if (ctx->mbuf_pool[portid] == NULL ||
+           (ms != 0 && ctx->mbuf_pool_indir == NULL))
                rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
                                socket_id);
        else
@@ -3344,11 +3375,22 @@ main(int32_t argc, char **argv)
                else
                        socket_id = 0;
 
-               /* mbuf_pool is initialised by the pool_init() function*/
-               if (socket_ctx[socket_id].mbuf_pool)
+               if (per_port_pool) {
+                       RTE_ETH_FOREACH_DEV(portid) {
+                               if ((enabled_port_mask & (1 << portid)) == 0)
+                                       continue;
+
+                               pool_init(&socket_ctx[socket_id], socket_id,
+                                         portid, nb_bufs_in_pool);
+                       }
+               } else {
+                       pool_init(&socket_ctx[socket_id], socket_id, 0,
+                                 nb_bufs_in_pool);
+               }
+
+               if (socket_ctx[socket_id].session_pool)
                        continue;
 
-               pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool);
                session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
                session_priv_pool_init(&socket_ctx[socket_id], socket_id,
                        sess_sz);
@@ -3421,7 +3463,7 @@ main(int32_t argc, char **argv)
        /* Replicate each context per socket */
        for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
                socket_id = rte_socket_id_by_idx(i);
-               if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
+               if ((socket_ctx[socket_id].session_pool != NULL) &&
                        (socket_ctx[socket_id].sa_in == NULL) &&
                        (socket_ctx[socket_id].sa_out == NULL)) {
                        sa_init(&socket_ctx[socket_id], socket_id);
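Taken together, the ipsec-secgw.c changes above select the Rx mempool per port only when the new option is enabled; otherwise every Rx queue keeps using a single pool per socket, now stored at index 0 of the mbuf_pool[] array. A minimal sketch of that index selection follows (rx_pool_index() is a hypothetical helper, not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Index into socket_ctx::mbuf_pool[] used when setting up an Rx queue:
 * with --per-port-pool each port owns its own slot, otherwise every
 * Rx queue on the socket shares the pool created at slot 0.
 */
static uint16_t
rx_pool_index(uint16_t portid, bool per_port_pool)
{
	return per_port_pool ? portid : 0;
}

int
main(void)
{
	printf("port 3, shared pool:    mbuf_pool[%u]\n",
	       (unsigned int)rx_pool_index(3, false));
	printf("port 3, per-port pools: mbuf_pool[%u]\n",
	       (unsigned int)rx_pool_index(3, true));
	return 0;
}

Because the direct packet pool is no longer unique per socket, send_fragment_packet() also stops using the cached qconf->frag.pool_dir and instead takes direct fragments from the originating mbuf's own pool (m->pool); only the indirect mbuf pool remains shared per socket.
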
index ac4fa5e612a184f3ddae0f6d36a86d78f81d4f68..24f11ad4d4d73b127d705cc15370450aa00bbd6b 100644 (file)
@@ -134,6 +134,8 @@ extern volatile bool force_quit;
 
 extern uint32_t nb_bufs_in_pool;
 
+extern bool per_port_pool;
+
 static inline uint8_t
 is_unprotected_port(uint16_t port_id)
 {
index bc87b1a51d0ff175679cb31a67e26c9a563842f1..ccfde8e3df0abd35cb7385727d0d2f85915e992c 100644 (file)
@@ -248,7 +248,7 @@ struct socket_ctx {
        struct sp_ctx *sp_ip6_out;
        struct rt_ctx *rt_ip4;
        struct rt_ctx *rt_ip6;
-       struct rte_mempool *mbuf_pool;
+       struct rte_mempool *mbuf_pool[RTE_MAX_ETHPORTS];
        struct rte_mempool *mbuf_pool_indir;
        struct rte_mempool *session_pool;
        struct rte_mempool *session_priv_pool;