eventdev/eth_rx: fix telemetry Rx stats reset
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index cca2b0e..42b5081 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -115,6 +115,11 @@ struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS];
 #define CMD_LINE_OPT_REASSEMBLE                "reassemble"
 #define CMD_LINE_OPT_MTU               "mtu"
 #define CMD_LINE_OPT_FRAG_TTL          "frag-ttl"
+#define CMD_LINE_OPT_EVENT_VECTOR      "event-vector"
+#define CMD_LINE_OPT_VECTOR_SIZE       "vector-size"
+#define CMD_LINE_OPT_VECTOR_TIMEOUT    "vector-tmo"
+#define CMD_LINE_OPT_VECTOR_POOL_SZ    "vector-pool-sz"
+#define CMD_LINE_OPT_PER_PORT_POOL     "per-port-pool"
 
 #define CMD_LINE_ARG_EVENT     "event"
 #define CMD_LINE_ARG_POLL      "poll"
@@ -139,6 +144,11 @@ enum {
        CMD_LINE_OPT_REASSEMBLE_NUM,
        CMD_LINE_OPT_MTU_NUM,
        CMD_LINE_OPT_FRAG_TTL_NUM,
+       CMD_LINE_OPT_EVENT_VECTOR_NUM,
+       CMD_LINE_OPT_VECTOR_SIZE_NUM,
+       CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
+       CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
+       CMD_LINE_OPT_PER_PORT_POOL_NUM,
 };
 
 static const struct option lgopts[] = {
@@ -152,6 +162,11 @@ static const struct option lgopts[] = {
        {CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},
        {CMD_LINE_OPT_MTU, 1, 0, CMD_LINE_OPT_MTU_NUM},
        {CMD_LINE_OPT_FRAG_TTL, 1, 0, CMD_LINE_OPT_FRAG_TTL_NUM},
+       {CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
+       {CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
+       {CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
+       {CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
+       {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
        {NULL, 0, 0, 0}
 };
 
@@ -164,7 +179,7 @@ static int32_t promiscuous_on = 1;
 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
 static uint32_t nb_lcores;
 static uint32_t single_sa;
-static uint32_t nb_bufs_in_pool;
+uint32_t nb_bufs_in_pool;
 
 /*
  * RX/TX HW offload capabilities to enable/use on ethernet ports.
@@ -225,7 +240,6 @@ struct lcore_conf {
        struct rt_ctx *rt6_ctx;
        struct {
                struct rte_ip_frag_tbl *tbl;
-               struct rte_mempool *pool_dir;
                struct rte_mempool *pool_indir;
                struct rte_ip_frag_death_row dr;
        } frag;
@@ -253,10 +267,12 @@ static struct rte_eth_conf port_conf = {
 
 struct socket_ctx socket_ctx[NB_SOCKETS];
 
+bool per_port_pool;
+
 /*
  * Determine is multi-segment support required:
  *  - either frame buffer size is smaller then mtu
- *  - or reassmeble support is requested
+ *  - or reassemble support is requested
  */
 static int
 multi_seg_required(void)
@@ -621,12 +637,10 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
 
        if (proto == IPPROTO_IP)
                rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
-                       n, mtu_size, qconf->frag.pool_dir,
-                       qconf->frag.pool_indir);
+                       n, mtu_size, m->pool, qconf->frag.pool_indir);
        else
                rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
-                       n, mtu_size, qconf->frag.pool_dir,
-                       qconf->frag.pool_indir);
+                       n, mtu_size, m->pool, qconf->frag.pool_indir);
 
        if (rc >= 0)
                len += rc;
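
With per-port pools an mbuf being fragmented may come from any of several pools, so the hunk above takes the direct-fragment pool from the packet itself (m->pool) instead of a cached per-lcore pool_dir pointer. Below is a minimal, self-contained sketch of that call pattern; MAX_FRAGS and the fragment_ipv4() wrapper are illustrative names, not symbols from this patch.

#include <rte_ip_frag.h>
#include <rte_mbuf.h>

#define MAX_FRAGS 8	/* illustrative bound, not taken from the patch */

/*
 * Fragment one IPv4 mbuf, sourcing direct fragments from the packet's
 * own pool so the correct per-port pool is used automatically.
 */
static int32_t
fragment_ipv4(struct rte_mbuf *m, uint16_t mtu,
	      struct rte_mempool *indir_pool)
{
	struct rte_mbuf *frags[MAX_FRAGS];
	int32_t nb;

	nb = rte_ipv4_fragment_packet(m, frags, MAX_FRAGS, mtu,
				      m->pool /* direct */, indir_pool);
	if (nb < 0)
		return nb;	/* e.g. -ENOTSUP or -ENOMEM */

	/* the caller now owns nb fragments; the original can be freed */
	rte_pktmbuf_free(m);
	return nb;
}
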
@@ -1247,7 +1261,6 @@ ipsec_poll_mode_worker(void)
        qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
        qconf->outbound.session_priv_pool =
                        socket_ctx[socket_id].session_priv_pool;
-       qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
        qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
 
        rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
@@ -1440,6 +1453,9 @@ print_usage(const char *prgname)
                " [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
                " [--" CMD_LINE_OPT_REASSEMBLE " REASSEMBLE_TABLE_SIZE]"
                " [--" CMD_LINE_OPT_MTU " MTU]"
+               " [--event-vector]"
+               " [--vector-size SIZE]"
+               " [--vector-tmo TIMEOUT in ns]"
                "\n\n"
                "  -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
                "  -P : Enable promiscuous mode\n"
@@ -1495,6 +1511,13 @@ print_usage(const char *prgname)
                "  --" CMD_LINE_OPT_FRAG_TTL " FRAG_TTL_NS"
                ": fragments lifetime in nanoseconds, default\n"
                "    and maximum value is 10.000.000.000 ns (10 s)\n"
+               "  --event-vector enables event vectorization\n"
+               "  --vector-size Max vector size (default value: 16)\n"
+               "  --vector-tmo Max vector timeout in nanoseconds"
+               "    (default value: 102400)\n"
+               "  --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
+               "  --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
+               "                    (default value is based on mbuf count)\n"
                "\n",
                prgname);
 }
@@ -1661,6 +1684,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
        int32_t option_index;
        char *prgname = argv[0];
        int32_t f_present = 0;
+       struct eventmode_conf *em_conf = NULL;
 
        argvopt = argv;
 
@@ -1855,6 +1879,37 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
                        }
                        frag_ttl_ns = ret;
                        break;
+               case CMD_LINE_OPT_EVENT_VECTOR_NUM:
+                       em_conf = eh_conf->mode_params;
+                       em_conf->ext_params.event_vector = 1;
+                       break;
+               case CMD_LINE_OPT_VECTOR_SIZE_NUM:
+                       ret = parse_decimal(optarg);
+
+                       if (ret > MAX_PKT_BURST) {
+                               printf("Invalid argument for \'%s\': %s\n",
+                                       CMD_LINE_OPT_VECTOR_SIZE, optarg);
+                               print_usage(prgname);
+                               return -1;
+                       }
+                       em_conf = eh_conf->mode_params;
+                       em_conf->ext_params.vector_size = ret;
+                       break;
+               case CMD_LINE_OPT_VECTOR_TIMEOUT_NUM:
+                       ret = parse_decimal(optarg);
+
+                       em_conf = eh_conf->mode_params;
+                       em_conf->vector_tmo_ns = ret;
+                       break;
+               case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
+                       ret = parse_decimal(optarg);
+
+                       em_conf = eh_conf->mode_params;
+                       em_conf->vector_pool_sz = ret;
+                       break;
+               case CMD_LINE_OPT_PER_PORT_POOL_NUM:
+                       per_port_pool = 1;
+                       break;
                default:
                        print_usage(prgname);
                        return -1;
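
The new cases above store the parsed values straight into the event-mode configuration, with --vector-size bounded only against MAX_PKT_BURST. As a standalone illustration of how such a decimal option can be validated in one place, here is a small helper; the function name and the explicit lower-bound and end-of-string checks are additions made for this sketch, not part of the patch.

#include <errno.h>
#include <stdlib.h>

/*
 * Parse a decimal option such as --vector-size and range-check it.
 * Returns 0 on success, a negative errno value otherwise.
 */
static int
parse_bounded_decimal(const char *arg, long min, long max, long *out)
{
	char *end = NULL;
	long val;

	errno = 0;
	val = strtol(arg, &end, 10);
	if (errno != 0 || end == arg || *end != '\0')
		return -EINVAL;
	if (val < min || val > max)
		return -ERANGE;

	*out = val;
	return 0;
}

With such a helper, non-numeric or out-of-range input would be rejected before being written into em_conf.
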
@@ -2011,7 +2066,7 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
 
        ret = rte_hash_add_key_data(map, &key, (void *)i);
        if (ret < 0) {
-               printf("Faled to insert cdev mapping for (lcore %u, "
+               printf("Failed to insert cdev mapping for (lcore %u, "
                                "cdev %u, qp %u), errno %d\n",
                                key.lcore_id, ipsec_ctx->tbl[i].id,
                                ipsec_ctx->tbl[i].qp, ret);
@@ -2044,7 +2099,7 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
                str = "Inbound";
        }
 
-       /* Required cryptodevs with operation chainning */
+       /* Required cryptodevs with operation chaining */
        if (!(dev_info->feature_flags &
                                RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
                return ret;
@@ -2212,7 +2267,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
                        "Error during getting device (port %u) info: %s\n",
                        portid, strerror(-ret));
 
-       /* limit allowed HW offloafs, as user requested */
+       /* limit allowed HW offloads, as user requested */
        dev_info.rx_offload_capa &= dev_rx_offload;
        dev_info.tx_offload_capa &= dev_tx_offload;
 
@@ -2259,7 +2314,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
                        local_port_conf.rxmode.offloads)
                rte_exit(EXIT_FAILURE,
                        "Error: port %u required RX offloads: 0x%" PRIx64
-                       ", avaialbe RX offloads: 0x%" PRIx64 "\n",
+                       ", available RX offloads: 0x%" PRIx64 "\n",
                        portid, local_port_conf.rxmode.offloads,
                        dev_info.rx_offload_capa);
 
@@ -2267,7 +2322,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
                        local_port_conf.txmode.offloads)
                rte_exit(EXIT_FAILURE,
                        "Error: port %u required TX offloads: 0x%" PRIx64
-                       ", avaialbe TX offloads: 0x%" PRIx64 "\n",
+                       ", available TX offloads: 0x%" PRIx64 "\n",
                        portid, local_port_conf.txmode.offloads,
                        dev_info.tx_offload_capa);
 
@@ -2278,7 +2333,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
        if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
                local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
-       printf("port %u configurng rx_offloads=0x%" PRIx64
+       printf("port %u configuring rx_offloads=0x%" PRIx64
                ", tx_offloads=0x%" PRIx64 "\n",
                portid, local_port_conf.rxmode.offloads,
                local_port_conf.txmode.offloads);
@@ -2342,6 +2397,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
                /* init RX queues */
                for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
                        struct rte_eth_rxconf rxq_conf;
+                       struct rte_mempool *pool;
 
                        if (portid != qconf->rx_queue_list[queue].port_id)
                                continue;
@@ -2353,9 +2409,14 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 
                        rxq_conf = dev_info.default_rxconf;
                        rxq_conf.offloads = local_port_conf.rxmode.offloads;
+
+                       if (per_port_pool)
+                               pool = socket_ctx[socket_id].mbuf_pool[portid];
+                       else
+                               pool = socket_ctx[socket_id].mbuf_pool[0];
+
                        ret = rte_eth_rx_queue_setup(portid, rx_queueid,
-                                       nb_rxd, socket_id, &rxq_conf,
-                                       socket_ctx[socket_id].mbuf_pool);
+                                       nb_rxd, socket_id, &rxq_conf, pool);
                        if (ret < 0)
                                rte_exit(EXIT_FAILURE,
                                        "rte_eth_rx_queue_setup: err=%d, "
@@ -2468,28 +2529,37 @@ session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
 }
 
 static void
-pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
+pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
+         uint32_t nb_mbuf)
 {
        char s[64];
        int32_t ms;
 
-       snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
-       ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
-                       MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
-                       frame_buf_size, socket_id);
+
+       /* mbuf_pool is initialised by the pool_init() function*/
+       if (socket_ctx[socket_id].mbuf_pool[portid])
+               return;
+
+       snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
+       ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
+                                                        MEMPOOL_CACHE_SIZE,
+                                                        ipsec_metadata_size(),
+                                                        frame_buf_size,
+                                                        socket_id);
 
        /*
         * if multi-segment support is enabled, then create a pool
-        * for indirect mbufs.
+        * for indirect mbufs. This is not per-port but global.
         */
        ms = multi_seg_required();
-       if (ms != 0) {
+       if (ms != 0 && !ctx->mbuf_pool_indir) {
                snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
                ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
                        MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
        }
 
-       if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
+       if (ctx->mbuf_pool[portid] == NULL ||
+           (ms != 0 && ctx->mbuf_pool_indir == NULL))
                rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
                                socket_id);
        else
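
pool_init() now creates one pool per (socket, port) pair and returns early when the slot is already populated. The following compact sketch shows the same get-or-create idea using only the public mempool API; NB_MBUF, CACHE_SZ and BUF_SZ are placeholder constants, and the zero private-area size stands in for the ipsec_metadata_size() the real code passes.

#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define NB_MBUF  8192	/* placeholder sizing, not from the patch */
#define CACHE_SZ 256
#define BUF_SZ   RTE_MBUF_DEFAULT_BUF_SIZE

/*
 * Return the pktmbuf pool for (socket, port), creating it on first use.
 * The name encodes both IDs so repeated calls find the existing pool.
 */
static struct rte_mempool *
port_pool_get_or_create(int socket_id, uint16_t portid)
{
	char name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(name, sizeof(name), "mbuf_pool_%d_%u", socket_id, portid);
	mp = rte_mempool_lookup(name);
	if (mp != NULL)
		return mp;

	return rte_pktmbuf_pool_create(name, NB_MBUF, CACHE_SZ,
				       0 /* priv size */, BUF_SZ, socket_id);
}
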
@@ -2608,7 +2678,7 @@ rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,
                                rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
 
                        struct rte_ipv6_hdr *iph;
-                       struct ipv6_extension_fragment *fh;
+                       struct rte_ipv6_fragment_ext *fh;
 
                        iph = (struct rte_ipv6_hdr *)(eth + 1);
                        fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
@@ -2986,12 +3056,14 @@ handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
        struct rte_tel_data *spd4_data = rte_tel_data_alloc();
        struct rte_tel_data *spd6_data = rte_tel_data_alloc();
        struct rte_tel_data *sad_data = rte_tel_data_alloc();
-
        unsigned int coreid = UINT32_MAX;
+       int rc = 0;
 
        /* verify allocated telemetry data structures */
-       if (!spd4_data || !spd6_data || !sad_data)
-               return -ENOMEM;
+       if (!spd4_data || !spd6_data || !sad_data) {
+               rc = -ENOMEM;
+               goto exit;
+       }
 
        /* initialize telemetry data structs as dicts */
        rte_tel_data_start_dict(data);
@@ -3002,8 +3074,10 @@ handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
 
        if (params) {
                coreid = (uint32_t)atoi(params);
-               if (rte_lcore_is_enabled(coreid) == 0)
-                       return -EINVAL;
+               if (rte_lcore_is_enabled(coreid) == 0) {
+                       rc = -EINVAL;
+                       goto exit;
+               }
        }
 
        update_statistics(&total_stats, coreid);
@@ -3037,7 +3111,13 @@ handle_telemetry_cmd_ipsec_secgw_stats_outbound(const char *cmd __rte_unused,
 
        rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
 
-       return 0;
+exit:
+       if (rc) {
+               rte_tel_data_free(spd4_data);
+               rte_tel_data_free(spd6_data);
+               rte_tel_data_free(sad_data);
+       }
+       return rc;
 }
 
 static int
@@ -3049,12 +3129,14 @@ handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
        struct rte_tel_data *spd4_data = rte_tel_data_alloc();
        struct rte_tel_data *spd6_data = rte_tel_data_alloc();
        struct rte_tel_data *sad_data = rte_tel_data_alloc();
-
        unsigned int coreid = UINT32_MAX;
+       int rc = 0;
 
        /* verify allocated telemetry data structures */
-       if (!spd4_data || !spd6_data || !sad_data)
-               return -ENOMEM;
+       if (!spd4_data || !spd6_data || !sad_data) {
+               rc = -ENOMEM;
+               goto exit;
+       }
 
        /* initialize telemetry data structs as dicts */
        rte_tel_data_start_dict(data);
@@ -3066,8 +3148,10 @@ handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
 
        if (params) {
                coreid = (uint32_t)atoi(params);
-               if (rte_lcore_is_enabled(coreid) == 0)
-                       return -EINVAL;
+               if (rte_lcore_is_enabled(coreid) == 0) {
+                       rc = -EINVAL;
+                       goto exit;
+               }
        }
 
        update_statistics(&total_stats, coreid);
@@ -3101,7 +3185,13 @@ handle_telemetry_cmd_ipsec_secgw_stats_inbound(const char *cmd __rte_unused,
 
        rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
 
-       return 0;
+exit:
+       if (rc) {
+               rte_tel_data_free(spd4_data);
+               rte_tel_data_free(spd6_data);
+               rte_tel_data_free(sad_data);
+       }
+       return rc;
 }
 
 static int
@@ -3112,8 +3202,14 @@ handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
 
        struct rte_tel_data *lpm4_data = rte_tel_data_alloc();
        struct rte_tel_data *lpm6_data = rte_tel_data_alloc();
-
        unsigned int coreid = UINT32_MAX;
+       int rc = 0;
+
+       /* verify allocated telemetry data structures */
+       if (!lpm4_data || !lpm6_data) {
+               rc = -ENOMEM;
+               goto exit;
+       }
 
        /* initialize telemetry data structs as dicts */
        rte_tel_data_start_dict(data);
@@ -3123,8 +3219,10 @@ handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
 
        if (params) {
                coreid = (uint32_t)atoi(params);
-               if (rte_lcore_is_enabled(coreid) == 0)
-                       return -EINVAL;
+               if (rte_lcore_is_enabled(coreid) == 0) {
+                       rc = -EINVAL;
+                       goto exit;
+               }
        }
 
        update_statistics(&total_stats, coreid);
@@ -3141,7 +3239,12 @@ handle_telemetry_cmd_ipsec_secgw_stats_routing(const char *cmd __rte_unused,
 
        rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0);
 
-       return 0;
+exit:
+       if (rc) {
+               rte_tel_data_free(lpm4_data);
+               rte_tel_data_free(lpm6_data);
+       }
+       return rc;
 }
 
 static void
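
All three telemetry handlers above now funnel their early returns through an exit label so that sub-containers allocated up front are not leaked. A minimal sketch of that pattern follows; fill_two_dicts() and the dictionary names are illustrative, and it is assumed that rte_tel_data_free() tolerates a NULL pointer (it wraps free()).

#include <errno.h>
#include <rte_telemetry.h>

/*
 * Allocate sub-containers up front, bail out through a single label on
 * error, and free them only when something went wrong: on success,
 * ownership passes to the parent dict via
 * rte_tel_data_add_dict_container().
 */
static int
fill_two_dicts(struct rte_tel_data *data)
{
	struct rte_tel_data *a = rte_tel_data_alloc();
	struct rte_tel_data *b = rte_tel_data_alloc();
	int rc = 0;

	if (a == NULL || b == NULL) {
		rc = -ENOMEM;
		goto exit;
	}

	rte_tel_data_start_dict(data);
	rte_tel_data_start_dict(a);
	rte_tel_data_start_dict(b);

	/* ... add counters to a and b here ... */

	rte_tel_data_add_dict_container(data, "first", a, 0);
	rte_tel_data_add_dict_container(data, "second", b, 0);

exit:
	if (rc != 0) {
		rte_tel_data_free(a);
		rte_tel_data_free(b);
	}
	return rc;
}
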
@@ -3221,6 +3324,9 @@ main(int32_t argc, char **argv)
                rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
                                unprotected_port_mask);
 
+       if (unprotected_port_mask && !nb_sa_in)
+               rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask without configured SA inbound\n");
+
        if (check_poll_mode_params(eh_conf) < 0)
                rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");
 
@@ -3269,11 +3375,22 @@ main(int32_t argc, char **argv)
                else
                        socket_id = 0;
 
-               /* mbuf_pool is initialised by the pool_init() function*/
-               if (socket_ctx[socket_id].mbuf_pool)
+               if (per_port_pool) {
+                       RTE_ETH_FOREACH_DEV(portid) {
+                               if ((enabled_port_mask & (1 << portid)) == 0)
+                                       continue;
+
+                               pool_init(&socket_ctx[socket_id], socket_id,
+                                         portid, nb_bufs_in_pool);
+                       }
+               } else {
+                       pool_init(&socket_ctx[socket_id], socket_id, 0,
+                                 nb_bufs_in_pool);
+               }
+
+               if (socket_ctx[socket_id].session_pool)
                        continue;
 
-               pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool);
                session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
                session_priv_pool_init(&socket_ctx[socket_id], socket_id,
                        sess_sz);
@@ -3307,13 +3424,14 @@ main(int32_t argc, char **argv)
                if ((enabled_port_mask & (1 << portid)) == 0)
                        continue;
 
-               /* Create flow before starting the device */
-               create_default_ipsec_flow(portid, req_rx_offloads[portid]);
-
                ret = rte_eth_dev_start(portid);
                if (ret < 0)
                        rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
                                        "err=%d, port=%d\n", ret, portid);
+
+               /* Create flow after starting the device */
+               create_default_ipsec_flow(portid, req_rx_offloads[portid]);
+
                /*
                 * If enabled, put device in promiscuous mode.
                 * This allows IO forwarding mode to forward packets
@@ -3345,7 +3463,7 @@ main(int32_t argc, char **argv)
        /* Replicate each context per socket */
        for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
                socket_id = rte_socket_id_by_idx(i);
-               if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
+               if ((socket_ctx[socket_id].session_pool != NULL) &&
                        (socket_ctx[socket_id].sa_in == NULL) &&
                        (socket_ctx[socket_id].sa_out == NULL)) {
                        sa_init(&socket_ctx[socket_id], socket_id);