diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 1e52564..c67be48 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -60,7 +60,7 @@ pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
 
        int port_idx = 0;
        /* launch workers */
-       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+       RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (!(opt->wlcores[lcore_id]))
                        continue;
 
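RTE_LCORE_FOREACH_WORKER() is the renamed form of the old RTE_LCORE_FOREACH_SLAVE() macro from the main/worker lcore API cleanup; it still iterates every enabled lcore except the main (formerly master) lcore. Below is a minimal sketch of the launch/join pattern this loop feeds into, with a placeholder worker function instead of the test's real one.

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>

/* Placeholder worker body; the test launches its real worker here. */
static int
worker_fn(void *arg)
{
        RTE_SET_USED(arg);
        return 0;
}

static int
launch_workers(void)
{
        unsigned int lcore_id;

        /* Every enabled lcore except the main lcore. */
        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (rte_eal_remote_launch(worker_fn, NULL, lcore_id) < 0)
                        return -1;
        }
        rte_eal_mp_wait_lcore();        /* wait for all workers to return */
        return 0;
}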
@@ -106,11 +106,17 @@ int
 pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 {
        unsigned int lcores;
-       /*
-        * N worker + 1 master
-        */
+
+       /* N worker + main */
        lcores = 2;
 
+       if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
+               evt_err("Invalid producer type '%s' valid producer '%s'",
+                       evt_prod_id_to_name(opt->prod_type),
+                       evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
+               return -1;
+       }
+
        if (!rte_eth_dev_count_avail()) {
                evt_err("test needs minimum 1 ethernet dev");
                return -1;
@@ -122,8 +128,8 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
        }
 
        /* Validate worker lcores */
-       if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
-               evt_err("worker lcores overlaps with master lcore");
+       if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+               evt_err("worker lcores overlaps with main lcore");
                return -1;
        }
        if (evt_has_disabled_lcore(opt->wlcores)) {
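rte_get_master_lcore() was likewise renamed to rte_get_main_lcore(). The helpers evt_lcores_has_overlap() and evt_has_disabled_lcore() are test-local; the sketch below is only an assumed restatement of the checks they perform on a worker-core mask (the mask layout is a guess, not the test's actual type).

#include <stdbool.h>
#include <rte_lcore.h>

/* Hypothetical validation of a worker-core mask: it must not include the
 * main lcore and must only use lcores enabled in the EAL core mask. */
static bool
worker_mask_is_valid(const bool wlcores[RTE_MAX_LCORE])
{
        unsigned int i;

        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (!wlcores[i])
                        continue;
                if (i == rte_get_main_lcore())
                        return false;   /* overlaps with the main lcore */
                if (!rte_lcore_is_enabled(i))
                        return false;   /* lcore not enabled in EAL */
        }
        return true;
}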
@@ -159,13 +165,13 @@ int
 pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 {
        uint16_t i;
+       int ret;
        uint8_t nb_queues = 1;
        struct test_pipeline *t = evt_test_priv(test);
        struct rte_eth_rxconf rx_conf;
        struct rte_eth_conf port_conf = {
                .rxmode = {
                        .mq_mode = ETH_MQ_RX_RSS,
-                       .max_rx_pkt_len = ETHER_MAX_LEN,
                },
                .rx_adv_conf = {
                        .rss_conf = {
@@ -175,23 +181,48 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
                },
        };
 
-       RTE_SET_USED(opt);
        if (!rte_eth_dev_count_avail()) {
                evt_err("No ethernet ports found.");
                return -ENODEV;
        }
 
+       if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
+               evt_err("max_pkt_sz can not be less than %d",
+                       RTE_ETHER_MIN_LEN);
+               return -EINVAL;
+       }
+
+       port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
+       if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
+               port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
        t->internal_port = 1;
        RTE_ETH_FOREACH_DEV(i) {
                struct rte_eth_dev_info dev_info;
                struct rte_eth_conf local_port_conf = port_conf;
                uint32_t caps = 0;
 
-               rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
+               ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
+               if (ret != 0) {
+                       evt_err("failed to get event tx adapter[%d] caps", i);
+                       return ret;
+               }
+
                if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
                        t->internal_port = 0;
 
-               rte_eth_dev_info_get(i, &dev_info);
+               ret = rte_eth_dev_info_get(i, &dev_info);
+               if (ret != 0) {
+                       evt_err("Error during getting device (port %u) info: %s\n",
+                               i, strerror(-ret));
+                       return ret;
+               }
+
+               /* Enable mbuf fast free if PMD has the capability. */
+               if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+                       local_port_conf.txmode.offloads |=
+                               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
                rx_conf = dev_info.default_rxconf;
                rx_conf.offloads = port_conf.rxmode.offloads;
 
@@ -226,7 +257,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
                        return -EINVAL;
                }
 
-               rte_eth_promiscuous_enable(i);
+               ret = rte_eth_promiscuous_enable(i);
+               if (ret != 0) {
+                       evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
+                               i, rte_strerror(-ret));
+                       return ret;
+               }
        }
 
        return 0;
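rte_eth_promiscuous_enable() returns void in older DPDK releases and a negative errno in newer ones, which is why the call is now checked. The same pattern in isolation, as a minimal sketch with a plain printf in place of evt_err():

#include <stdio.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

static int
enable_promiscuous(uint16_t port_id)
{
        int ret = rte_eth_promiscuous_enable(port_id);

        if (ret != 0)
                printf("port %u: promiscuous enable failed: %s\n",
                       port_id, rte_strerror(-ret));
        return ret;
}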
@@ -306,7 +342,7 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
                }
 
                if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
-                       uint32_t service_id;
+                       uint32_t service_id = -1U;
 
                        rte_event_eth_rx_adapter_service_id_get(prod,
                                        &service_id);
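When the Rx adapter lacks RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT it is driven by a service core, so its service must be mapped and started; evt_service_setup() wraps that in the test. A hedged sketch of the equivalent setup with the raw service API (adapter and lcore ids are placeholders, and the lcore is assumed to already be registered as a service lcore):

#include <errno.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_service.h>

static int
rx_adapter_service_setup(uint8_t adapter_id, uint32_t service_lcore)
{
        uint32_t service_id = -1U;
        int ret;

        ret = rte_event_eth_rx_adapter_service_id_get(adapter_id, &service_id);
        if (ret == -ESRCH)
                return 0;       /* adapter does not use a service */
        if (ret != 0)
                return ret;

        /* Run the adapter's service on the given service lcore. */
        ret = rte_service_map_lcore_set(service_id, service_lcore, 1);
        if (ret != 0)
                return ret;
        return rte_service_runstate_set(service_id, 1);
}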
@@ -358,14 +394,18 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
                }
 
                if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
-                       uint32_t service_id;
+                       uint32_t service_id = -1U;
 
-                       rte_event_eth_tx_adapter_service_id_get(consm,
-                                       &service_id);
+                       ret = rte_event_eth_tx_adapter_service_id_get(consm,
+                                                                  &service_id);
+                       if (ret != -ESRCH && ret != 0) {
+                               evt_err("Failed to get Tx adptr service ID");
+                               return ret;
+                       }
                        ret = evt_service_setup(service_id);
                        if (ret) {
                                evt_err("Failed to setup service core"
-                                               " for Tx adapter\n");
+                                               " for Tx adapter");
                                return ret;
                        }
                }
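The Tx adapter path adds the same kind of return-value checking, but tolerates -ESRCH from rte_event_eth_tx_adapter_service_id_get(), since that code indicates the adapter has no service component to configure. A small sketch of that distinction (the adapter id is a placeholder):

#include <errno.h>
#include <rte_event_eth_tx_adapter.h>

static int
tx_adapter_service_id(uint8_t adapter_id, uint32_t *service_id)
{
        int ret = rte_event_eth_tx_adapter_service_id_get(adapter_id, service_id);

        if (ret == -ESRCH)
                return 0;       /* adapter does not use a service function */
        return ret;
}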
@@ -404,12 +444,42 @@ int
 pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
 {
        struct test_pipeline *t = evt_test_priv(test);
+       int i, ret;
+
+       if (!opt->mbuf_sz)
+               opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;
+
+       if (!opt->max_pkt_sz)
+               opt->max_pkt_sz = RTE_ETHER_MAX_LEN;
+
+       RTE_ETH_FOREACH_DEV(i) {
+               struct rte_eth_dev_info dev_info;
+               uint16_t data_size = 0;
+
+               memset(&dev_info, 0, sizeof(dev_info));
+               ret = rte_eth_dev_info_get(i, &dev_info);
+               if (ret != 0) {
+                       evt_err("Error during getting device (port %u) info: %s\n",
+                               i, strerror(-ret));
+                       return ret;
+               }
+
+               if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
+                               dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
+                       data_size = opt->max_pkt_sz /
+                               dev_info.rx_desc_lim.nb_mtu_seg_max;
+                       data_size += RTE_PKTMBUF_HEADROOM;
+
+                       if (data_size  > opt->mbuf_sz)
+                               opt->mbuf_sz = data_size;
+               }
+       }
 
        t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
                        opt->pool_sz, /* number of elements*/
                        512, /* cache size*/
                        0,
-                       RTE_MBUF_DEFAULT_BUF_SIZE,
+                       opt->mbuf_sz,
                        opt->socket_id); /* flags */
 
        if (t->pool == NULL) {