diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index a5e2837..b36b22a 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -10,7 +10,8 @@ static inline int
 atq_nb_event_queues(struct evt_options *opt)
 {
        /* nb_queues = number of producers */
-       return evt_nr_active_lcores(opt->plcores);
+       return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
+               rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores);
 }
 
 static inline __attribute__((always_inline)) void
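For illustration, a rough sketch of the queue-sizing rule the hunk above introduces: with the ethernet Rx adapter as producer each ethernet port feeds its own all-types queue, so the count follows rte_eth_dev_count(); otherwise one queue is created per producer lcore. Names prefixed my_ are placeholders, not code from the patch.

	/* Illustrative only: queue count tracks the producer type. */
	uint8_t nb_queues;

	if (my_prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR)
		nb_queues = rte_eth_dev_count();        /* one queue per ethdev port */
	else
		nb_queues = my_nb_producer_lcores;      /* one queue per producer lcore */
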
@@ -159,21 +160,30 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
        uint8_t queue;
        uint8_t nb_queues;
        uint8_t nb_ports;
+       struct rte_event_dev_info dev_info;
 
        nb_ports = evt_nr_active_lcores(opt->wlcores);
        nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
                evt_nr_active_lcores(opt->plcores);
 
-       nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
-               rte_eth_dev_count() : atq_nb_event_queues(opt);
+       nb_queues = atq_nb_event_queues(opt);
+
+       memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
+       ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
+       if (ret) {
+               evt_err("failed to get eventdev info %d", opt->dev_id);
+               return ret;
+       }
 
        const struct rte_event_dev_config config = {
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
-                       .nb_events_limit  = 4096,
+                       .nb_events_limit  = dev_info.max_num_events,
                        .nb_event_queue_flows = opt->nb_flows,
-                       .nb_event_port_dequeue_depth = 128,
-                       .nb_event_port_enqueue_depth = 128,
+                       .nb_event_port_dequeue_depth =
+                               dev_info.max_event_port_dequeue_depth,
+                       .nb_event_port_enqueue_depth =
+                               dev_info.max_event_port_enqueue_depth,
        };
 
        ret = rte_event_dev_configure(opt->dev_id, &config);
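The hunk above replaces hard-coded limits with the maxima advertised by the PMD. A hedged, standalone sketch of that flow, assuming a valid event device id in my_dev_id; the remaining my_ names stand in for the test options and are not part of the patch.

	/* Illustrative only: size the eventdev config from the PMD's limits. */
	struct rte_event_dev_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	ret = rte_event_dev_info_get(my_dev_id, &info);
	if (ret)
		return ret;

	const struct rte_event_dev_config cfg = {
		.nb_event_queues = my_nb_queues,
		.nb_event_ports = my_nb_ports,
		.nb_events_limit = info.max_num_events,
		.nb_event_queue_flows = my_nb_flows,
		.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth,
		.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth,
	};

	ret = rte_event_dev_configure(my_dev_id, &cfg);
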
@@ -197,15 +207,29 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
                }
        }
 
-       ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
-                                       nb_queues);
+       if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+               opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+       /* port configuration */
+       const struct rte_event_port_conf p_conf = {
+                       .dequeue_depth = opt->wkr_deq_dep,
+                       .enqueue_depth = dev_info.max_event_port_enqueue_depth,
+                       .new_event_threshold = dev_info.max_num_events,
+       };
+
+       ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
+                       &p_conf);
        if (ret)
                return ret;
 
-       ret = evt_service_setup(opt->dev_id);
-       if (ret) {
-               evt_err("No service lcore found to run event dev.");
-               return ret;
+       if (!evt_has_distributed_sched(opt->dev_id)) {
+               uint32_t service_id;
+               rte_event_dev_service_id_get(opt->dev_id, &service_id);
+               ret = evt_service_setup(service_id);
+               if (ret) {
+                       evt_err("No service lcore found to run event dev.");
+                       return ret;
+               }
        }
 
        ret = rte_event_dev_start(opt->dev_id);
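The service-core block above only runs when the device cannot schedule internally. A hedged sketch of that check, assuming evt_has_distributed_sched() wraps the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability bit (the wrapper itself is not shown in this diff); my_dev_id is a placeholder.

	/* Illustrative only: software schedulers expose a service that must
	 * be mapped to a service lcore; distributed-sched PMDs do not. */
	struct rte_event_dev_info info;
	uint32_t service_id;

	rte_event_dev_info_get(my_dev_id, &info);
	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
		if (rte_event_dev_service_id_get(my_dev_id, &service_id) == 0) {
			/* hand service_id to a service lcore, e.g. through the
			 * rte_service_* API or a helper such as evt_service_setup() */
		}
	}
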
@@ -258,6 +282,7 @@ static const struct evt_test_ops perf_atq =  {
        .launch_lcores      = perf_atq_launch_lcores,
        .eventdev_destroy   = perf_eventdev_destroy,
        .mempool_destroy    = perf_mempool_destroy,
+       .ethdev_destroy     = perf_ethdev_destroy,
        .test_result        = perf_test_result,
        .test_destroy       = perf_test_destroy,
 };