common/mlx5: fix user mode register access command
[dpdk.git] / app / test-eventdev / evt_common.h
index 463e062..f9d7378 100644 (file)
 #define EVT_MAX_PORTS            256
 #define EVT_MAX_QUEUES           256
 
+enum evt_prod_type {
+       EVT_PROD_TYPE_NONE,
+       EVT_PROD_TYPE_SYNT,          /* Producer type Synthetic i.e. CPU. */
+       EVT_PROD_TYPE_ETH_RX_ADPTR,  /* Producer type Eth Rx Adapter. */
+       EVT_PROD_TYPE_EVENT_TIMER_ADPTR,  /* Producer type Timer Adapter. */
+       EVT_PROD_TYPE_MAX,
+};
+
+struct evt_options {
+#define EVT_TEST_NAME_MAX_LEN     32
+       char test_name[EVT_TEST_NAME_MAX_LEN];
+       bool plcores[RTE_MAX_LCORE];
+       bool wlcores[RTE_MAX_LCORE];
+       int pool_sz;
+       int socket_id;
+       int nb_stages;
+       int verbose_level;
+       uint8_t dev_id;
+       uint8_t timdev_cnt;
+       uint8_t nb_timer_adptrs;
+       uint8_t timdev_use_burst;
+       uint8_t sched_type_list[EVT_MAX_STAGES];
+       uint16_t mbuf_sz;
+       uint16_t wkr_deq_dep;
+       uint32_t nb_flows;
+       uint32_t tx_first;
+       uint32_t max_pkt_sz;
+       uint32_t deq_tmo_nsec;
+       uint32_t q_priority:1;
+       uint32_t fwd_latency:1;
+       uint64_t nb_pkts;
+       uint64_t nb_timers;
+       uint64_t expiry_nsec;
+       uint64_t max_tmo_nsec;
+       uint64_t timer_tick_nsec;
+       uint64_t optm_timer_tick_nsec;
+       enum evt_prod_type prod_type;
+};
+
 static inline bool
 evt_has_distributed_sched(uint8_t dev_id)
 {
@@ -66,42 +105,79 @@ evt_has_all_types_queue(uint8_t dev_id)
 }
 
 static inline int
-evt_service_setup(uint8_t dev_id)
+evt_service_setup(uint32_t service_id)
 {
-       uint32_t service_id;
        int32_t core_cnt;
        unsigned int lcore = 0;
        uint32_t core_array[RTE_MAX_LCORE];
        uint8_t cnt;
        uint8_t min_cnt = UINT8_MAX;
 
-       if (evt_has_distributed_sched(dev_id))
-               return 0;
-
        if (!rte_service_lcore_count())
                return -ENOENT;
 
-       if (!rte_event_dev_service_id_get(dev_id, &service_id)) {
-               core_cnt = rte_service_lcore_list(core_array,
-                               RTE_MAX_LCORE);
-               if (core_cnt < 0)
-                       return -ENOENT;
-               /* Get the core which has least number of services running. */
-               while (core_cnt--) {
-                       /* Reset default mapping */
-                       rte_service_map_lcore_set(service_id,
-                                       core_array[core_cnt], 0);
-                       cnt = rte_service_lcore_count_services(
-                                       core_array[core_cnt]);
-                       if (cnt < min_cnt) {
-                               lcore = core_array[core_cnt];
-                               min_cnt = cnt;
-                       }
+       core_cnt = rte_service_lcore_list(core_array,
+                       RTE_MAX_LCORE);
+       if (core_cnt < 0)
+               return -ENOENT;
+       /* Get the core which has least number of services running. */
+       while (core_cnt--) {
+               /* Reset default mapping */
+               rte_service_map_lcore_set(service_id,
+                               core_array[core_cnt], 0);
+               cnt = rte_service_lcore_count_services(
+                               core_array[core_cnt]);
+               if (cnt < min_cnt) {
+                       lcore = core_array[core_cnt];
+                       min_cnt = cnt;
                }
-               if (rte_service_map_lcore_set(service_id, lcore, 1))
-                       return -ENOENT;
        }
+       if (rte_service_map_lcore_set(service_id, lcore, 1))
+               return -ENOENT;
+
        return 0;
 }
 
+static inline int
+evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
+               uint8_t nb_ports)
+{
+       struct rte_event_dev_info info;
+       int ret;
+
+       memset(&info, 0, sizeof(struct rte_event_dev_info));
+       ret = rte_event_dev_info_get(opt->dev_id, &info);
+       if (ret) {
+               evt_err("failed to get eventdev info %d", opt->dev_id);
+               return ret;
+       }
+
+       if (opt->deq_tmo_nsec) {
+               if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+                       opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+                       evt_info("dequeue_timeout_ns too low, using %d",
+                                       opt->deq_tmo_nsec);
+               }
+               if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+                       opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+                       evt_info("dequeue_timeout_ns too high, using %d",
+                                       opt->deq_tmo_nsec);
+               }
+       }
+
+       const struct rte_event_dev_config config = {
+                       .dequeue_timeout_ns = opt->deq_tmo_nsec,
+                       .nb_event_queues = nb_queues,
+                       .nb_event_ports = nb_ports,
+                       .nb_events_limit  = info.max_num_events,
+                       .nb_event_queue_flows = opt->nb_flows,
+                       .nb_event_port_dequeue_depth =
+                               info.max_event_port_dequeue_depth,
+                       .nb_event_port_enqueue_depth =
+                               info.max_event_port_enqueue_depth,
+       };
+
+       return rte_event_dev_configure(opt->dev_id, &config);
+}
+
 #endif /*  _EVT_COMMON_*/