+/* Set up a service core for the given service: pick the service lcore
+ * that currently runs the fewest services and map @service_id to it.
+ * Returns 0 on success, a negative errno value on failure.
+ */
+static inline int
+evt_service_setup(uint32_t service_id)
+{
+	int32_t core_cnt;
+	unsigned int lcore = 0;
+	uint32_t core_array[RTE_MAX_LCORE];
+	uint8_t cnt;
+	uint8_t min_cnt = UINT8_MAX;
+
+	if (!rte_service_lcore_count())
+		return -ENOENT;
+
+	core_cnt = rte_service_lcore_list(core_array, RTE_MAX_LCORE);
+	if (core_cnt < 0)
+		return -ENOENT;
+	/* Pick the service lcore running the least number of services. */
+	while (core_cnt--) {
+		/* Reset the default mapping so the service runs only on
+		 * the lcore chosen below.
+		 */
+		if (rte_service_map_lcore_set(service_id,
+					core_array[core_cnt], 0))
+			return -EIO;
+		cnt = rte_service_lcore_count_services(
+				core_array[core_cnt]);
+		if (cnt < min_cnt) {
+			lcore = core_array[core_cnt];
+			min_cnt = cnt;
+		}
+	}
+	if (rte_service_map_lcore_set(service_id, lcore, 1))
+		return -ENOENT;
+
+	return 0;
+}
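+
+/* Usage sketch, assuming the eventdev implements its scheduler as a
+ * service (the lookup below is illustrative, not part of this patch):
+ *
+ *	uint32_t service_id;
+ *
+ *	if (rte_event_dev_service_id_get(opt->dev_id, &service_id) == 0)
+ *		ret = evt_service_setup(service_id);
+ */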
+
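+/* Configure the eventdev @opt->dev_id with @nb_queues queues and
+ * @nb_ports ports, clamping the dequeue timeout to the device limits
+ * and sizing the remaining resources from rte_event_dev_info.
+ */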
+static inline int
+evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
+		uint8_t nb_ports)
+{
+	struct rte_event_dev_info info;
+	int ret;
+
+	memset(&info, 0, sizeof(struct rte_event_dev_info));
+	ret = rte_event_dev_info_get(opt->dev_id, &info);
+	if (ret) {
+		evt_err("failed to get eventdev info for dev_id %d",
+				opt->dev_id);
+		return ret;
+	}
+
+	/* Clamp the requested dequeue timeout to the device limits. */
+	if (opt->deq_tmo_nsec) {
+		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too low, using %u",
+					opt->deq_tmo_nsec);
+		}
+		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+			evt_info("dequeue_timeout_ns too high, using %u",
+					opt->deq_tmo_nsec);
+		}
+	}
+
+	/* Take the limits the test does not tune from the device caps. */
+	const struct rte_event_dev_config config = {
+		.dequeue_timeout_ns = opt->deq_tmo_nsec,
+		.nb_event_queues = nb_queues,
+		.nb_event_ports = nb_ports,
+		.nb_events_limit = info.max_num_events,
+		.nb_event_queue_flows = opt->nb_flows,
+		.nb_event_port_dequeue_depth =
+			info.max_event_port_dequeue_depth,
+		.nb_event_port_enqueue_depth =
+			info.max_event_port_enqueue_depth,
+	};
+
+	return rte_event_dev_configure(opt->dev_id, &config);
+}
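+
+/* Usage sketch: a test's eventdev setup hook would derive the queue and
+ * port counts from its topology (names below are illustrative) and call:
+ *
+ *	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
+ *	if (ret)
+ *		evt_err("failed to configure eventdev %d", opt->dev_id);
+ */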
+