/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _EVT_COMMON_
#define _EVT_COMMON_

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_service.h>
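
/* ANSI escape sequences used to colorize the log macros below. */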
#define CLNRM "\x1b[0m"
#define CLRED "\x1b[31m"
#define CLGRN "\x1b[32m"
#define CLYEL "\x1b[33m"
#define evt_err(fmt, args...) \
	fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)

#define evt_info(fmt, args...) \
	fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)

#define EVT_STR_FMT 20

#define evt_dump(str, fmt, val...) \
	printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)

#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)

#define evt_dump_end printf("\b}\n")
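
/*
 * Usage sketch (illustrative only; "opt", "nb_stages" and "stages" are
 * assumed names): evt_dump() prints one aligned "key : value" line, while
 * evt_dump_begin()/evt_dump_end bracket a comma-separated list:
 *
 *	evt_dump("dev_id", "%d", opt->dev_id);
 *	evt_dump_begin("stages");
 *	for (i = 0; i < nb_stages; i++)
 *		printf("%d, ", stages[i]);
 *	evt_dump_end;
 */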
#define EVT_MAX_STAGES 64
#define EVT_MAX_PORTS 256
#define EVT_MAX_QUEUES 256
enum evt_prod_type {
	EVT_PROD_TYPE_NONE,
	EVT_PROD_TYPE_SYNT,              /* Producer type Synthetic i.e. CPU. */
	EVT_PROD_TYPE_ETH_RX_ADPTR,      /* Producer type Eth Rx Adapter. */
	EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */
	EVT_PROD_TYPE_MAX,
};
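
/* Test options shared by all test cases; filled in from the command line. */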
struct evt_options {
#define EVT_TEST_NAME_MAX_LEN 32
	char test_name[EVT_TEST_NAME_MAX_LEN];
	bool plcores[RTE_MAX_LCORE];
	bool wlcores[RTE_MAX_LCORE];
	uint8_t dev_id;
	uint8_t nb_timer_adptrs;
	uint8_t timdev_use_burst;
	uint8_t per_port_pool;
	uint8_t sched_type_list[EVT_MAX_STAGES];
	uint32_t nb_flows;
	uint32_t prod_enq_burst_sz;
	uint32_t deq_tmo_nsec;
	uint32_t q_priority:1;
	uint32_t fwd_latency:1;
	uint32_t ena_vector:1;
	uint64_t max_tmo_nsec;
	uint64_t vector_tmo_nsec;
	uint64_t timer_tick_nsec;
	uint64_t optm_timer_tick_nsec;
	enum evt_prod_type prod_type;
};
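
/*
 * Capability helpers: each queries the device via rte_event_dev_info_get()
 * and tests one RTE_EVENT_DEV_CAP_* flag.
 */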
static inline bool
evt_has_distributed_sched(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
			true : false;
}
static inline bool
evt_has_burst_mode(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
			true : false;
}
static inline bool
evt_has_all_types_queue(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
			true : false;
}
static inline bool
evt_has_flow_id(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
			true : false;
}
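
/*
 * Usage sketch (illustrative): tests typically select a worker loop based
 * on a capability, e.g.:
 *
 *	const bool burst = evt_has_burst_mode(opt->dev_id);
 *
 *	if (burst)
 *		... dequeue/enqueue events in bursts ...
 *	else
 *		... process one event at a time ...
 */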
static inline int
evt_service_setup(uint32_t service_id)
{
	int32_t core_cnt;
	unsigned int lcore = 0;
	uint32_t core_array[RTE_MAX_LCORE];
	uint8_t cnt;
	uint8_t min_cnt = UINT8_MAX;

	if (!rte_service_lcore_count())
		return -ENOENT;

	core_cnt = rte_service_lcore_list(core_array, RTE_MAX_LCORE);
	if (core_cnt < 0)
		return -ENOENT;
	/* Get the core which has the least number of services running. */
	while (core_cnt--) {
		/* Reset default mapping */
		rte_service_map_lcore_set(service_id,
				core_array[core_cnt], 0);
		cnt = rte_service_lcore_count_services(
				core_array[core_cnt]);
		if (cnt < min_cnt) {
			lcore = core_array[core_cnt];
			min_cnt = cnt;
		}
	}
	if (rte_service_map_lcore_set(service_id, lcore, 1))
		return -ENOENT;

	return 0;
}
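
/*
 * Usage sketch (illustrative): for event devices whose scheduler runs as a
 * service, fetch the service id and map it to the least-loaded service core:
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(opt->dev_id, &service_id) == 0)
 *		ret = evt_service_setup(service_id);
 */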
static inline int
evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
		uint8_t nb_ports)
{
	struct rte_event_dev_info info;
	int ret;

	memset(&info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	/* Clamp the dequeue timeout to the device's supported range. */
	if (opt->deq_tmo_nsec) {
		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too low, using %d",
					opt->deq_tmo_nsec);
		}
		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too high, using %d",
					opt->deq_tmo_nsec);
		}
	}

	const struct rte_event_dev_config config = {
			.dequeue_timeout_ns = opt->deq_tmo_nsec,
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_single_link_event_port_queues = 0,
			.nb_events_limit = info.max_num_events,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth =
				info.max_event_port_dequeue_depth,
			.nb_event_port_enqueue_depth =
				info.max_event_port_enqueue_depth,
	};

	return rte_event_dev_configure(opt->dev_id, &config);
}
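
/*
 * Usage sketch (illustrative): a test setup path would configure the device
 * first, then set up its queues and ports:
 *
 *	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
 *	if (ret) {
 *		evt_err("failed to configure eventdev %d", opt->dev_id);
 *		return ret;
 *	}
 */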
#endif /* _EVT_COMMON_ */