#define EVT_MAX_PORTS 256
#define EVT_MAX_QUEUES 256
+enum evt_prod_type {
+ EVT_PROD_TYPE_NONE,
+ EVT_PROD_TYPE_SYNT, /* Producer type Synthetic i.e. CPU. */
+ EVT_PROD_TYPE_ETH_RX_ADPTR, /* Producer type Eth Rx Adapter. */
+ EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */
+ EVT_PROD_TYPE_MAX,
+};
+
+struct evt_options {
+#define EVT_TEST_NAME_MAX_LEN 32
+ char test_name[EVT_TEST_NAME_MAX_LEN];
+ bool plcores[RTE_MAX_LCORE];
+ bool wlcores[RTE_MAX_LCORE];
+ uint8_t sched_type_list[EVT_MAX_STAGES];
+ uint32_t nb_flows;
+ int socket_id;
+ int pool_sz;
+ int nb_stages;
+ int verbose_level;
+ uint64_t nb_pkts;
+ uint8_t nb_timer_adptrs;
+ uint64_t nb_timers;
+ uint64_t timer_tick_nsec;
+ uint64_t optm_timer_tick_nsec;
+ uint64_t max_tmo_nsec;
+ uint64_t expiry_nsec;
+ uint16_t wkr_deq_dep;
+ uint8_t dev_id;
+ uint32_t fwd_latency:1;
+ uint32_t q_priority:1;
+ uint32_t deq_tmo_nsec;
+ enum evt_prod_type prod_type;
+ uint8_t timdev_use_burst;
+ uint8_t timdev_cnt;
+};
+
static inline bool
evt_has_distributed_sched(uint8_t dev_id)
{
 struct rte_event_dev_info dev_info;

 rte_event_dev_info_get(dev_id, &dev_info);
 return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
 true : false;
}
+static inline int
+evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
+ uint8_t nb_ports)
+{
+ struct rte_event_dev_info info;
+ int ret;
+
+ memset(&info, 0, sizeof(struct rte_event_dev_info));
+ ret = rte_event_dev_info_get(opt->dev_id, &info);
+ if (ret) {
+ evt_err("failed to get eventdev info %d", opt->dev_id);
+ return ret;
+ }
+
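+ /* If a dequeue timeout was requested, clamp it into the device's
+  * supported range; zero means the device default is used.
+  */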
+ if (opt->deq_tmo_nsec) {
+ if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too low, using %d",
+ opt->deq_tmo_nsec);
+ }
+ if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too high, using %d",
+ opt->deq_tmo_nsec);
+ }
+ }
+
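+ /* Default the event limit and the port dequeue/enqueue depths to the
+  * maxima reported by rte_event_dev_info_get().
+  */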
+ const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = info.max_num_events,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth =
+ info.max_event_port_dequeue_depth,
+ .nb_event_port_enqueue_depth =
+ info.max_event_port_enqueue_depth,
+ };
+
+ return rte_event_dev_configure(opt->dev_id, &config);
+}
+
#endif /* _EVT_COMMON_ */
return 0;
}
+static int
+evt_parse_deq_tmo_nsec(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint32(&(opt->deq_tmo_nsec), arg);
+
+ return ret;
+}
+
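
For reference, `parser_read_uint32()` is one of the app's parser helpers
(parser.h); it writes the parsed value only on success and returns a negative
value on bad input. A rough plain-C equivalent, shown for illustration rather
than as the actual helper:

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative stand-in for parser_read_uint32(): parse a decimal,
     * octal or hex string into *value, returning -EINVAL on malformed
     * input or overflow. */
    static int
    parse_uint32(uint32_t *value, const char *p)
    {
        char *next;
        unsigned long val;

        errno = 0;
        val = strtoul(p, &next, 0); /* base 0 accepts 10, 010 and 0x10 */
        if (errno != 0 || next == p || *next != '\0' || val > UINT32_MAX)
            return -EINVAL;

        *value = (uint32_t)val;
        return 0;
    }
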
static int
evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused)
{
"\t--worker_deq_depth : dequeue depth of the worker\n"
"\t--fwd_latency : perform fwd_latency measurement\n"
"\t--queue_priority : enable queue priority\n"
+ "\t--deq_tmo_nsec : global dequeue timeout\n"
"\t--prod_type_ethdev : use ethernet device as producer.\n"
"\t--prod_type_timerdev : use event timer device as producer.\n"
"\t expity_nsec would be the timeout\n"
{ EVT_SCHED_TYPE_LIST, 1, 0, 0 },
{ EVT_FWD_LATENCY, 0, 0, 0 },
{ EVT_QUEUE_PRIORITY, 0, 0, 0 },
+ { EVT_DEQ_TMO_NSEC, 1, 0, 0 },
{ EVT_PROD_ETHDEV, 0, 0, 0 },
{ EVT_PROD_TIMERDEV, 0, 0, 0 },
{ EVT_PROD_TIMERDEV_BURST, 0, 0, 0 },
{ EVT_SCHED_TYPE_LIST, evt_parse_sched_type_list},
{ EVT_FWD_LATENCY, evt_parse_fwd_latency},
{ EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
+ { EVT_DEQ_TMO_NSEC, evt_parse_deq_tmo_nsec},
{ EVT_PROD_ETHDEV, evt_parse_eth_prod_type},
{ EVT_PROD_TIMERDEV, evt_parse_timer_prod_type},
{ EVT_PROD_TIMERDEV_BURST, evt_parse_timer_prod_type_burst},
#define EVT_SCHED_TYPE_LIST ("stlist")
#define EVT_FWD_LATENCY ("fwd_latency")
#define EVT_QUEUE_PRIORITY ("queue_priority")
+#define EVT_DEQ_TMO_NSEC ("deq_tmo_nsec")
#define EVT_PROD_ETHDEV ("prod_type_ethdev")
#define EVT_PROD_TIMERDEV ("prod_type_timerdev")
#define EVT_PROD_TIMERDEV_BURST ("prod_type_timerdev_burst")
#define EVT_EXPIRY_NSEC ("expiry_nsec")
#define EVT_HELP ("help")
-enum evt_prod_type {
- EVT_PROD_TYPE_NONE,
- EVT_PROD_TYPE_SYNT, /* Producer type Synthetic i.e. CPU. */
- EVT_PROD_TYPE_ETH_RX_ADPTR, /* Producer type Eth Rx Adapter. */
- EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */
- EVT_PROD_TYPE_MAX,
-};
-
-struct evt_options {
-#define EVT_TEST_NAME_MAX_LEN 32
- char test_name[EVT_TEST_NAME_MAX_LEN];
- bool plcores[RTE_MAX_LCORE];
- bool wlcores[RTE_MAX_LCORE];
- uint8_t sched_type_list[EVT_MAX_STAGES];
- uint32_t nb_flows;
- int socket_id;
- int pool_sz;
- int nb_stages;
- int verbose_level;
- uint64_t nb_pkts;
- uint8_t nb_timer_adptrs;
- uint64_t nb_timers;
- uint64_t timer_tick_nsec;
- uint64_t optm_timer_tick_nsec;
- uint64_t max_tmo_nsec;
- uint64_t expiry_nsec;
- uint16_t wkr_deq_dep;
- uint8_t dev_id;
- uint32_t fwd_latency:1;
- uint32_t q_priority:1;
- enum evt_prod_type prod_type;
- uint8_t timdev_use_burst;
- uint8_t timdev_cnt;
-};
-
void evt_options_default(struct evt_options *opt);
int evt_options_parse(struct evt_options *opt, int argc, char **argv);
void evt_options_dump(struct evt_options *opt);
/* number of active worker cores + 1 producer */
const uint8_t nb_ports = nb_workers + 1;
- const struct rte_event_dev_config config = {
- .nb_event_queues = NB_QUEUES,/* one all types queue */
- .nb_event_ports = nb_ports,
- .nb_events_limit = 4096,
- .nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth = 128,
- .nb_event_port_enqueue_depth = 128,
- };
-
- ret = rte_event_dev_configure(opt->dev_id, &config);
+ ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
if (ret) {
evt_err("failed to configure eventdev %d", opt->dev_id);
return ret;
/* number of active worker cores + 1 producer */
const uint8_t nb_ports = nb_workers + 1;
- const struct rte_event_dev_config config = {
- .nb_event_queues = NB_QUEUES,/* q0 ordered, q1 atomic */
- .nb_event_ports = nb_ports,
- .nb_events_limit = 4096,
- .nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth = 128,
- .nb_event_port_enqueue_depth = 128,
- };
-
- ret = rte_event_dev_configure(opt->dev_id, &config);
+ ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
if (ret) {
evt_err("failed to configure eventdev %d", opt->dev_id);
return ret;
return ret;
}
- const struct rte_event_dev_config config = {
- .nb_event_queues = nb_queues,
- .nb_event_ports = nb_ports,
- .nb_events_limit = dev_info.max_num_events,
- .nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth =
- dev_info.max_event_port_dequeue_depth,
- .nb_event_port_enqueue_depth =
- dev_info.max_event_port_enqueue_depth,
- };
-
- ret = rte_event_dev_configure(opt->dev_id, &config);
+ ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
if (ret) {
evt_err("failed to configure eventdev %d", opt->dev_id);
return ret;
nb_ports = evt_nr_active_lcores(opt->wlcores);
nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
- opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
evt_nr_active_lcores(opt->plcores);
nb_queues = perf_queue_nb_event_queues(opt);
return ret;
}
- const struct rte_event_dev_config config = {
- .nb_event_queues = nb_queues,
- .nb_event_ports = nb_ports,
- .nb_events_limit = dev_info.max_num_events,
- .nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth =
- dev_info.max_event_port_dequeue_depth,
- .nb_event_port_enqueue_depth =
- dev_info.max_event_port_enqueue_depth,
- };
-
- ret = rte_event_dev_configure(opt->dev_id, &config);
+ ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
if (ret) {
evt_err("failed to configure eventdev %d", opt->dev_id);
return ret;
rte_event_dev_info_get(opt->dev_id, &info);
- const struct rte_event_dev_config config = {
- .nb_event_queues = nb_queues,
- .nb_event_ports = nb_ports,
- .nb_events_limit = info.max_num_events,
- .nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth =
- info.max_event_port_dequeue_depth,
- .nb_event_port_enqueue_depth =
- info.max_event_port_enqueue_depth,
- };
- ret = rte_event_dev_configure(opt->dev_id, &config);
+ ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
if (ret) {
evt_err("failed to configure eventdev %d", opt->dev_id);
return ret;
memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);
rte_event_dev_info_get(opt->dev_id, &info);
- const struct rte_event_dev_config config = {
- .nb_event_queues = nb_queues,
- .nb_event_ports = nb_ports,
- .nb_events_limit = info.max_num_events,
- .nb_event_queue_flows = opt->nb_flows,
- .nb_event_port_dequeue_depth =
- info.max_event_port_dequeue_depth,
- .nb_event_port_enqueue_depth =
- info.max_event_port_enqueue_depth,
- };
- ret = rte_event_dev_configure(opt->dev_id, &config);
+ ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
if (ret) {
evt_err("failed to configure eventdev %d", opt->dev_id);
return ret;
Use event timer adapter as producer.
- * ``--prod_type_timerdev_burst``
+* ``--prod_type_timerdev_burst``

- Use burst mode event timer adapter as producer.
+ Use burst mode event timer adapter as producer.

- * ``--timer_tick_nsec``
+* ``--timer_tick_nsec``

- Used to dictate number of nano seconds between bucket traversal of the
- event timer adapter. Refer `rte_event_timer_adapter_conf`.
+ Dictates the number of nanoseconds between bucket traversals of the
+ event timer adapter. Refer to ``rte_event_timer_adapter_conf``.

- * ``--max_tmo_nsec``
+* ``--max_tmo_nsec``

- Used to configure event timer adapter max arm timeout in nano seconds.
+ Configures the event timer adapter's maximum arm timeout in nanoseconds.

- * ``--expiry_nsec``
+* ``--expiry_nsec``

- Dictate the number of nano seconds after which the event timer expires.
+ Dictates the number of nanoseconds after which the event timer expires.
- * ``--nb_timers``
+* ``--nb_timers``

- Number of event timers each producer core will generate.
+ Number of event timers each producer core will generate.

- * ``--nb_timer_adptrs``
+* ``--nb_timer_adptrs``

- Number of event timer adapters to be used. Each adapter is used in
- round robin manner by the producer cores.
+ Number of event timer adapters to be used. Each adapter is used in a
+ round-robin manner by the producer cores.
+
+* ``--deq_tmo_nsec``
+
+ Global dequeue timeout for all event ports. If the provided dequeue
+ timeout is outside the range supported by the event device, it is
+ adjusted to the nearest supported value.

Eventdev Tests
--------------
--nb_flows
--nb_pkts
--worker_deq_depth
+ --deq_tmo_nsec
Example
^^^^^^^
--nb_flows
--nb_pkts
--worker_deq_depth
+ --deq_tmo_nsec
Example
^^^^^^^
--expiry_nsec
--nb_timers
--nb_timer_adptrs
+ --deq_tmo_nsec
Example
^^^^^^^
--expiry_nsec
--nb_timers
--nb_timer_adptrs
+ --deq_tmo_nsec
Example
^^^^^^^
--stlist
--worker_deq_depth
--prod_type_ethdev
+ --deq_tmo_nsec
.. Note::
--stlist
--worker_deq_depth
--prod_type_ethdev
+ --deq_tmo_nsec
.. Note::
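
For illustration, assuming the software event device (``event_sw0``) and
placeholder core assignments (none of these values are mandated by the patch),
a ``perf_queue`` run with a 10 us global dequeue timeout could be:

.. code-block:: console

   sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
                --test=perf_queue --plcores=1 --wlcores=2,3 \
                --stlist=a --nb_pkts=100000 --deq_tmo_nsec=10000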