/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#ifndef _TEST_PIPELINE_COMMON_
#define _TEST_PIPELINE_COMMON_
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_spinlock.h>
#include <rte_service.h>
#include <rte_service_component.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

struct worker_data {
	uint64_t processed_pkts;
	uint8_t dev_id;
	uint8_t port_id;
	struct test_pipeline *t;
} __rte_cache_aligned;

struct test_pipeline {
	/* Don't change the offset of "done". The signal handler uses this
	 * memory to terminate all lcores' work.
	 */
	int done;
	uint8_t internal_port;
	uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
	enum evt_test_result result;
	uint64_t outstand_pkts;
	struct rte_mempool *pool;
	struct worker_data worker[EVT_MAX_PORTS];
	struct evt_options *opt;
	uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
} __rte_cache_aligned;
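
/*
 * Usage sketch (illustrative only, not part of this header): worker loops
 * poll the "done" flag above and return once the test's signal handler sets
 * it, e.g.
 *
 *	while (!t->done) {
 *		... dequeue, process and forward/transmit events ...
 *	}
 */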

#define BURST_SIZE 16

#define PIPELINE_WORKER_SINGLE_STAGE_INIT \
	struct worker_data *w = arg;      \
	struct test_pipeline *t = w->t;   \
	const uint8_t dev = w->dev_id;    \
	const uint8_t port = w->port_id;  \
	struct rte_event ev __rte_cache_aligned
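
/*
 * Usage sketch (illustrative only): a single-stage worker built on the macro
 * above would typically dequeue one event at a time, mark it for forwarding
 * and enqueue it back to the device; the scheduling type used here is just an
 * example.
 *
 *	PIPELINE_WORKER_SINGLE_STAGE_INIT;
 *
 *	while (!t->done) {
 *		if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0))
 *			continue;
 *		pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
 *		pipeline_event_enqueue(dev, port, &ev);
 *		w->processed_pkts++;
 *	}
 */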

#define PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT \
	struct worker_data *w = arg;            \
	struct test_pipeline *t = w->t;         \
	const uint8_t dev = w->dev_id;          \
	const uint8_t port = w->port_id;        \
	struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned

#define PIPELINE_WORKER_MULTI_STAGE_INIT                         \
	struct worker_data *w = arg;                             \
	struct test_pipeline *t = w->t;                          \
	const uint8_t dev = w->dev_id;                           \
	const uint8_t port = w->port_id;                         \
	const uint8_t last_queue = t->opt->nb_stages - 1;        \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	const uint8_t nb_stages = t->opt->nb_stages + 1;         \
	struct rte_event ev __rte_cache_aligned

#define PIPELINE_WORKER_MULTI_STAGE_BURST_INIT                   \
	struct worker_data *w = arg;                             \
	struct test_pipeline *t = w->t;                          \
	const uint8_t dev = w->dev_id;                           \
	const uint8_t port = w->port_id;                         \
	const uint8_t last_queue = t->opt->nb_stages - 1;        \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	const uint8_t nb_stages = t->opt->nb_stages + 1;         \
	struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned
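
/*
 * Usage sketch (illustrative only): a multi-stage worker uses last_queue and
 * sched_type_list from the macros above to keep forwarding an event to its
 * next stage until the final stage hands it to the Tx adapter, roughly:
 *
 *	PIPELINE_WORKER_MULTI_STAGE_INIT;
 *
 *	while (!t->done) {
 *		if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0))
 *			continue;
 *		if (ev.queue_id == last_queue) {
 *			pipeline_event_tx(dev, port, &ev);
 *			w->processed_pkts++;
 *		} else {
 *			ev.queue_id++;
 *			pipeline_fwd_event(&ev, sched_type_list[ev.queue_id]);
 *			pipeline_event_enqueue(dev, port, &ev);
 *		}
 *	}
 */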

static __rte_always_inline void
pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
{
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = sched;
}

static __rte_always_inline void
pipeline_event_tx(const uint8_t dev, const uint8_t port,
		struct rte_event * const ev)
{
	rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
	while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1))
		rte_pause();
}

static __rte_always_inline void
pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
		struct rte_event *ev, const uint16_t nb_rx)
{
	uint16_t enq;

	enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx);
	while (enq < nb_rx) {
		enq += rte_event_eth_tx_adapter_enqueue(dev, port,
				ev + enq, nb_rx - enq);
	}
}
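
/*
 * Usage sketch (illustrative only): the burst helpers pair with the
 * *_BURST_INIT macros above. A final-stage worker would dequeue up to
 * BURST_SIZE events, set the Tx queue on each mbuf and hand the whole batch
 * to the Tx adapter, e.g.
 *
 *	PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
 *	uint16_t i, nb_rx;
 *
 *	while (!t->done) {
 *		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);
 *		if (!nb_rx)
 *			continue;
 *		for (i = 0; i < nb_rx; i++)
 *			rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
 *		pipeline_event_tx_burst(dev, port, ev, nb_rx);
 *		w->processed_pkts += nb_rx;
 *	}
 */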

static __rte_always_inline void
pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
		struct rte_event *ev)
{
	while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
		rte_pause();
}

static __rte_always_inline void
pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
		struct rte_event *ev, const uint16_t nb_rx)
{
	uint16_t enq;

	enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
	while (enq < nb_rx) {
		enq += rte_event_enqueue_burst(dev, port,
				ev + enq, nb_rx - enq);
	}
}

static inline int
pipeline_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores);
}

int pipeline_test_result(struct evt_test *test, struct evt_options *opt);
int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues);
int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf);
int pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf prod_conf);
int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf);
int pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
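
/*
 * Usage sketch (illustrative only): a concrete pipeline test (for example a
 * queue- or all-types-queue-based variant) implements its worker with the
 * macros above and starts it on every worker lcore through
 * pipeline_launch_lcores(); the wrapper names below are hypothetical.
 *
 *	static int
 *	pipeline_xyz_worker(void *arg)
 *	{
 *		PIPELINE_WORKER_SINGLE_STAGE_INIT;
 *		...main loop as sketched earlier...
 *	}
 *
 *	static int
 *	pipeline_xyz_launch_lcores(struct evt_test *test,
 *			struct evt_options *opt)
 *	{
 *		return pipeline_launch_lcores(test, opt, pipeline_xyz_worker);
 *	}
 */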

#endif /* _TEST_PIPELINE_COMMON_ */