4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #ifndef _OPDL_EVDEV_H_
34 #define _OPDL_EVDEV_H_
36 #include <rte_eventdev.h>
37 #include <rte_eventdev_pmd_vdev.h>
38 #include <rte_atomic.h>
39 #include "opdl_ring.h"
/* Compile-time capacity limits for the OPDL PMD. */
#define OPDL_QID_NUM_FIDS 1024
#define OPDL_IQS_MAX 1
#define OPDL_Q_PRIORITY_MAX 1
#define OPDL_PORTS_MAX 64
#define MAX_OPDL_CONS_Q_DEPTH 128
#define OPDL_INFLIGHT_EVENTS_TOTAL 4096
/* allow for lots of over-provisioning */
#define OPDL_FRAGMENTS_MAX 1

/* report dequeue burst sizes in buckets */
#define OPDL_DEQ_STAT_BUCKET_SHIFT 2
/* how many packets pulled from port by sched */
#define SCHED_DEQUEUE_BURST_SIZE 32

/* size of our history list */
/* NOTE(review): MAX_OPDL_PROD_Q_DEPTH is not defined anywhere in this
 * chunk — presumably it comes from opdl_ring.h; confirm. */
#define OPDL_PORT_HIST_LIST (MAX_OPDL_PROD_Q_DEPTH)

/* how many data points use for average stats */
#define NUM_SAMPLES 64

/* PMD naming: vdev driver name and its string form. */
#define EVENTDEV_NAME_OPDL_PMD event_opdl
#define OPDL_PMD_NAME RTE_STR(event_opdl)
#define OPDL_PMD_NAME_MAX 64

/* Sentinel for "no queue"; also sizes the ext->int queue-id map below. */
#define OPDL_INVALID_QID 255

/* Extra scheduling type beyond the standard RTE_SCHED_TYPE_* values. */
#define OPDL_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)

/* Number of dequeue-burst-size histogram buckets. */
#define OPDL_NUM_POLL_BUCKETS \
	(MAX_OPDL_CONS_Q_DEPTH >> OPDL_DEQ_STAT_BUCKET_SHIFT)
/* Bit positions used to build the QE_FLAG_* masks below.
 * NOTE(review): fragment — the "enum {" opener and any trailing
 * enumerators / closing "};" fall on lines missing from this chunk.
 */
QE_FLAG_VALID_SHIFT = 0,
QE_FLAG_COMPLETE_SHIFT,
QE_FLAG_NOT_EOP_SHIFT,
/* Role of a port within an OPDL pipeline.
 * NOTE(review): fragment — the "enum port_type {" opener and the
 * remaining enumerators (RX/TX/ASYNC variants, per the p_type field
 * comment further down) are on lines missing from this chunk.
 */
OPDL_INVALID_PORT = 0,
OPDL_REGULAR_PORT = 1,
/* Scheduling type of a logical queue.
 * NOTE(review): fragment — the enum opener and remaining enumerators
 * (Atomic/Ordered, per the q_type field comment further down) are on
 * lines missing from this chunk.
 */
OPDL_Q_TYPE_INVALID = 0,
OPDL_Q_TYPE_SINGLE_LINK = 1,
/* Per-event state masks derived from the shift enum above. */
#define QE_FLAG_VALID (1 << QE_FLAG_VALID_SHIFT) /* for NEW FWD, FRAG */
#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP */
#define QE_FLAG_NOT_EOP (1 << QE_FLAG_NOT_EOP_SHIFT) /* set for FRAG only */
105 static const uint8_t opdl_qe_flag_map[] = {
106 QE_FLAG_VALID /* NEW Event */,
107 QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
108 QE_FLAG_COMPLETE /* RELEASE Event */,
110 /* Values which can be used for future support for partial
111 * events, i.e. where one event comes back to the scheduler
112 * as multiple which need to be tracked together
114 QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
/* Names of the per-port extended statistics counters.
 * NOTE(review): fragment — the remaining enumerators and the closing
 * "};" are on missing lines; OPDL_MAX_PORT_XSTAT_NUM below depends on a
 * "max_num_port_xstat" terminator declared in that missing part.
 */
enum port_xstat_name {
	claim_pkts_requested = 0,

/* Upper bound on xstats entries: one full counter set per port. */
#define OPDL_MAX_PORT_XSTAT_NUM (OPDL_PORTS_MAX * max_num_port_xstat)
131 typedef uint16_t (*opdl_enq_operation)(struct opdl_port *port,
132 const struct rte_event ev[],
135 typedef uint16_t (*opdl_deq_operation)(struct opdl_port *port,
136 struct rte_event ev[],
/* Per-stage claim bookkeeping kept alongside each port's stage instance.
 * NOTE(review): the closing "};" was missing from this chunk and has been
 * restored — confirm no further members exist in the original file.
 */
struct opdl_stage_meta_data {
	uint32_t num_claimed;	/* number of entries claimed by this stage */
	uint32_t burst_sz;	/* Port claim burst size */
};
/* NOTE(review): fragment of the port structure — its "struct opdl_port {"
 * opener, several field declarations (only their comments survive below)
 * and the closing "};" fall on lines missing from this chunk.
 */
struct opdl_evdev *opdl;

/* enq handler & stage instance */
opdl_enq_operation enq;
struct opdl_stage *enq_stage_inst;

/* deq handler & stage instance */
opdl_deq_operation deq;
struct opdl_stage *deq_stage_inst;

/* port id has correctly been set */
/* NOTE(review): field declaration missing from this chunk */
/* set when the port is initialized */
/* NOTE(review): field declaration missing from this chunk */
/* A numeric ID for the port */
/* NOTE(review): field declaration missing from this chunk */

/* Space for claimed entries */
struct rte_event *entries[MAX_OPDL_CONS_Q_DEPTH];

/* RX/REGULAR/TX/ASYNC - determined on position in queue */
enum port_type p_type;

/* if the claim is static atomic type */
/* NOTE(review): field declaration missing from this chunk */

/* Queue linked to this port - internal queue id*/
/* NOTE(review): field declaration missing from this chunk */

/* Queue linked to this port - external queue id*/
uint8_t external_qid;

/* Next queue linked to this port - external queue id*/
uint8_t next_external_qid;

/* number of instances of this stage */
uint32_t num_instance;

/* instance ID of this stage*/
uint32_t instance_id;

/* track packets in and out of this port */
uint64_t port_stat[max_num_port_xstat];
uint64_t start_cycles;
/* Per-queue configuration captured at setup time.
 * NOTE(review): fragment — the remaining members and the closing "};"
 * are on lines missing from this chunk.
 */
struct opdl_queue_meta_data {
	enum queue_type type;
/* One extended-statistics entry exposed through the eventdev xstats API.
 * NOTE(review): fragment — the remaining members and the closing "};"
 * are on lines missing from this chunk.
 */
struct opdl_xstats_entry {
	struct rte_event_dev_xstats_name stat;
/* NOTE(review): fragment of the queue structure — its opener, several
 * field declarations (only their comments survive) and the closing "};"
 * fall on lines missing from this chunk.
 */
/* Opdl ring this queue is associated with */
/* NOTE(review): field declaration missing from this chunk */

/* type and position have correctly been set */
/* NOTE(review): field declaration missing from this chunk */

/* port number and associated ports have been associated */
/* NOTE(review): field declaration missing from this chunk */

/* type of this queue (Atomic, Ordered, Parallel, Direct)*/
enum queue_type q_type;

/* position of queue (START, MIDDLE, END) */
enum queue_pos q_pos;

/* external queue id. It is mapped to the queue position */
uint8_t external_qid;

/* ports feeding/draining this queue */
struct opdl_port *ports[OPDL_PORTS_MAX];

/* priority, reserved for future */
/* NOTE(review): field declaration missing from this chunk */
/* Maximum number of opdl ring instances held per device (sizes the
 * opdl[] array in the device-private struct below). */
#define OPDL_TUR_PER_DEV 12

/* PMD needs an extra queue per Opdl */
#define OPDL_MAX_QUEUES (RTE_EVENT_MAX_QUEUES_PER_DEV - OPDL_TUR_PER_DEV)
/* NOTE(review): fragment of the device-private structure — its opener,
 * some members and the closing "};" fall on lines missing from this
 * chunk.
 */
struct rte_eventdev_data *data;

/* Max number of ports and queues*/
uint32_t max_port_nb;
uint32_t max_queue_nb;

/* slots in the opdl ring */
uint32_t nb_events_limit;

/* Array holding all opdl for this device */
struct opdl_ring *opdl[OPDL_TUR_PER_DEV];

/* per-queue setup metadata */
struct opdl_queue_meta_data q_md[OPDL_MAX_QUEUES];

/* Internal queues - one per logical queue */
/* NOTE(review): the element type of this array declaration (wrapped onto
 * a line missing from this chunk) is not visible here — confirm. */
queue[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;

/* per-stage claim metadata, one slot per port */
struct opdl_stage_meta_data s_md[OPDL_PORTS_MAX];

/* Contains all ports - load balanced and directed */
struct opdl_port ports[OPDL_PORTS_MAX] __rte_cache_aligned;

/* external-to-internal queue id mapping */
uint8_t q_map_ex_to_in[OPDL_INVALID_QID];

/* extended statistics entries, per port */
struct opdl_xstats_entry port_xstat[OPDL_MAX_PORT_XSTAT_NUM];

char service_name[OPDL_PMD_NAME_MAX];
288 static inline struct opdl_evdev *
289 opdl_pmd_priv(const struct rte_eventdev *eventdev)
291 return eventdev->data->dev_private;
294 static inline uint8_t
295 opdl_pmd_dev_id(const struct opdl_evdev *opdl)
297 return opdl->data->dev_id;
300 static inline const struct opdl_evdev *
301 opdl_pmd_priv_const(const struct rte_eventdev *eventdev)
303 return eventdev->data->dev_private;
306 uint16_t opdl_event_enqueue(void *port, const struct rte_event *ev);
307 uint16_t opdl_event_enqueue_burst(void *port, const struct rte_event ev[],
310 uint16_t opdl_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
311 uint16_t opdl_event_dequeue_burst(void *port, struct rte_event *ev,
312 uint16_t num, uint64_t wait);
313 void opdl_event_schedule(struct rte_eventdev *dev);
/* Extended statistics (xstats) hooks exposed to the eventdev layer. */
void opdl_xstats_init(struct rte_eventdev *dev);
int opdl_xstats_uninit(struct rte_eventdev *dev);
int opdl_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size);
int opdl_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n);
uint64_t opdl_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id);
/* NOTE(review): the final "uint32_t nb_ids);" parameter was missing from
 * this chunk and restored by analogy with opdl_xstats_get() — confirm. */
int opdl_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids);
/* Device build/teardown helpers invoked during configure/start/stop;
 * all return 0 on success (non-zero on failure) per eventdev convention.
 * NOTE(review): return semantics inferred from the int return type —
 * confirm against the implementations. */
int opdl_add_event_handlers(struct rte_eventdev *dev);
int build_all_dependencies(struct rte_eventdev *dev);
int check_queues_linked(struct rte_eventdev *dev);
int create_queues_and_rings(struct rte_eventdev *dev);
int initialise_all_other_ports(struct rte_eventdev *dev);
int initialise_queue_zero_ports(struct rte_eventdev *dev);
int assign_internal_queue_ids(struct rte_eventdev *dev);
void destroy_queues_and_rings(struct rte_eventdev *dev);
/* Self-test entry point for the PMD. */
int opdl_selftest(void);
342 #endif /* _OPDL_EVDEV_H_ */