/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#ifndef _SW_EVDEV_H_
#define _SW_EVDEV_H_

#include "sw_evdev_log.h"
#include <rte_eventdev.h>
#include <eventdev_pmd_vdev.h>
#include <rte_atomic.h>

#define SW_DEFAULT_CREDIT_QUANTA 32
#define SW_DEFAULT_SCHED_QUANTA 128
#define SW_QID_NUM_FIDS 16384
#define SW_IQS_MAX 4 /* number of internal (priority) queues per QID */
#define SW_Q_PRIORITY_MAX 255
#define SW_PORTS_MAX 64
#define MAX_SW_CONS_Q_DEPTH 128
#define SW_INFLIGHT_EVENTS_TOTAL 4096
/* allow for lots of over-provisioning */
#define MAX_SW_PROD_Q_DEPTH 4096
#define SW_FRAGMENTS_MAX 16

/* Should be a power of two minus one, to leave room for the next pointer */
#define SW_EVS_PER_Q_CHUNK 255
#define SW_Q_CHUNK_SIZE ((SW_EVS_PER_Q_CHUNK + 1) * sizeof(struct rte_event))
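
/*
 * Worked example: with the 16-byte struct rte_event, a chunk occupies
 * (255 + 1) * 16 = 4096 bytes; 255 slots hold events and the final
 * event-sized slot leaves room for the chunk's next pointer.
 */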

/* report dequeue burst sizes in buckets */
#define SW_DEQ_STAT_BUCKET_SHIFT 2
/* how many packets are pulled from a port by the scheduler at a time */
#define SCHED_DEQUEUE_DEFAULT_BURST_SIZE 32
/* maximum burst size; also sizes each port's pp_buf staging array */
#define SCHED_DEQUEUE_MAX_BURST_SIZE 256

/* Flush the pipeline after this many iterations with no enqueues to a CQ */
#define SCHED_NO_ENQ_CYCLE_FLUSH 256

#define SW_PORT_HIST_LIST (MAX_SW_PROD_Q_DEPTH) /* size of our history list */
#define NUM_SAMPLES 64 /* how many data points to use for average stats */

#define EVENTDEV_NAME_SW_PMD event_sw
#define SW_PMD_NAME RTE_STR(event_sw)
#define SW_PMD_NAME_MAX 64
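
/* Internal-only scheduling type used for directed (single-link) queues,
 * deliberately placed outside the public RTE_SCHED_TYPE_* range.
 */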
#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)

#define SW_NUM_POLL_BUCKETS (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT)
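
/*
 * Example: with MAX_SW_CONS_Q_DEPTH == 128 and a bucket shift of 2,
 * there are 128 >> 2 == 32 buckets, each covering four consecutive
 * dequeue burst sizes.
 */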

enum {
	QE_FLAG_VALID_SHIFT = 0,
	QE_FLAG_COMPLETE_SHIFT,
	QE_FLAG_NOT_EOP_SHIFT,
	_QE_FLAG_COUNT
};

#define QE_FLAG_VALID    (1 << QE_FLAG_VALID_SHIFT)    /* for NEW, FWD, FRAG */
#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP  */
#define QE_FLAG_NOT_EOP  (1 << QE_FLAG_NOT_EOP_SHIFT)  /* set for FRAG only  */

static const uint8_t sw_qe_flag_map[] = {
		QE_FLAG_VALID /* NEW Event */,
		QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
		QE_FLAG_COMPLETE /* RELEASE Event */,

		/* Values which can be used for future support for partial
		 * events, i.e. where one event comes back to the scheduler
		 * as multiple events which need to be tracked together
		 */
		QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
};
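
/*
 * Illustrative use (a sketch, not a definition from this header): the
 * enqueue path can look up the flags for an event by its op field, e.g.
 *	uint8_t flags = sw_qe_flag_map[ev->op];
 * where ev->op is RTE_EVENT_OP_NEW, RTE_EVENT_OP_FORWARD or
 * RTE_EVENT_OP_RELEASE.
 */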

/* Records basic event stats at a given point. Used in port and qid structs */
struct sw_point_stats {
	uint64_t rx_pkts;
	uint64_t rx_dropped;
	uint64_t tx_pkts;
};

/* structure used to track what port a flow (FID) is pinned to */
struct sw_fid_t {
	/* which CQ this FID is currently pinned to */
	int32_t cq;
	/* number of packets gone to the CQ with this FID */
	uint32_t pcount;
};

struct reorder_buffer_entry {
	uint16_t num_fragments;		/**< Number of packet fragments */
	uint16_t fragment_index;	/**< Points to the oldest valid frag */
	uint8_t ready;			/**< Entry is ready to be reordered */
	struct rte_event fragments[SW_FRAGMENTS_MAX];
};

struct sw_iq {
	struct sw_queue_chunk *head;
	struct sw_queue_chunk *tail;
	uint16_t head_idx;
	uint16_t tail_idx;
	uint16_t count;
};

struct sw_qid {
	/* set when the QID has been initialized */
	uint8_t initialized;
	/* The type of this QID */
	int8_t type;
	/* Integer ID representing the queue. This is used in history lists,
	 * to identify the stage of processing.
	 */
	uint32_t id;
	struct sw_point_stats stats;

	/* Internal priority rings for packets */
	struct sw_iq iq[SW_IQS_MAX];
	uint32_t iq_pkt_mask; /* bitmask of IQs that currently hold packets */
	uint64_t iq_pkt_count[SW_IQS_MAX];

	/* Information on what CQs are polling this IQ */
	uint32_t cq_num_mapped_cqs;
	uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
	uint32_t cq_map[SW_PORTS_MAX];
	uint64_t to_port[SW_PORTS_MAX];

	/* Track flow ids for atomic load balancing */
	struct sw_fid_t fids[SW_QID_NUM_FIDS];

	/* Track packet order for reordering when needed */
	struct reorder_buffer_entry *reorder_buffer; /**< pkts awaiting reorder */
	struct rob_ring *reorder_buffer_freelist; /* available reorder slots */
	uint32_t reorder_buffer_index; /* oldest valid reorder buffer entry */
	uint32_t window_size; /* Used to wrap reorder_buffer_index */

	uint8_t priority;
};

struct sw_hist_list_entry {
	int32_t qid;
	int32_t fid;
	struct reorder_buffer_entry *rob_entry;
};

struct sw_evdev;

struct sw_port {
	/* new enqueue / dequeue API doesn't have an instance pointer, only the
	 * pointer to the port being enqueued to / dequeued from
	 */
	struct sw_evdev *sw;

	/* set when the port is initialized */
	uint8_t initialized;
	/* A numeric ID for the port */
	uint8_t id;

	/* An atomic counter for when the port has been unlinked, and the
	 * scheduler has not yet acked this unlink - hence there may still be
	 * events in the buffers going to the port. Once the unlinks-in-
	 * progress count has been read by the scheduler, no more events will
	 * be pushed to the port - hence the scheduler core can just assign
	 * zero.
	 */
	uint8_t unlinks_in_progress;

	int16_t is_directed; /**< Takes from a single directed QID */
	/*
	 * For load-balanced ports we can optimise pulling packets from
	 * producers if there is no reordering involved
	 */
	int16_t num_ordered_qids;

	/** Ring and buffer for pulling events from workers for scheduling */
	struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
	/** Ring and buffer for pushing packets to workers after scheduling */
	struct rte_event_ring *cq_worker_ring;

	/* num releases yet to be completed on this port */
	uint16_t outstanding_releases __rte_cache_aligned;
	uint16_t inflight_max; /* app requested max inflights for this port */
	uint16_t inflight_credits; /* num credits this port has right now */
	uint8_t implicit_release; /* release events before dequeuing */
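
	/*
	 * Credit-flow sketch (assumed behaviour of the worker paths): a
	 * port pulls credits from the device-wide inflights counter in
	 * blocks of credit_update_quanta and spends one credit per NEW
	 * event it enqueues; surplus credits are handed back in the same
	 * quanta.
	 */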

	uint16_t last_dequeue_burst_sz; /* how big the last burst was */
	uint64_t last_dequeue_ticks; /* used to track burst processing time */
	uint64_t avg_pkt_ticks; /* tracks average over NUM_SAMPLES bursts */
	uint64_t total_polls; /* how many polls were counted in stats */
	uint64_t zero_polls; /* tracks polls returning nothing */
	uint32_t poll_buckets[SW_NUM_POLL_BUCKETS];
		/* bucket values are in units of 4 for shorter reporting */

	/* History list structs, containing info on pkts egressed to worker */
	uint16_t hist_head __rte_cache_aligned;
	uint16_t hist_tail;
	uint16_t inflights;
	struct sw_hist_list_entry hist_list[SW_PORT_HIST_LIST];

	/* track packets in and out of this port */
	struct sw_point_stats stats;

	uint32_t pp_buf_start;
	uint32_t pp_buf_count;
	uint16_t cq_buf_count;
	struct rte_event pp_buf[SCHED_DEQUEUE_MAX_BURST_SIZE];
	struct rte_event cq_buf[MAX_SW_CONS_Q_DEPTH];

	uint8_t num_qids_mapped;
};

struct sw_evdev {
	struct rte_eventdev_data *data;

	uint32_t port_count;
	uint32_t qid_count;
	uint32_t xstats_count;
	struct sw_xstats_entry *xstats;
	uint32_t xstats_count_mode_dev;
	uint32_t xstats_count_mode_port;
	uint32_t xstats_count_mode_queue;

	/* Minimum burst size */
	uint32_t sched_min_burst_size __rte_cache_aligned;
	/* Port dequeue burst size */
	uint32_t sched_deq_burst_size;
	/* Refill pp buffers only once per scheduler call */
	uint32_t refill_once_per_iter;

	uint32_t sched_flush_count;
	uint32_t sched_min_burst;

	/* Contains all ports - load balanced and directed */
	struct sw_port ports[SW_PORTS_MAX] __rte_cache_aligned;

	rte_atomic32_t inflights __rte_cache_aligned;

	/*
	 * max events in this instance. Cached here for performance.
	 * (also available in data->conf.nb_events_limit)
	 */
	uint32_t nb_events_limit;

	/* Internal queues - one per logical queue */
	struct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
	struct sw_queue_chunk *chunk_list_head;
	struct sw_queue_chunk *chunks;

	/* Cache how many packets are in each cq */
	uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;

	/* Array of pointers to load-balanced QIDs sorted by priority level */
	struct sw_qid *qids_prioritized[RTE_EVENT_MAX_QUEUES_PER_DEV];

	/* Stats */
	struct sw_point_stats stats __rte_cache_aligned;
	uint64_t sched_called;
	int32_t sched_quanta;
	uint64_t sched_no_iq_enqueues;
	uint64_t sched_no_cq_enqueues;
	uint64_t sched_cq_qid_called;
	uint64_t sched_last_iter_bitmask;
	uint8_t sched_progress_last_iter;

	uint8_t started;
	uint32_t credit_update_quanta;

	/* store num stats and offset of the stats for each port */
	uint16_t xstats_count_per_port[SW_PORTS_MAX];
	uint16_t xstats_offset_for_port[SW_PORTS_MAX];
	/* store num stats and offset of the stats for each queue */
	uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];

	uint32_t service_id;
	char service_name[SW_PMD_NAME_MAX];
};

static inline struct sw_evdev *
sw_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}

static inline const struct sw_evdev *
sw_pmd_priv_const(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}
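
/*
 * Illustrative use: ops callbacks recover the PMD-private state from
 * the generic device handle, e.g.
 *	struct sw_evdev *sw = sw_pmd_priv(dev);
 */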

uint16_t sw_event_enqueue(void *port, const struct rte_event *ev);
uint16_t sw_event_enqueue_burst(void *port, const struct rte_event ev[],
		uint16_t num);

uint16_t sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
		uint64_t wait);
void sw_event_schedule(struct rte_eventdev *dev);
int sw_xstats_init(struct sw_evdev *dev);
int sw_xstats_uninit(struct sw_evdev *dev);
int sw_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size);
int sw_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n);
uint64_t sw_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id);
int sw_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids);

int test_sw_eventdev(void);

#endif /* _SW_EVDEV_H_ */