/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#ifndef _SW_EVDEV_H_
#define _SW_EVDEV_H_

#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_atomic.h>

#define SW_DEFAULT_CREDIT_QUANTA 32
#define SW_DEFAULT_SCHED_QUANTA 128
#define SW_QID_NUM_FIDS 16384
#define SW_IQS_MAX 4
#define SW_Q_PRIORITY_MAX 255
#define SW_PORTS_MAX 64
#define MAX_SW_CONS_Q_DEPTH 128
#define SW_INFLIGHT_EVENTS_TOTAL 4096
/* allow for lots of over-provisioning */
#define MAX_SW_PROD_Q_DEPTH 4096
#define SW_FRAGMENTS_MAX 16

/* Should be a power-of-two minus one, to leave room for the next pointer */
#define SW_EVS_PER_Q_CHUNK 255
#define SW_Q_CHUNK_SIZE ((SW_EVS_PER_Q_CHUNK + 1) * sizeof(struct rte_event))
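
/*
 * A hedged sketch of the chunk layout the sizing above implies; the real
 * definition of struct sw_queue_chunk is assumed to live elsewhere in the
 * driver (this header only forward-references the type). With 255 events
 * plus one trailing next pointer, a chunk fits in the SW_Q_CHUNK_SIZE
 * (256-event) footprint:
 *
 *	struct sw_queue_chunk {
 *		struct rte_event events[SW_EVS_PER_Q_CHUNK];
 *		struct sw_queue_chunk *next;
 *	};
 */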

/* report dequeue burst sizes in buckets */
#define SW_DEQ_STAT_BUCKET_SHIFT 2
/* how many packets are pulled from a port by the scheduler at a time */
#define SCHED_DEQUEUE_BURST_SIZE 32

#define SW_PORT_HIST_LIST (MAX_SW_PROD_Q_DEPTH) /* size of our history list */
#define NUM_SAMPLES 64 /* how many data points are used for average stats */

#define EVENTDEV_NAME_SW_PMD event_sw
#define SW_PMD_NAME RTE_STR(event_sw)
#define SW_PMD_NAME_MAX 64

#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)

#define SW_NUM_POLL_BUCKETS (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT)
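
/*
 * Illustrative only: how a dequeue burst of size n is expected to be
 * bucketed for the per-port stats below (the real accounting sits in the
 * dequeue path, not in this header). With a shift of 2, each bucket spans
 * four burst sizes, so 128 possible sizes compress into 32 buckets:
 *
 *	if (n != 0)
 *		port->poll_buckets[(n - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
 */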

enum {
	QE_FLAG_VALID_SHIFT = 0,
	QE_FLAG_COMPLETE_SHIFT,
	QE_FLAG_NOT_EOP_SHIFT,
	_QE_FLAG_COUNT
};

#define QE_FLAG_VALID    (1 << QE_FLAG_VALID_SHIFT)    /* for NEW, FWD, FRAG */
#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP */
#define QE_FLAG_NOT_EOP  (1 << QE_FLAG_NOT_EOP_SHIFT)  /* set for FRAG only */

static const uint8_t sw_qe_flag_map[] = {
	QE_FLAG_VALID /* NEW Event */,
	QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
	QE_FLAG_COMPLETE /* RELEASE Event */,

	/* Values which can be used for future support for partial
	 * events, i.e. where one event comes back to the scheduler
	 * as multiple events which need to be tracked together
	 */
	QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
};
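
/*
 * Illustrative use, assuming the standard rte_event op values (a sketch,
 * not the PMD's enqueue path verbatim): the map is indexed by ev->op, so
 * RTE_EVENT_OP_NEW, RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE select
 * the first three entries above:
 *
 *	const uint8_t flags = sw_qe_flag_map[ev->op];
 */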

#ifdef RTE_LIBRTE_PMD_EVDEV_SW_DEBUG
#define SW_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
			SW_PMD_NAME, \
			__func__, __LINE__, ## args)

#define SW_LOG_DBG(fmt, args...) \
	RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
			SW_PMD_NAME, \
			__func__, __LINE__, ## args)
#else
#define SW_LOG_INFO(fmt, args...)
#define SW_LOG_DBG(fmt, args...)
#endif

#define SW_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
			SW_PMD_NAME, \
			__func__, __LINE__, ## args)
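
/*
 * The macros take printf-style arguments, for example (port_id being
 * whatever identifier is in scope at the call site):
 *
 *	SW_LOG_ERR("port %u: credit underrun", port_id);
 */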

/* Records basic event stats at a given point. Used in port and qid structs */
struct sw_point_stats {
	uint64_t rx_pkts;
	uint64_t rx_dropped;
	uint64_t tx_pkts;
};

/* structure used to track what port a flow (FID) is pinned to */
struct sw_fid_t {
	/* which CQ this FID is currently pinned to */
	int32_t cq;
	/* number of packets gone to the CQ with this FID */
	uint32_t pcount;
};
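
/*
 * A hedged sketch of the pinning scheme this struct supports (assuming an
 * unpinned FID is marked with a negative cq; pick_cq() is a hypothetical
 * helper, not part of this header): the first event of a flow pins the FID
 * to a CQ, later events follow the pin, and the pin can be released once
 * pcount drains back to zero.
 *
 *	struct sw_fid_t *fid = &qid->fids[flow_id];
 *	if (fid->cq < 0)
 *		fid->cq = pick_cq(qid);
 *	fid->pcount++;
 */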

struct reorder_buffer_entry {
	uint16_t num_fragments;  /**< Number of packet fragments */
	uint16_t fragment_index; /**< Points to the oldest valid frag */
	uint8_t ready;           /**< Entry is ready to be reordered */
	struct rte_event fragments[SW_FRAGMENTS_MAX];
};

struct sw_iq {
	struct sw_queue_chunk *head;
	struct sw_queue_chunk *tail;
	uint16_t head_idx;
	uint16_t tail_idx;
	uint16_t count;
};

struct sw_qid {
	/* set when the QID has been initialized */
	uint8_t initialized;
	/* The scheduling type of this QID */
	int8_t type;
	/* Integer ID representing the queue. This is used in history lists,
	 * to identify the stage of processing.
	 */
	uint32_t id;
	struct sw_point_stats stats;

	/* Internal priority rings for packets */
	struct sw_iq iq[SW_IQS_MAX];
	uint32_t iq_pkt_mask; /* A mask to indicate packets in an IQ */
	uint64_t iq_pkt_count[SW_IQS_MAX];

	/* Information on what CQs are polling this IQ */
	uint32_t cq_num_mapped_cqs;
	uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
	uint32_t cq_map[SW_PORTS_MAX];
	uint64_t to_port[SW_PORTS_MAX];

	/* Track flow ids for atomic load balancing */
	struct sw_fid_t fids[SW_QID_NUM_FIDS];

	/* Track packet order for reordering when needed */
	struct reorder_buffer_entry *reorder_buffer; /**< pkts await reorder */
	struct rte_ring *reorder_buffer_freelist; /* available reorder slots */
	uint32_t reorder_buffer_index; /* oldest valid reorder buffer entry */
	uint32_t window_size; /* Used to wrap reorder_buffer_index */

	uint8_t priority;
};
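
/*
 * Illustrative only: window_size wraps the index of the oldest valid
 * reorder buffer entry as entries are retired, e.g.
 *
 *	qid->reorder_buffer_index = (qid->reorder_buffer_index + 1) %
 *			qid->window_size;
 */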

struct sw_hist_list_entry {
	int32_t qid;
	int32_t fid;
	struct reorder_buffer_entry *rob_entry;
};

struct sw_evdev;

struct sw_port {
	/* The new enqueue/dequeue API doesn't have an instance pointer, only
	 * the pointer to the port being enqueued to/dequeued from, so keep a
	 * back-pointer to the device instance here
	 */
	struct sw_evdev *sw;

	/* set when the port is initialized */
	uint8_t initialized;
	/* A numeric ID for the port */
	uint8_t id;

	int16_t is_directed; /**< Takes from a single directed QID */
	/**
	 * For loadbalanced ports we can optimise pulling packets from
	 * producers if there is no reordering involved
	 */
	int16_t num_ordered_qids;

	/** Ring and buffer for pulling events from workers for scheduling */
	struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
	/** Ring and buffer for pushing packets to workers after scheduling */
	struct rte_event_ring *cq_worker_ring;

	/* num releases yet to be completed on this port */
	uint16_t outstanding_releases __rte_cache_aligned;
	uint16_t inflight_max; /* app requested max inflights for this port */
	uint16_t inflight_credits; /* num credits this port has right now */

	uint16_t last_dequeue_burst_sz; /* how big the burst was */
	uint64_t last_dequeue_ticks; /* used to track burst processing time */
	uint64_t avg_pkt_ticks; /* tracks average over NUM_SAMPLES bursts */
	uint64_t total_polls; /* how many polls were counted in stats */
	uint64_t zero_polls; /* tracks polls returning nothing */
	uint32_t poll_buckets[SW_NUM_POLL_BUCKETS];
		/* bucket values in 4s for shorter reporting */

	/* History list structs, containing info on pkts egressed to worker */
	uint16_t hist_head __rte_cache_aligned;
	uint16_t hist_tail;
	uint16_t inflights;
	struct sw_hist_list_entry hist_list[SW_PORT_HIST_LIST];

	/* track packets in and out of this port */
	struct sw_point_stats stats;

	/* burst buffers: pp_buf stages events pulled from the rx ring for
	 * scheduling; cq_buf batches events destined for this port's CQ
	 */
	uint32_t pp_buf_start;
	uint32_t pp_buf_count;
	uint16_t cq_buf_count;
	struct rte_event pp_buf[SCHED_DEQUEUE_BURST_SIZE];
	struct rte_event cq_buf[MAX_SW_CONS_Q_DEPTH];

	uint8_t num_qids_mapped;
};

struct sw_evdev {
	struct rte_eventdev_data *data;

	uint32_t xstats_count;
	struct sw_xstats_entry *xstats;
	uint32_t xstats_count_mode_dev;
	uint32_t xstats_count_mode_port;
	uint32_t xstats_count_mode_queue;

	/* Contains all ports - load balanced and directed */
	struct sw_port ports[SW_PORTS_MAX] __rte_cache_aligned;

	rte_atomic32_t inflights __rte_cache_aligned;

	/*
	 * max events in this instance. Cached here for performance.
	 * (also available in data->conf.nb_events_limit)
	 */
	uint32_t nb_events_limit;

	/* Internal queues - one per logical queue */
	struct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
	struct sw_queue_chunk *chunk_list_head;
	struct sw_queue_chunk *chunks;

	/* Cache how many packets are in each cq */
	uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;

	/* Array of pointers to load-balanced QIDs sorted by priority level */
	struct sw_qid *qids_prioritized[RTE_EVENT_MAX_QUEUES_PER_DEV];

	struct sw_point_stats stats __rte_cache_aligned;
	uint64_t sched_called;
	int32_t sched_quanta;
	uint64_t sched_no_iq_enqueues;
	uint64_t sched_no_cq_enqueues;
	uint64_t sched_cq_qid_called;

	uint32_t credit_update_quanta;
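
	/*
	 * Hedged sketch of the credit scheme (acquire_credits() is a
	 * hypothetical helper, not part of this header): ports take credits
	 * from the shared inflights counter in quanta-sized batches, so the
	 * hot enqueue path rarely touches the shared atomic:
	 *
	 *	if (port->inflight_credits < needed)
	 *		port->inflight_credits +=
	 *			acquire_credits(sw, sw->credit_update_quanta);
	 */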

	/* store num stats and offset of the stats for each port */
	uint16_t xstats_count_per_port[SW_PORTS_MAX];
	uint16_t xstats_offset_for_port[SW_PORTS_MAX];
	/* store num stats and offset of the stats for each queue */
	uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];

	uint32_t service_id;
	char service_name[SW_PMD_NAME_MAX];
};

static inline struct sw_evdev *
sw_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}

static inline const struct sw_evdev *
sw_pmd_priv_const(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}

uint16_t sw_event_enqueue(void *port, const struct rte_event *ev);
uint16_t sw_event_enqueue_burst(void *port, const struct rte_event ev[],
		uint16_t num);

uint16_t sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
		uint64_t wait);
void sw_event_schedule(struct rte_eventdev *dev);
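
/*
 * Usage note (illustrative; keep_running is a hypothetical flag): this PMD
 * has no internal scheduler thread, so sw_event_schedule() must be invoked
 * repeatedly, e.g. from a DPDK service core or a dedicated loop:
 *
 *	while (keep_running)
 *		sw_event_schedule(dev);
 */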

int sw_xstats_init(struct sw_evdev *dev);
int sw_xstats_uninit(struct sw_evdev *dev);
int sw_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size);
int sw_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n);
uint64_t sw_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id);
int sw_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids);

#endif /* _SW_EVDEV_H_ */