/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _SW_EVDEV_H_
#define _SW_EVDEV_H_

#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_atomic.h>
#define SW_DEFAULT_CREDIT_QUANTA 32
#define SW_DEFAULT_SCHED_QUANTA 128
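/*
 * Both quanta above are defaults only; they can be overridden when the
 * vdev is created via devargs. An illustrative command line (the values
 * chosen here are arbitrary):
 *
 *	--vdev="event_sw0,sched_quanta=64,credit_quanta=64"
 */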
#define SW_QID_NUM_FIDS 16384
#define SW_IQS_MAX 4
#define SW_Q_PRIORITY_MAX 255
#define SW_PORTS_MAX 64
#define MAX_SW_CONS_Q_DEPTH 128
#define SW_INFLIGHT_EVENTS_TOTAL 4096
/* allow for lots of over-provisioning */
#define MAX_SW_PROD_Q_DEPTH 4096
#define SW_FRAGMENTS_MAX 16
/* report dequeue burst sizes in buckets */
#define SW_DEQ_STAT_BUCKET_SHIFT 2
/* how many packets pulled from port by sched */
#define SCHED_DEQUEUE_BURST_SIZE 32

#define SW_PORT_HIST_LIST (MAX_SW_PROD_Q_DEPTH) /* size of our history list */
#define NUM_SAMPLES 64 /* how many data points to use for average stats */
#define EVENTDEV_NAME_SW_PMD event_sw
#define SW_PMD_NAME RTE_STR(event_sw)
#define SW_PMD_NAME_MAX 64

#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)

#define SW_NUM_POLL_BUCKETS (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT)
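/*
 * Illustrative sketch of the bucket maths: a dequeue burst of n events
 * (n >= 1) lands in bucket (n - 1) >> SW_DEQ_STAT_BUCKET_SHIFT, so with a
 * shift of 2 each bucket spans 4 burst sizes and SW_NUM_POLL_BUCKETS
 * (128 >> 2 = 32) buckets cover the maximum CQ depth. The helper below is
 * a hypothetical example of that mapping, not part of the driver API.
 */
static inline unsigned int
sw_deq_stat_bucket(uint16_t burst_sz)
{
	/* caller must ensure burst_sz >= 1 */
	return (unsigned int)(burst_sz - 1) >> SW_DEQ_STAT_BUCKET_SHIFT;
}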
enum {
	QE_FLAG_VALID_SHIFT = 0,
	QE_FLAG_COMPLETE_SHIFT,
	QE_FLAG_NOT_EOP_SHIFT,
	_QE_FLAG_COUNT
};
#define QE_FLAG_VALID (1 << QE_FLAG_VALID_SHIFT) /* for NEW, FWD, FRAG */
#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP */
#define QE_FLAG_NOT_EOP (1 << QE_FLAG_NOT_EOP_SHIFT) /* set for FRAG only */
static const uint8_t sw_qe_flag_map[] = {
		QE_FLAG_VALID /* NEW Event */,
		QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
		QE_FLAG_COMPLETE /* RELEASE Event */,

		/* Values which can be used for future support for partial
		 * events, i.e. where one event comes back to the scheduler
		 * as multiple which need to be tracked together
		 */
		QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
};
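/*
 * The map above is indexed by the event op field; an illustrative use
 * (a sketch, not code from this file):
 *
 *	uint8_t new_flags = sw_qe_flag_map[ev->op];
 *
 * where ev->op is RTE_EVENT_OP_NEW (0), RTE_EVENT_OP_FORWARD (1) or
 * RTE_EVENT_OP_RELEASE (2), matching the first three entries.
 */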
#ifdef RTE_LIBRTE_PMD_EVDEV_SW_DEBUG
#define SW_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
			SW_PMD_NAME, \
			__func__, __LINE__, ## args)

#define SW_LOG_DBG(fmt, args...) \
	RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
			SW_PMD_NAME, \
			__func__, __LINE__, ## args)
#else
#define SW_LOG_INFO(fmt, args...)
#define SW_LOG_DBG(fmt, args...)
#endif
#define SW_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
			SW_PMD_NAME, \
			__func__, __LINE__, ## args)
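/*
 * The macros take a printf-style format plus GNU-style named varargs, so
 * a typical (illustrative, hypothetical) call site looks like:
 *
 *	SW_LOG_ERR("port %u: dequeue depth %u unsupported",
 *			port_id, depth);
 */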
/* Records basic event stats at a given point. Used in port and qid structs */
struct sw_point_stats {
	uint64_t rx_pkts;
	uint64_t rx_dropped;
	uint64_t tx_pkts;
};
/* structure used to track what port a flow (FID) is pinned to */
struct sw_fid_t {
	/* which CQ this FID is currently pinned to */
	int32_t cq;
	/* number of packets gone to the CQ with this FID */
	uint32_t pcount;
};
struct reorder_buffer_entry {
	uint16_t num_fragments;		/**< Number of packet fragments */
	uint16_t fragment_index;	/**< Points to the oldest valid frag */
	uint8_t ready;			/**< Entry is ready to be reordered */
	struct rte_event fragments[SW_FRAGMENTS_MAX];
};
struct sw_qid {
	/* set when the QID has been initialized */
	uint8_t initialized;
	/* The type of this QID */
	int8_t type;
	/* Integer ID representing the queue. This is used in history lists,
	 * to identify the stage of processing.
	 */
	uint32_t id;

	struct sw_point_stats stats;

	/* Internal priority rings for packets */
	struct iq_ring *iq[SW_IQS_MAX];
	uint32_t iq_pkt_mask; /* A mask to indicate packets in an IQ */
	uint64_t iq_pkt_count[SW_IQS_MAX];

	/* Information on what CQs are polling this IQ */
	uint32_t cq_num_mapped_cqs;
	uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
	uint32_t cq_map[SW_PORTS_MAX];
	uint64_t to_port[SW_PORTS_MAX];

	/* Track flow ids for atomic load balancing */
	struct sw_fid_t fids[SW_QID_NUM_FIDS];

	/* Track packet order for reordering when needed */
	struct reorder_buffer_entry *reorder_buffer; /**< pkts await reorder */
	struct rte_ring *reorder_buffer_freelist; /* available reorder slots */
	uint32_t reorder_buffer_index; /* oldest valid reorder buffer entry */
	uint32_t window_size; /* Used to wrap reorder_buffer_index */

	uint8_t priority;
};
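/*
 * Illustrative sketch of the reorder window wrap, assuming window_size is
 * kept a power of two; the hypothetical snippet below shows mask-based
 * advance of the oldest reorder buffer entry:
 *
 *	qid->reorder_buffer_index = (qid->reorder_buffer_index + 1) &
 *			(qid->window_size - 1);
 */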
struct sw_hist_list_entry {
	int32_t qid;
	int32_t fid;
	struct reorder_buffer_entry *rob_entry;
};

struct sw_evdev;
struct sw_port {
	/* new enqueue / dequeue API doesn't have an instance pointer, only
	 * the pointer to the port being enqueued to / dequeued from
	 */
	struct sw_evdev *sw;

	/* set when the port is initialized */
	uint8_t initialized;
	/* A numeric ID for the port */
	uint8_t id;

	int16_t is_directed; /**< Takes from a single directed QID */
	/**
	 * For load-balanced ports we can optimise pulling packets from
	 * producers if there is no reordering involved
	 */
	int16_t num_ordered_qids;

	/** Ring and buffer for pulling events from workers for scheduling */
	struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
	/** Ring and buffer for pushing packets to workers after scheduling */
	struct rte_event_ring *cq_worker_ring;

	/* num releases yet to be completed on this port */
	uint16_t outstanding_releases __rte_cache_aligned;
	uint16_t inflight_max; /* app requested max inflights for this port */
	uint16_t inflight_credits; /* num credits this port has right now */

	uint16_t last_dequeue_burst_sz; /* how big the last burst was */
	uint64_t last_dequeue_ticks; /* used to track burst processing time */
	uint64_t avg_pkt_ticks; /* tracks average over NUM_SAMPLES bursts */
	uint64_t total_polls; /* how many polls were counted in stats */
	uint64_t zero_polls; /* tracks polls returning nothing */
	uint32_t poll_buckets[SW_NUM_POLL_BUCKETS];
		/* bucket values in 4s for shorter reporting */

	/* History list structs, containing info on pkts egressed to worker */
	uint16_t hist_head __rte_cache_aligned;
	uint16_t hist_tail;
	uint16_t inflights;
	struct sw_hist_list_entry hist_list[SW_PORT_HIST_LIST];

	/* track packets in and out of this port */
	struct sw_point_stats stats;

	/* port-specific caches of scheduler pull (pp) and CQ push buffers */
	uint32_t pp_buf_start;
	uint32_t pp_buf_count;
	uint16_t cq_buf_count;
	struct rte_event pp_buf[SCHED_DEQUEUE_BURST_SIZE];
	struct rte_event cq_buf[MAX_SW_CONS_Q_DEPTH];

	uint8_t num_qids_mapped;
};
struct sw_xstats_entry;

struct sw_evdev {
	struct rte_eventdev_data *data;

	uint32_t port_count;
	uint32_t qid_count;
	uint32_t xstats_count;
	struct sw_xstats_entry *xstats;
	uint32_t xstats_count_mode_dev;
	uint32_t xstats_count_mode_port;
	uint32_t xstats_count_mode_queue;

	/* Contains all ports - load balanced and directed */
	struct sw_port ports[SW_PORTS_MAX] __rte_cache_aligned;

	rte_atomic32_t inflights __rte_cache_aligned;
	/*
	 * max events in this instance. Cached here for performance.
	 * (also available in data->conf.nb_events_limit)
	 */
	uint32_t nb_events_limit;
	/* Internal queues - one per logical queue */
	struct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;

	/* Cache how many packets are in each cq */
	uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;

	/* Array of pointers to load-balanced QIDs sorted by priority level */
	struct sw_qid *qids_prioritized[RTE_EVENT_MAX_QUEUES_PER_DEV];
	/* Stats */
	struct sw_point_stats stats __rte_cache_aligned;
	uint64_t sched_called;
	int32_t sched_quanta;
	uint64_t sched_no_iq_enqueues;
	uint64_t sched_no_cq_enqueues;
	uint64_t sched_cq_qid_called;

	uint8_t started;
	uint32_t credit_update_quanta;
	/* store num stats and offset of the stats for each port */
	uint16_t xstats_count_per_port[SW_PORTS_MAX];
	uint16_t xstats_offset_for_port[SW_PORTS_MAX];
	/* store num stats and offset of the stats for each queue */
	uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];

	uint32_t service_id;
	char service_name[SW_PMD_NAME_MAX];
};
static inline struct sw_evdev *
sw_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}
static inline const struct sw_evdev *
sw_pmd_priv_const(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}
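/*
 * Illustrative use of the accessors above inside a PMD callback (a sketch;
 * sw_example_op is hypothetical and not part of this driver):
 *
 *	static int
 *	sw_example_op(const struct rte_eventdev *dev)
 *	{
 *		struct sw_evdev *sw = sw_pmd_priv(dev);
 *		return sw->started ? 0 : -1;
 *	}
 */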
uint16_t sw_event_enqueue(void *port, const struct rte_event *ev);
uint16_t sw_event_enqueue_burst(void *port, const struct rte_event ev[],
		uint16_t num);

uint16_t sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
		uint64_t wait);
void sw_event_schedule(struct rte_eventdev *dev);
int sw_xstats_init(struct sw_evdev *dev);
int sw_xstats_uninit(struct sw_evdev *dev);
int sw_xstats_get_names(const struct rte_eventdev *dev,
	enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
	struct rte_event_dev_xstats_name *xstats_names,
	unsigned int *ids, unsigned int size);
int sw_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n);
uint64_t sw_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id);
int sw_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids);
#endif /* _SW_EVDEV_H_ */