/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#ifndef _IQ_CHUNK_H_
#define _IQ_CHUNK_H_

#include <stdint.h>
#include <stddef.h>

#include <rte_eventdev.h>

#define IQ_ROB_NAMESIZE 12
14 struct sw_queue_chunk {
15 struct rte_event events[SW_EVS_PER_Q_CHUNK];
16 struct sw_queue_chunk *next;
17 } __rte_cache_aligned;
19 static __rte_always_inline bool
20 iq_empty(struct sw_iq *iq)
22 return (iq->count == 0);
25 static __rte_always_inline uint16_t
26 iq_count(const struct sw_iq *iq)
31 static __rte_always_inline struct sw_queue_chunk *
32 iq_alloc_chunk(struct sw_evdev *sw)
34 struct sw_queue_chunk *chunk = sw->chunk_list_head;
35 sw->chunk_list_head = chunk->next;
40 static __rte_always_inline void
41 iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
43 chunk->next = sw->chunk_list_head;
44 sw->chunk_list_head = chunk;
47 static __rte_always_inline void
48 iq_init(struct sw_evdev *sw, struct sw_iq *iq)
50 iq->head = iq_alloc_chunk(sw);
56 static __rte_always_inline void
57 iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
59 iq->tail->events[iq->tail_idx++] = *ev;
62 if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
63 /* The number of chunks is defined in relation to the total
64 * number of inflight events and number of IQS such that
65 * allocation will always succeed.
67 struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);
68 iq->tail->next = chunk;
74 static __rte_always_inline void
75 iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
80 if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
81 struct sw_queue_chunk *next = iq->head->next;
82 iq_free_chunk(sw, iq->head);
88 static __rte_always_inline const struct rte_event *
89 iq_peek(struct sw_iq *iq)
91 return &iq->head->events[iq->head_idx];
94 /* Note: the caller must ensure that count <= iq_count() */
95 static __rte_always_inline uint16_t
96 iq_dequeue_burst(struct sw_evdev *sw,
101 struct sw_queue_chunk *current;
102 uint16_t total, index;
104 count = RTE_MIN(count, iq_count(iq));
107 index = iq->head_idx;
110 /* Loop over the chunks */
112 struct sw_queue_chunk *next;
113 for (; index < SW_EVS_PER_Q_CHUNK;) {
114 ev[total++] = current->events[index++];
116 if (unlikely(total == count))
120 /* Move to the next chunk */
121 next = current->next;
122 iq_free_chunk(sw, current);
128 if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
129 struct sw_queue_chunk *next = iq->head->next;
130 iq_free_chunk(sw, current);
135 iq->head_idx = index;
143 static __rte_always_inline void
144 iq_put_back(struct sw_evdev *sw,
146 struct rte_event *ev,
149 /* Put back events that fit in the current head chunk. If necessary,
150 * put back events in a new head chunk. The caller must ensure that
151 * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is
154 uint16_t avail_space = iq->head_idx;
156 if (avail_space >= count) {
157 const uint16_t idx = avail_space - count;
160 for (i = 0; i < count; i++)
161 iq->head->events[idx + i] = ev[i];
164 } else if (avail_space < count) {
165 const uint16_t remaining = count - avail_space;
166 struct sw_queue_chunk *new_head;
169 for (i = 0; i < avail_space; i++)
170 iq->head->events[i] = ev[remaining + i];
172 new_head = iq_alloc_chunk(sw);
173 new_head->next = iq->head;
175 iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;
177 for (i = 0; i < remaining; i++)
178 iq->head->events[iq->head_idx + i] = ev[i];
#endif /* _IQ_CHUNK_H_ */