/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IQ_CHUNK_H_
#define _IQ_CHUNK_H_

#include <stdint.h>
#include <stdbool.h>

#include <rte_eventdev.h>
#define IQ_ROB_NAMESIZE 12

struct sw_queue_chunk {
	struct rte_event events[SW_EVS_PER_Q_CHUNK];
	struct sw_queue_chunk *next;
} __rte_cache_aligned;
static __rte_always_inline bool
iq_empty(struct sw_iq *iq)
{
	return (iq->count == 0);
}
static __rte_always_inline uint16_t
iq_count(const struct sw_iq *iq)
{
	return iq->count;
}
static __rte_always_inline struct sw_queue_chunk *
iq_alloc_chunk(struct sw_evdev *sw)
{
	/* Pop a chunk from the device-wide free list. The list is sized so
	 * that it is never empty when a chunk is needed (see iq_enqueue()).
	 */
	struct sw_queue_chunk *chunk = sw->chunk_list_head;

	sw->chunk_list_head = chunk->next;
	chunk->next = NULL;
	return chunk;
}
static __rte_always_inline void
iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
{
	/* Push the chunk back onto the device-wide free list. */
	chunk->next = sw->chunk_list_head;
	sw->chunk_list_head = chunk;
}
static __rte_always_inline void
iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
{
	while (head) {
		struct sw_queue_chunk *next = head->next;

		iq_free_chunk(sw, head);
		head = next;
	}
}
static __rte_always_inline void
iq_init(struct sw_evdev *sw, struct sw_iq *iq)
{
	iq->head = iq_alloc_chunk(sw);
	iq->tail = iq->head;
	iq->head_idx = 0;
	iq->tail_idx = 0;
	iq->count = 0;
}
static __rte_always_inline void
iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
{
	iq->tail->events[iq->tail_idx++] = *ev;
	iq->count++;

	if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
		/* The number of chunks is defined in relation to the total
		 * number of inflight events and the number of IQs such that
		 * allocation will always succeed.
		 */
		struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);

		iq->tail->next = chunk;
		iq->tail = chunk;
		iq->tail_idx = 0;
	}
}
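/* Illustrative sketch (not part of the original header): a burst-enqueue
 * helper a caller might layer on top of iq_enqueue(). The helper name is
 * hypothetical; it relies only on the guarantee above that chunk allocation
 * cannot fail, so no per-event error handling is needed.
 */
static __rte_always_inline void
iq_enqueue_burst_sketch(struct sw_evdev *sw, struct sw_iq *iq,
			const struct rte_event *evs, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; i++)
		iq_enqueue(sw, iq, &evs[i]);
}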
static __rte_always_inline void
iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
{
	iq->head_idx++;
	iq->count--;

	if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
		struct sw_queue_chunk *next = iq->head->next;

		iq_free_chunk(sw, iq->head);
		iq->head = next;
		iq->head_idx = 0;
	}
}
static __rte_always_inline const struct rte_event *
iq_peek(struct sw_iq *iq)
{
	return &iq->head->events[iq->head_idx];
}
/* Note: the caller must ensure that count <= iq_count() */
static __rte_always_inline uint16_t
iq_dequeue_burst(struct sw_evdev *sw,
		 struct sw_iq *iq,
		 struct rte_event *ev,
		 uint16_t count)
{
	struct sw_queue_chunk *current;
	uint16_t total, index;

	count = RTE_MIN(count, iq_count(iq));

	current = iq->head;
	index = iq->head_idx;
	total = 0;

	/* Loop over the chunks */
	while (1) {
		struct sw_queue_chunk *next;

		for (; index < SW_EVS_PER_Q_CHUNK;) {
			ev[total++] = current->events[index++];

			if (unlikely(total == count))
				goto done;
		}

		/* Move to the next chunk */
		next = current->next;
		iq_free_chunk(sw, current);
		current = next;
		index = 0;
	}

done:
	if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
		/* The head chunk was fully drained: recycle it. */
		struct sw_queue_chunk *next = current->next;

		iq_free_chunk(sw, current);
		iq->head = next;
		iq->head_idx = 0;
	} else {
		iq->head = current;
		iq->head_idx = index;
	}

	iq->count -= total;

	return total;
}
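/* Illustrative sketch (not part of the original header): draining an IQ one
 * event at a time with iq_peek()/iq_pop(), for callers that need a per-event
 * scheduling decision rather than a burst. The helper name is hypothetical.
 */
static __rte_always_inline uint16_t
iq_drain_singly_sketch(struct sw_evdev *sw, struct sw_iq *iq,
		       struct rte_event *out, uint16_t max)
{
	uint16_t n = 0;

	while (n < max && !iq_empty(iq)) {
		out[n++] = *iq_peek(iq);
		iq_pop(sw, iq);
	}
	return n;
}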
static __rte_always_inline void
iq_put_back(struct sw_evdev *sw,
	    struct sw_iq *iq,
	    struct rte_event *ev,
	    uint16_t count)
{
	/* Put back events that fit in the current head chunk. If necessary,
	 * put back events in a new head chunk. The caller must ensure that
	 * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head
	 * chunk is allocated.
	 */
	uint16_t avail_space = iq->head_idx;

	if (avail_space >= count) {
		const uint16_t idx = avail_space - count;
		uint16_t i;

		for (i = 0; i < count; i++)
			iq->head->events[idx + i] = ev[i];

		iq->head_idx = idx;
	} else {
		/* The events that fit go at the start of the current head
		 * chunk; the remainder go into a freshly allocated chunk
		 * that becomes the new head.
		 */
		const uint16_t remaining = count - avail_space;
		struct sw_queue_chunk *new_head;
		uint16_t i;

		for (i = 0; i < avail_space; i++)
			iq->head->events[i] = ev[remaining + i];

		new_head = iq_alloc_chunk(sw);
		new_head->next = iq->head;
		iq->head = new_head;
		iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;

		for (i = 0; i < remaining; i++)
			iq->head->events[iq->head_idx + i] = ev[i];
	}

	iq->count += count;
}
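/* Illustrative sketch (not part of the original header): dequeue a burst,
 * forward what the destination accepts, and return the remainder to the head
 * of the IQ so event order is preserved. 'forward' is a hypothetical
 * callback; limiting the burst to SW_EVS_PER_Q_CHUNK satisfies
 * iq_put_back()'s precondition on count.
 */
static __rte_always_inline void
iq_forward_or_put_back_sketch(struct sw_evdev *sw, struct sw_iq *iq,
			      uint16_t (*forward)(struct rte_event *ev,
						  uint16_t n))
{
	struct rte_event burst[SW_EVS_PER_Q_CHUNK];
	uint16_t n = iq_dequeue_burst(sw, iq, burst, SW_EVS_PER_Q_CHUNK);
	uint16_t done = forward(burst, n);

	if (done < n)
		iq_put_back(sw, iq, &burst[done], n - done);
}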
#endif /* _IQ_CHUNK_H_ */