/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IQ_CHUNK_H_
#define _IQ_CHUNK_H_

#include <stdint.h>
#include <stdbool.h>
#include <rte_eventdev.h>

#define IQ_ROB_NAMESIZE 12

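/* An internal queue (IQ) stores events in a singly linked list of fixed-size,
 * cache-aligned chunks. Chunks are taken from and returned to a per-device
 * free list (sw->chunk_list_head), which is sized so that allocation always
 * succeeds while events are in flight.
 */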
struct sw_queue_chunk {
        struct rte_event events[SW_EVS_PER_Q_CHUNK];
        struct sw_queue_chunk *next;
} __rte_cache_aligned;

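/* Return true if the IQ currently holds no events. */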
static __rte_always_inline bool
iq_empty(struct sw_iq *iq)
{
        return (iq->count == 0);
}

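/* Return the number of events currently stored in the IQ. */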
static __rte_always_inline uint16_t
iq_count(const struct sw_iq *iq)
{
        return iq->count;
}

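/* Pop a chunk from the device-wide free list. The free list is sized so that
 * a chunk is always available here (see the note in iq_enqueue()).
 */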
static __rte_always_inline struct sw_queue_chunk *
iq_alloc_chunk(struct sw_evdev *sw)
{
        struct sw_queue_chunk *chunk = sw->chunk_list_head;
        sw->chunk_list_head = chunk->next;
        chunk->next = NULL;
        return chunk;
}

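/* Return a chunk to the head of the device-wide free list. */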
static __rte_always_inline void
iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
{
        chunk->next = sw->chunk_list_head;
        sw->chunk_list_head = chunk;
}

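/* Initialize an IQ to a single empty chunk. */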
static __rte_always_inline void
iq_init(struct sw_evdev *sw, struct sw_iq *iq)
{
        iq->head = iq_alloc_chunk(sw);
        iq->tail = iq->head;
        iq->head_idx = 0;
        iq->tail_idx = 0;
        iq->count = 0;
}

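/* Append one event at the tail of the IQ, growing the chunk list when the
 * tail chunk becomes full.
 */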
static __rte_always_inline void
iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
{
        iq->tail->events[iq->tail_idx++] = *ev;
        iq->count++;

        if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
                /* The number of chunks is defined in relation to the total
                 * number of inflight events and number of IQs such that
                 * allocation will always succeed.
                 */
                struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);
                iq->tail->next = chunk;
                iq->tail = chunk;
                iq->tail_idx = 0;
        }
}

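/* Discard the event at the head of the IQ, releasing the head chunk back to
 * the free list once it has been fully consumed.
 */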
static __rte_always_inline void
iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
{
        iq->head_idx++;
        iq->count--;

        if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
                struct sw_queue_chunk *next = iq->head->next;
                iq_free_chunk(sw, iq->head);
                iq->head = next;
                iq->head_idx = 0;
        }
}

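/* Return a pointer to the event at the head of the IQ without removing it.
 * The caller must ensure the IQ is not empty.
 */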
static __rte_always_inline const struct rte_event *
iq_peek(struct sw_iq *iq)
{
        return &iq->head->events[iq->head_idx];
}

/* Dequeue up to @count events from the head of the IQ into @ev, releasing
 * drained chunks back to the free list.
 * Note: the caller must ensure that count <= iq_count().
 */
static __rte_always_inline uint16_t
iq_dequeue_burst(struct sw_evdev *sw,
                 struct sw_iq *iq,
                 struct rte_event *ev,
                 uint16_t count)
{
        struct sw_queue_chunk *current;
        uint16_t total, index;

        count = RTE_MIN(count, iq_count(iq));

        current = iq->head;
        index = iq->head_idx;
        total = 0;

        /* Loop over the chunks */
        while (1) {
                struct sw_queue_chunk *next;
                for (; index < SW_EVS_PER_Q_CHUNK;) {
                        ev[total++] = current->events[index++];

                        if (unlikely(total == count))
                                goto done;
                }

                /* Move to the next chunk */
                next = current->next;
                iq_free_chunk(sw, current);
                current = next;
                index = 0;
        }

done:
        if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
                struct sw_queue_chunk *next = current->next;
                iq_free_chunk(sw, current);
                iq->head = next;
                iq->head_idx = 0;
        } else {
                iq->head = current;
                iq->head_idx = index;
        }

        iq->count -= total;

        return total;
}

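/* Re-insert @count events at the head of the IQ, in order, so they are the
 * next events to be dequeued.
 */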
static __rte_always_inline void
iq_put_back(struct sw_evdev *sw,
            struct sw_iq *iq,
            struct rte_event *ev,
            unsigned int count)
{
        /* Put back events that fit in the current head chunk. If necessary,
         * put back events in a new head chunk. The caller must ensure that
         * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is
         * needed.
         */
        uint16_t avail_space = iq->head_idx;

        if (avail_space >= count) {
                const uint16_t idx = avail_space - count;
                uint16_t i;

                for (i = 0; i < count; i++)
                        iq->head->events[idx + i] = ev[i];

                iq->head_idx = idx;
        } else if (avail_space < count) {
                const uint16_t remaining = count - avail_space;
                struct sw_queue_chunk *new_head;
                uint16_t i;

                for (i = 0; i < avail_space; i++)
                        iq->head->events[i] = ev[remaining + i];

                new_head = iq_alloc_chunk(sw);
                new_head->next = iq->head;
                iq->head = new_head;
                iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;

                for (i = 0; i < remaining; i++)
                        iq->head->events[iq->head_idx + i] = ev[i];
        }

        iq->count += count;
}

#endif /* _IQ_CHUNK_H_ */