/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IQ_CHUNK_H_
#define _IQ_CHUNK_H_

#include <stdint.h>
#include <stdbool.h>
#include <rte_eventdev.h>

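/* Size of the buffer used to format IQ and reorder-buffer (ROB) names. */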
#define IQ_ROB_NAMESIZE 12

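/* A cache-line aligned chunk of events. An internal queue (IQ) is a singly
 * linked list of such chunks: events are appended at the tail chunk and
 * consumed from the head chunk.
 */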
struct sw_queue_chunk {
        struct rte_event events[SW_EVS_PER_Q_CHUNK];
        struct sw_queue_chunk *next;
} __rte_cache_aligned;

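/* Return true if the IQ currently holds no events. */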
static __rte_always_inline bool
iq_empty(const struct sw_iq *iq)
{
        return iq->count == 0;
}

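/* Return the number of events currently in the IQ. */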
static __rte_always_inline uint16_t
iq_count(const struct sw_iq *iq)
{
        return iq->count;
}

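/* Pop a chunk off the device-wide free list. The free list is sized from
 * the inflight-event limit so that allocation cannot fail (see the comment
 * in iq_enqueue()); hence no NULL check is performed here.
 */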
static __rte_always_inline struct sw_queue_chunk *
iq_alloc_chunk(struct sw_evdev *sw)
{
        struct sw_queue_chunk *chunk = sw->chunk_list_head;
        sw->chunk_list_head = chunk->next;
        chunk->next = NULL;
        return chunk;
}

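/* Push a chunk back onto the device-wide free list. */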
static __rte_always_inline void
iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
{
        chunk->next = sw->chunk_list_head;
        sw->chunk_list_head = chunk;
}

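/* Return an entire list of chunks to the device-wide free list. */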
static __rte_always_inline void
iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
{
        while (head) {
                struct sw_queue_chunk *next;
                next = head->next;
                iq_free_chunk(sw, head);
                head = next;
        }
}

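/* Set up an empty IQ with a single chunk serving as both head and tail. */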
static __rte_always_inline void
iq_init(struct sw_evdev *sw, struct sw_iq *iq)
{
        iq->head = iq_alloc_chunk(sw);
        iq->tail = iq->head;
        iq->head_idx = 0;
        iq->tail_idx = 0;
        iq->count = 0;
}

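/* Append one event at the tail of the IQ, linking in a fresh chunk when the
 * tail chunk becomes full.
 */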
static __rte_always_inline void
iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
{
        iq->tail->events[iq->tail_idx++] = *ev;
        iq->count++;

        if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
                /* The number of chunks is defined in relation to the total
                 * number of inflight events and the number of IQs such that
                 * allocation will always succeed.
                 */
                struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);
                iq->tail->next = chunk;
                iq->tail = chunk;
                iq->tail_idx = 0;
        }
}

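/* Remove the event at the head of the IQ (the one returned by iq_peek()),
 * freeing the head chunk once it has been fully drained.
 */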
static __rte_always_inline void
iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
{
        iq->head_idx++;
        iq->count--;

        if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
                struct sw_queue_chunk *next = iq->head->next;
                iq_free_chunk(sw, iq->head);
                iq->head = next;
                iq->head_idx = 0;
        }
}

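/* Return a pointer to the next event to be dequeued without removing it.
 * The caller must ensure the IQ is non-empty.
 */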
static __rte_always_inline const struct rte_event *
iq_peek(struct sw_iq *iq)
{
        return &iq->head->events[iq->head_idx];
}

/* Note: the caller must ensure that the IQ is non-empty and that count is
 * non-zero; count is clamped to iq_count() internally.
 */
static __rte_always_inline uint16_t
iq_dequeue_burst(struct sw_evdev *sw,
                 struct sw_iq *iq,
                 struct rte_event *ev,
                 uint16_t count)
{
        struct sw_queue_chunk *current;
        uint16_t total, index;

        count = RTE_MIN(count, iq_count(iq));

        current = iq->head;
        index = iq->head_idx;
        total = 0;

        /* Loop over the chunks */
        while (1) {
                struct sw_queue_chunk *next;
                while (index < SW_EVS_PER_Q_CHUNK) {
                        ev[total++] = current->events[index++];

                        if (unlikely(total == count))
                                goto done;
                }

                /* Move to the next chunk */
                next = current->next;
                iq_free_chunk(sw, current);
                current = next;
                index = 0;
        }

done:
        if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
                /* The last copied event drained the chunk; free it. */
                struct sw_queue_chunk *next = current->next;
                iq_free_chunk(sw, current);
                iq->head = next;
                iq->head_idx = 0;
        } else {
                iq->head = current;
                iq->head_idx = index;
        }

        iq->count -= total;

        return total;
}

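/* Push events back onto the head of the IQ, e.g. to undo part of a dequeue
 * burst that could not be delivered.
 */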
static __rte_always_inline void
iq_put_back(struct sw_evdev *sw,
            struct sw_iq *iq,
            struct rte_event *ev,
            unsigned int count)
{
        /* Put back events that fit in the current head chunk. If necessary,
         * put back events in a new head chunk. The caller must ensure that
         * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is
         * needed.
         */
        uint16_t avail_space = iq->head_idx;

        if (avail_space >= count) {
                /* All events fit in the head chunk's unused prefix. */
                const uint16_t idx = avail_space - count;
                uint16_t i;

                for (i = 0; i < count; i++)
                        iq->head->events[idx + i] = ev[i];

                iq->head_idx = idx;
        } else {
                /* Fill the head chunk's unused prefix with the tail of ev[],
                 * then allocate a new head chunk for the remaining events.
                 */
                const uint16_t remaining = count - avail_space;
                struct sw_queue_chunk *new_head;
                uint16_t i;

                for (i = 0; i < avail_space; i++)
                        iq->head->events[i] = ev[remaining + i];

                new_head = iq_alloc_chunk(sw);
                new_head->next = iq->head;
                iq->head = new_head;
                iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;

                for (i = 0; i < remaining; i++)
                        iq->head->events[iq->head_idx + i] = ev[i];
        }

        iq->count += count;
}

#endif /* _IQ_CHUNK_H_ */