dpdk.git: drivers/event/sw/iq_ring.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

/*
 * Ring structure definitions used for the internal ring buffers of the
 * SW eventdev implementation. These are designed for single-core use only.
 */
#ifndef _IQ_RING_
#define _IQ_RING_

#include <stdio.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_eventdev.h>

#define IQ_RING_NAMESIZE 12
#define QID_IQ_DEPTH 512
#define QID_IQ_MASK (uint16_t)(QID_IQ_DEPTH - 1)
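/* the masked index arithmetic below requires QID_IQ_DEPTH to be a power of two */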

struct iq_ring {
        char name[IQ_RING_NAMESIZE] __rte_cache_aligned;
        uint16_t write_idx;
        uint16_t read_idx;

        struct rte_event ring[QID_IQ_DEPTH];
};

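/*
 * Allocate a ring on the given NUMA socket and reset its indices.
 * Returns NULL if the allocation fails.
 */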
static inline struct iq_ring *
iq_ring_create(const char *name, unsigned int socket_id)
{
        struct iq_ring *retval;

        retval = rte_malloc_socket(NULL, sizeof(*retval), 0, socket_id);
        if (retval == NULL)
                goto end;

        snprintf(retval->name, sizeof(retval->name), "%s", name);
        retval->write_idx = retval->read_idx = 0;
end:
        return retval;
}

static inline void
iq_ring_destroy(struct iq_ring *r)
{
        rte_free(r);
}

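/*
 * Number of events currently held in the ring. The free count below is
 * QID_IQ_MASK minus this value, so the usable capacity is QID_IQ_DEPTH - 1.
 */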
static __rte_always_inline uint16_t
iq_ring_count(const struct iq_ring *r)
{
        return r->write_idx - r->read_idx;
}

static __rte_always_inline uint16_t
iq_ring_free_count(const struct iq_ring *r)
{
        return QID_IQ_MASK - iq_ring_count(r);
}

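/*
 * Enqueue up to nb_qes events from qes. Returns the number actually
 * enqueued, which is less than nb_qes if the ring runs out of free space.
 */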
static __rte_always_inline uint16_t
iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
        const uint16_t read = r->read_idx;
        uint16_t write = r->write_idx;
        const uint16_t space = read + QID_IQ_MASK - write;
        uint16_t i;

        if (space < nb_qes)
                nb_qes = space;

        for (i = 0; i < nb_qes; i++, write++)
                r->ring[write & QID_IQ_MASK] = qes[i];

        r->write_idx = write;

        return nb_qes;
}

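/*
 * Dequeue up to nb_qes events into qes. Returns the number of valid events,
 * which may be less than nb_qes. Note that the copy loop runs for the full
 * nb_qes requested, so entries in qes beyond the returned count can contain
 * stale ring contents and must be ignored by the caller.
 */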
static __rte_always_inline uint16_t
iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
        uint16_t read = r->read_idx;
        const uint16_t write = r->write_idx;
        const uint16_t items = write - read;
        uint16_t i;

        for (i = 0; i < nb_qes; i++, read++)
                qes[i] = r->ring[read & QID_IQ_MASK];

        if (items < nb_qes)
                nb_qes = items;

        r->read_idx += nb_qes;

        return nb_qes;
}

/*
 * Put nb_qes events back at the head of the ring; assumes there is space
 * because the events came from a previous dequeue_burst on this ring.
 */
static __rte_always_inline uint16_t
iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
{
        uint16_t i, read = r->read_idx;

        for (i = nb_qes; i-- > 0; )
                r->ring[--read & QID_IQ_MASK] = qes[i];

        r->read_idx = read;
        return nb_qes;
}

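/*
 * Return a pointer to the event at the head of the ring without consuming
 * it. Only meaningful when iq_ring_count() is non-zero.
 */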
static __rte_always_inline const struct rte_event *
iq_ring_peek(const struct iq_ring *r)
{
        return &r->ring[r->read_idx & QID_IQ_MASK];
}

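/* Consume the event at the head of the ring; pairs with iq_ring_peek(). */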
static __rte_always_inline void
iq_ring_pop(struct iq_ring *r)
{
        r->read_idx++;
}

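/* Enqueue a single event. Returns 0 on success, or -1 if the ring is full. */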
static __rte_always_inline int
iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)
{
        const uint16_t read = r->read_idx;
        const uint16_t write = r->write_idx;
        const uint16_t space = read + QID_IQ_MASK - write;

        if (space == 0)
                return -1;

        r->ring[write & QID_IQ_MASK] = *qe;

        r->write_idx = write + 1;

        return 0;
}

#endif
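
/*
 * Usage sketch: a minimal, hypothetical example of how these helpers might
 * be combined on a single core. The function name, the
 * IQ_RING_USAGE_EXAMPLE guard and the choice of NUMA socket 0 are
 * illustrative assumptions only, not part of the driver.
 */
#ifdef IQ_RING_USAGE_EXAMPLE
static inline int
iq_ring_usage_example(void)
{
        struct rte_event ev = {0};
        struct rte_event out[4];
        struct iq_ring *r = iq_ring_create("example", 0);
        uint16_t n;

        if (r == NULL)
                return -1;

        /* single enqueue fails with -1 only when the ring is full */
        if (iq_ring_enqueue(r, &ev) < 0) {
                iq_ring_destroy(r);
                return -1;
        }

        /* inspect the head entry without consuming it, then consume it */
        if (iq_ring_count(r) > 0) {
                const struct rte_event *head = iq_ring_peek(r);
                (void)head;
                iq_ring_pop(r);
        }

        /* burst calls return how many events were actually transferred */
        n = iq_ring_enqueue_burst(r, &ev, 1);
        n = iq_ring_dequeue_burst(r, out, RTE_DIM(out));
        (void)n;

        iq_ring_destroy(r);
        return 0;
}
#endif /* IQ_RING_USAGE_EXAMPLE */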