4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 * Generic ring structure for passing events from one core to another.
36 * Used by the software scheduler for the producer and consumer rings for
37 * each port, i.e. for passing events from worker cores to scheduler and
38 * vice-versa. Designed for single-producer, single-consumer use with two
39 * cores working on each ring.
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_atomic.h> /* rte_smp_wmb()/rte_smp_rmb() */
51 #define QE_RING_NAMESIZE 32
54 char name[QE_RING_NAMESIZE] __rte_cache_aligned;
55 uint32_t ring_size; /* size of memory block allocated to the ring */
56 uint32_t mask; /* mask for read/write values == ring_size -1 */
57 uint32_t size; /* actual usable space in the ring */
58 volatile uint32_t write_idx __rte_cache_aligned;
59 volatile uint32_t read_idx __rte_cache_aligned;
61 struct rte_event ring[0] __rte_cache_aligned;
64 static inline struct qe_ring *
65 qe_ring_create(const char *name, unsigned int size, unsigned int socket_id)
67 struct qe_ring *retval;
68 const uint32_t ring_size = rte_align32pow2(size + 1);
69 size_t memsize = sizeof(*retval) +
70 (ring_size * sizeof(retval->ring[0]));
72 retval = rte_zmalloc_socket(NULL, memsize, 0, socket_id);
76 snprintf(retval->name, sizeof(retval->name), "EVDEV_RG_%s", name);
77 retval->ring_size = ring_size;
78 retval->mask = ring_size - 1;
/**
 * Free a ring allocated with qe_ring_create().
 * NULL is accepted (rte_free(NULL) is documented as a no-op).
 */
static inline void
qe_ring_destroy(struct qe_ring *r)
{
	rte_free(r);
}
90 static __rte_always_inline unsigned int
91 qe_ring_count(const struct qe_ring *r)
93 return r->write_idx - r->read_idx;
96 static __rte_always_inline unsigned int
97 qe_ring_free_count(const struct qe_ring *r)
99 return r->size - qe_ring_count(r);
102 static __rte_always_inline unsigned int
103 qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
104 unsigned int nb_qes, uint16_t *free_count)
106 const uint32_t size = r->size;
107 const uint32_t mask = r->mask;
108 const uint32_t read = r->read_idx;
109 uint32_t write = r->write_idx;
110 const uint32_t space = read + size - write;
116 for (i = 0; i < nb_qes; i++, write++)
117 r->ring[write & mask] = qes[i];
122 r->write_idx = write;
124 *free_count = space - nb_qes;
129 static __rte_always_inline unsigned int
130 qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
131 unsigned int nb_qes, uint8_t *ops)
133 const uint32_t size = r->size;
134 const uint32_t mask = r->mask;
135 const uint32_t read = r->read_idx;
136 uint32_t write = r->write_idx;
137 const uint32_t space = read + size - write;
143 for (i = 0; i < nb_qes; i++, write++) {
144 r->ring[write & mask] = qes[i];
145 r->ring[write & mask].op = ops[i];
151 r->write_idx = write;
156 static __rte_always_inline unsigned int
157 qe_ring_dequeue_burst(struct qe_ring *r, struct rte_event *qes,
160 const uint32_t mask = r->mask;
161 uint32_t read = r->read_idx;
162 const uint32_t write = r->write_idx;
163 const uint32_t items = write - read;
170 for (i = 0; i < nb_qes; i++, read++)
171 qes[i] = r->ring[read & mask];
176 r->read_idx += nb_qes;