/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <string.h>

#include <rte_malloc.h>

#include "rte_port_sched.h"
/*
 * Reader-side statistics accounting. When RTE_PORT_STATS_COLLECT is not
 * defined, the macros compile to nothing so the hot path carries no cost.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SCHED_READER_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_SCHED_READER_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_SCHED_READER_PKTS_IN_ADD(port, val)
#define RTE_PORT_SCHED_READER_PKTS_DROP_ADD(port, val)

#endif
57 struct rte_port_sched_reader {
58 struct rte_port_in_stats stats;
60 struct rte_sched_port *sched;
64 rte_port_sched_reader_create(void *params, int socket_id)
66 struct rte_port_sched_reader_params *conf =
67 (struct rte_port_sched_reader_params *) params;
68 struct rte_port_sched_reader *port;
70 /* Check input parameters */
72 (conf->sched == NULL)) {
73 RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
77 /* Memory allocation */
78 port = rte_zmalloc_socket("PORT", sizeof(*port),
79 RTE_CACHE_LINE_SIZE, socket_id);
81 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
86 port->sched = conf->sched;
92 rte_port_sched_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
94 struct rte_port_sched_reader *p = (struct rte_port_sched_reader *) port;
97 nb_rx = rte_sched_port_dequeue(p->sched, pkts, n_pkts);
98 RTE_PORT_SCHED_READER_PKTS_IN_ADD(p, nb_rx);
104 rte_port_sched_reader_free(void *port)
107 RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
117 rte_port_sched_reader_stats_read(void *port,
118 struct rte_port_in_stats *stats, int clear)
120 struct rte_port_sched_reader *p =
121 (struct rte_port_sched_reader *) port;
124 memcpy(stats, &p->stats, sizeof(p->stats));
127 memset(&p->stats, 0, sizeof(p->stats));
/*
 * Writer-side statistics accounting; no-ops unless RTE_PORT_STATS_COLLECT
 * is defined, mirroring the reader-side macros above.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif
149 struct rte_port_sched_writer {
150 struct rte_port_out_stats stats;
152 struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
153 struct rte_sched_port *sched;
154 uint32_t tx_burst_sz;
155 uint32_t tx_buf_count;
160 rte_port_sched_writer_create(void *params, int socket_id)
162 struct rte_port_sched_writer_params *conf =
163 (struct rte_port_sched_writer_params *) params;
164 struct rte_port_sched_writer *port;
166 /* Check input parameters */
167 if ((conf == NULL) ||
168 (conf->sched == NULL) ||
169 (conf->tx_burst_sz == 0) ||
170 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
171 (!rte_is_power_of_2(conf->tx_burst_sz))) {
172 RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
176 /* Memory allocation */
177 port = rte_zmalloc_socket("PORT", sizeof(*port),
178 RTE_CACHE_LINE_SIZE, socket_id);
180 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
185 port->sched = conf->sched;
186 port->tx_burst_sz = conf->tx_burst_sz;
187 port->tx_buf_count = 0;
188 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
194 rte_port_sched_writer_tx(void *port, struct rte_mbuf *pkt)
196 struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
198 p->tx_buf[p->tx_buf_count++] = pkt;
199 RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(p, 1);
200 if (p->tx_buf_count >= p->tx_burst_sz) {
201 __rte_unused uint32_t nb_tx;
203 nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
204 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
212 rte_port_sched_writer_tx_bulk(void *port,
213 struct rte_mbuf **pkts,
216 struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
217 uint64_t bsz_mask = p->bsz_mask;
218 uint32_t tx_buf_count = p->tx_buf_count;
219 uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
220 ((pkts_mask & bsz_mask) ^ bsz_mask);
223 __rte_unused uint32_t nb_tx;
224 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
227 nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf,
229 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, tx_buf_count - nb_tx);
233 nb_tx = rte_sched_port_enqueue(p->sched, pkts, n_pkts);
234 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - nb_tx);
236 for ( ; pkts_mask; ) {
237 uint32_t pkt_index = __builtin_ctzll(pkts_mask);
238 uint64_t pkt_mask = 1LLU << pkt_index;
239 struct rte_mbuf *pkt = pkts[pkt_index];
241 p->tx_buf[tx_buf_count++] = pkt;
242 RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(p, 1);
243 pkts_mask &= ~pkt_mask;
245 p->tx_buf_count = tx_buf_count;
247 if (tx_buf_count >= p->tx_burst_sz) {
248 __rte_unused uint32_t nb_tx;
250 nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf,
252 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, tx_buf_count - nb_tx);
261 rte_port_sched_writer_flush(void *port)
263 struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
265 if (p->tx_buf_count) {
266 __rte_unused uint32_t nb_tx;
268 nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
269 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
277 rte_port_sched_writer_free(void *port)
280 RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
284 rte_port_sched_writer_flush(port);
291 rte_port_sched_writer_stats_read(void *port,
292 struct rte_port_out_stats *stats, int clear)
294 struct rte_port_sched_writer *p =
295 (struct rte_port_sched_writer *) port;
298 memcpy(stats, &p->stats, sizeof(p->stats));
301 memset(&p->stats, 0, sizeof(p->stats));
/*
 * Summary of port operations
 */
309 struct rte_port_in_ops rte_port_sched_reader_ops = {
310 .f_create = rte_port_sched_reader_create,
311 .f_free = rte_port_sched_reader_free,
312 .f_rx = rte_port_sched_reader_rx,
313 .f_stats = rte_port_sched_reader_stats_read,
316 struct rte_port_out_ops rte_port_sched_writer_ops = {
317 .f_create = rte_port_sched_writer_create,
318 .f_free = rte_port_sched_writer_free,
319 .f_tx = rte_port_sched_writer_tx,
320 .f_tx_bulk = rte_port_sched_writer_tx_bulk,
321 .f_flush = rte_port_sched_writer_flush,
322 .f_stats = rte_port_sched_writer_stats_read,