/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>

#include <rte_malloc.h>

#include "rte_port_sched.h"
/* Reader port statistics macros: compiled to real counter updates only when
 * RTE_PORT_STATS_COLLECT is enabled, no-ops otherwise (zero overhead).
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SCHED_READER_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_SCHED_READER_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_SCHED_READER_PKTS_IN_ADD(port, val)
#define RTE_PORT_SCHED_READER_PKTS_DROP_ADD(port, val)

#endif
27 struct rte_port_sched_reader {
28 struct rte_port_in_stats stats;
30 struct rte_sched_port *sched;
34 rte_port_sched_reader_create(void *params, int socket_id)
36 struct rte_port_sched_reader_params *conf =
38 struct rte_port_sched_reader *port;
40 /* Check input parameters */
42 (conf->sched == NULL)) {
43 RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
47 /* Memory allocation */
48 port = rte_zmalloc_socket("PORT", sizeof(*port),
49 RTE_CACHE_LINE_SIZE, socket_id);
51 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
56 port->sched = conf->sched;
62 rte_port_sched_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
64 struct rte_port_sched_reader *p = port;
67 nb_rx = rte_sched_port_dequeue(p->sched, pkts, n_pkts);
68 RTE_PORT_SCHED_READER_PKTS_IN_ADD(p, nb_rx);
74 rte_port_sched_reader_free(void *port)
77 RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
87 rte_port_sched_reader_stats_read(void *port,
88 struct rte_port_in_stats *stats, int clear)
90 struct rte_port_sched_reader *p =
94 memcpy(stats, &p->stats, sizeof(p->stats));
97 memset(&p->stats, 0, sizeof(p->stats));
/* Writer port statistics macros: compiled to real counter updates only when
 * RTE_PORT_STATS_COLLECT is enabled, no-ops otherwise (zero overhead).
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif
119 struct rte_port_sched_writer {
120 struct rte_port_out_stats stats;
122 struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
123 struct rte_sched_port *sched;
124 uint32_t tx_burst_sz;
125 uint32_t tx_buf_count;
130 rte_port_sched_writer_create(void *params, int socket_id)
132 struct rte_port_sched_writer_params *conf =
134 struct rte_port_sched_writer *port;
136 /* Check input parameters */
137 if ((conf == NULL) ||
138 (conf->sched == NULL) ||
139 (conf->tx_burst_sz == 0) ||
140 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
141 (!rte_is_power_of_2(conf->tx_burst_sz))) {
142 RTE_LOG(ERR, PORT, "%s: Invalid params\n", __func__);
146 /* Memory allocation */
147 port = rte_zmalloc_socket("PORT", sizeof(*port),
148 RTE_CACHE_LINE_SIZE, socket_id);
150 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
155 port->sched = conf->sched;
156 port->tx_burst_sz = conf->tx_burst_sz;
157 port->tx_buf_count = 0;
158 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
164 rte_port_sched_writer_tx(void *port, struct rte_mbuf *pkt)
166 struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
168 p->tx_buf[p->tx_buf_count++] = pkt;
169 RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(p, 1);
170 if (p->tx_buf_count >= p->tx_burst_sz) {
171 __rte_unused uint32_t nb_tx;
173 nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
174 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
182 rte_port_sched_writer_tx_bulk(void *port,
183 struct rte_mbuf **pkts,
186 struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
187 uint64_t bsz_mask = p->bsz_mask;
188 uint32_t tx_buf_count = p->tx_buf_count;
189 uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
190 ((pkts_mask & bsz_mask) ^ bsz_mask);
193 __rte_unused uint32_t nb_tx;
194 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
197 nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf,
199 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, tx_buf_count - nb_tx);
203 nb_tx = rte_sched_port_enqueue(p->sched, pkts, n_pkts);
204 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - nb_tx);
206 for ( ; pkts_mask; ) {
207 uint32_t pkt_index = __builtin_ctzll(pkts_mask);
208 uint64_t pkt_mask = 1LLU << pkt_index;
209 struct rte_mbuf *pkt = pkts[pkt_index];
211 p->tx_buf[tx_buf_count++] = pkt;
212 RTE_PORT_SCHED_WRITER_STATS_PKTS_IN_ADD(p, 1);
213 pkts_mask &= ~pkt_mask;
215 p->tx_buf_count = tx_buf_count;
217 if (tx_buf_count >= p->tx_burst_sz) {
218 __rte_unused uint32_t nb_tx;
220 nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf,
222 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, tx_buf_count - nb_tx);
231 rte_port_sched_writer_flush(void *port)
233 struct rte_port_sched_writer *p = (struct rte_port_sched_writer *) port;
235 if (p->tx_buf_count) {
236 __rte_unused uint32_t nb_tx;
238 nb_tx = rte_sched_port_enqueue(p->sched, p->tx_buf, p->tx_buf_count);
239 RTE_PORT_SCHED_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
247 rte_port_sched_writer_free(void *port)
250 RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
254 rte_port_sched_writer_flush(port);
261 rte_port_sched_writer_stats_read(void *port,
262 struct rte_port_out_stats *stats, int clear)
264 struct rte_port_sched_writer *p =
268 memcpy(stats, &p->stats, sizeof(p->stats));
271 memset(&p->stats, 0, sizeof(p->stats));
277 * Summary of port operations
279 struct rte_port_in_ops rte_port_sched_reader_ops = {
280 .f_create = rte_port_sched_reader_create,
281 .f_free = rte_port_sched_reader_free,
282 .f_rx = rte_port_sched_reader_rx,
283 .f_stats = rte_port_sched_reader_stats_read,
286 struct rte_port_out_ops rte_port_sched_writer_ops = {
287 .f_create = rte_port_sched_writer_create,
288 .f_free = rte_port_sched_writer_free,
289 .f_tx = rte_port_sched_writer_tx,
290 .f_tx_bulk = rte_port_sched_writer_tx_bulk,
291 .f_flush = rte_port_sched_writer_flush,
292 .f_stats = rte_port_sched_writer_stats_read,