/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <errno.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "rte_port_ethdev.h"
/*
 * Port ETHDEV Reader
 *
 * Per-instance state for an input port that reads packets from one RX queue
 * of one ethdev. Fields are filled in by rte_port_ethdev_reader_create()
 * from the user-supplied params.
 */
struct rte_port_ethdev_reader {
	uint8_t port_id;   /* ethdev port to poll; assumed narrow id — TODO confirm width vs. conf->port_id */
	uint16_t queue_id; /* RX queue on that port */
};
51 rte_port_ethdev_reader_create(void *params, int socket_id)
53 struct rte_port_ethdev_reader_params *conf =
54 (struct rte_port_ethdev_reader_params *) params;
55 struct rte_port_ethdev_reader *port;
57 /* Check input parameters */
59 RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
63 /* Memory allocation */
64 port = rte_zmalloc_socket("PORT", sizeof(*port),
65 RTE_CACHE_LINE_SIZE, socket_id);
67 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
72 port->port_id = conf->port_id;
73 port->queue_id = conf->queue_id;
79 rte_port_ethdev_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
81 struct rte_port_ethdev_reader *p =
82 (struct rte_port_ethdev_reader *) port;
84 return rte_eth_rx_burst(p->port_id, p->queue_id, pkts, n_pkts);
88 rte_port_ethdev_reader_free(void *port)
91 RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
103 struct rte_port_ethdev_writer {
104 struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
105 uint32_t tx_burst_sz;
106 uint16_t tx_buf_count;
113 rte_port_ethdev_writer_create(void *params, int socket_id)
115 struct rte_port_ethdev_writer_params *conf =
116 (struct rte_port_ethdev_writer_params *) params;
117 struct rte_port_ethdev_writer *port;
119 /* Check input parameters */
120 if ((conf == NULL) ||
121 (conf->tx_burst_sz == 0) ||
122 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
123 (!rte_is_power_of_2(conf->tx_burst_sz))) {
124 RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
128 /* Memory allocation */
129 port = rte_zmalloc_socket("PORT", sizeof(*port),
130 RTE_CACHE_LINE_SIZE, socket_id);
132 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
137 port->port_id = conf->port_id;
138 port->queue_id = conf->queue_id;
139 port->tx_burst_sz = conf->tx_burst_sz;
140 port->tx_buf_count = 0;
141 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
147 send_burst(struct rte_port_ethdev_writer *p)
151 nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id,
152 p->tx_buf, p->tx_buf_count);
154 for ( ; nb_tx < p->tx_buf_count; nb_tx++)
155 rte_pktmbuf_free(p->tx_buf[nb_tx]);
161 rte_port_ethdev_writer_tx(void *port, struct rte_mbuf *pkt)
163 struct rte_port_ethdev_writer *p =
164 (struct rte_port_ethdev_writer *) port;
166 p->tx_buf[p->tx_buf_count++] = pkt;
167 if (p->tx_buf_count >= p->tx_burst_sz)
174 rte_port_ethdev_writer_tx_bulk(void *port,
175 struct rte_mbuf **pkts,
178 struct rte_port_ethdev_writer *p =
179 (struct rte_port_ethdev_writer *) port;
180 uint32_t bsz_mask = p->bsz_mask;
181 uint32_t tx_buf_count = p->tx_buf_count;
182 uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
183 ((pkts_mask & bsz_mask) ^ bsz_mask);
186 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
192 n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
195 for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
196 struct rte_mbuf *pkt = pkts[n_pkts_ok];
198 rte_pktmbuf_free(pkt);
201 for ( ; pkts_mask; ) {
202 uint32_t pkt_index = __builtin_ctzll(pkts_mask);
203 uint64_t pkt_mask = 1LLU << pkt_index;
204 struct rte_mbuf *pkt = pkts[pkt_index];
206 p->tx_buf[tx_buf_count++] = pkt;
207 pkts_mask &= ~pkt_mask;
210 p->tx_buf_count = tx_buf_count;
211 if (tx_buf_count >= p->tx_burst_sz)
219 rte_port_ethdev_writer_flush(void *port)
221 struct rte_port_ethdev_writer *p =
222 (struct rte_port_ethdev_writer *) port;
224 if (p->tx_buf_count > 0)
231 rte_port_ethdev_writer_free(void *port)
234 RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
238 rte_port_ethdev_writer_flush(port);
/*
 * Port ETHDEV Writer Nodrop
 */
247 struct rte_port_ethdev_writer_nodrop {
248 struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
249 uint32_t tx_burst_sz;
250 uint16_t tx_buf_count;
258 rte_port_ethdev_writer_nodrop_create(void *params, int socket_id)
260 struct rte_port_ethdev_writer_nodrop_params *conf =
261 (struct rte_port_ethdev_writer_nodrop_params *) params;
262 struct rte_port_ethdev_writer_nodrop *port;
264 /* Check input parameters */
265 if ((conf == NULL) ||
266 (conf->tx_burst_sz == 0) ||
267 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
268 (!rte_is_power_of_2(conf->tx_burst_sz))) {
269 RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
273 /* Memory allocation */
274 port = rte_zmalloc_socket("PORT", sizeof(*port),
275 RTE_CACHE_LINE_SIZE, socket_id);
277 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
282 port->port_id = conf->port_id;
283 port->queue_id = conf->queue_id;
284 port->tx_burst_sz = conf->tx_burst_sz;
285 port->tx_buf_count = 0;
286 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
289 * When n_retries is 0 it means that we should wait for every packet to
290 * send no matter how many retries should it take. To limit number of
291 * branches in fast path, we use UINT64_MAX instead of branching.
293 port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;
299 send_burst_nodrop(struct rte_port_ethdev_writer_nodrop *p)
301 uint32_t nb_tx = 0, i;
303 nb_tx = rte_eth_tx_burst(p->port_id, p->queue_id, p->tx_buf,
306 /* We sent all the packets in a first try */
307 if (nb_tx >= p->tx_buf_count)
310 for (i = 0; i < p->n_retries; i++) {
311 nb_tx += rte_eth_tx_burst(p->port_id, p->queue_id,
312 p->tx_buf + nb_tx, p->tx_buf_count - nb_tx);
314 /* We sent all the packets in more than one try */
315 if (nb_tx >= p->tx_buf_count)
319 /* We didn't send the packets in maximum allowed attempts */
320 for ( ; nb_tx < p->tx_buf_count; nb_tx++)
321 rte_pktmbuf_free(p->tx_buf[nb_tx]);
327 rte_port_ethdev_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
329 struct rte_port_ethdev_writer_nodrop *p =
330 (struct rte_port_ethdev_writer_nodrop *) port;
332 p->tx_buf[p->tx_buf_count++] = pkt;
333 if (p->tx_buf_count >= p->tx_burst_sz)
334 send_burst_nodrop(p);
340 rte_port_ethdev_writer_nodrop_tx_bulk(void *port,
341 struct rte_mbuf **pkts,
344 struct rte_port_ethdev_writer_nodrop *p =
345 (struct rte_port_ethdev_writer_nodrop *) port;
347 uint32_t bsz_mask = p->bsz_mask;
348 uint32_t tx_buf_count = p->tx_buf_count;
349 uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
350 ((pkts_mask & bsz_mask) ^ bsz_mask);
353 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
357 send_burst_nodrop(p);
359 n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
362 if (n_pkts_ok >= n_pkts)
366 * If we didnt manage to send all packets in single burst, move
367 * remaining packets to the buffer and call send burst.
369 for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
370 struct rte_mbuf *pkt = pkts[n_pkts_ok];
371 p->tx_buf[p->tx_buf_count++] = pkt;
373 send_burst_nodrop(p);
375 for ( ; pkts_mask; ) {
376 uint32_t pkt_index = __builtin_ctzll(pkts_mask);
377 uint64_t pkt_mask = 1LLU << pkt_index;
378 struct rte_mbuf *pkt = pkts[pkt_index];
380 p->tx_buf[tx_buf_count++] = pkt;
381 pkts_mask &= ~pkt_mask;
384 p->tx_buf_count = tx_buf_count;
385 if (tx_buf_count >= p->tx_burst_sz)
386 send_burst_nodrop(p);
393 rte_port_ethdev_writer_nodrop_flush(void *port)
395 struct rte_port_ethdev_writer_nodrop *p =
396 (struct rte_port_ethdev_writer_nodrop *) port;
398 if (p->tx_buf_count > 0)
399 send_burst_nodrop(p);
405 rte_port_ethdev_writer_nodrop_free(void *port)
408 RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
412 rte_port_ethdev_writer_nodrop_flush(port);
/*
 * Summary of port operations
 */
421 struct rte_port_in_ops rte_port_ethdev_reader_ops = {
422 .f_create = rte_port_ethdev_reader_create,
423 .f_free = rte_port_ethdev_reader_free,
424 .f_rx = rte_port_ethdev_reader_rx,
427 struct rte_port_out_ops rte_port_ethdev_writer_ops = {
428 .f_create = rte_port_ethdev_writer_create,
429 .f_free = rte_port_ethdev_writer_free,
430 .f_tx = rte_port_ethdev_writer_tx,
431 .f_tx_bulk = rte_port_ethdev_writer_tx_bulk,
432 .f_flush = rte_port_ethdev_writer_flush,
435 struct rte_port_out_ops rte_port_ethdev_writer_nodrop_ops = {
436 .f_create = rte_port_ethdev_writer_nodrop_create,
437 .f_free = rte_port_ethdev_writer_nodrop_free,
438 .f_tx = rte_port_ethdev_writer_nodrop_tx,
439 .f_tx_bulk = rte_port_ethdev_writer_nodrop_tx_bulk,
440 .f_flush = rte_port_ethdev_writer_nodrop_flush,