/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Ethan Zhuang <zhuangwj@gmail.com>.
 * Copyright(c) 2016 Intel Corporation.
 */
#include <string.h>

#include <rte_kni.h>
#include <rte_malloc.h>

#include "rte_port_kni.h"
/*
 * Port KNI Reader
 */

/* Stats macros compile to counter updates only when RTE_PORT_STATS_COLLECT
 * is defined; otherwise they expand to nothing so the fast path pays zero
 * cost for statistics.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_KNI_READER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_KNI_READER_STATS_PKTS_DROP_ADD(port, val)

#endif
29 struct rte_port_kni_reader {
30 struct rte_port_in_stats stats;
36 rte_port_kni_reader_create(void *params, int socket_id)
38 struct rte_port_kni_reader_params *conf =
40 struct rte_port_kni_reader *port;
42 /* Check input parameters */
44 RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
48 /* Memory allocation */
49 port = rte_zmalloc_socket("PORT", sizeof(*port),
50 RTE_CACHE_LINE_SIZE, socket_id);
52 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
57 port->kni = conf->kni;
63 rte_port_kni_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
65 struct rte_port_kni_reader *p =
69 rx_pkt_cnt = rte_kni_rx_burst(p->kni, pkts, n_pkts);
70 RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(p, rx_pkt_cnt);
75 rte_port_kni_reader_free(void *port)
78 RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
87 static int rte_port_kni_reader_stats_read(void *port,
88 struct rte_port_in_stats *stats, int clear)
90 struct rte_port_kni_reader *p =
94 memcpy(stats, &p->stats, sizeof(p->stats));
97 memset(&p->stats, 0, sizeof(p->stats));
/*
 * Port KNI Writer
 */

/* Stats macros compile to counter updates only when RTE_PORT_STATS_COLLECT
 * is defined; otherwise they are no-ops.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif
119 struct rte_port_kni_writer {
120 struct rte_port_out_stats stats;
122 struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
123 uint32_t tx_burst_sz;
124 uint32_t tx_buf_count;
130 rte_port_kni_writer_create(void *params, int socket_id)
132 struct rte_port_kni_writer_params *conf =
134 struct rte_port_kni_writer *port;
136 /* Check input parameters */
137 if ((conf == NULL) ||
138 (conf->tx_burst_sz == 0) ||
139 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
140 (!rte_is_power_of_2(conf->tx_burst_sz))) {
141 RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
145 /* Memory allocation */
146 port = rte_zmalloc_socket("PORT", sizeof(*port),
147 RTE_CACHE_LINE_SIZE, socket_id);
149 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
154 port->kni = conf->kni;
155 port->tx_burst_sz = conf->tx_burst_sz;
156 port->tx_buf_count = 0;
157 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
163 send_burst(struct rte_port_kni_writer *p)
167 nb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);
169 RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
170 for (; nb_tx < p->tx_buf_count; nb_tx++)
171 rte_pktmbuf_free(p->tx_buf[nb_tx]);
177 rte_port_kni_writer_tx(void *port, struct rte_mbuf *pkt)
179 struct rte_port_kni_writer *p =
182 p->tx_buf[p->tx_buf_count++] = pkt;
183 RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
184 if (p->tx_buf_count >= p->tx_burst_sz)
191 rte_port_kni_writer_tx_bulk(void *port,
192 struct rte_mbuf **pkts,
195 struct rte_port_kni_writer *p =
197 uint64_t bsz_mask = p->bsz_mask;
198 uint32_t tx_buf_count = p->tx_buf_count;
199 uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
200 ((pkts_mask & bsz_mask) ^ bsz_mask);
203 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
209 RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
210 n_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);
212 RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
213 for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
214 struct rte_mbuf *pkt = pkts[n_pkts_ok];
216 rte_pktmbuf_free(pkt);
220 uint32_t pkt_index = __builtin_ctzll(pkts_mask);
221 uint64_t pkt_mask = 1LLU << pkt_index;
222 struct rte_mbuf *pkt = pkts[pkt_index];
224 p->tx_buf[tx_buf_count++] = pkt;
225 RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
226 pkts_mask &= ~pkt_mask;
229 p->tx_buf_count = tx_buf_count;
230 if (tx_buf_count >= p->tx_burst_sz)
238 rte_port_kni_writer_flush(void *port)
240 struct rte_port_kni_writer *p =
243 if (p->tx_buf_count > 0)
250 rte_port_kni_writer_free(void *port)
253 RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
257 rte_port_kni_writer_flush(port);
263 static int rte_port_kni_writer_stats_read(void *port,
264 struct rte_port_out_stats *stats, int clear)
266 struct rte_port_kni_writer *p =
270 memcpy(stats, &p->stats, sizeof(p->stats));
273 memset(&p->stats, 0, sizeof(p->stats));
/*
 * Port KNI Writer Nodrop
 */

/* Stats macros compile to counter updates only when RTE_PORT_STATS_COLLECT
 * is defined; otherwise they are no-ops.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
	port->stats.n_pkts_in += val
#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
	port->stats.n_pkts_drop += val

#else

#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)

#endif
295 struct rte_port_kni_writer_nodrop {
296 struct rte_port_out_stats stats;
298 struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
299 uint32_t tx_burst_sz;
300 uint32_t tx_buf_count;
307 rte_port_kni_writer_nodrop_create(void *params, int socket_id)
309 struct rte_port_kni_writer_nodrop_params *conf =
311 struct rte_port_kni_writer_nodrop *port;
313 /* Check input parameters */
314 if ((conf == NULL) ||
315 (conf->tx_burst_sz == 0) ||
316 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
317 (!rte_is_power_of_2(conf->tx_burst_sz))) {
318 RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
322 /* Memory allocation */
323 port = rte_zmalloc_socket("PORT", sizeof(*port),
324 RTE_CACHE_LINE_SIZE, socket_id);
326 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
331 port->kni = conf->kni;
332 port->tx_burst_sz = conf->tx_burst_sz;
333 port->tx_buf_count = 0;
334 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
337 * When n_retries is 0 it means that we should wait for every packet to
338 * send no matter how many retries should it take. To limit number of
339 * branches in fast path, we use UINT64_MAX instead of branching.
341 port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;
347 send_burst_nodrop(struct rte_port_kni_writer_nodrop *p)
349 uint32_t nb_tx = 0, i;
351 nb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);
353 /* We sent all the packets in a first try */
354 if (nb_tx >= p->tx_buf_count) {
359 for (i = 0; i < p->n_retries; i++) {
360 nb_tx += rte_kni_tx_burst(p->kni,
362 p->tx_buf_count - nb_tx);
364 /* We sent all the packets in more than one try */
365 if (nb_tx >= p->tx_buf_count) {
371 /* We didn't send the packets in maximum allowed attempts */
372 RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
373 for ( ; nb_tx < p->tx_buf_count; nb_tx++)
374 rte_pktmbuf_free(p->tx_buf[nb_tx]);
380 rte_port_kni_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
382 struct rte_port_kni_writer_nodrop *p =
385 p->tx_buf[p->tx_buf_count++] = pkt;
386 RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
387 if (p->tx_buf_count >= p->tx_burst_sz)
388 send_burst_nodrop(p);
394 rte_port_kni_writer_nodrop_tx_bulk(void *port,
395 struct rte_mbuf **pkts,
398 struct rte_port_kni_writer_nodrop *p =
401 uint64_t bsz_mask = p->bsz_mask;
402 uint32_t tx_buf_count = p->tx_buf_count;
403 uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
404 ((pkts_mask & bsz_mask) ^ bsz_mask);
407 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
411 send_burst_nodrop(p);
413 RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
414 n_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);
416 if (n_pkts_ok >= n_pkts)
420 * If we didn't manage to send all packets in single burst, move
421 * remaining packets to the buffer and call send burst.
423 for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
424 struct rte_mbuf *pkt = pkts[n_pkts_ok];
425 p->tx_buf[p->tx_buf_count++] = pkt;
427 send_burst_nodrop(p);
429 for ( ; pkts_mask; ) {
430 uint32_t pkt_index = __builtin_ctzll(pkts_mask);
431 uint64_t pkt_mask = 1LLU << pkt_index;
432 struct rte_mbuf *pkt = pkts[pkt_index];
434 p->tx_buf[tx_buf_count++] = pkt;
435 RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
436 pkts_mask &= ~pkt_mask;
439 p->tx_buf_count = tx_buf_count;
440 if (tx_buf_count >= p->tx_burst_sz)
441 send_burst_nodrop(p);
448 rte_port_kni_writer_nodrop_flush(void *port)
450 struct rte_port_kni_writer_nodrop *p =
453 if (p->tx_buf_count > 0)
454 send_burst_nodrop(p);
460 rte_port_kni_writer_nodrop_free(void *port)
463 RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
467 rte_port_kni_writer_nodrop_flush(port);
473 static int rte_port_kni_writer_nodrop_stats_read(void *port,
474 struct rte_port_out_stats *stats, int clear)
476 struct rte_port_kni_writer_nodrop *p =
480 memcpy(stats, &p->stats, sizeof(p->stats));
483 memset(&p->stats, 0, sizeof(p->stats));
490 * Summary of port operations
492 struct rte_port_in_ops rte_port_kni_reader_ops = {
493 .f_create = rte_port_kni_reader_create,
494 .f_free = rte_port_kni_reader_free,
495 .f_rx = rte_port_kni_reader_rx,
496 .f_stats = rte_port_kni_reader_stats_read,
499 struct rte_port_out_ops rte_port_kni_writer_ops = {
500 .f_create = rte_port_kni_writer_create,
501 .f_free = rte_port_kni_writer_free,
502 .f_tx = rte_port_kni_writer_tx,
503 .f_tx_bulk = rte_port_kni_writer_tx_bulk,
504 .f_flush = rte_port_kni_writer_flush,
505 .f_stats = rte_port_kni_writer_stats_read,
508 struct rte_port_out_ops rte_port_kni_writer_nodrop_ops = {
509 .f_create = rte_port_kni_writer_nodrop_create,
510 .f_free = rte_port_kni_writer_nodrop_free,
511 .f_tx = rte_port_kni_writer_nodrop_tx,
512 .f_tx_bulk = rte_port_kni_writer_nodrop_tx_bulk,
513 .f_flush = rte_port_kni_writer_nodrop_flush,
514 .f_stats = rte_port_kni_writer_nodrop_stats_read,