/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>

#include "rte_port_sym_crypto.h"
/*
 * Port Crypto Reader
 */
/* Stats macros compile to counter updates only when stats collection is on;
 * otherwise they expand to nothing so the fast path pays no cost.
 */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SYM_CRYPTO_READER_STATS_PKTS_IN_ADD(port, val) \
	(port)->stats.n_pkts_in += (val)
#define RTE_PORT_SYM_CRYPTO_READER_STATS_PKTS_DROP_ADD(port, val) \
	(port)->stats.n_pkts_drop += (val)

#else

#define RTE_PORT_SYM_CRYPTO_READER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_SYM_CRYPTO_READER_STATS_PKTS_DROP_ADD(port, val)

#endif
28 struct rte_port_sym_crypto_reader {
29 struct rte_port_in_stats stats;
33 struct rte_crypto_op *ops[RTE_PORT_IN_BURST_SIZE_MAX];
34 rte_port_sym_crypto_reader_callback_fn f_callback;
39 rte_port_sym_crypto_reader_create(void *params, int socket_id)
41 struct rte_port_sym_crypto_reader_params *conf =
43 struct rte_port_sym_crypto_reader *port;
45 /* Check input parameters */
47 RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
51 /* Memory allocation */
52 port = rte_zmalloc_socket("PORT", sizeof(*port),
53 RTE_CACHE_LINE_SIZE, socket_id);
55 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
60 port->cryptodev_id = conf->cryptodev_id;
61 port->queue_id = conf->queue_id;
62 port->f_callback = conf->f_callback;
63 port->arg_callback = conf->arg_callback;
69 rte_port_sym_crypto_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
71 struct rte_port_sym_crypto_reader *p =
73 uint16_t rx_ops_cnt, i, n = 0;
75 rx_ops_cnt = rte_cryptodev_dequeue_burst(p->cryptodev_id, p->queue_id,
78 for (i = 0; i < rx_ops_cnt; i++) {
79 struct rte_crypto_op *op = p->ops[i];
81 /** Drop failed pkts */
82 if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
83 rte_pktmbuf_free(op->sym->m_src);
87 pkts[n++] = op->sym->m_src;
91 (*p->f_callback)(pkts, n, p->arg_callback);
93 RTE_PORT_SYM_CRYPTO_READER_STATS_PKTS_IN_ADD(p, n);
94 RTE_PORT_SYM_CRYPTO_READER_STATS_PKTS_DROP_ADD(p, rx_ops_cnt - n);
100 rte_port_sym_crypto_reader_free(void *port)
103 RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
112 static int rte_port_sym_crypto_reader_stats_read(void *port,
113 struct rte_port_in_stats *stats, int clear)
115 struct rte_port_sym_crypto_reader *p =
119 memcpy(stats, &p->stats, sizeof(p->stats));
122 memset(&p->stats, 0, sizeof(p->stats));
/*
 * Port crypto Writer
 */
/* Stats macros: no-ops unless RTE_PORT_STATS_COLLECT is defined. */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_IN_ADD(port, val) \
	(port)->stats.n_pkts_in += (val)
#define RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_DROP_ADD(port, val) \
	(port)->stats.n_pkts_drop += (val)

#else

#define RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_DROP_ADD(port, val)

#endif
144 struct rte_port_sym_crypto_writer {
145 struct rte_port_out_stats stats;
147 struct rte_crypto_op *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
149 uint32_t tx_burst_sz;
150 uint32_t tx_buf_count;
153 uint8_t cryptodev_id;
155 uint16_t crypto_op_offset;
159 rte_port_sym_crypto_writer_create(void *params, int socket_id)
161 struct rte_port_sym_crypto_writer_params *conf =
163 struct rte_port_sym_crypto_writer *port;
165 /* Check input parameters */
166 if ((conf == NULL) ||
167 (conf->tx_burst_sz == 0) ||
168 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
169 (!rte_is_power_of_2(conf->tx_burst_sz))) {
170 RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
174 /* Memory allocation */
175 port = rte_zmalloc_socket("PORT", sizeof(*port),
176 RTE_CACHE_LINE_SIZE, socket_id);
178 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
183 port->tx_burst_sz = conf->tx_burst_sz;
184 port->tx_buf_count = 0;
185 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
187 port->cryptodev_id = conf->cryptodev_id;
188 port->queue_id = conf->queue_id;
189 port->crypto_op_offset = conf->crypto_op_offset;
195 send_burst(struct rte_port_sym_crypto_writer *p)
199 nb_tx = rte_cryptodev_enqueue_burst(p->cryptodev_id, p->queue_id,
200 p->tx_buf, p->tx_buf_count);
202 RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count -
204 for (; nb_tx < p->tx_buf_count; nb_tx++)
205 rte_pktmbuf_free(p->tx_buf[nb_tx]->sym->m_src);
211 rte_port_sym_crypto_writer_tx(void *port, struct rte_mbuf *pkt)
213 struct rte_port_sym_crypto_writer *p =
216 p->tx_buf[p->tx_buf_count++] = (struct rte_crypto_op *)
217 RTE_MBUF_METADATA_UINT8_PTR(pkt, p->crypto_op_offset);
218 RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_IN_ADD(p, 1);
219 if (p->tx_buf_count >= p->tx_burst_sz)
226 rte_port_sym_crypto_writer_tx_bulk(void *port,
227 struct rte_mbuf **pkts,
230 struct rte_port_sym_crypto_writer *p =
232 uint64_t bsz_mask = p->bsz_mask;
233 uint32_t tx_buf_count = p->tx_buf_count;
234 uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
235 ((pkts_mask & bsz_mask) ^ bsz_mask);
238 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
241 RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
243 for (i = 0; i < n_pkts; i++)
244 p->tx_buf[p->tx_buf_count++] = (struct rte_crypto_op *)
245 RTE_MBUF_METADATA_UINT8_PTR(pkts[i],
246 p->crypto_op_offset);
248 if (p->tx_buf_count >= p->tx_burst_sz)
252 uint32_t pkt_index = __builtin_ctzll(pkts_mask);
253 uint64_t pkt_mask = 1LLU << pkt_index;
254 struct rte_mbuf *pkt = pkts[pkt_index];
256 p->tx_buf[tx_buf_count++] = (struct rte_crypto_op *)
257 RTE_MBUF_METADATA_UINT8_PTR(pkt,
258 p->crypto_op_offset);
260 RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_IN_ADD(p, 1);
261 pkts_mask &= ~pkt_mask;
264 p->tx_buf_count = tx_buf_count;
265 if (tx_buf_count >= p->tx_burst_sz)
273 rte_port_sym_crypto_writer_flush(void *port)
275 struct rte_port_sym_crypto_writer *p =
278 if (p->tx_buf_count > 0)
285 rte_port_sym_crypto_writer_free(void *port)
288 RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
292 rte_port_sym_crypto_writer_flush(port);
298 static int rte_port_sym_crypto_writer_stats_read(void *port,
299 struct rte_port_out_stats *stats, int clear)
301 struct rte_port_sym_crypto_writer *p =
305 memcpy(stats, &p->stats, sizeof(p->stats));
308 memset(&p->stats, 0, sizeof(p->stats));
/*
 * Port crypto Writer Nodrop
 */
/* Stats macros: no-ops unless RTE_PORT_STATS_COLLECT is defined. */
#ifdef RTE_PORT_STATS_COLLECT

#define RTE_PORT_SYM_CRYPTO_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
	(port)->stats.n_pkts_in += (val)
#define RTE_PORT_SYM_CRYPTO_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
	(port)->stats.n_pkts_drop += (val)

#else

#define RTE_PORT_SYM_CRYPTO_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
#define RTE_PORT_SYM_CRYPTO_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)

#endif
330 struct rte_port_sym_crypto_writer_nodrop {
331 struct rte_port_out_stats stats;
333 struct rte_crypto_op *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
334 uint32_t tx_burst_sz;
335 uint32_t tx_buf_count;
339 uint8_t cryptodev_id;
341 uint16_t crypto_op_offset;
345 rte_port_sym_crypto_writer_nodrop_create(void *params, int socket_id)
347 struct rte_port_sym_crypto_writer_nodrop_params *conf =
349 struct rte_port_sym_crypto_writer_nodrop *port;
351 /* Check input parameters */
352 if ((conf == NULL) ||
353 (conf->tx_burst_sz == 0) ||
354 (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
355 (!rte_is_power_of_2(conf->tx_burst_sz))) {
356 RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
360 /* Memory allocation */
361 port = rte_zmalloc_socket("PORT", sizeof(*port),
362 RTE_CACHE_LINE_SIZE, socket_id);
364 RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
369 port->cryptodev_id = conf->cryptodev_id;
370 port->queue_id = conf->queue_id;
371 port->crypto_op_offset = conf->crypto_op_offset;
372 port->tx_burst_sz = conf->tx_burst_sz;
373 port->tx_buf_count = 0;
374 port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
377 * When n_retries is 0 it means that we should wait for every packet to
378 * send no matter how many retries should it take. To limit number of
379 * branches in fast path, we use UINT64_MAX instead of branching.
381 port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;
387 send_burst_nodrop(struct rte_port_sym_crypto_writer_nodrop *p)
389 uint32_t nb_tx = 0, i;
391 nb_tx = rte_cryptodev_enqueue_burst(p->cryptodev_id, p->queue_id,
392 p->tx_buf, p->tx_buf_count);
394 /* We sent all the packets in a first try */
395 if (nb_tx >= p->tx_buf_count) {
400 for (i = 0; i < p->n_retries; i++) {
401 nb_tx += rte_cryptodev_enqueue_burst(p->cryptodev_id,
402 p->queue_id, p->tx_buf + nb_tx,
403 p->tx_buf_count - nb_tx);
405 /* We sent all the packets in more than one try */
406 if (nb_tx >= p->tx_buf_count) {
412 /* We didn't send the packets in maximum allowed attempts */
413 RTE_PORT_SYM_CRYPTO_WRITER_NODROP_STATS_PKTS_DROP_ADD(p,
414 p->tx_buf_count - nb_tx);
415 for ( ; nb_tx < p->tx_buf_count; nb_tx++)
416 rte_pktmbuf_free(p->tx_buf[nb_tx]->sym->m_src);
422 rte_port_sym_crypto_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
424 struct rte_port_sym_crypto_writer_nodrop *p =
427 p->tx_buf[p->tx_buf_count++] = (struct rte_crypto_op *)
428 RTE_MBUF_METADATA_UINT8_PTR(pkt, p->crypto_op_offset);
429 RTE_PORT_SYM_CRYPTO_WRITER_STATS_PKTS_IN_ADD(p, 1);
430 if (p->tx_buf_count >= p->tx_burst_sz)
431 send_burst_nodrop(p);
437 rte_port_sym_crypto_writer_nodrop_tx_bulk(void *port,
438 struct rte_mbuf **pkts,
441 struct rte_port_sym_crypto_writer_nodrop *p =
444 uint64_t bsz_mask = p->bsz_mask;
445 uint32_t tx_buf_count = p->tx_buf_count;
446 uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
447 ((pkts_mask & bsz_mask) ^ bsz_mask);
450 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
453 RTE_PORT_SYM_CRYPTO_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
455 for (i = 0; i < n_pkts; i++)
456 p->tx_buf[p->tx_buf_count++] = (struct rte_crypto_op *)
457 RTE_MBUF_METADATA_UINT8_PTR(pkts[i],
458 p->crypto_op_offset);
460 if (p->tx_buf_count >= p->tx_burst_sz)
461 send_burst_nodrop(p);
463 for ( ; pkts_mask; ) {
464 uint32_t pkt_index = __builtin_ctzll(pkts_mask);
465 uint64_t pkt_mask = 1LLU << pkt_index;
466 struct rte_mbuf *pkt = pkts[pkt_index];
468 p->tx_buf[tx_buf_count++] = (struct rte_crypto_op *)
469 RTE_MBUF_METADATA_UINT8_PTR(pkt,
470 p->crypto_op_offset);
471 RTE_PORT_SYM_CRYPTO_WRITER_NODROP_STATS_PKTS_IN_ADD(p,
473 pkts_mask &= ~pkt_mask;
476 p->tx_buf_count = tx_buf_count;
477 if (tx_buf_count >= p->tx_burst_sz)
478 send_burst_nodrop(p);
485 rte_port_sym_crypto_writer_nodrop_flush(void *port)
487 struct rte_port_sym_crypto_writer_nodrop *p =
490 if (p->tx_buf_count > 0)
491 send_burst_nodrop(p);
497 rte_port_sym_crypto_writer_nodrop_free(void *port)
500 RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
504 rte_port_sym_crypto_writer_nodrop_flush(port);
510 static int rte_port_sym_crypto_writer_nodrop_stats_read(void *port,
511 struct rte_port_out_stats *stats, int clear)
513 struct rte_port_sym_crypto_writer_nodrop *p =
517 memcpy(stats, &p->stats, sizeof(p->stats));
520 memset(&p->stats, 0, sizeof(p->stats));
527 * Summary of port operations
529 struct rte_port_in_ops rte_port_sym_crypto_reader_ops = {
530 .f_create = rte_port_sym_crypto_reader_create,
531 .f_free = rte_port_sym_crypto_reader_free,
532 .f_rx = rte_port_sym_crypto_reader_rx,
533 .f_stats = rte_port_sym_crypto_reader_stats_read,
536 struct rte_port_out_ops rte_port_sym_crypto_writer_ops = {
537 .f_create = rte_port_sym_crypto_writer_create,
538 .f_free = rte_port_sym_crypto_writer_free,
539 .f_tx = rte_port_sym_crypto_writer_tx,
540 .f_tx_bulk = rte_port_sym_crypto_writer_tx_bulk,
541 .f_flush = rte_port_sym_crypto_writer_flush,
542 .f_stats = rte_port_sym_crypto_writer_stats_read,
545 struct rte_port_out_ops rte_port_sym_crypto_writer_nodrop_ops = {
546 .f_create = rte_port_sym_crypto_writer_nodrop_create,
547 .f_free = rte_port_sym_crypto_writer_nodrop_free,
548 .f_tx = rte_port_sym_crypto_writer_nodrop_tx,
549 .f_tx_bulk = rte_port_sym_crypto_writer_nodrop_tx_bulk,
550 .f_flush = rte_port_sym_crypto_writer_nodrop_flush,
551 .f_stats = rte_port_sym_crypto_writer_nodrop_stats_read,