1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2016 Intel Corporation
11 #include <sys/types.h>
12 #include <sys/queue.h>
13 #include <netinet/in.h>
22 #include <rte_string_fns.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_common.h>
25 #include <rte_cryptodev.h>
26 #include <rte_cycles.h>
27 #include <rte_debug.h>
29 #include <rte_ether.h>
30 #include <rte_ethdev.h>
31 #include <rte_interrupts.h>
33 #include <rte_launch.h>
34 #include <rte_lcore.h>
36 #include <rte_malloc.h>
38 #include <rte_memcpy.h>
39 #include <rte_memory.h>
40 #include <rte_mempool.h>
41 #include <rte_per_lcore.h>
42 #include <rte_prefetch.h>
43 #include <rte_random.h>
44 #include <rte_hexdump.h>
45 #ifdef RTE_CRYPTO_SCHEDULER
46 #include <rte_cryptodev_scheduler.h>
55 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
/* Upper bounds for user-supplied crypto parameters (keys/IVs/AAD in bytes). */
59 #define MAX_STR_LEN 32
60 #define MAX_KEY_SIZE 128
61 #define MAX_IV_SIZE 16
62 #define MAX_AAD_SIZE 65535
63 #define MAX_PKT_BURST 32
64 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
65 #define SESSION_POOL_CACHE_SIZE 0
67 #define MAXIMUM_IV_LENGTH 16
/*
 * IVs are stored in the crypto-op private area, immediately after the
 * rte_crypto_op and rte_crypto_sym_op structures.
 */
68 #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
69 sizeof(struct rte_crypto_sym_op))
72 * Configurable number of RX/TX ring descriptors
74 #define RTE_TEST_RX_DESC_DEFAULT 1024
75 #define RTE_TEST_TX_DESC_DEFAULT 1024
77 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
78 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
80 /* ethernet addresses of ports */
81 static struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
83 /* mask of enabled ports */
84 static uint64_t l2fwd_enabled_port_mask;
85 static uint64_t l2fwd_enabled_crypto_mask;
87 /* list of enabled ports */
88 static uint16_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
/*
 * NOTE(review): the two "buffer" members below belong to the pkt_buffer and
 * op_buffer structs respectively; the struct headers (and their len fields)
 * are on lines omitted from this listing.
 */
93 struct rte_mbuf *buffer[MAX_PKT_BURST];
98 struct rte_crypto_op *buffer[MAX_PKT_BURST];
101 #define MAX_RX_QUEUE_PER_LCORE 16
102 #define MAX_TX_QUEUE_PER_PORT 16
/* Order in which cipher/auth/AEAD transforms are chained on a session. */
104 enum l2fwd_crypto_xform_chain {
105 L2FWD_CRYPTO_CIPHER_HASH,
106 L2FWD_CRYPTO_HASH_CIPHER,
107 L2FWD_CRYPTO_CIPHER_ONLY,
108 L2FWD_CRYPTO_HASH_ONLY,
/*
 * NOTE(review): the L2FWD_CRYPTO_AEAD enumerator and the start of
 * struct l2fwd_key are on lines missing from this listing; phys_addr
 * below is the IOVA of the key/AAD buffer.
 */
115 rte_iova_t phys_addr;
123 /** l2fwd crypto application command line options */
124 struct l2fwd_crypto_options {
126 unsigned nb_ports_per_lcore;
127 unsigned refresh_period;
128 unsigned single_lcore:1;
131 unsigned sessionless:1;
133 enum l2fwd_crypto_xform_chain xform_chain;
/* Cipher transform and its key/IV; *_random_size < 0 means "not set". */
135 struct rte_crypto_sym_xform cipher_xform;
137 int ckey_random_size;
138 uint8_t cipher_key[MAX_KEY_SIZE];
140 struct l2fwd_iv cipher_iv;
141 unsigned int cipher_iv_param;
142 int cipher_iv_random_size;
/* Authentication transform and its key/IV. */
144 struct rte_crypto_sym_xform auth_xform;
146 int akey_random_size;
147 uint8_t auth_key[MAX_KEY_SIZE];
149 struct l2fwd_iv auth_iv;
150 unsigned int auth_iv_param;
151 int auth_iv_random_size;
/* AEAD transform, key/IV and additional authenticated data. */
153 struct rte_crypto_sym_xform aead_xform;
154 unsigned int aead_key_param;
155 int aead_key_random_size;
156 uint8_t aead_key[MAX_KEY_SIZE];
158 struct l2fwd_iv aead_iv;
159 unsigned int aead_iv_param;
160 int aead_iv_random_size;
162 struct l2fwd_key aad;
169 char string_type[MAX_STR_LEN];
/* Bitmask of crypto devices the app is allowed to use. */
171 uint64_t cryptodev_mask;
/* Non-zero: rewrite src/dst MACs on forwarded packets (default on). */
173 unsigned int mac_updating;
176 /** l2fwd crypto lcore params */
177 struct l2fwd_crypto_params {
181 unsigned digest_length;
184 uint32_t cipher_dataunit_len;
/* Per-lcore working copies of IVs/AAD, resolved from the global options. */
186 struct l2fwd_iv cipher_iv;
187 struct l2fwd_iv auth_iv;
188 struct l2fwd_iv aead_iv;
189 struct l2fwd_key aad;
190 struct rte_cryptodev_sym_session *session;
197 enum rte_crypto_cipher_algorithm cipher_algo;
198 enum rte_crypto_auth_algorithm auth_algo;
199 enum rte_crypto_aead_algorithm aead_algo;
202 /** lcore configuration */
203 struct lcore_queue_conf {
204 unsigned nb_rx_ports;
205 uint16_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
207 unsigned nb_crypto_devs;
208 unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
/* Staging buffers: crypto ops per cryptodev, packets per eth port. */
210 struct op_buffer op_buf[RTE_CRYPTO_MAX_DEVS];
211 struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
212 } __rte_cache_aligned;
214 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
/* Default port config: single RX/TX queue, no multi-queue distribution. */
216 static struct rte_eth_conf port_conf = {
218 .mq_mode = RTE_ETH_MQ_RX_NONE,
222 .mq_mode = RTE_ETH_MQ_TX_NONE,
226 struct rte_mempool *l2fwd_pktmbuf_pool;
227 struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-NUMA-socket session pools (header + private data). */
229 struct rte_mempool *sess_mp;
230 struct rte_mempool *priv_mp;
231 } session_pool_socket[RTE_MAX_NUMA_NODES];
233 /* Per-port statistics struct */
234 struct l2fwd_port_statistics {
238 uint64_t crypto_enqueued;
239 uint64_t crypto_dequeued;
242 } __rte_cache_aligned;
/* Per-cryptodev statistics (enqueued/dequeued/errors on missing lines). */
244 struct l2fwd_crypto_statistics {
249 } __rte_cache_aligned;
251 struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
252 struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS];
254 /* A tsc-based timer responsible for triggering statistics printout */
/* 2e6 TSC cycles ~= 1 ms on a 2 GHz core; periods are expressed in cycles. */
255 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
256 #define MAX_TIMER_PERIOD 86400UL /* 1 day max */
258 /* default period is 10 seconds */
259 static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
261 /* Print out statistics on packets dropped */
/*
 * NOTE(review): the function signature (presumably "static void
 * print_stats(void)") and several interior lines — including the `continue`
 * statements after the skip checks and the closing braces — are on lines
 * omitted from this listing.
 */
265 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
266 uint64_t total_packets_enqueued, total_packets_dequeued,
267 total_packets_errors;
/* Reset aggregates before summing per-port and per-device counters. */
271 total_packets_dropped = 0;
272 total_packets_tx = 0;
273 total_packets_rx = 0;
274 total_packets_enqueued = 0;
275 total_packets_dequeued = 0;
276 total_packets_errors = 0;
/* ANSI escape sequences: clear screen, then cursor to home position. */
278 const char clr[] = { 27, '[', '2', 'J', '\0' };
279 const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
281 /* Clear screen and move to top left */
282 printf("%s%s", clr, topLeft);
284 printf("\nPort statistics ====================================");
286 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
287 /* skip disabled ports */
288 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
290 printf("\nStatistics for port %u ------------------------------"
291 "\nPackets sent: %32"PRIu64
292 "\nPackets received: %28"PRIu64
293 "\nPackets dropped: %29"PRIu64,
295 port_statistics[portid].tx,
296 port_statistics[portid].rx,
297 port_statistics[portid].dropped);
299 total_packets_dropped += port_statistics[portid].dropped;
300 total_packets_tx += port_statistics[portid].tx;
301 total_packets_rx += port_statistics[portid].rx;
303 printf("\nCrypto statistics ==================================");
/* 64-bit shift: cdevid can exceed 31, unlike the port mask above. */
305 for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) {
306 /* skip disabled ports */
307 if ((l2fwd_enabled_crypto_mask & (((uint64_t)1) << cdevid)) == 0)
309 printf("\nStatistics for cryptodev %"PRIu64
310 " -------------------------"
311 "\nPackets enqueued: %28"PRIu64
312 "\nPackets dequeued: %28"PRIu64
313 "\nPackets errors: %30"PRIu64,
315 crypto_statistics[cdevid].enqueued,
316 crypto_statistics[cdevid].dequeued,
317 crypto_statistics[cdevid].errors);
319 total_packets_enqueued += crypto_statistics[cdevid].enqueued;
320 total_packets_dequeued += crypto_statistics[cdevid].dequeued;
321 total_packets_errors += crypto_statistics[cdevid].errors;
323 printf("\nAggregate statistics ==============================="
324 "\nTotal packets received: %22"PRIu64
325 "\nTotal packets enqueued: %22"PRIu64
326 "\nTotal packets dequeued: %22"PRIu64
327 "\nTotal packets sent: %26"PRIu64
328 "\nTotal packets dropped: %23"PRIu64
329 "\nTotal packets crypto errors: %17"PRIu64,
331 total_packets_enqueued,
332 total_packets_dequeued,
334 total_packets_dropped,
335 total_packets_errors);
336 printf("\n====================================================\n");
341 /* l2fwd_crypto_send_burst 8< */
/*
 * Enqueue up to n buffered crypto ops to the lcore's queue pair on
 * cparams->dev_id. Ops the device did not accept are freed (both the op
 * and its source mbuf) and counted as errors.
 * NOTE(review): the free loop header and closing braces are on lines
 * omitted from this listing.
 */
343 l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
344 struct l2fwd_crypto_params *cparams)
346 struct rte_crypto_op **op_buffer;
349 op_buffer = (struct rte_crypto_op **)
350 qconf->op_buf[cparams->dev_id].buffer;
352 ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
353 cparams->qp_id, op_buffer, (uint16_t) n);
355 crypto_statistics[cparams->dev_id].enqueued += ret;
/* Partial enqueue: drop everything the device refused. */
356 if (unlikely(ret < n)) {
357 crypto_statistics[cparams->dev_id].errors += (n - ret);
359 rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
360 rte_crypto_op_free(op_buffer[ret]);
366 /* >8 End of l2fwd_crypto_send_burst. */
368 /* Crypto enqueue. 8< */
/*
 * Buffer one crypto op for the target device; flush the buffer with a
 * full burst once MAX_PKT_BURST ops have accumulated.
 * NOTE(review): the len increment and the reset to 0 after flushing are
 * on lines omitted from this listing.
 */
370 l2fwd_crypto_enqueue(struct rte_crypto_op *op,
371 struct l2fwd_crypto_params *cparams)
373 unsigned lcore_id, len;
374 struct lcore_queue_conf *qconf;
376 lcore_id = rte_lcore_id();
378 qconf = &lcore_queue_conf[lcore_id];
379 len = qconf->op_buf[cparams->dev_id].len;
380 qconf->op_buf[cparams->dev_id].buffer[len] = op;
383 /* enough ops to be sent */
384 if (len == MAX_PKT_BURST) {
385 l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
389 qconf->op_buf[cparams->dev_id].len = len;
392 /* >8 End of crypto enqueue. */
/*
 * Prepare a symmetric crypto operation for one IPv4 packet and hand it to
 * l2fwd_crypto_enqueue(). Skips non-IPv4 frames, pads block ciphers to
 * block/data-unit size, copies IVs into the op private area (IV_OFFSET),
 * and sets auth/cipher/AEAD data offsets — in bits for SNOW3G/KASUMI/ZUC.
 */
395 l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
396 struct rte_crypto_op *op,
397 struct l2fwd_crypto_params *cparams)
399 struct rte_ether_hdr *eth_hdr;
400 struct rte_ipv4_hdr *ip_hdr;
402 uint32_t ipdata_offset, data_len;
403 uint32_t pad_len = 0;
406 eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
/* Only IPv4 packets are crypto-processed; others are skipped. */
408 if (eth_hdr->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
411 ipdata_offset = sizeof(struct rte_ether_hdr);
413 ip_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
/* Skip the variable-length IPv4 header (IHL is in 32-bit words). */
416 ipdata_offset += (ip_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK)
417 * RTE_IPV4_IHL_MULTIPLIER;
420 /* Zero pad data to be crypto'd so it is block aligned */
421 data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
/* When verifying, the trailing digest is not part of the payload. */
423 if ((cparams->do_hash || cparams->do_aead) && cparams->hash_verify)
424 data_len -= cparams->digest_length;
426 if (cparams->do_cipher) {
428 * Following algorithms are block cipher algorithms,
429 * and might need padding
431 switch (cparams->cipher_algo) {
432 case RTE_CRYPTO_CIPHER_AES_CBC:
433 case RTE_CRYPTO_CIPHER_AES_ECB:
434 case RTE_CRYPTO_CIPHER_DES_CBC:
435 case RTE_CRYPTO_CIPHER_3DES_CBC:
436 case RTE_CRYPTO_CIPHER_3DES_ECB:
437 if (data_len % cparams->block_size)
438 pad_len = cparams->block_size -
439 (data_len % cparams->block_size);
/* XTS pads to the configured data-unit length instead. */
441 case RTE_CRYPTO_CIPHER_AES_XTS:
442 if (cparams->cipher_dataunit_len != 0 &&
443 (data_len % cparams->cipher_dataunit_len))
444 pad_len = cparams->cipher_dataunit_len -
445 (data_len % cparams->cipher_dataunit_len);
/* Append zeroed padding to the mbuf; drop packet on failure. */
452 padding = rte_pktmbuf_append(m, pad_len);
453 if (unlikely(!padding))
457 memset(padding, 0, pad_len);
461 /* Set crypto operation data parameters */
462 rte_crypto_op_attach_sym_session(op, cparams->session);
464 if (cparams->do_hash) {
465 if (cparams->auth_iv.length) {
466 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
469 cparams->cipher_iv.length);
471 * Copy IV at the end of the crypto operation,
472 * after the cipher IV, if added
474 rte_memcpy(iv_ptr, cparams->auth_iv.data,
475 cparams->auth_iv.length);
477 if (!cparams->hash_verify) {
478 /* Append space for digest to end of packet */
479 op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
480 cparams->digest_length);
482 op->sym->auth.digest.data = rte_pktmbuf_mtod(m,
483 uint8_t *) + ipdata_offset + data_len;
486 op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
487 rte_pktmbuf_pkt_len(m) - cparams->digest_length);
489 /* For wireless algorithms, offset/length must be in bits */
490 if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
491 cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
492 cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
493 op->sym->auth.data.offset = ipdata_offset << 3;
494 op->sym->auth.data.length = data_len << 3;
496 op->sym->auth.data.offset = ipdata_offset;
497 op->sym->auth.data.length = data_len;
501 if (cparams->do_cipher) {
502 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
504 /* Copy IV at the end of the crypto operation */
505 rte_memcpy(iv_ptr, cparams->cipher_iv.data,
506 cparams->cipher_iv.length);
508 /* For wireless algorithms, offset/length must be in bits */
509 if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
510 cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
511 cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
512 op->sym->cipher.data.offset = ipdata_offset << 3;
513 op->sym->cipher.data.length = data_len << 3;
515 op->sym->cipher.data.offset = ipdata_offset;
516 op->sym->cipher.data.length = data_len;
520 if (cparams->do_aead) {
521 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
523 /* Copy IV at the end of the crypto operation */
525 * If doing AES-CCM, nonce is copied one byte
526 * after the start of IV field
528 if (cparams->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
529 rte_memcpy(iv_ptr + 1, cparams->aead_iv.data,
530 cparams->aead_iv.length);
532 rte_memcpy(iv_ptr, cparams->aead_iv.data,
533 cparams->aead_iv.length);
535 op->sym->aead.data.offset = ipdata_offset;
536 op->sym->aead.data.length = data_len;
538 if (!cparams->hash_verify) {
539 /* Append space for digest to end of packet */
540 op->sym->aead.digest.data = (uint8_t *)rte_pktmbuf_append(m,
541 cparams->digest_length);
543 op->sym->aead.digest.data = rte_pktmbuf_mtod(m,
544 uint8_t *) + ipdata_offset + data_len;
547 op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
548 rte_pktmbuf_pkt_len(m) - cparams->digest_length);
550 if (cparams->aad.length) {
551 op->sym->aead.aad.data = cparams->aad.data;
552 op->sym->aead.aad.phys_addr = cparams->aad.phys_addr;
/* Hand the fully-populated op to the per-device staging buffer. */
558 return l2fwd_crypto_enqueue(op, cparams);
562 /* Send the burst of packets on an output interface */
/*
 * Transmit up to n buffered mbufs on TX queue 0 of the given port;
 * free and count as dropped any mbufs the PMD did not accept.
 * NOTE(review): the port parameter, free-loop header and closing braces
 * are on lines omitted from this listing.
 */
564 l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
567 struct rte_mbuf **pkt_buffer;
570 pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
572 ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
573 port_statistics[port].tx += ret;
574 if (unlikely(ret < n)) {
575 port_statistics[port].dropped += (n - ret);
577 rte_pktmbuf_free(pkt_buffer[ret]);
584 /* Enqueue packets for TX and prepare them to be sent. 8< */
/*
 * Buffer one mbuf for TX on the given port; flush with a full burst once
 * MAX_PKT_BURST packets have accumulated (mirrors l2fwd_crypto_enqueue).
 */
586 l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
588 unsigned lcore_id, len;
589 struct lcore_queue_conf *qconf;
591 lcore_id = rte_lcore_id();
593 qconf = &lcore_queue_conf[lcore_id];
594 len = qconf->pkt_buf[port].len;
595 qconf->pkt_buf[port].buffer[len] = m;
598 /* enough pkts to be sent */
599 if (unlikely(len == MAX_PKT_BURST)) {
600 l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
604 qconf->pkt_buf[port].len = len;
607 /* >8 End of Enqueuing packets for TX. */
610 l2fwd_mac_updating(struct rte_mbuf *m, uint16_t dest_portid)
612 struct rte_ether_hdr *eth;
615 eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
617 /* 02:00:00:00:00:xx */
618 tmp = ð->dst_addr.addr_bytes[0];
619 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
622 rte_ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], ð->src_addr);
/*
 * Forward one processed packet: optionally rewrite MACs, strip the
 * verified digest and any decrypt padding, then queue it for TX on the
 * destination port mapped from the RX port.
 */
626 l2fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
627 struct l2fwd_crypto_options *options)
631 struct rte_ipv4_hdr *ip_hdr;
632 uint32_t ipdata_offset = sizeof(struct rte_ether_hdr);
634 ip_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
636 dst_port = l2fwd_dst_ports[portid];
638 if (options->mac_updating)
639 l2fwd_mac_updating(m, dst_port);
/* Digest was verified by the device; remove it from the packet tail. */
641 if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
642 rte_pktmbuf_trim(m, options->auth_xform.auth.digest_length);
/* After decryption, trim the block/data-unit padding added on enqueue. */
644 if (options->cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
645 pad_len = m->pkt_len - rte_be_to_cpu_16(ip_hdr->total_length) -
647 rte_pktmbuf_trim(m, pad_len);
650 l2fwd_send_packet(m, dst_port);
653 /** Generate random key */
/*
 * Fill `key` with `length` bytes from /dev/urandom; aborts the whole
 * application on any failure.
 * NOTE(review): the open() error check and the close(fd) call appear to be
 * on lines omitted from this listing — confirm fd is closed in the full
 * source.
 */
655 generate_random_key(uint8_t *key, unsigned length)
660 fd = open("/dev/urandom", O_RDONLY);
662 rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
664 ret = read(fd, key, length);
/* A short read is treated the same as a failed read. */
667 if (ret != (signed)length)
668 rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
671 /* Session is created and is later attached to the crypto operation. 8< */
/*
 * Build the xform chain selected on the command line (AEAD, cipher->auth,
 * auth->cipher, cipher-only or auth-only), then create and initialize a
 * symmetric session from the pools of the device's NUMA socket.
 * Returns NULL on failure (error paths are on lines omitted here).
 */
672 static struct rte_cryptodev_sym_session *
673 initialize_crypto_session(struct l2fwd_crypto_options *options, uint8_t cdev_id)
675 struct rte_crypto_sym_xform *first_xform;
676 struct rte_cryptodev_sym_session *session;
677 int retval = rte_cryptodev_socket_id(cdev_id);
682 uint8_t socket_id = (uint8_t) retval;
684 if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
685 first_xform = &options->aead_xform;
686 } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
687 first_xform = &options->cipher_xform;
688 first_xform->next = &options->auth_xform;
689 } else if (options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER) {
690 first_xform = &options->auth_xform;
691 first_xform->next = &options->cipher_xform;
692 } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
693 first_xform = &options->cipher_xform;
695 first_xform = &options->auth_xform;
/* Allocate the session header from this socket's session pool... */
698 session = rte_cryptodev_sym_session_create(
699 session_pool_socket[socket_id].sess_mp);
/* ...then let the device fill its private data from the private pool. */
703 if (rte_cryptodev_sym_session_init(cdev_id, session,
705 session_pool_socket[socket_id].priv_mp) < 0)
710 /* >8 End of creation of session. */
713 l2fwd_crypto_options_print(struct l2fwd_crypto_options *options);
715 /* main processing loop */
/*
 * Per-lcore worker: configure per-cryptodev parameters and sessions from
 * the parsed options, then loop forever — draining stale TX/crypto
 * buffers, reading RX bursts, enqueueing packets to the crypto device and
 * forwarding dequeued, processed packets.
 * NOTE(review): this numbered listing omits many interior lines (returns,
 * break statements, closing braces, the dequeue do/while header); the
 * visible code is annotated as-is.
 */
717 l2fwd_main_loop(struct l2fwd_crypto_options *options)
719 struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
720 struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
722 unsigned lcore_id = rte_lcore_id();
723 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
724 unsigned int i, j, nb_rx, len;
726 struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
/* Convert the us drain interval to TSC cycles, rounding the hz up. */
727 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
728 US_PER_S * BURST_TX_DRAIN_US;
729 struct l2fwd_crypto_params *cparams;
730 struct l2fwd_crypto_params port_cparams[qconf->nb_crypto_devs];
731 struct rte_cryptodev_sym_session *session;
733 if (qconf->nb_rx_ports == 0) {
734 RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
738 RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
740 for (i = 0; i < qconf->nb_rx_ports; i++) {
742 portid = qconf->rx_port_list[i];
743 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
/* Resolve per-device crypto parameters from the global options. */
747 for (i = 0; i < qconf->nb_crypto_devs; i++) {
748 port_cparams[i].do_cipher = 0;
749 port_cparams[i].do_hash = 0;
750 port_cparams[i].do_aead = 0;
752 switch (options->xform_chain) {
753 case L2FWD_CRYPTO_AEAD:
754 port_cparams[i].do_aead = 1;
756 case L2FWD_CRYPTO_CIPHER_HASH:
757 case L2FWD_CRYPTO_HASH_CIPHER:
758 port_cparams[i].do_cipher = 1;
759 port_cparams[i].do_hash = 1;
761 case L2FWD_CRYPTO_HASH_ONLY:
762 port_cparams[i].do_hash = 1;
764 case L2FWD_CRYPTO_CIPHER_ONLY:
765 port_cparams[i].do_cipher = 1;
769 port_cparams[i].dev_id = qconf->cryptodev_list[i];
770 port_cparams[i].qp_id = 0;
772 port_cparams[i].block_size = options->block_size;
/* Auth: copy/generate IV, record verify mode, algo and digest size. */
774 if (port_cparams[i].do_hash) {
775 port_cparams[i].auth_iv.data = options->auth_iv.data;
776 port_cparams[i].auth_iv.length = options->auth_iv.length;
777 if (!options->auth_iv_param)
778 generate_random_key(port_cparams[i].auth_iv.data,
779 port_cparams[i].auth_iv.length);
780 if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
781 port_cparams[i].hash_verify = 1;
783 port_cparams[i].hash_verify = 0;
785 port_cparams[i].auth_algo = options->auth_xform.auth.algo;
786 port_cparams[i].digest_length =
787 options->auth_xform.auth.digest_length;
788 /* Set IV parameters */
789 if (options->auth_iv.length) {
790 options->auth_xform.auth.iv.offset =
791 IV_OFFSET + options->cipher_iv.length;
792 options->auth_xform.auth.iv.length =
793 options->auth_iv.length;
/* AEAD: IV/AAD setup; CCM reserves the first 18 AAD bytes. */
797 if (port_cparams[i].do_aead) {
798 port_cparams[i].aead_iv.data = options->aead_iv.data;
799 port_cparams[i].aead_iv.length = options->aead_iv.length;
800 if (!options->aead_iv_param)
801 generate_random_key(port_cparams[i].aead_iv.data,
802 port_cparams[i].aead_iv.length);
803 port_cparams[i].aead_algo = options->aead_xform.aead.algo;
804 port_cparams[i].digest_length =
805 options->aead_xform.aead.digest_length;
806 if (options->aead_xform.aead.aad_length) {
807 port_cparams[i].aad.data = options->aad.data;
808 port_cparams[i].aad.phys_addr = options->aad.phys_addr;
809 port_cparams[i].aad.length = options->aad.length;
810 if (!options->aad_param)
811 generate_random_key(port_cparams[i].aad.data,
812 port_cparams[i].aad.length);
814 * If doing AES-CCM, first 18 bytes has to be reserved,
815 * and actual AAD should start from byte 18
817 if (port_cparams[i].aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
818 memmove(port_cparams[i].aad.data + 18,
819 port_cparams[i].aad.data,
820 port_cparams[i].aad.length);
823 port_cparams[i].aad.length = 0;
825 if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_DECRYPT)
826 port_cparams[i].hash_verify = 1;
828 port_cparams[i].hash_verify = 0;
830 /* Set IV parameters */
831 options->aead_xform.aead.iv.offset = IV_OFFSET;
832 options->aead_xform.aead.iv.length = options->aead_iv.length;
/* Cipher: copy/generate IV, record algo and XTS data-unit length. */
835 if (port_cparams[i].do_cipher) {
836 port_cparams[i].cipher_iv.data = options->cipher_iv.data;
837 port_cparams[i].cipher_iv.length = options->cipher_iv.length;
838 if (!options->cipher_iv_param)
839 generate_random_key(port_cparams[i].cipher_iv.data,
840 port_cparams[i].cipher_iv.length);
842 port_cparams[i].cipher_algo = options->cipher_xform.cipher.algo;
843 port_cparams[i].cipher_dataunit_len =
844 options->cipher_xform.cipher.dataunit_len;
845 /* Set IV parameters */
846 options->cipher_xform.cipher.iv.offset = IV_OFFSET;
847 options->cipher_xform.cipher.iv.length =
848 options->cipher_iv.length;
851 session = initialize_crypto_session(options,
852 port_cparams[i].dev_id);
854 rte_exit(EXIT_FAILURE, "Failed to initialize crypto session\n");
856 port_cparams[i].session = session;
858 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u cryptoid=%u\n", lcore_id,
859 port_cparams[i].dev_id);
862 l2fwd_crypto_options_print(options);
865 * Initialize previous tsc timestamp before the loop,
866 * to avoid showing the port statistics immediately,
867 * so user can see the crypto information.
869 prev_tsc = rte_rdtsc();
/* --- main forwarding loop (loop header on an omitted line) --- */
872 cur_tsc = rte_rdtsc();
875 * Crypto device/TX burst queue drain
877 diff_tsc = cur_tsc - prev_tsc;
878 if (unlikely(diff_tsc > drain_tsc)) {
879 /* Enqueue all crypto ops remaining in buffers */
880 for (i = 0; i < qconf->nb_crypto_devs; i++) {
881 cparams = &port_cparams[i];
882 len = qconf->op_buf[cparams->dev_id].len;
883 l2fwd_crypto_send_burst(qconf, len, cparams);
884 qconf->op_buf[cparams->dev_id].len = 0;
886 /* Transmit all packets remaining in buffers */
887 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
888 if (qconf->pkt_buf[portid].len == 0)
890 l2fwd_send_burst(&lcore_queue_conf[lcore_id],
891 qconf->pkt_buf[portid].len,
893 qconf->pkt_buf[portid].len = 0;
896 /* if timer is enabled */
897 if (timer_period > 0) {
899 /* advance the timer */
900 timer_tsc += diff_tsc;
902 /* if timer has reached its timeout */
903 if (unlikely(timer_tsc >=
904 (uint64_t)timer_period)) {
906 /* do this only on main core */
907 if (lcore_id == rte_get_main_lcore()
908 && options->refresh_period) {
919 * Read packet from RX queues
921 for (i = 0; i < qconf->nb_rx_ports; i++) {
922 portid = qconf->rx_port_list[i];
924 cparams = &port_cparams[i];
926 nb_rx = rte_eth_rx_burst(portid, 0,
927 pkts_burst, MAX_PKT_BURST);
929 port_statistics[portid].rx += nb_rx;
931 /* Allocate and fillcrypto operations. 8< */
934 * If we can't allocate a crypto_ops, then drop
935 * the rest of the burst and dequeue and
936 * process the packets to free offload structs
938 if (rte_crypto_op_bulk_alloc(
939 l2fwd_crypto_op_pool,
940 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
943 for (j = 0; j < nb_rx; j++)
944 rte_pktmbuf_free(pkts_burst[j]);
948 /* >8 End of crypto operation allocated and filled. */
950 /* Enqueue packets from Crypto device*/
951 for (j = 0; j < nb_rx; j++) {
954 l2fwd_simple_crypto_enqueue(m,
955 ops_burst[j], cparams);
959 /* Dequeue packets from Crypto device. 8< */
961 nb_rx = rte_cryptodev_dequeue_burst(
962 cparams->dev_id, cparams->qp_id,
963 ops_burst, MAX_PKT_BURST);
965 crypto_statistics[cparams->dev_id].dequeued +=
968 /* Forward crypto'd packets */
969 for (j = 0; j < nb_rx; j++) {
970 m = ops_burst[j]->sym->m_src;
972 rte_crypto_op_free(ops_burst[j]);
973 l2fwd_simple_forward(m, portid,
/* Keep draining while the device returns full bursts. */
976 } while (nb_rx == MAX_PKT_BURST);
977 /* >8 End of dequeue packets from crypto device. */
/* rte_eal_remote_launch() trampoline: unpack the options and run the loop. */
983 l2fwd_launch_one_lcore(void *arg)
985 l2fwd_main_loop((struct l2fwd_crypto_options *)arg);
989 /* Display command line arguments usage */
/* Print the full option summary; prgname is substituted into the format. */
991 l2fwd_crypto_usage(const char *prgname)
993 printf("%s [EAL options] --\n"
994 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
995 " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
996 " -s manage all ports from single lcore\n"
997 " -T PERIOD: statistics will be refreshed each PERIOD seconds"
998 " (0 to disable, 10 default, 86400 maximum)\n"
1000 " --cdev_type HW / SW / ANY\n"
1001 " --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY /"
1002 " HASH_ONLY / AEAD\n"
1004 " --cipher_algo ALGO\n"
1005 " --cipher_op ENCRYPT / DECRYPT\n"
1006 " --cipher_key KEY (bytes separated with \":\")\n"
1007 " --cipher_key_random_size SIZE: size of cipher key when generated randomly\n"
1008 " --cipher_iv IV (bytes separated with \":\")\n"
1009 " --cipher_iv_random_size SIZE: size of cipher IV when generated randomly\n"
1010 " --cipher_dataunit_len SIZE: length of the algorithm data-unit\n"
1012 " --auth_algo ALGO\n"
1013 " --auth_op GENERATE / VERIFY\n"
1014 " --auth_key KEY (bytes separated with \":\")\n"
1015 " --auth_key_random_size SIZE: size of auth key when generated randomly\n"
1016 " --auth_iv IV (bytes separated with \":\")\n"
1017 " --auth_iv_random_size SIZE: size of auth IV when generated randomly\n"
1019 " --aead_algo ALGO\n"
1020 " --aead_op ENCRYPT / DECRYPT\n"
1021 " --aead_key KEY (bytes separated with \":\")\n"
1022 " --aead_key_random_size SIZE: size of AEAD key when generated randomly\n"
1023 " --aead_iv IV (bytes separated with \":\")\n"
1024 " --aead_iv_random_size SIZE: size of AEAD IV when generated randomly\n"
1025 " --aad AAD (bytes separated with \":\")\n"
1026 " --aad_random_size SIZE: size of AAD when generated randomly\n"
1028 " --digest_size SIZE: size of digest to be generated/verified\n"
1031 " --cryptodev_mask MASK: hexadecimal bitmask of crypto devices to configure\n"
1033 " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
1035 " - The source MAC address is replaced by the TX port MAC address\n"
1036 " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n",
1040 /** Parse crypto device type command line argument */
/* Maps "HW"/"SW"/"ANY" to the cdev_type enum; returns from omitted lines. */
1042 parse_cryptodev_type(enum cdev_type *type, char *optarg)
1044 if (strcmp("HW", optarg) == 0) {
1045 *type = CDEV_TYPE_HW;
1047 } else if (strcmp("SW", optarg) == 0) {
1048 *type = CDEV_TYPE_SW;
1050 } else if (strcmp("ANY", optarg) == 0) {
1051 *type = CDEV_TYPE_ANY;
1058 /** Parse crypto chain xform command line argument */
/* Maps the --chain argument string to an l2fwd_crypto_xform_chain value. */
1060 parse_crypto_opt_chain(struct l2fwd_crypto_options *options, char *optarg)
1062 if (strcmp("CIPHER_HASH", optarg) == 0) {
1063 options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
1065 } else if (strcmp("HASH_CIPHER", optarg) == 0) {
1066 options->xform_chain = L2FWD_CRYPTO_HASH_CIPHER;
1068 } else if (strcmp("CIPHER_ONLY", optarg) == 0) {
1069 options->xform_chain = L2FWD_CRYPTO_CIPHER_ONLY;
1071 } else if (strcmp("HASH_ONLY", optarg) == 0) {
1072 options->xform_chain = L2FWD_CRYPTO_HASH_ONLY;
1074 } else if (strcmp("AEAD", optarg) == 0) {
1075 options->xform_chain = L2FWD_CRYPTO_AEAD;
1082 /** Parse crypto cipher algo option command line argument */
/* Delegates algo-name lookup to the cryptodev library. */
1084 parse_cipher_algo(enum rte_crypto_cipher_algorithm *algo, char *optarg)
1087 if (rte_cryptodev_get_cipher_algo_enum(algo, optarg) < 0) {
1088 RTE_LOG(ERR, USER1, "Cipher algorithm specified "
1089 "not supported!\n");
1096 /** Parse crypto cipher operation command line argument */
/* Accepts "ENCRYPT" or "DECRYPT"; anything else is reported as an error. */
1098 parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
1100 if (strcmp("ENCRYPT", optarg) == 0) {
1101 *op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1103 } else if (strcmp("DECRYPT", optarg) == 0) {
1104 *op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
1108 printf("Cipher operation not supported!\n");
1112 /** Parse bytes from command line argument */
/*
 * Parse a ":"-separated list of hex bytes into `data`, at most `max_size`
 * bytes; presumably returns byte_count on success (return is on an
 * omitted line). Note strtok() mutates input_arg in place.
 */
1114 parse_bytes(uint8_t *data, char *input_arg, uint16_t max_size)
1116 unsigned byte_count;
1120 for (byte_count = 0, token = strtok(input_arg, ":");
1121 (byte_count < max_size) && (token != NULL);
1122 token = strtok(NULL, ":")) {
1124 int number = (int)strtol(token, NULL, 16);
/* Rejects out-of-range tokens; relies on errno set by strtol. */
1126 if (errno == EINVAL || errno == ERANGE || number > 0xFF)
1129 data[byte_count++] = (uint8_t)number;
1135 /** Parse size param*/
/* Parse a decimal size argument into *size; rejects empty/trailing junk. */
1137 parse_size(int *size, const char *q_arg)
1142 /* parse hexadecimal string */
/* NOTE(review): comment above says hexadecimal but base is 10 (decimal). */
1143 n = strtoul(q_arg, &end, 10);
1144 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
1148 printf("invalid size\n");
1156 /** Parse crypto cipher operation command line argument */
/* NOTE(review): doc comment above says "cipher" but this parses auth algos. */
1158 parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
1160 if (rte_cryptodev_get_auth_algo_enum(algo, optarg) < 0) {
1161 RTE_LOG(ERR, USER1, "Authentication algorithm specified "
1162 "not supported!\n");
/* Accepts "VERIFY" or "GENERATE" for the authentication operation. */
1170 parse_auth_op(enum rte_crypto_auth_operation *op, char *optarg)
1172 if (strcmp("VERIFY", optarg) == 0) {
1173 *op = RTE_CRYPTO_AUTH_OP_VERIFY;
1175 } else if (strcmp("GENERATE", optarg) == 0) {
1176 *op = RTE_CRYPTO_AUTH_OP_GENERATE;
1180 printf("Authentication operation specified not supported!\n");
/* Delegates AEAD algo-name lookup to the cryptodev library. */
1185 parse_aead_algo(enum rte_crypto_aead_algorithm *algo, char *optarg)
1187 if (rte_cryptodev_get_aead_algo_enum(algo, optarg) < 0) {
1188 RTE_LOG(ERR, USER1, "AEAD algorithm specified "
1189 "not supported!\n");
/* Accepts "ENCRYPT" or "DECRYPT" for the AEAD operation. */
1197 parse_aead_op(enum rte_crypto_aead_operation *op, char *optarg)
1199 if (strcmp("ENCRYPT", optarg) == 0) {
1200 *op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
1202 } else if (strcmp("DECRYPT", optarg) == 0) {
1203 *op = RTE_CRYPTO_AEAD_OP_DECRYPT;
1207 printf("AEAD operation specified not supported!\n");
1211 parse_cryptodev_mask(struct l2fwd_crypto_options *options,
1217 /* parse hexadecimal string */
1218 pm = strtoul(q_arg, &end, 16);
1219 if ((pm == '\0') || (end == NULL) || (*end != '\0'))
1222 options->cryptodev_mask = pm;
1223 if (options->cryptodev_mask == 0) {
1224 printf("invalid cryptodev_mask specified\n");
1231 /** Parse long options */
1233 l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
1234 struct option *lgopts, int option_index)
1239 if (strcmp(lgopts[option_index].name, "cdev_type") == 0) {
1240 retval = parse_cryptodev_type(&options->type, optarg);
1242 strlcpy(options->string_type, optarg, MAX_STR_LEN);
1246 else if (strcmp(lgopts[option_index].name, "chain") == 0)
1247 return parse_crypto_opt_chain(options, optarg);
1249 /* Cipher options */
1250 else if (strcmp(lgopts[option_index].name, "cipher_algo") == 0)
1251 return parse_cipher_algo(&options->cipher_xform.cipher.algo,
1254 else if (strcmp(lgopts[option_index].name, "cipher_op") == 0)
1255 return parse_cipher_op(&options->cipher_xform.cipher.op,
1258 else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
1259 options->ckey_param = 1;
1260 options->cipher_xform.cipher.key.length =
1261 parse_bytes(options->cipher_key, optarg, MAX_KEY_SIZE);
1262 if (options->cipher_xform.cipher.key.length > 0)
1268 else if (strcmp(lgopts[option_index].name, "cipher_dataunit_len") == 0) {
1269 retval = parse_size(&val, optarg);
1270 if (retval == 0 && val >= 0) {
1271 options->cipher_xform.cipher.dataunit_len =
1278 else if (strcmp(lgopts[option_index].name, "cipher_key_random_size") == 0)
1279 return parse_size(&options->ckey_random_size, optarg);
1281 else if (strcmp(lgopts[option_index].name, "cipher_iv") == 0) {
1282 options->cipher_iv_param = 1;
1283 options->cipher_iv.length =
1284 parse_bytes(options->cipher_iv.data, optarg, MAX_IV_SIZE);
1285 if (options->cipher_iv.length > 0)
1291 else if (strcmp(lgopts[option_index].name, "cipher_iv_random_size") == 0)
1292 return parse_size(&options->cipher_iv_random_size, optarg);
1294 /* Authentication options */
1295 else if (strcmp(lgopts[option_index].name, "auth_algo") == 0) {
1296 return parse_auth_algo(&options->auth_xform.auth.algo,
1300 else if (strcmp(lgopts[option_index].name, "auth_op") == 0)
1301 return parse_auth_op(&options->auth_xform.auth.op,
1304 else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
1305 options->akey_param = 1;
1306 options->auth_xform.auth.key.length =
1307 parse_bytes(options->auth_key, optarg, MAX_KEY_SIZE);
1308 if (options->auth_xform.auth.key.length > 0)
1314 else if (strcmp(lgopts[option_index].name, "auth_key_random_size") == 0) {
1315 return parse_size(&options->akey_random_size, optarg);
1318 else if (strcmp(lgopts[option_index].name, "auth_iv") == 0) {
1319 options->auth_iv_param = 1;
1320 options->auth_iv.length =
1321 parse_bytes(options->auth_iv.data, optarg, MAX_IV_SIZE);
1322 if (options->auth_iv.length > 0)
1328 else if (strcmp(lgopts[option_index].name, "auth_iv_random_size") == 0)
1329 return parse_size(&options->auth_iv_random_size, optarg);
1332 else if (strcmp(lgopts[option_index].name, "aead_algo") == 0) {
1333 return parse_aead_algo(&options->aead_xform.aead.algo,
1337 else if (strcmp(lgopts[option_index].name, "aead_op") == 0)
1338 return parse_aead_op(&options->aead_xform.aead.op,
1341 else if (strcmp(lgopts[option_index].name, "aead_key") == 0) {
1342 options->aead_key_param = 1;
1343 options->aead_xform.aead.key.length =
1344 parse_bytes(options->aead_key, optarg, MAX_KEY_SIZE);
1345 if (options->aead_xform.aead.key.length > 0)
1351 else if (strcmp(lgopts[option_index].name, "aead_key_random_size") == 0)
1352 return parse_size(&options->aead_key_random_size, optarg);
1355 else if (strcmp(lgopts[option_index].name, "aead_iv") == 0) {
1356 options->aead_iv_param = 1;
1357 options->aead_iv.length =
1358 parse_bytes(options->aead_iv.data, optarg, MAX_IV_SIZE);
1359 if (options->aead_iv.length > 0)
1365 else if (strcmp(lgopts[option_index].name, "aead_iv_random_size") == 0)
1366 return parse_size(&options->aead_iv_random_size, optarg);
1368 else if (strcmp(lgopts[option_index].name, "aad") == 0) {
1369 options->aad_param = 1;
1370 options->aad.length =
1371 parse_bytes(options->aad.data, optarg, MAX_AAD_SIZE);
1372 if (options->aad.length > 0)
1378 else if (strcmp(lgopts[option_index].name, "aad_random_size") == 0) {
1379 return parse_size(&options->aad_random_size, optarg);
1382 else if (strcmp(lgopts[option_index].name, "digest_size") == 0) {
1383 return parse_size(&options->digest_size, optarg);
1386 else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
1387 options->sessionless = 1;
1391 else if (strcmp(lgopts[option_index].name, "cryptodev_mask") == 0)
1392 return parse_cryptodev_mask(options, optarg);
1394 else if (strcmp(lgopts[option_index].name, "mac-updating") == 0) {
1395 options->mac_updating = 1;
1399 else if (strcmp(lgopts[option_index].name, "no-mac-updating") == 0) {
1400 options->mac_updating = 0;
/*
 * Parse the -p hex port-mask argument into options->portmask.
 * Rejects a zero/invalid mask (error return paths are elided in
 * this extract).
 */
1407 /** Parse port mask */
1409 l2fwd_crypto_parse_portmask(struct l2fwd_crypto_options *options,
1415 /* parse hexadecimal string */
1416 pm = strtoul(q_arg, &end, 16);
/* NOTE(review): comparing the numeric value 'pm' against '\0' (i.e. 0)
 * looks like it was meant to test q_arg[0] for an empty string; as
 * written it also rejects a mask of 0 before the explicit zero check
 * below -- confirm intent before changing. */
1417 if ((pm == '\0') || (end == NULL) || (*end != '\0'))
1420 options->portmask = pm;
1421 if (options->portmask == 0) {
1422 printf("invalid portmask specified\n");
/*
 * Parse the -q argument (ports per lcore) into
 * options->nb_ports_per_lcore; must be in (0, MAX_RX_QUEUE_PER_LCORE).
 */
1429 /** Parse number of queues */
1431 l2fwd_crypto_parse_nqueue(struct l2fwd_crypto_options *options,
/* base 10 below, so this parses a decimal string (old comment said
 * "hexadecimal") */
1437 /* parse decimal string */
1438 n = strtoul(q_arg, &end, 10);
1439 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
1441 else if (n >= MAX_RX_QUEUE_PER_LCORE)
1444 options->nb_ports_per_lcore = n;
1445 if (options->nb_ports_per_lcore == 0) {
1446 printf("invalid number of ports selected\n");
/*
 * Parse the -T argument (stats refresh period, seconds) and store it
 * in options->refresh_period in TSC-derived units, clamped to
 * MAX_TIMER_PERIOD.
 */
1453 /** Parse timer period */
1455 l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
1461 /* parse number string */
1462 n = (unsigned)strtol(q_arg, &end, 10);
1463 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
1466 if (n >= MAX_TIMER_PERIOD) {
/* NOTE(review): n is produced via an (unsigned) cast but printed with
 * %lu (unsigned long) -- format/argument widths may mismatch depending
 * on the (elided) declaration of n; confirm against the full source. */
1467 printf("Warning refresh period specified %lu is greater than "
1468 "max value %lu! using max value",
1469 n, MAX_TIMER_PERIOD);
1470 n = MAX_TIMER_PERIOD;
1473 options->refresh_period = n * 1000 * TIMER_MILLISECOND;
/*
 * Populate options with application defaults before command-line
 * parsing: all ports enabled, one port per lcore, CIPHER_HASH chain,
 * AES-CBC encrypt + SHA1-HMAC generate, AES-GCM encrypt for AEAD,
 * any device type, all cryptodevs enabled, MAC updating on.
 * A value of -1 in the *_random_size / digest_size fields means
 * "not requested on the command line".
 */
1478 /** Generate default options for application */
1480 l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
1482 options->portmask = 0xffffffff;
1483 options->nb_ports_per_lcore = 1;
1484 options->refresh_period = 10000;
1485 options->single_lcore = 0;
1486 options->sessionless = 0;
1488 options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher defaults */
1491 options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1492 options->cipher_xform.next = NULL;
1493 options->ckey_param = 0;
1494 options->ckey_random_size = -1;
1495 options->cipher_xform.cipher.key.length = 0;
1496 options->cipher_iv_param = 0;
1497 options->cipher_iv_random_size = -1;
1498 options->cipher_iv.length = 0;
1500 options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
1501 options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1502 options->cipher_xform.cipher.dataunit_len = 0;
1504 /* Authentication Data */
1505 options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1506 options->auth_xform.next = NULL;
1507 options->akey_param = 0;
1508 options->akey_random_size = -1;
1509 options->auth_xform.auth.key.length = 0;
1510 options->auth_iv_param = 0;
1511 options->auth_iv_random_size = -1;
1512 options->auth_iv.length = 0;
1514 options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
1515 options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
/* AEAD defaults */
1518 options->aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1519 options->aead_xform.next = NULL;
1520 options->aead_key_param = 0;
1521 options->aead_key_random_size = -1;
1522 options->aead_xform.aead.key.length = 0;
1523 options->aead_iv_param = 0;
1524 options->aead_iv_random_size = -1;
1525 options->aead_iv.length = 0;
1527 options->aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
1528 options->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
1530 options->aad_param = 0;
1531 options->aad_random_size = -1;
1532 options->aad.length = 0;
1534 options->digest_size = -1;
1536 options->type = CDEV_TYPE_ANY;
1537 options->cryptodev_mask = UINT64_MAX;
1539 options->mac_updating = 1;
/* Print the configured cipher algorithm and hex-dump its key and IV
 * to stdout. */
1543 display_cipher_info(struct l2fwd_crypto_options *options)
1545 printf("\n---- Cipher information ---\n");
1546 printf("Algorithm: %s\n",
1547 rte_crypto_cipher_algorithm_strings[options->cipher_xform.cipher.algo]);
1548 rte_hexdump(stdout, "Cipher key:",
1549 options->cipher_xform.cipher.key.data,
1550 options->cipher_xform.cipher.key.length);
1551 rte_hexdump(stdout, "IV:", options->cipher_iv.data, options->cipher_iv.length);
/* Print the configured auth algorithm and hex-dump its key and IV
 * to stdout. */
1555 display_auth_info(struct l2fwd_crypto_options *options)
1557 printf("\n---- Authentication information ---\n");
1558 printf("Algorithm: %s\n",
1559 rte_crypto_auth_algorithm_strings[options->auth_xform.auth.algo]);
1560 rte_hexdump(stdout, "Auth key:",
1561 options->auth_xform.auth.key.data,
1562 options->auth_xform.auth.key.length);
1563 rte_hexdump(stdout, "IV:", options->auth_iv.data, options->auth_iv.length);
/* Print the configured AEAD algorithm and hex-dump its key, IV and
 * AAD to stdout. */
1567 display_aead_info(struct l2fwd_crypto_options *options)
1569 printf("\n---- AEAD information ---\n");
1570 printf("Algorithm: %s\n",
1571 rte_crypto_aead_algorithm_strings[options->aead_xform.aead.algo]);
1572 rte_hexdump(stdout, "AEAD key:",
1573 options->aead_xform.aead.key.data,
1574 options->aead_xform.aead.key.length);
1575 rte_hexdump(stdout, "IV:", options->aead_iv.data, options->aead_iv.length);
1576 rte_hexdump(stdout, "AAD:", options->aad.data, options->aad.length);
/*
 * Print a human-readable summary of all parsed options, then describe
 * the selected crypto chain and dump per-xform details via the
 * display_*_info() helpers. NOTE(review): the switch-case 'break'
 * statements are elided in this extract.
 */
1580 l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
1582 char string_cipher_op[MAX_STR_LEN];
1583 char string_auth_op[MAX_STR_LEN];
1584 char string_aead_op[MAX_STR_LEN];
1586 if (options->cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1587 strcpy(string_cipher_op, "Encrypt");
1589 strcpy(string_cipher_op, "Decrypt");
1591 if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
1592 strcpy(string_auth_op, "Auth generate");
1594 strcpy(string_auth_op, "Auth verify");
1596 if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
1597 strcpy(string_aead_op, "Authenticated encryption");
1599 strcpy(string_aead_op, "Authenticated decryption");
/* NOTE(review): "\nn" prints a literal 'n' after the newline; almost
 * certainly intended to be "\n\n". Left untouched here because a
 * doc-only pass must not alter runtime strings. */
1602 printf("Options:-\nn");
1603 printf("portmask: %x\n", options->portmask);
1604 printf("ports per lcore: %u\n", options->nb_ports_per_lcore);
1605 printf("refresh period : %u\n", options->refresh_period);
1606 printf("single lcore mode: %s\n",
1607 options->single_lcore ? "enabled" : "disabled");
1608 printf("stats_printing: %s\n",
1609 options->refresh_period == 0 ? "disabled" : "enabled");
1611 printf("sessionless crypto: %s\n",
1612 options->sessionless ? "enabled" : "disabled");
/* Explicit key/IV/AAD arguments win over their *_random_size peers;
 * warn when both were supplied. */
1614 if (options->ckey_param && (options->ckey_random_size != -1))
1615 printf("Cipher key already parsed, ignoring size of random key\n");
1617 if (options->akey_param && (options->akey_random_size != -1))
1618 printf("Auth key already parsed, ignoring size of random key\n");
1620 if (options->cipher_iv_param && (options->cipher_iv_random_size != -1))
1621 printf("Cipher IV already parsed, ignoring size of random IV\n");
1623 if (options->auth_iv_param && (options->auth_iv_random_size != -1))
1624 printf("Auth IV already parsed, ignoring size of random IV\n");
1626 if (options->aad_param && (options->aad_random_size != -1))
1627 printf("AAD already parsed, ignoring size of random AAD\n");
1629 printf("\nCrypto chain: ");
1630 switch (options->xform_chain) {
1631 case L2FWD_CRYPTO_AEAD:
1632 printf("Input --> %s --> Output\n", string_aead_op);
1633 display_aead_info(options);
1635 case L2FWD_CRYPTO_CIPHER_HASH:
1636 printf("Input --> %s --> %s --> Output\n",
1637 string_cipher_op, string_auth_op);
1638 display_cipher_info(options);
1639 display_auth_info(options);
1641 case L2FWD_CRYPTO_HASH_CIPHER:
1642 printf("Input --> %s --> %s --> Output\n",
1643 string_auth_op, string_cipher_op);
1644 display_cipher_info(options);
1645 display_auth_info(options);
1647 case L2FWD_CRYPTO_HASH_ONLY:
1648 printf("Input --> %s --> Output\n", string_auth_op);
1649 display_auth_info(options);
1651 case L2FWD_CRYPTO_CIPHER_ONLY:
1652 printf("Input --> %s --> Output\n", string_cipher_op);
1653 display_cipher_info(options);
/*
 * Top-level command-line parser: short options -p (portmask),
 * -q (queues per lcore), -s (single lcore), -T (timer period), plus
 * the long-option table below, which is handed off to
 * l2fwd_crypto_parse_args_long_options(). On any parse error the
 * usage text is printed (the error-return lines are elided in this
 * extract). Resets getopt state before returning so EAL args can be
 * re-parsed elsewhere.
 */
1658 /* Parse the argument given in the command line of the application */
1660 l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
1661 int argc, char **argv)
1663 int opt, retval, option_index;
1664 char **argvopt = argv, *prgname = argv[0];
1666 static struct option lgopts[] = {
/* NOTE(review): "sessionless" appears twice in this table (here and
 * further below); harmless for getopt_long but likely a leftover --
 * confirm before removing either entry. */
1667 { "sessionless", no_argument, 0, 0 },
1669 { "cdev_type", required_argument, 0, 0 },
1670 { "chain", required_argument, 0, 0 },
1672 { "cipher_algo", required_argument, 0, 0 },
1673 { "cipher_op", required_argument, 0, 0 },
1674 { "cipher_key", required_argument, 0, 0 },
1675 { "cipher_key_random_size", required_argument, 0, 0 },
1676 { "cipher_iv", required_argument, 0, 0 },
1677 { "cipher_iv_random_size", required_argument, 0, 0 },
1678 { "cipher_dataunit_len", required_argument, 0, 0},
1680 { "auth_algo", required_argument, 0, 0 },
1681 { "auth_op", required_argument, 0, 0 },
1682 { "auth_key", required_argument, 0, 0 },
1683 { "auth_key_random_size", required_argument, 0, 0 },
1684 { "auth_iv", required_argument, 0, 0 },
1685 { "auth_iv_random_size", required_argument, 0, 0 },
1687 { "aead_algo", required_argument, 0, 0 },
1688 { "aead_op", required_argument, 0, 0 },
1689 { "aead_key", required_argument, 0, 0 },
1690 { "aead_key_random_size", required_argument, 0, 0 },
1691 { "aead_iv", required_argument, 0, 0 },
1692 { "aead_iv_random_size", required_argument, 0, 0 },
1694 { "aad", required_argument, 0, 0 },
1695 { "aad_random_size", required_argument, 0, 0 },
1697 { "digest_size", required_argument, 0, 0 },
1699 { "sessionless", no_argument, 0, 0 },
1700 { "cryptodev_mask", required_argument, 0, 0},
1702 { "mac-updating", no_argument, 0, 0},
1703 { "no-mac-updating", no_argument, 0, 0},
/* Start from defaults so only explicitly-passed options override. */
1708 l2fwd_crypto_default_options(options);
1710 while ((opt = getopt_long(argc, argvopt, "p:q:sT:", lgopts,
1711 &option_index)) != EOF) {
1715 retval = l2fwd_crypto_parse_args_long_options(options,
1716 lgopts, option_index);
1718 l2fwd_crypto_usage(prgname);
1725 retval = l2fwd_crypto_parse_portmask(options, optarg);
1727 l2fwd_crypto_usage(prgname);
1734 retval = l2fwd_crypto_parse_nqueue(options, optarg);
1736 l2fwd_crypto_usage(prgname);
1743 options->single_lcore = 1;
1749 retval = l2fwd_crypto_parse_timer_period(options,
1752 l2fwd_crypto_usage(prgname);
1758 l2fwd_crypto_usage(prgname);
1765 argv[optind-1] = prgname;
1768 optind = 1; /* reset getopt lib */
/*
 * Poll link status of every port in port_mask, up to 90 iterations of
 * 100 ms each (~9 s). Once all ports are up (or on timeout) the final
 * per-port status is printed and the loop exits.
 */
1773 /* Check the link status of all ports in up to 9s, and print them finally */
1775 check_all_ports_link_status(uint32_t port_mask)
1777 #define CHECK_INTERVAL 100 /* 100ms */
1778 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1780 uint8_t count, all_ports_up, print_flag = 0;
1781 struct rte_eth_link link;
1783 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
1785 printf("\nChecking link status");
1787 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1789 RTE_ETH_FOREACH_DEV(portid) {
1790 if ((port_mask & (1 << portid)) == 0)
1792 memset(&link, 0, sizeof(link));
/* Non-blocking query; failure is only reported on the final
 * (print_flag) pass. */
1793 ret = rte_eth_link_get_nowait(portid, &link);
1796 if (print_flag == 1)
1797 printf("Port %u link get failed: %s\n",
1798 portid, rte_strerror(-ret));
1801 /* print link status if flag set */
1802 if (print_flag == 1) {
1803 rte_eth_link_to_str(link_status_text,
1804 sizeof(link_status_text), &link);
1805 printf("Port %d %s\n", portid,
1809 /* clear all_ports_up flag if any link down */
1810 if (link.link_status == RTE_ETH_LINK_DOWN) {
1815 /* after finally printing all link status, get out */
1816 if (print_flag == 1)
1819 if (all_ports_up == 0) {
1822 rte_delay_ms(CHECK_INTERVAL);
1825 /* set the print_flag if all ports up or timeout */
1826 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Match the user's requested device class against a cryptodev's
 * feature flags: HW requires RTE_CRYPTODEV_FF_HW_ACCELERATED, SW
 * requires its absence, ANY always matches. (Return values elided in
 * this extract; non-matching devices are rejected.)
 */
1833 /* Check if device has to be HW/SW or any */
1835 check_type(const struct l2fwd_crypto_options *options,
1836 const struct rte_cryptodev_info *dev_info)
1838 if (options->type == CDEV_TYPE_HW &&
1839 (dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
1841 if (options->type == CDEV_TYPE_SW &&
1842 !(dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
1844 if (options->type == CDEV_TYPE_ANY)
/*
 * Scan dev_info->capabilities for a CIPHER capability matching the
 * requested algorithm on a device of the preferred type. Returns the
 * matching capability entry, or prints a diagnostic when the scan
 * terminates on the UNDEFINED sentinel without a match (NULL-return
 * path elided in this extract).
 */
1850 static const struct rte_cryptodev_capabilities *
1851 check_device_support_cipher_algo(const struct l2fwd_crypto_options *options,
1852 const struct rte_cryptodev_info *dev_info,
1856 const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
1857 enum rte_crypto_cipher_algorithm cap_cipher_algo;
1858 enum rte_crypto_cipher_algorithm opt_cipher_algo =
1859 options->cipher_xform.cipher.algo;
1861 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1862 cap_cipher_algo = cap->sym.cipher.algo;
1863 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1864 if (cap_cipher_algo == opt_cipher_algo) {
1865 if (check_type(options, dev_info) == 0)
1869 cap = &dev_info->capabilities[++i];
1872 if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1873 printf("Algorithm %s not supported by cryptodev %u"
1874 " or device not of preferred type (%s)\n",
1875 rte_crypto_cipher_algorithm_strings[opt_cipher_algo],
1877 options->string_type);
/*
 * Scan dev_info->capabilities for an AUTH capability matching the
 * requested algorithm on a device of the preferred type. Mirrors
 * check_device_support_cipher_algo(); NULL-return path elided in this
 * extract.
 */
1884 static const struct rte_cryptodev_capabilities *
1885 check_device_support_auth_algo(const struct l2fwd_crypto_options *options,
1886 const struct rte_cryptodev_info *dev_info,
1890 const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
1891 enum rte_crypto_auth_algorithm cap_auth_algo;
1892 enum rte_crypto_auth_algorithm opt_auth_algo =
1893 options->auth_xform.auth.algo;
1895 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1896 cap_auth_algo = cap->sym.auth.algo;
1897 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1898 if (cap_auth_algo == opt_auth_algo) {
1899 if (check_type(options, dev_info) == 0)
1903 cap = &dev_info->capabilities[++i];
1906 if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1907 printf("Algorithm %s not supported by cryptodev %u"
1908 " or device not of preferred type (%s)\n",
1909 rte_crypto_auth_algorithm_strings[opt_auth_algo],
1911 options->string_type);
/*
 * Scan dev_info->capabilities for an AEAD capability matching the
 * requested algorithm on a device of the preferred type. Mirrors
 * check_device_support_cipher_algo(); NULL-return path elided in this
 * extract.
 */
1918 static const struct rte_cryptodev_capabilities *
1919 check_device_support_aead_algo(const struct l2fwd_crypto_options *options,
1920 const struct rte_cryptodev_info *dev_info,
1924 const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
1925 enum rte_crypto_aead_algorithm cap_aead_algo;
1926 enum rte_crypto_aead_algorithm opt_aead_algo =
1927 options->aead_xform.aead.algo;
1929 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1930 cap_aead_algo = cap->sym.aead.algo;
1931 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1932 if (cap_aead_algo == opt_aead_algo) {
1933 if (check_type(options, dev_info) == 0)
1937 cap = &dev_info->capabilities[++i];
1940 if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1941 printf("Algorithm %s not supported by cryptodev %u"
1942 " or device not of preferred type (%s)\n",
1943 rte_crypto_aead_algorithm_strings[opt_aead_algo],
1945 options->string_type);
/*
 * Return success when bit cdev_id is set in the user-supplied
 * cryptodev mask (return statements elided in this extract).
 * NOTE(review): cryptodev_mask is 64-bit (default UINT64_MAX) but the
 * shift operand here is the int literal 1; for cdev_id >= 31 the shift
 * would be undefined/truncated -- confirm whether the full source uses
 * ((uint64_t)1 << cdev_id) as done elsewhere in this file.
 */
1952 /* Check if the device is enabled by cryptodev_mask */
1954 check_cryptodev_mask(struct l2fwd_crypto_options *options,
1957 if (options->cryptodev_mask & (1 << cdev_id))
/*
 * Test whether 'length' is acceptable for a capability range
 * [min, max] with the given increment: increment == 0 denotes a
 * single fixed value, otherwise any value min, min+inc, ... <= max
 * matches (return statements elided in this extract).
 */
1964 check_supported_size(uint16_t length, uint16_t min, uint16_t max,
1970 if (increment == 0) {
1977 /* Range of values */
1978 for (supp_size = min; supp_size <= max; supp_size += increment) {
1979 if (length == supp_size)
/*
 * Validate an IV against a device capability range: when an explicit
 * IV was supplied (iv_param) check its length, otherwise when a random
 * size was requested (iv_random_size != -1) check that size instead.
 * Returns non-zero on mismatch (return paths elided in this extract).
 */
1987 check_iv_param(const struct rte_crypto_param_range *iv_range_size,
1988 unsigned int iv_param, int iv_random_size,
1992 * Check if length of provided IV is supported
1993 * by the algorithm chosen.
1996 if (check_supported_size(iv_length,
1999 iv_range_size->increment)
2003 * Check if length of IV to be randomly generated
2004 * is supported by the algorithm chosen.
2006 } else if (iv_random_size != -1) {
2007 if (check_supported_size(iv_random_size,
2010 iv_range_size->increment)
/*
 * Verify that cryptodev cdev_id can satisfy the requested chain:
 * for the active xform chain, find the matching algorithm capability
 * and validate IV, key, AAD, digest and data-unit lengths (explicit
 * values or requested random sizes) against the device's advertised
 * ranges. Emits DEBUG logs and rejects the device on any mismatch
 * (the return statements are elided in this extract).
 */
2019 check_capabilities(struct l2fwd_crypto_options *options, uint8_t cdev_id)
2021 struct rte_cryptodev_info dev_info;
2022 const struct rte_cryptodev_capabilities *cap;
2024 rte_cryptodev_info_get(cdev_id, &dev_info);
2026 /* Set AEAD parameters */
2027 if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
2028 /* Check if device supports AEAD algo */
2029 cap = check_device_support_aead_algo(options, &dev_info,
2034 if (check_iv_param(&cap->sym.aead.iv_size,
2035 options->aead_iv_param,
2036 options->aead_iv_random_size,
2037 options->aead_iv.length) != 0) {
2038 RTE_LOG(DEBUG, USER1,
2039 "Device %u does not support IV length\n",
2045 * Check if length of provided AEAD key is supported
2046 * by the algorithm chosen.
2048 if (options->aead_key_param) {
2049 if (check_supported_size(
2050 options->aead_xform.aead.key.length,
2051 cap->sym.aead.key_size.min,
2052 cap->sym.aead.key_size.max,
2053 cap->sym.aead.key_size.increment)
2055 RTE_LOG(DEBUG, USER1,
2056 "Device %u does not support "
2057 "AEAD key length\n",
2062 * Check if length of the aead key to be randomly generated
2063 * is supported by the algorithm chosen.
2065 } else if (options->aead_key_random_size != -1) {
2066 if (check_supported_size(options->aead_key_random_size,
2067 cap->sym.aead.key_size.min,
2068 cap->sym.aead.key_size.max,
2069 cap->sym.aead.key_size.increment)
2071 RTE_LOG(DEBUG, USER1,
2072 "Device %u does not support "
2073 "AEAD key length\n",
2081 * Check if length of provided AAD is supported
2082 * by the algorithm chosen.
2084 if (options->aad_param) {
2085 if (check_supported_size(options->aad.length,
2086 cap->sym.aead.aad_size.min,
2087 cap->sym.aead.aad_size.max,
2088 cap->sym.aead.aad_size.increment)
2090 RTE_LOG(DEBUG, USER1,
2091 "Device %u does not support "
2097 * Check if length of AAD to be randomly generated
2098 * is supported by the algorithm chosen.
2100 } else if (options->aad_random_size != -1) {
2101 if (check_supported_size(options->aad_random_size,
2102 cap->sym.aead.aad_size.min,
2103 cap->sym.aead.aad_size.max,
2104 cap->sym.aead.aad_size.increment)
2106 RTE_LOG(DEBUG, USER1,
2107 "Device %u does not support "
2114 /* Check if digest size is supported by the algorithm. */
2115 if (options->digest_size != -1) {
2116 if (check_supported_size(options->digest_size,
2117 cap->sym.aead.digest_size.min,
2118 cap->sym.aead.digest_size.max,
2119 cap->sym.aead.digest_size.increment)
2121 RTE_LOG(DEBUG, USER1,
2122 "Device %u does not support "
2130 /* Set cipher parameters */
2131 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
2132 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
2133 options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
2135 /* Check if device supports cipher algo. 8< */
2136 cap = check_device_support_cipher_algo(options, &dev_info,
2141 if (check_iv_param(&cap->sym.cipher.iv_size,
2142 options->cipher_iv_param,
2143 options->cipher_iv_random_size,
2144 options->cipher_iv.length) != 0) {
2145 RTE_LOG(DEBUG, USER1,
2146 "Device %u does not support IV length\n",
2150 /* >8 End of check if device supports cipher algo. */
2152 /* Check if capable cipher is supported. 8< */
2155 * Check if length of provided cipher key is supported
2156 * by the algorithm chosen.
2158 if (options->ckey_param) {
2159 if (check_supported_size(
2160 options->cipher_xform.cipher.key.length,
2161 cap->sym.cipher.key_size.min,
2162 cap->sym.cipher.key_size.max,
2163 cap->sym.cipher.key_size.increment)
/* A wrong key length is tolerated when the device supports
 * wrapped keys, since the apparent length may then differ. */
2165 if (dev_info.feature_flags &
2166 RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY) {
2167 RTE_LOG(DEBUG, USER1,
2168 "Key length does not match the device "
2169 "%u capability. Key may be wrapped\n",
2172 RTE_LOG(DEBUG, USER1,
2173 "Key length does not match the device "
2181 * Check if length of the cipher key to be randomly generated
2182 * is supported by the algorithm chosen.
2184 } else if (options->ckey_random_size != -1) {
2185 if (check_supported_size(options->ckey_random_size,
2186 cap->sym.cipher.key_size.min,
2187 cap->sym.cipher.key_size.max,
2188 cap->sym.cipher.key_size.increment)
2190 RTE_LOG(DEBUG, USER1,
2191 "Device %u does not support cipher "
/* Multiple-data-unit support (e.g. XTS) is gated on both a feature
 * flag and, when advertised, the capability's data-unit-length set. */
2198 if (options->cipher_xform.cipher.dataunit_len > 0) {
2199 if (!(dev_info.feature_flags &
2200 RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS)) {
2201 RTE_LOG(DEBUG, USER1,
2202 "Device %u does not support "
2203 "cipher multiple data units\n",
2207 if (cap->sym.cipher.dataunit_set != 0) {
2210 switch (options->cipher_xform.cipher.dataunit_len) {
2212 if (!(cap->sym.cipher.dataunit_set &
2213 RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES))
2217 if (!(cap->sym.cipher.dataunit_set &
2218 RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES))
2225 RTE_LOG(DEBUG, USER1,
2226 "Device %u does not support "
2227 "data-unit length %u\n",
2229 options->cipher_xform.cipher.dataunit_len);
2234 /* >8 End of checking if cipher is supported. */
2237 /* Set auth parameters */
2238 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
2239 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
2240 options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
2241 /* Check if device supports auth algo */
2242 cap = check_device_support_auth_algo(options, &dev_info,
2247 if (check_iv_param(&cap->sym.auth.iv_size,
2248 options->auth_iv_param,
2249 options->auth_iv_random_size,
2250 options->auth_iv.length) != 0) {
2251 RTE_LOG(DEBUG, USER1,
2252 "Device %u does not support IV length\n",
2257 * Check if length of provided auth key is supported
2258 * by the algorithm chosen.
2260 if (options->akey_param) {
2261 if (check_supported_size(
2262 options->auth_xform.auth.key.length,
2263 cap->sym.auth.key_size.min,
2264 cap->sym.auth.key_size.max,
2265 cap->sym.auth.key_size.increment)
2267 RTE_LOG(DEBUG, USER1,
2268 "Device %u does not support auth "
2274 * Check if length of the auth key to be randomly generated
2275 * is supported by the algorithm chosen.
2277 } else if (options->akey_random_size != -1) {
2278 if (check_supported_size(options->akey_random_size,
2279 cap->sym.auth.key_size.min,
2280 cap->sym.auth.key_size.max,
2281 cap->sym.auth.key_size.increment)
2283 RTE_LOG(DEBUG, USER1,
2284 "Device %u does not support auth "
2291 /* Check if digest size is supported by the algorithm. */
2292 if (options->digest_size != -1) {
2293 if (check_supported_size(options->digest_size,
2294 cap->sym.auth.digest_size.min,
2295 cap->sym.auth.digest_size.max,
2296 cap->sym.auth.digest_size.increment)
2298 RTE_LOG(DEBUG, USER1,
2299 "Device %u does not support "
/*
 * Enumerate cryptodevs, select up to nb_ports devices that pass the
 * mask and capability checks (first loop, also sizing the private
 * session pool), then for each selected device: create per-socket
 * session mempools on demand, fill in any IV/key/AAD/digest sizes the
 * user did not supply (falling back to capability minima and
 * generating random keys), and configure/queue-setup/start the
 * device. Returns the number of enabled cryptodevs (error-return
 * lines are elided in this extract).
 */
2311 initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
2312 uint8_t *enabled_cdevs)
2314 uint8_t cdev_id, cdev_count, enabled_cdev_count = 0;
2315 const struct rte_cryptodev_capabilities *cap;
2316 unsigned int sess_sz, max_sess_sz = 0;
2317 uint32_t sessions_needed = 0;
2320 cdev_count = rte_cryptodev_count();
2321 if (cdev_count == 0) {
2322 printf("No crypto devices available\n");
/* First pass: pick usable devices and track the largest private
 * session size so one pool fits all drivers. */
2326 for (cdev_id = 0; cdev_id < cdev_count && enabled_cdev_count < nb_ports;
2328 if (check_cryptodev_mask(options, cdev_id) < 0)
2331 if (check_capabilities(options, cdev_id) < 0)
2334 sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2335 if (sess_sz > max_sess_sz)
2336 max_sess_sz = sess_sz;
2338 l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
2340 enabled_cdevs[cdev_id] = 1;
2341 enabled_cdev_count++;
/* Second pass: set up each selected device. */
2344 for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
2345 struct rte_cryptodev_qp_conf qp_conf;
2346 struct rte_cryptodev_info dev_info;
2348 if (enabled_cdevs[cdev_id] == 0)
2351 if (check_cryptodev_mask(options, cdev_id) < 0)
2354 if (check_capabilities(options, cdev_id) < 0)
2357 retval = rte_cryptodev_socket_id(cdev_id);
2360 printf("Invalid crypto device id used\n");
2364 uint8_t socket_id = (uint8_t) retval;
2366 struct rte_cryptodev_config conf = {
2367 .nb_queue_pairs = 1,
2368 .socket_id = socket_id,
2369 .ff_disable = RTE_CRYPTODEV_FF_SECURITY,
2372 rte_cryptodev_info_get(cdev_id, &dev_info);
2375 * Two sessions objects are required for each session
2376 * (one for the header, one for the private data)
2378 if (!strcmp(dev_info.driver_name, "crypto_scheduler")) {
2379 #ifdef RTE_CRYPTO_SCHEDULER
/* The scheduler PMD fans sessions out to its workers, so scale the
 * pool by the worker count. */
2380 uint32_t nb_workers =
2381 rte_cryptodev_scheduler_workers_get(cdev_id,
2384 sessions_needed = enabled_cdev_count * nb_workers;
2387 sessions_needed = enabled_cdev_count;
/* Lazily create the per-socket private-data session pool. */
2389 if (session_pool_socket[socket_id].priv_mp == NULL) {
2390 char mp_name[RTE_MEMPOOL_NAMESIZE];
2392 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2393 "priv_sess_mp_%u", socket_id);
2395 session_pool_socket[socket_id].priv_mp =
2396 rte_mempool_create(mp_name,
2399 0, 0, NULL, NULL, NULL,
2403 if (session_pool_socket[socket_id].priv_mp == NULL) {
2404 printf("Cannot create pool on socket %d\n",
2409 printf("Allocated pool \"%s\" on socket %d\n",
2410 mp_name, socket_id);
/* Lazily create the per-socket session-header pool. */
2413 if (session_pool_socket[socket_id].sess_mp == NULL) {
2414 char mp_name[RTE_MEMPOOL_NAMESIZE];
2415 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2416 "sess_mp_%u", socket_id);
2418 session_pool_socket[socket_id].sess_mp =
2419 rte_cryptodev_sym_session_pool_create(
2422 0, 0, 0, socket_id);
2424 if (session_pool_socket[socket_id].sess_mp == NULL) {
2425 printf("Cannot create pool on socket %d\n",
2430 printf("Allocated pool \"%s\" on socket %d\n",
2431 mp_name, socket_id);
2434 /* Set AEAD parameters */
2435 if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
2436 cap = check_device_support_aead_algo(options, &dev_info,
2439 options->block_size = cap->sym.aead.block_size;
2441 /* Set IV if not provided from command line */
2442 if (options->aead_iv_param == 0) {
2443 if (options->aead_iv_random_size != -1)
2444 options->aead_iv.length =
2445 options->aead_iv_random_size;
2446 /* No size provided, use minimum size. */
2448 options->aead_iv.length =
2449 cap->sym.aead.iv_size.min;
2452 /* Set key if not provided from command line */
2453 if (options->aead_key_param == 0) {
2454 if (options->aead_key_random_size != -1)
2455 options->aead_xform.aead.key.length =
2456 options->aead_key_random_size;
2457 /* No size provided, use minimum size. */
2459 options->aead_xform.aead.key.length =
2460 cap->sym.aead.key_size.min;
2462 generate_random_key(options->aead_key,
2463 options->aead_xform.aead.key.length);
2466 /* Set AAD if not provided from command line */
2467 if (options->aad_param == 0) {
2468 if (options->aad_random_size != -1)
2469 options->aad.length =
2470 options->aad_random_size;
2471 /* No size provided, use minimum size. */
2473 options->aad.length =
/* NOTE(review): this reads the *auth* aad_size range inside the AEAD
 * branch; check_capabilities() validates AAD against sym.aead.aad_size
 * -- confirm whether sym.aead.aad_size.min was intended here. */
2474 cap->sym.auth.aad_size.min;
2477 options->aead_xform.aead.aad_length =
2478 options->aad.length;
2480 /* Set digest size if not provided from command line */
2481 if (options->digest_size != -1)
2482 options->aead_xform.aead.digest_length =
2483 options->digest_size;
2484 /* No size provided, use minimum size. */
2486 options->aead_xform.aead.digest_length =
2487 cap->sym.aead.digest_size.min;
2490 /* Set cipher parameters */
2491 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
2492 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
2493 options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
2494 cap = check_device_support_cipher_algo(options, &dev_info,
2496 options->block_size = cap->sym.cipher.block_size;
2498 /* Set IV if not provided from command line */
2499 if (options->cipher_iv_param == 0) {
2500 if (options->cipher_iv_random_size != -1)
2501 options->cipher_iv.length =
2502 options->cipher_iv_random_size;
2503 /* No size provided, use minimum size. */
2505 options->cipher_iv.length =
2506 cap->sym.cipher.iv_size.min;
2509 /* Set key if not provided from command line */
2510 if (options->ckey_param == 0) {
2511 if (options->ckey_random_size != -1)
2512 options->cipher_xform.cipher.key.length =
2513 options->ckey_random_size;
2514 /* No size provided, use minimum size. */
2516 options->cipher_xform.cipher.key.length =
2517 cap->sym.cipher.key_size.min;
2519 generate_random_key(options->cipher_key,
2520 options->cipher_xform.cipher.key.length);
2524 /* Set auth parameters */
2525 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
2526 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
2527 options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
2528 cap = check_device_support_auth_algo(options, &dev_info,
2531 /* Set IV if not provided from command line */
2532 if (options->auth_iv_param == 0) {
2533 if (options->auth_iv_random_size != -1)
2534 options->auth_iv.length =
2535 options->auth_iv_random_size;
2536 /* No size provided, use minimum size. */
2538 options->auth_iv.length =
2539 cap->sym.auth.iv_size.min;
2542 /* Set key if not provided from command line */
2543 if (options->akey_param == 0) {
2544 if (options->akey_random_size != -1)
2545 options->auth_xform.auth.key.length =
2546 options->akey_random_size;
2547 /* No size provided, use minimum size. */
2549 options->auth_xform.auth.key.length =
2550 cap->sym.auth.key_size.min;
2552 generate_random_key(options->auth_key,
2553 options->auth_xform.auth.key.length);
2556 /* Set digest size if not provided from command line */
2557 if (options->digest_size != -1)
2558 options->auth_xform.auth.digest_length =
2559 options->digest_size;
2560 /* No size provided, use minimum size. */
2562 options->auth_xform.auth.digest_length =
2563 cap->sym.auth.digest_size.min;
2566 retval = rte_cryptodev_configure(cdev_id, &conf);
2568 printf("Failed to configure cryptodev %u", cdev_id);
2572 qp_conf.nb_descriptors = 2048;
2573 qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
2574 qp_conf.mp_session_private =
2575 session_pool_socket[socket_id].priv_mp;
2577 retval = rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
2580 printf("Failed to setup queue pair %u on cryptodev %u",
2585 retval = rte_cryptodev_start(cdev_id);
2587 printf("Failed to start device %u: error %d\n",
2593 return enabled_cdev_count;
/*
 * Bring up every Ethernet port selected by options->portmask:
 * configure each port with a single RX and a single TX queue,
 * start it, enable promiscuous mode and record its MAC address.
 * Ports are paired for L2 forwarding (1st<->2nd, 3rd<->4th, ...)
 * via l2fwd_dst_ports; a lone enabled port forwards to itself.
 *
 * Returns the number of ports successfully enabled; error paths
 * (no ports, configuration failures, odd port count) bail out
 * early after printing a diagnostic.
 *
 * NOTE(review): several error-handling/continue lines of this
 * function are not visible in this excerpt; comments below only
 * describe what the visible statements demonstrably do.
 */
initialize_ports(struct l2fwd_crypto_options *options)
	uint16_t last_portid = 0, portid;
	unsigned enabled_portcount = 0;
	/* Total ports known to the EAL, before applying the portmask */
	unsigned nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0) {
		printf("No Ethernet ports - bye\n");
	/* Reset l2fwd_dst_ports */
	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
		l2fwd_dst_ports[portid] = 0;
	RTE_ETH_FOREACH_DEV(portid) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_txconf txq_conf;
		/* Per-port copy so offload flags can differ per device */
		struct rte_eth_conf local_port_conf = port_conf;
		/* Skip ports that are not enabled */
		if ((options->portmask & (1 << portid)) == 0)
		printf("Initializing port %u... ", portid);
		retval = rte_eth_dev_info_get(portid, &dev_info);
			printf("Error during getting device (port %u) info: %s\n",
					portid, strerror(-retval));
		/* Opt into fast mbuf free only when the NIC supports it */
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
		/* One RX queue and one TX queue per port */
		retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
			printf("Cannot configure device: err=%d, port=%u\n",
		/* Let the PMD clamp descriptor counts to supported limits;
		 * nb_rxd/nb_txd are updated in place. */
		retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
			printf("Cannot adjust number of descriptors: err=%d, port=%u\n",
		/* init one RX queue */
		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
				rte_eth_dev_socket_id(portid),
				&rxq_conf, l2fwd_pktmbuf_pool);
			printf("rte_eth_rx_queue_setup:err=%d, port=%u\n",
		/* init one TX queue on each port */
		txq_conf = dev_info.default_txconf;
		txq_conf.offloads = local_port_conf.txmode.offloads;
		retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid),
			printf("rte_eth_tx_queue_setup:err=%d, port=%u\n",
		/* Start device */
		retval = rte_eth_dev_start(portid);
			printf("rte_eth_dev_start:err=%d, port=%u\n",
		/* Receive all frames so any destination MAC is forwarded */
		retval = rte_eth_promiscuous_enable(portid);
			printf("rte_eth_promiscuous_enable:err=%s, port=%u\n",
					rte_strerror(-retval), portid);
		/* Cache the port MAC for the MAC-updating forwarding path */
		retval = rte_eth_macaddr_get(portid,
				&l2fwd_ports_eth_addr[portid]);
			printf("rte_eth_macaddr_get :err=%d, port=%u\n",
		printf("Port %u, MAC address: " RTE_ETHER_ADDR_PRT_FMT "\n\n",
				RTE_ETHER_ADDR_BYTES(&l2fwd_ports_eth_addr[portid]));
		/* initialize port stats */
		memset(&port_statistics, 0, sizeof(port_statistics));
		/* Setup port forwarding table: every odd enabled port is
		 * cross-linked with the previously enabled (even) one. */
		if (enabled_portcount % 2) {
			l2fwd_dst_ports[portid] = last_portid;
			l2fwd_dst_ports[last_portid] = portid;
			last_portid = portid;
		l2fwd_enabled_port_mask |= (1 << portid);
		enabled_portcount++;
	/* A single port loops back to itself; an odd count > 1 cannot
	 * be paired and is rejected. */
	if (enabled_portcount == 1) {
		l2fwd_dst_ports[last_portid] = last_portid;
	} else if (enabled_portcount % 2) {
		printf("odd number of ports in portmask- bye\n");
	/* Block until all enabled links report up (or timeout) */
	check_all_ports_link_status(l2fwd_enabled_port_mask);
	return enabled_portcount;
/*
 * One-time allocation of the buffers that later option parsing and
 * session setup write keys/IVs/AAD into.
 *
 * Key data pointers are wired to the fixed-size arrays embedded in
 * the options struct (cipher_key/auth_key/aead_key); the IV and AAD
 * buffers are allocated from DPDK huge-page memory with rte_malloc()
 * so the AAD physical address can be obtained for the crypto op.
 * Any allocation failure aborts the application via rte_exit().
 *
 * Must be called before l2fwd_crypto_parse_args() (see main()),
 * since the parser stores user-supplied key/IV bytes through these
 * pointers.
 */
reserve_key_memory(struct l2fwd_crypto_options *options)
	options->cipher_xform.cipher.key.data = options->cipher_key;
	options->auth_xform.auth.key.data = options->auth_key;
	options->aead_xform.aead.key.data = options->aead_key;
	/* IV buffers are sized MAX_KEY_SIZE (128) rather than MAX_IV_SIZE
	 * (16) — oversized but harmless headroom. */
	options->cipher_iv.data = rte_malloc("cipher iv", MAX_KEY_SIZE, 0);
	if (options->cipher_iv.data == NULL)
		rte_exit(EXIT_FAILURE, "Failed to allocate memory for cipher IV");
	options->auth_iv.data = rte_malloc("auth iv", MAX_KEY_SIZE, 0);
	if (options->auth_iv.data == NULL)
		rte_exit(EXIT_FAILURE, "Failed to allocate memory for auth IV");
	options->aead_iv.data = rte_malloc("aead_iv", MAX_KEY_SIZE, 0);
	if (options->aead_iv.data == NULL)
		rte_exit(EXIT_FAILURE, "Failed to allocate memory for AEAD iv");
	/* NOTE(review): AAD buffer is only MAX_KEY_SIZE (128) bytes even
	 * though MAX_AAD_SIZE is 65535 — confirm the argument parser caps
	 * the user-requested AAD length at this buffer size, otherwise a
	 * larger --aad value could overflow this allocation. */
	options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
	if (options->aad.data == NULL)
		rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
	/* IOVA of the AAD buffer, needed when attaching AAD to crypto ops */
	options->aad.phys_addr = rte_malloc_virt2iova(options->aad.data);
2761 main(int argc, char **argv)
2763 struct lcore_queue_conf *qconf = NULL;
2764 struct l2fwd_crypto_options options;
2766 uint8_t nb_cryptodevs, cdev_id;
2768 unsigned lcore_id, rx_lcore_id = 0;
2769 int ret, enabled_cdevcount, enabled_portcount;
2770 uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = {0};
2773 ret = rte_eal_init(argc, argv);
2775 rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
2779 /* reserve memory for Cipher/Auth key and IV */
2780 reserve_key_memory(&options);
2782 /* parse application arguments (after the EAL ones) */
2783 ret = l2fwd_crypto_parse_args(&options, argc, argv);
2785 rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
2787 printf("MAC updating %s\n",
2788 options.mac_updating ? "enabled" : "disabled");
2790 /* create the mbuf pool */
2791 l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
2792 RTE_ALIGN(sizeof(struct rte_crypto_op),
2793 RTE_CACHE_LINE_SIZE),
2794 RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
2795 if (l2fwd_pktmbuf_pool == NULL)
2796 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
2798 /* create crypto op pool */
2799 l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
2800 RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, MAXIMUM_IV_LENGTH,
2802 if (l2fwd_crypto_op_pool == NULL)
2803 rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
2805 /* Enable Ethernet ports */
2806 enabled_portcount = initialize_ports(&options);
2807 if (enabled_portcount < 1)
2808 rte_exit(EXIT_FAILURE, "Failed to initial Ethernet ports\n");
2810 /* Initialize the port/queue configuration of each logical core */
2811 RTE_ETH_FOREACH_DEV(portid) {
2813 /* skip ports that are not enabled */
2814 if ((options.portmask & (1 << portid)) == 0)
2817 if (options.single_lcore && qconf == NULL) {
2818 while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
2820 if (rx_lcore_id >= RTE_MAX_LCORE)
2821 rte_exit(EXIT_FAILURE,
2822 "Not enough cores\n");
2824 } else if (!options.single_lcore) {
2825 /* get the lcore_id for this port */
2826 while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
2827 lcore_queue_conf[rx_lcore_id].nb_rx_ports ==
2828 options.nb_ports_per_lcore) {
2830 if (rx_lcore_id >= RTE_MAX_LCORE)
2831 rte_exit(EXIT_FAILURE,
2832 "Not enough cores\n");
2836 /* Assigned a new logical core in the loop above. */
2837 if (qconf != &lcore_queue_conf[rx_lcore_id])
2838 qconf = &lcore_queue_conf[rx_lcore_id];
2840 qconf->rx_port_list[qconf->nb_rx_ports] = portid;
2841 qconf->nb_rx_ports++;
2843 printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
2846 /* Enable Crypto devices */
2847 enabled_cdevcount = initialize_cryptodevs(&options, enabled_portcount,
2849 if (enabled_cdevcount < 0)
2850 rte_exit(EXIT_FAILURE, "Failed to initialize crypto devices\n");
2852 if (enabled_cdevcount < enabled_portcount)
2853 rte_exit(EXIT_FAILURE, "Number of capable crypto devices (%d) "
2854 "has to be more or equal to number of ports (%d)\n",
2855 enabled_cdevcount, enabled_portcount);
2857 nb_cryptodevs = rte_cryptodev_count();
2859 /* Initialize the port/cryptodev configuration of each logical core */
2860 for (rx_lcore_id = 0, qconf = NULL, cdev_id = 0;
2861 cdev_id < nb_cryptodevs && enabled_cdevcount;
2863 /* Crypto op not supported by crypto device */
2864 if (!enabled_cdevs[cdev_id])
2867 if (options.single_lcore && qconf == NULL) {
2868 while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
2870 if (rx_lcore_id >= RTE_MAX_LCORE)
2871 rte_exit(EXIT_FAILURE,
2872 "Not enough cores\n");
2874 } else if (!options.single_lcore) {
2875 /* get the lcore_id for this port */
2876 while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
2877 lcore_queue_conf[rx_lcore_id].nb_crypto_devs ==
2878 options.nb_ports_per_lcore) {
2880 if (rx_lcore_id >= RTE_MAX_LCORE)
2881 rte_exit(EXIT_FAILURE,
2882 "Not enough cores\n");
2886 /* Assigned a new logical core in the loop above. */
2887 if (qconf != &lcore_queue_conf[rx_lcore_id])
2888 qconf = &lcore_queue_conf[rx_lcore_id];
2890 qconf->cryptodev_list[qconf->nb_crypto_devs] = cdev_id;
2891 qconf->nb_crypto_devs++;
2893 enabled_cdevcount--;
2895 printf("Lcore %u: cryptodev %u\n", rx_lcore_id,
2899 /* launch per-lcore init on every lcore */
2900 rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)&options,
2902 RTE_LCORE_FOREACH_WORKER(lcore_id) {
2903 if (rte_eal_wait_lcore(lcore_id) < 0)
2907 /* clean up the EAL */