1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2016 Intel Corporation
11 #include <sys/types.h>
12 #include <sys/queue.h>
13 #include <netinet/in.h>
22 #include <rte_string_fns.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_common.h>
26 #include <rte_cryptodev.h>
27 #include <rte_cycles.h>
28 #include <rte_debug.h>
30 #include <rte_ether.h>
31 #include <rte_ethdev.h>
32 #include <rte_interrupts.h>
34 #include <rte_launch.h>
35 #include <rte_lcore.h>
37 #include <rte_malloc.h>
39 #include <rte_memcpy.h>
40 #include <rte_memory.h>
41 #include <rte_mempool.h>
42 #include <rte_per_lcore.h>
43 #include <rte_prefetch.h>
44 #include <rte_random.h>
45 #include <rte_hexdump.h>
46 #ifdef RTE_CRYPTO_SCHEDULER
47 #include <rte_cryptodev_scheduler.h>
56 #define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
60 #define MAX_STR_LEN 32
61 #define MAX_KEY_SIZE 128
62 #define MAX_IV_SIZE 16
63 #define MAX_AAD_SIZE 65535
64 #define MAX_PKT_BURST 32
65 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
66 #define SESSION_POOL_CACHE_SIZE 0
68 #define MAXIMUM_IV_LENGTH 16
69 #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
70 sizeof(struct rte_crypto_sym_op))
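/*
 * Per-op IVs are stored in the crypto op's private data area, immediately
 * after the symmetric op structure; IV_OFFSET is used below when setting
 * each xform's IV offset.
 */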
73 * Configurable number of RX/TX ring descriptors
75 #define RTE_TEST_RX_DESC_DEFAULT 1024
76 #define RTE_TEST_TX_DESC_DEFAULT 1024
78 static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
79 static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
81 /* ethernet addresses of ports */
82 static struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
84 /* mask of enabled ports */
85 static uint64_t l2fwd_enabled_port_mask;
86 static uint64_t l2fwd_enabled_crypto_mask;
88 /* list of enabled ports */
89 static uint16_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
94 struct rte_mbuf *buffer[MAX_PKT_BURST];
99 struct rte_crypto_op *buffer[MAX_PKT_BURST];
102 #define MAX_RX_QUEUE_PER_LCORE 16
103 #define MAX_TX_QUEUE_PER_PORT 16
105 enum l2fwd_crypto_xform_chain {
106 L2FWD_CRYPTO_CIPHER_HASH,
107 L2FWD_CRYPTO_HASH_CIPHER,
108 L2FWD_CRYPTO_CIPHER_ONLY,
109 L2FWD_CRYPTO_HASH_ONLY,
116 rte_iova_t phys_addr;
124 /** l2fwd crypto application command line options */
125 struct l2fwd_crypto_options {
127 unsigned nb_ports_per_lcore;
128 unsigned refresh_period;
129 unsigned single_lcore:1;
132 unsigned sessionless:1;
134 enum l2fwd_crypto_xform_chain xform_chain;
136 struct rte_crypto_sym_xform cipher_xform;
138 int ckey_random_size;
139 uint8_t cipher_key[MAX_KEY_SIZE];
141 struct l2fwd_iv cipher_iv;
142 unsigned int cipher_iv_param;
143 int cipher_iv_random_size;
145 struct rte_crypto_sym_xform auth_xform;
147 int akey_random_size;
148 uint8_t auth_key[MAX_KEY_SIZE];
150 struct l2fwd_iv auth_iv;
151 unsigned int auth_iv_param;
152 int auth_iv_random_size;
154 struct rte_crypto_sym_xform aead_xform;
155 unsigned int aead_key_param;
156 int aead_key_random_size;
157 uint8_t aead_key[MAX_KEY_SIZE];
159 struct l2fwd_iv aead_iv;
160 unsigned int aead_iv_param;
161 int aead_iv_random_size;
163 struct l2fwd_key aad;
170 char string_type[MAX_STR_LEN];
172 uint64_t cryptodev_mask;
174 unsigned int mac_updating;
177 /** l2fwd crypto lcore params */
178 struct l2fwd_crypto_params {
182 unsigned digest_length;
185 struct l2fwd_iv cipher_iv;
186 struct l2fwd_iv auth_iv;
187 struct l2fwd_iv aead_iv;
188 struct l2fwd_key aad;
189 struct rte_cryptodev_sym_session *session;
196 enum rte_crypto_cipher_algorithm cipher_algo;
197 enum rte_crypto_auth_algorithm auth_algo;
198 enum rte_crypto_aead_algorithm aead_algo;
201 /** lcore configuration */
202 struct lcore_queue_conf {
203 unsigned nb_rx_ports;
204 uint16_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
206 unsigned nb_crypto_devs;
207 unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
209 struct op_buffer op_buf[RTE_CRYPTO_MAX_DEVS];
210 struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
211 } __rte_cache_aligned;
213 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
215 static struct rte_eth_conf port_conf = {
217 .mq_mode = ETH_MQ_RX_NONE,
218 .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
222 .mq_mode = ETH_MQ_TX_NONE,
226 struct rte_mempool *l2fwd_pktmbuf_pool;
227 struct rte_mempool *l2fwd_crypto_op_pool;
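/* Per-socket session pools: one mempool for session headers (sess_mp)
 * and one for driver-private session data (priv_mp). */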
229 struct rte_mempool *sess_mp;
230 struct rte_mempool *priv_mp;
231 } session_pool_socket[RTE_MAX_NUMA_NODES];
233 /* Per-port statistics struct */
234 struct l2fwd_port_statistics {
238 uint64_t crypto_enqueued;
239 uint64_t crypto_dequeued;
242 } __rte_cache_aligned;
244 struct l2fwd_crypto_statistics {
249 } __rte_cache_aligned;
251 struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
252 struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS];
254 /* A tsc-based timer responsible for triggering statistics printout */
255 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 GHz */
256 #define MAX_TIMER_PERIOD 86400UL /* 1 day max */
258 /* default period is 10 seconds */
259 static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
261 /* Print out statistics on packets dropped */
265 uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
266 uint64_t total_packets_enqueued, total_packets_dequeued,
267 total_packets_errors;
271 total_packets_dropped = 0;
272 total_packets_tx = 0;
273 total_packets_rx = 0;
274 total_packets_enqueued = 0;
275 total_packets_dequeued = 0;
276 total_packets_errors = 0;
278 const char clr[] = { 27, '[', '2', 'J', '\0' };
279 const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
281 /* Clear screen and move to top left */
282 printf("%s%s", clr, topLeft);
284 printf("\nPort statistics ====================================");
286 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
287 /* skip disabled ports */
288 if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
290 printf("\nStatistics for port %u ------------------------------"
291 "\nPackets sent: %32"PRIu64
292 "\nPackets received: %28"PRIu64
293 "\nPackets dropped: %29"PRIu64,
295 port_statistics[portid].tx,
296 port_statistics[portid].rx,
297 port_statistics[portid].dropped);
299 total_packets_dropped += port_statistics[portid].dropped;
300 total_packets_tx += port_statistics[portid].tx;
301 total_packets_rx += port_statistics[portid].rx;
303 printf("\nCrypto statistics ==================================");
305 for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) {
306 /* skip disabled crypto devices */
307 if ((l2fwd_enabled_crypto_mask & (((uint64_t)1) << cdevid)) == 0)
309 printf("\nStatistics for cryptodev %"PRIu64
310 " -------------------------"
311 "\nPackets enqueued: %28"PRIu64
312 "\nPackets dequeued: %28"PRIu64
313 "\nPackets errors: %30"PRIu64,
315 crypto_statistics[cdevid].enqueued,
316 crypto_statistics[cdevid].dequeued,
317 crypto_statistics[cdevid].errors);
319 total_packets_enqueued += crypto_statistics[cdevid].enqueued;
320 total_packets_dequeued += crypto_statistics[cdevid].dequeued;
321 total_packets_errors += crypto_statistics[cdevid].errors;
323 printf("\nAggregate statistics ==============================="
324 "\nTotal packets received: %22"PRIu64
325 "\nTotal packets enqueued: %22"PRIu64
326 "\nTotal packets dequeued: %22"PRIu64
327 "\nTotal packets sent: %26"PRIu64
328 "\nTotal packets dropped: %23"PRIu64
329 "\nTotal packets crypto errors: %17"PRIu64,
331 total_packets_enqueued,
332 total_packets_dequeued,
334 total_packets_dropped,
335 total_packets_errors);
336 printf("\n====================================================\n");
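/*
 * Enqueue a burst of n crypto ops to the device/queue pair in cparams.
 * Ops the PMD could not accept are counted as errors and freed together
 * with their source mbufs.
 */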
342 l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
343 struct l2fwd_crypto_params *cparams)
345 struct rte_crypto_op **op_buffer;
348 op_buffer = (struct rte_crypto_op **)
349 qconf->op_buf[cparams->dev_id].buffer;
351 ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
352 cparams->qp_id, op_buffer, (uint16_t) n);
354 crypto_statistics[cparams->dev_id].enqueued += ret;
355 if (unlikely(ret < n)) {
356 crypto_statistics[cparams->dev_id].errors += (n - ret);
358 rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
359 rte_crypto_op_free(op_buffer[ret]);
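/*
 * Buffer a single crypto op per device; once MAX_PKT_BURST ops have
 * accumulated, flush them with l2fwd_crypto_send_burst().
 */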
367 l2fwd_crypto_enqueue(struct rte_crypto_op *op,
368 struct l2fwd_crypto_params *cparams)
370 unsigned lcore_id, len;
371 struct lcore_queue_conf *qconf;
373 lcore_id = rte_lcore_id();
375 qconf = &lcore_queue_conf[lcore_id];
376 len = qconf->op_buf[cparams->dev_id].len;
377 qconf->op_buf[cparams->dev_id].buffer[len] = op;
380 /* enough ops to be sent */
381 if (len == MAX_PKT_BURST) {
382 l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
386 qconf->op_buf[cparams->dev_id].len = len;
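/*
 * Build a symmetric crypto op for one received IPv4 packet: skip the
 * Ethernet/IPv4 headers, zero-pad block ciphers to the block size, set
 * cipher/auth/AEAD data offsets, IVs, digest and AAD, then enqueue the op.
 */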
391 l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
392 struct rte_crypto_op *op,
393 struct l2fwd_crypto_params *cparams)
395 struct rte_ether_hdr *eth_hdr;
396 struct rte_ipv4_hdr *ip_hdr;
398 uint32_t ipdata_offset, data_len;
399 uint32_t pad_len = 0;
402 eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
404 if (eth_hdr->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
407 ipdata_offset = sizeof(struct rte_ether_hdr);
409 ip_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
412 ipdata_offset += (ip_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK)
413 * RTE_IPV4_IHL_MULTIPLIER;
416 /* Zero pad data to be crypto'd so it is block aligned */
417 data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
419 if ((cparams->do_hash || cparams->do_aead) && cparams->hash_verify)
420 data_len -= cparams->digest_length;
422 if (cparams->do_cipher) {
424 * Following algorithms are block cipher algorithms,
425 * and might need padding
427 switch (cparams->cipher_algo) {
428 case RTE_CRYPTO_CIPHER_AES_CBC:
429 case RTE_CRYPTO_CIPHER_AES_ECB:
430 case RTE_CRYPTO_CIPHER_DES_CBC:
431 case RTE_CRYPTO_CIPHER_3DES_CBC:
432 case RTE_CRYPTO_CIPHER_3DES_ECB:
433 if (data_len % cparams->block_size)
434 pad_len = cparams->block_size -
435 (data_len % cparams->block_size);
442 padding = rte_pktmbuf_append(m, pad_len);
443 if (unlikely(!padding))
447 memset(padding, 0, pad_len);
451 /* Set crypto operation data parameters */
452 rte_crypto_op_attach_sym_session(op, cparams->session);
454 if (cparams->do_hash) {
455 if (cparams->auth_iv.length) {
456 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
459 cparams->cipher_iv.length);
461 * Copy IV at the end of the crypto operation,
462 * after the cipher IV, if added
464 rte_memcpy(iv_ptr, cparams->auth_iv.data,
465 cparams->auth_iv.length);
467 if (!cparams->hash_verify) {
468 /* Append space for digest to end of packet */
469 op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
470 cparams->digest_length);
472 op->sym->auth.digest.data = rte_pktmbuf_mtod(m,
473 uint8_t *) + ipdata_offset + data_len;
476 op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
477 rte_pktmbuf_pkt_len(m) - cparams->digest_length);
479 /* For wireless algorithms, offset/length must be in bits */
480 if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
481 cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
482 cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
483 op->sym->auth.data.offset = ipdata_offset << 3;
484 op->sym->auth.data.length = data_len << 3;
486 op->sym->auth.data.offset = ipdata_offset;
487 op->sym->auth.data.length = data_len;
491 if (cparams->do_cipher) {
492 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
494 /* Copy IV at the end of the crypto operation */
495 rte_memcpy(iv_ptr, cparams->cipher_iv.data,
496 cparams->cipher_iv.length);
498 /* For wireless algorithms, offset/length must be in bits */
499 if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
500 cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
501 cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
502 op->sym->cipher.data.offset = ipdata_offset << 3;
503 op->sym->cipher.data.length = data_len << 3;
505 op->sym->cipher.data.offset = ipdata_offset;
506 op->sym->cipher.data.length = data_len;
510 if (cparams->do_aead) {
511 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
513 /* Copy IV at the end of the crypto operation */
515 * If doing AES-CCM, nonce is copied one byte
516 * after the start of IV field
518 if (cparams->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
519 rte_memcpy(iv_ptr + 1, cparams->aead_iv.data,
520 cparams->aead_iv.length);
522 rte_memcpy(iv_ptr, cparams->aead_iv.data,
523 cparams->aead_iv.length);
525 op->sym->aead.data.offset = ipdata_offset;
526 op->sym->aead.data.length = data_len;
528 if (!cparams->hash_verify) {
529 /* Append space for digest to end of packet */
530 op->sym->aead.digest.data = (uint8_t *)rte_pktmbuf_append(m,
531 cparams->digest_length);
533 op->sym->aead.digest.data = rte_pktmbuf_mtod(m,
534 uint8_t *) + ipdata_offset + data_len;
537 op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
538 rte_pktmbuf_pkt_len(m) - cparams->digest_length);
540 if (cparams->aad.length) {
541 op->sym->aead.aad.data = cparams->aad.data;
542 op->sym->aead.aad.phys_addr = cparams->aad.phys_addr;
548 return l2fwd_crypto_enqueue(op, cparams);
552 /* Send the burst of packets on an output interface */
554 l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
557 struct rte_mbuf **pkt_buffer;
560 pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
562 ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
563 port_statistics[port].tx += ret;
564 if (unlikely(ret < n)) {
565 port_statistics[port].dropped += (n - ret);
567 rte_pktmbuf_free(pkt_buffer[ret]);
574 /* Enqueue packets for TX and prepare them to be sent */
576 l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
578 unsigned lcore_id, len;
579 struct lcore_queue_conf *qconf;
581 lcore_id = rte_lcore_id();
583 qconf = &lcore_queue_conf[lcore_id];
584 len = qconf->pkt_buf[port].len;
585 qconf->pkt_buf[port].buffer[len] = m;
588 /* enough pkts to be sent */
589 if (unlikely(len == MAX_PKT_BURST)) {
590 l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
594 qconf->pkt_buf[port].len = len;
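/*
 * Rewrite the Ethernet header: the destination MAC becomes
 * 02:00:00:00:00:<dest port id>, the source MAC becomes the TX port's address.
 */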
599 l2fwd_mac_updating(struct rte_mbuf *m, uint16_t dest_portid)
601 struct rte_ether_hdr *eth;
604 eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
606 /* 02:00:00:00:00:xx */
607 tmp = &eth->d_addr.addr_bytes[0];
608 *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
611 rte_ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], &eth->s_addr);
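/*
 * Forward a packet that has completed crypto processing: optionally update
 * the MAC addresses, strip the appended digest on verify and any cipher
 * padding on decrypt, then queue it for TX on the paired destination port.
 */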
615 l2fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
616 struct l2fwd_crypto_options *options)
620 struct rte_ipv4_hdr *ip_hdr;
621 uint32_t ipdata_offset = sizeof(struct rte_ether_hdr);
623 ip_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
625 dst_port = l2fwd_dst_ports[portid];
627 if (options->mac_updating)
628 l2fwd_mac_updating(m, dst_port);
630 if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
631 rte_pktmbuf_trim(m, options->auth_xform.auth.digest_length);
633 if (options->cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
634 pad_len = m->pkt_len - rte_be_to_cpu_16(ip_hdr->total_length) -
636 rte_pktmbuf_trim(m, pad_len);
639 l2fwd_send_packet(m, dst_port);
642 /** Generate random key */
644 generate_random_key(uint8_t *key, unsigned length)
649 fd = open("/dev/urandom", O_RDONLY);
651 rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
653 ret = read(fd, key, length);
656 if (ret != (signed)length)
657 rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
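/*
 * Create a symmetric session from the per-socket session pool and chain the
 * configured xforms (AEAD, cipher->auth, auth->cipher, cipher only or auth
 * only) before initializing it on the crypto device.
 */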
660 static struct rte_cryptodev_sym_session *
661 initialize_crypto_session(struct l2fwd_crypto_options *options, uint8_t cdev_id)
663 struct rte_crypto_sym_xform *first_xform;
664 struct rte_cryptodev_sym_session *session;
665 int retval = rte_cryptodev_socket_id(cdev_id);
670 uint8_t socket_id = (uint8_t) retval;
672 if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
673 first_xform = &options->aead_xform;
674 } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
675 first_xform = &options->cipher_xform;
676 first_xform->next = &options->auth_xform;
677 } else if (options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER) {
678 first_xform = &options->auth_xform;
679 first_xform->next = &options->cipher_xform;
680 } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
681 first_xform = &options->cipher_xform;
683 first_xform = &options->auth_xform;
686 session = rte_cryptodev_sym_session_create(
687 session_pool_socket[socket_id].sess_mp);
691 if (rte_cryptodev_sym_session_init(cdev_id, session,
693 session_pool_socket[socket_id].priv_mp) < 0)
700 l2fwd_crypto_options_print(struct l2fwd_crypto_options *options);
702 /* main processing loop */
704 l2fwd_main_loop(struct l2fwd_crypto_options *options)
706 struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
707 struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
709 unsigned lcore_id = rte_lcore_id();
710 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
711 unsigned int i, j, nb_rx, len;
713 struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
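/* TSC cycles between periodic TX/crypto buffer drains (~BURST_TX_DRAIN_US us) */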
714 const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
715 US_PER_S * BURST_TX_DRAIN_US;
716 struct l2fwd_crypto_params *cparams;
717 struct l2fwd_crypto_params port_cparams[qconf->nb_crypto_devs];
718 struct rte_cryptodev_sym_session *session;
720 if (qconf->nb_rx_ports == 0) {
721 RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
725 RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
727 for (i = 0; i < qconf->nb_rx_ports; i++) {
729 portid = qconf->rx_port_list[i];
730 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
734 for (i = 0; i < qconf->nb_crypto_devs; i++) {
735 port_cparams[i].do_cipher = 0;
736 port_cparams[i].do_hash = 0;
737 port_cparams[i].do_aead = 0;
739 switch (options->xform_chain) {
740 case L2FWD_CRYPTO_AEAD:
741 port_cparams[i].do_aead = 1;
743 case L2FWD_CRYPTO_CIPHER_HASH:
744 case L2FWD_CRYPTO_HASH_CIPHER:
745 port_cparams[i].do_cipher = 1;
746 port_cparams[i].do_hash = 1;
748 case L2FWD_CRYPTO_HASH_ONLY:
749 port_cparams[i].do_hash = 1;
751 case L2FWD_CRYPTO_CIPHER_ONLY:
752 port_cparams[i].do_cipher = 1;
756 port_cparams[i].dev_id = qconf->cryptodev_list[i];
757 port_cparams[i].qp_id = 0;
759 port_cparams[i].block_size = options->block_size;
761 if (port_cparams[i].do_hash) {
762 port_cparams[i].auth_iv.data = options->auth_iv.data;
763 port_cparams[i].auth_iv.length = options->auth_iv.length;
764 if (!options->auth_iv_param)
765 generate_random_key(port_cparams[i].auth_iv.data,
766 port_cparams[i].auth_iv.length);
767 if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
768 port_cparams[i].hash_verify = 1;
770 port_cparams[i].hash_verify = 0;
772 port_cparams[i].auth_algo = options->auth_xform.auth.algo;
773 port_cparams[i].digest_length =
774 options->auth_xform.auth.digest_length;
775 /* Set IV parameters */
776 if (options->auth_iv.length) {
777 options->auth_xform.auth.iv.offset =
778 IV_OFFSET + options->cipher_iv.length;
779 options->auth_xform.auth.iv.length =
780 options->auth_iv.length;
784 if (port_cparams[i].do_aead) {
785 port_cparams[i].aead_iv.data = options->aead_iv.data;
786 port_cparams[i].aead_iv.length = options->aead_iv.length;
787 if (!options->aead_iv_param)
788 generate_random_key(port_cparams[i].aead_iv.data,
789 port_cparams[i].aead_iv.length);
790 port_cparams[i].aead_algo = options->aead_xform.aead.algo;
791 port_cparams[i].digest_length =
792 options->aead_xform.aead.digest_length;
793 if (options->aead_xform.aead.aad_length) {
794 port_cparams[i].aad.data = options->aad.data;
795 port_cparams[i].aad.phys_addr = options->aad.phys_addr;
796 port_cparams[i].aad.length = options->aad.length;
797 if (!options->aad_param)
798 generate_random_key(port_cparams[i].aad.data,
799 port_cparams[i].aad.length);
801 * If doing AES-CCM, the first 18 bytes have to be reserved,
802 * and the actual AAD should start from byte 18
804 if (port_cparams[i].aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
805 memmove(port_cparams[i].aad.data + 18,
806 port_cparams[i].aad.data,
807 port_cparams[i].aad.length);
810 port_cparams[i].aad.length = 0;
812 if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_DECRYPT)
813 port_cparams[i].hash_verify = 1;
815 port_cparams[i].hash_verify = 0;
817 /* Set IV parameters */
818 options->aead_xform.aead.iv.offset = IV_OFFSET;
819 options->aead_xform.aead.iv.length = options->aead_iv.length;
822 if (port_cparams[i].do_cipher) {
823 port_cparams[i].cipher_iv.data = options->cipher_iv.data;
824 port_cparams[i].cipher_iv.length = options->cipher_iv.length;
825 if (!options->cipher_iv_param)
826 generate_random_key(port_cparams[i].cipher_iv.data,
827 port_cparams[i].cipher_iv.length);
829 port_cparams[i].cipher_algo = options->cipher_xform.cipher.algo;
830 /* Set IV parameters */
831 options->cipher_xform.cipher.iv.offset = IV_OFFSET;
832 options->cipher_xform.cipher.iv.length =
833 options->cipher_iv.length;
836 session = initialize_crypto_session(options,
837 port_cparams[i].dev_id);
839 rte_exit(EXIT_FAILURE, "Failed to initialize crypto session\n");
841 port_cparams[i].session = session;
843 RTE_LOG(INFO, L2FWD, " -- lcoreid=%u cryptoid=%u\n", lcore_id,
844 port_cparams[i].dev_id);
847 l2fwd_crypto_options_print(options);
850 * Initialize previous tsc timestamp before the loop,
851 * to avoid showing the port statistics immediately,
852 * so the user can see the crypto information.
854 prev_tsc = rte_rdtsc();
857 cur_tsc = rte_rdtsc();
860 * Crypto device/TX burst queue drain
862 diff_tsc = cur_tsc - prev_tsc;
863 if (unlikely(diff_tsc > drain_tsc)) {
864 /* Enqueue all crypto ops remaining in buffers */
865 for (i = 0; i < qconf->nb_crypto_devs; i++) {
866 cparams = &port_cparams[i];
867 len = qconf->op_buf[cparams->dev_id].len;
868 l2fwd_crypto_send_burst(qconf, len, cparams);
869 qconf->op_buf[cparams->dev_id].len = 0;
871 /* Transmit all packets remaining in buffers */
872 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
873 if (qconf->pkt_buf[portid].len == 0)
875 l2fwd_send_burst(&lcore_queue_conf[lcore_id],
876 qconf->pkt_buf[portid].len,
878 qconf->pkt_buf[portid].len = 0;
881 /* if timer is enabled */
882 if (timer_period > 0) {
884 /* advance the timer */
885 timer_tsc += diff_tsc;
887 /* if timer has reached its timeout */
888 if (unlikely(timer_tsc >=
889 (uint64_t)timer_period)) {
891 /* do this only on main core */
892 if (lcore_id == rte_get_main_lcore()
893 && options->refresh_period) {
904 * Read packet from RX queues
906 for (i = 0; i < qconf->nb_rx_ports; i++) {
907 portid = qconf->rx_port_list[i];
909 cparams = &port_cparams[i];
911 nb_rx = rte_eth_rx_burst(portid, 0,
912 pkts_burst, MAX_PKT_BURST);
914 port_statistics[portid].rx += nb_rx;
918 * If we can't allocate crypto ops, drop this RX
919 * burst, but still dequeue and process completed
920 * ops so their offload structs are freed
922 if (rte_crypto_op_bulk_alloc(
923 l2fwd_crypto_op_pool,
924 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
927 for (j = 0; j < nb_rx; j++)
928 rte_pktmbuf_free(pkts_burst[j]);
933 /* Enqueue packets from Crypto device*/
934 for (j = 0; j < nb_rx; j++) {
937 l2fwd_simple_crypto_enqueue(m,
938 ops_burst[j], cparams);
942 /* Dequeue packets from Crypto device */
944 nb_rx = rte_cryptodev_dequeue_burst(
945 cparams->dev_id, cparams->qp_id,
946 ops_burst, MAX_PKT_BURST);
948 crypto_statistics[cparams->dev_id].dequeued +=
951 /* Forward crypto'd packets */
952 for (j = 0; j < nb_rx; j++) {
953 m = ops_burst[j]->sym->m_src;
955 rte_crypto_op_free(ops_burst[j]);
956 l2fwd_simple_forward(m, portid,
959 } while (nb_rx == MAX_PKT_BURST);
965 l2fwd_launch_one_lcore(void *arg)
967 l2fwd_main_loop((struct l2fwd_crypto_options *)arg);
971 /* Display command line arguments usage */
973 l2fwd_crypto_usage(const char *prgname)
975 printf("%s [EAL options] --\n"
976 " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
977 " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
978 " -s manage all ports from single lcore\n"
979 " -T PERIOD: statistics will be refreshed each PERIOD seconds"
980 " (0 to disable, 10 default, 86400 maximum)\n"
982 " --cdev_type HW / SW / ANY\n"
983 " --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY /"
984 " HASH_ONLY / AEAD\n"
986 " --cipher_algo ALGO\n"
987 " --cipher_op ENCRYPT / DECRYPT\n"
988 " --cipher_key KEY (bytes separated with \":\")\n"
989 " --cipher_key_random_size SIZE: size of cipher key when generated randomly\n"
990 " --cipher_iv IV (bytes separated with \":\")\n"
991 " --cipher_iv_random_size SIZE: size of cipher IV when generated randomly\n"
993 " --auth_algo ALGO\n"
994 " --auth_op GENERATE / VERIFY\n"
995 " --auth_key KEY (bytes separated with \":\")\n"
996 " --auth_key_random_size SIZE: size of auth key when generated randomly\n"
997 " --auth_iv IV (bytes separated with \":\")\n"
998 " --auth_iv_random_size SIZE: size of auth IV when generated randomly\n"
1000 " --aead_algo ALGO\n"
1001 " --aead_op ENCRYPT / DECRYPT\n"
1002 " --aead_key KEY (bytes separated with \":\")\n"
1003 " --aead_key_random_size SIZE: size of AEAD key when generated randomly\n"
1004 " --aead_iv IV (bytes separated with \":\")\n"
1005 " --aead_iv_random_size SIZE: size of AEAD IV when generated randomly\n"
1006 " --aad AAD (bytes separated with \":\")\n"
1007 " --aad_random_size SIZE: size of AAD when generated randomly\n"
1009 " --digest_size SIZE: size of digest to be generated/verified\n"
1012 " --cryptodev_mask MASK: hexadecimal bitmask of crypto devices to configure\n"
1014 " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
1016 " - The source MAC address is replaced by the TX port MAC address\n"
1017 " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n",
1021 /** Parse crypto device type command line argument */
1023 parse_cryptodev_type(enum cdev_type *type, char *optarg)
1025 if (strcmp("HW", optarg) == 0) {
1026 *type = CDEV_TYPE_HW;
1028 } else if (strcmp("SW", optarg) == 0) {
1029 *type = CDEV_TYPE_SW;
1031 } else if (strcmp("ANY", optarg) == 0) {
1032 *type = CDEV_TYPE_ANY;
1039 /** Parse crypto chain xform command line argument */
1041 parse_crypto_opt_chain(struct l2fwd_crypto_options *options, char *optarg)
1043 if (strcmp("CIPHER_HASH", optarg) == 0) {
1044 options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
1046 } else if (strcmp("HASH_CIPHER", optarg) == 0) {
1047 options->xform_chain = L2FWD_CRYPTO_HASH_CIPHER;
1049 } else if (strcmp("CIPHER_ONLY", optarg) == 0) {
1050 options->xform_chain = L2FWD_CRYPTO_CIPHER_ONLY;
1052 } else if (strcmp("HASH_ONLY", optarg) == 0) {
1053 options->xform_chain = L2FWD_CRYPTO_HASH_ONLY;
1055 } else if (strcmp("AEAD", optarg) == 0) {
1056 options->xform_chain = L2FWD_CRYPTO_AEAD;
1063 /** Parse crypto cipher algo option command line argument */
1065 parse_cipher_algo(enum rte_crypto_cipher_algorithm *algo, char *optarg)
1068 if (rte_cryptodev_get_cipher_algo_enum(algo, optarg) < 0) {
1069 RTE_LOG(ERR, USER1, "Specified cipher algorithm "
1070 "not supported!\n");
1077 /** Parse crypto cipher operation command line argument */
1079 parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
1081 if (strcmp("ENCRYPT", optarg) == 0) {
1082 *op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1084 } else if (strcmp("DECRYPT", optarg) == 0) {
1085 *op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
1089 printf("Cipher operation not supported!\n");
1093 /** Parse bytes from command line argument */
1095 parse_bytes(uint8_t *data, char *input_arg, uint16_t max_size)
1097 unsigned byte_count;
1101 for (byte_count = 0, token = strtok(input_arg, ":");
1102 (byte_count < max_size) && (token != NULL);
1103 token = strtok(NULL, ":")) {
1105 int number = (int)strtol(token, NULL, 16);
1107 if (errno == EINVAL || errno == ERANGE || number > 0xFF)
1110 data[byte_count++] = (uint8_t)number;
1116 /** Parse size param*/
1118 parse_size(int *size, const char *q_arg)
1123 /* parse decimal string */
1124 n = strtoul(q_arg, &end, 10);
1125 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
1129 printf("invalid size\n");
1137 /** Parse crypto cipher operation command line argument */
1139 parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
1141 if (rte_cryptodev_get_auth_algo_enum(algo, optarg) < 0) {
1142 RTE_LOG(ERR, USER1, "Specified authentication algorithm "
1143 "not supported!\n");
1151 parse_auth_op(enum rte_crypto_auth_operation *op, char *optarg)
1153 if (strcmp("VERIFY", optarg) == 0) {
1154 *op = RTE_CRYPTO_AUTH_OP_VERIFY;
1156 } else if (strcmp("GENERATE", optarg) == 0) {
1157 *op = RTE_CRYPTO_AUTH_OP_GENERATE;
1161 printf("Specified authentication operation not supported!\n");
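/** Parse crypto AEAD algorithm command line argument */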
1166 parse_aead_algo(enum rte_crypto_aead_algorithm *algo, char *optarg)
1168 if (rte_cryptodev_get_aead_algo_enum(algo, optarg) < 0) {
1169 RTE_LOG(ERR, USER1, "Specified AEAD algorithm "
1170 "not supported!\n");
1178 parse_aead_op(enum rte_crypto_aead_operation *op, char *optarg)
1180 if (strcmp("ENCRYPT", optarg) == 0) {
1181 *op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
1183 } else if (strcmp("DECRYPT", optarg) == 0) {
1184 *op = RTE_CRYPTO_AEAD_OP_DECRYPT;
1188 printf("Specified AEAD operation not supported!\n");
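/** Parse crypto device mask (hexadecimal bitmask of crypto devices) */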
1192 parse_cryptodev_mask(struct l2fwd_crypto_options *options,
1198 /* parse hexadecimal string */
1199 pm = strtoul(q_arg, &end, 16);
1200 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
1203 options->cryptodev_mask = pm;
1204 if (options->cryptodev_mask == 0) {
1205 printf("invalid cryptodev_mask specified\n");
1212 /** Parse long options */
1214 l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
1215 struct option *lgopts, int option_index)
1219 if (strcmp(lgopts[option_index].name, "cdev_type") == 0) {
1220 retval = parse_cryptodev_type(&options->type, optarg);
1222 strlcpy(options->string_type, optarg, MAX_STR_LEN);
1226 else if (strcmp(lgopts[option_index].name, "chain") == 0)
1227 return parse_crypto_opt_chain(options, optarg);
1229 /* Cipher options */
1230 else if (strcmp(lgopts[option_index].name, "cipher_algo") == 0)
1231 return parse_cipher_algo(&options->cipher_xform.cipher.algo,
1234 else if (strcmp(lgopts[option_index].name, "cipher_op") == 0)
1235 return parse_cipher_op(&options->cipher_xform.cipher.op,
1238 else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
1239 options->ckey_param = 1;
1240 options->cipher_xform.cipher.key.length =
1241 parse_bytes(options->cipher_key, optarg, MAX_KEY_SIZE);
1242 if (options->cipher_xform.cipher.key.length > 0)
1248 else if (strcmp(lgopts[option_index].name, "cipher_key_random_size") == 0)
1249 return parse_size(&options->ckey_random_size, optarg);
1251 else if (strcmp(lgopts[option_index].name, "cipher_iv") == 0) {
1252 options->cipher_iv_param = 1;
1253 options->cipher_iv.length =
1254 parse_bytes(options->cipher_iv.data, optarg, MAX_IV_SIZE);
1255 if (options->cipher_iv.length > 0)
1261 else if (strcmp(lgopts[option_index].name, "cipher_iv_random_size") == 0)
1262 return parse_size(&options->cipher_iv_random_size, optarg);
1264 /* Authentication options */
1265 else if (strcmp(lgopts[option_index].name, "auth_algo") == 0) {
1266 return parse_auth_algo(&options->auth_xform.auth.algo,
1270 else if (strcmp(lgopts[option_index].name, "auth_op") == 0)
1271 return parse_auth_op(&options->auth_xform.auth.op,
1274 else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
1275 options->akey_param = 1;
1276 options->auth_xform.auth.key.length =
1277 parse_bytes(options->auth_key, optarg, MAX_KEY_SIZE);
1278 if (options->auth_xform.auth.key.length > 0)
1284 else if (strcmp(lgopts[option_index].name, "auth_key_random_size") == 0) {
1285 return parse_size(&options->akey_random_size, optarg);
1288 else if (strcmp(lgopts[option_index].name, "auth_iv") == 0) {
1289 options->auth_iv_param = 1;
1290 options->auth_iv.length =
1291 parse_bytes(options->auth_iv.data, optarg, MAX_IV_SIZE);
1292 if (options->auth_iv.length > 0)
1298 else if (strcmp(lgopts[option_index].name, "auth_iv_random_size") == 0)
1299 return parse_size(&options->auth_iv_random_size, optarg);
1302 else if (strcmp(lgopts[option_index].name, "aead_algo") == 0) {
1303 return parse_aead_algo(&options->aead_xform.aead.algo,
1307 else if (strcmp(lgopts[option_index].name, "aead_op") == 0)
1308 return parse_aead_op(&options->aead_xform.aead.op,
1311 else if (strcmp(lgopts[option_index].name, "aead_key") == 0) {
1312 options->aead_key_param = 1;
1313 options->aead_xform.aead.key.length =
1314 parse_bytes(options->aead_key, optarg, MAX_KEY_SIZE);
1315 if (options->aead_xform.aead.key.length > 0)
1321 else if (strcmp(lgopts[option_index].name, "aead_key_random_size") == 0)
1322 return parse_size(&options->aead_key_random_size, optarg);
1325 else if (strcmp(lgopts[option_index].name, "aead_iv") == 0) {
1326 options->aead_iv_param = 1;
1327 options->aead_iv.length =
1328 parse_bytes(options->aead_iv.data, optarg, MAX_IV_SIZE);
1329 if (options->aead_iv.length > 0)
1335 else if (strcmp(lgopts[option_index].name, "aead_iv_random_size") == 0)
1336 return parse_size(&options->aead_iv_random_size, optarg);
1338 else if (strcmp(lgopts[option_index].name, "aad") == 0) {
1339 options->aad_param = 1;
1340 options->aad.length =
1341 parse_bytes(options->aad.data, optarg, MAX_AAD_SIZE);
1342 if (options->aad.length > 0)
1348 else if (strcmp(lgopts[option_index].name, "aad_random_size") == 0) {
1349 return parse_size(&options->aad_random_size, optarg);
1352 else if (strcmp(lgopts[option_index].name, "digest_size") == 0) {
1353 return parse_size(&options->digest_size, optarg);
1356 else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
1357 options->sessionless = 1;
1361 else if (strcmp(lgopts[option_index].name, "cryptodev_mask") == 0)
1362 return parse_cryptodev_mask(options, optarg);
1364 else if (strcmp(lgopts[option_index].name, "mac-updating") == 0) {
1365 options->mac_updating = 1;
1369 else if (strcmp(lgopts[option_index].name, "no-mac-updating") == 0) {
1370 options->mac_updating = 0;
1377 /** Parse port mask */
1379 l2fwd_crypto_parse_portmask(struct l2fwd_crypto_options *options,
1385 /* parse hexadecimal string */
1386 pm = strtoul(q_arg, &end, 16);
1387 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
1390 options->portmask = pm;
1391 if (options->portmask == 0) {
1392 printf("invalid portmask specified\n");
1399 /** Parse number of queues */
1401 l2fwd_crypto_parse_nqueue(struct l2fwd_crypto_options *options,
1407 /* parse decimal string */
1408 n = strtoul(q_arg, &end, 10);
1409 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
1411 else if (n >= MAX_RX_QUEUE_PER_LCORE)
1414 options->nb_ports_per_lcore = n;
1415 if (options->nb_ports_per_lcore == 0) {
1416 printf("invalid number of ports selected\n");
1423 /** Parse timer period */
1425 l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
1431 /* parse number string */
1432 n = (unsigned)strtol(q_arg, &end, 10);
1433 if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
1436 if (n >= MAX_TIMER_PERIOD) {
1437 printf("Warning: refresh period %lu is greater than "
1438 "max value %lu, using max value\n",
1439 n, MAX_TIMER_PERIOD);
1440 n = MAX_TIMER_PERIOD;
1443 options->refresh_period = n * 1000 * TIMER_MILLISECOND;
1448 /** Generate default options for application */
1450 l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
1452 options->portmask = 0xffffffff;
1453 options->nb_ports_per_lcore = 1;
1454 options->refresh_period = 10000;
1455 options->single_lcore = 0;
1456 options->sessionless = 0;
1458 options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
1461 options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1462 options->cipher_xform.next = NULL;
1463 options->ckey_param = 0;
1464 options->ckey_random_size = -1;
1465 options->cipher_xform.cipher.key.length = 0;
1466 options->cipher_iv_param = 0;
1467 options->cipher_iv_random_size = -1;
1468 options->cipher_iv.length = 0;
1470 options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
1471 options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1473 /* Authentication Data */
1474 options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1475 options->auth_xform.next = NULL;
1476 options->akey_param = 0;
1477 options->akey_random_size = -1;
1478 options->auth_xform.auth.key.length = 0;
1479 options->auth_iv_param = 0;
1480 options->auth_iv_random_size = -1;
1481 options->auth_iv.length = 0;
1483 options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
1484 options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
1487 options->aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1488 options->aead_xform.next = NULL;
1489 options->aead_key_param = 0;
1490 options->aead_key_random_size = -1;
1491 options->aead_xform.aead.key.length = 0;
1492 options->aead_iv_param = 0;
1493 options->aead_iv_random_size = -1;
1494 options->aead_iv.length = 0;
1496 options->aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
1497 options->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
1499 options->aad_param = 0;
1500 options->aad_random_size = -1;
1501 options->aad.length = 0;
1503 options->digest_size = -1;
1505 options->type = CDEV_TYPE_ANY;
1506 options->cryptodev_mask = UINT64_MAX;
1508 options->mac_updating = 1;
1512 display_cipher_info(struct l2fwd_crypto_options *options)
1514 printf("\n---- Cipher information ---\n");
1515 printf("Algorithm: %s\n",
1516 rte_crypto_cipher_algorithm_strings[options->cipher_xform.cipher.algo]);
1517 rte_hexdump(stdout, "Cipher key:",
1518 options->cipher_xform.cipher.key.data,
1519 options->cipher_xform.cipher.key.length);
1520 rte_hexdump(stdout, "IV:", options->cipher_iv.data, options->cipher_iv.length);
1524 display_auth_info(struct l2fwd_crypto_options *options)
1526 printf("\n---- Authentication information ---\n");
1527 printf("Algorithm: %s\n",
1528 rte_crypto_auth_algorithm_strings[options->auth_xform.auth.algo]);
1529 rte_hexdump(stdout, "Auth key:",
1530 options->auth_xform.auth.key.data,
1531 options->auth_xform.auth.key.length);
1532 rte_hexdump(stdout, "IV:", options->auth_iv.data, options->auth_iv.length);
1536 display_aead_info(struct l2fwd_crypto_options *options)
1538 printf("\n---- AEAD information ---\n");
1539 printf("Algorithm: %s\n",
1540 rte_crypto_aead_algorithm_strings[options->aead_xform.aead.algo]);
1541 rte_hexdump(stdout, "AEAD key:",
1542 options->aead_xform.aead.key.data,
1543 options->aead_xform.aead.key.length);
1544 rte_hexdump(stdout, "IV:", options->aead_iv.data, options->aead_iv.length);
1545 rte_hexdump(stdout, "AAD:", options->aad.data, options->aad.length);
1549 l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
1551 char string_cipher_op[MAX_STR_LEN];
1552 char string_auth_op[MAX_STR_LEN];
1553 char string_aead_op[MAX_STR_LEN];
1555 if (options->cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
1556 strcpy(string_cipher_op, "Encrypt");
1558 strcpy(string_cipher_op, "Decrypt");
1560 if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
1561 strcpy(string_auth_op, "Auth generate");
1563 strcpy(string_auth_op, "Auth verify");
1565 if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
1566 strcpy(string_aead_op, "Authenticated encryption");
1568 strcpy(string_aead_op, "Authenticated decryption");
1571 printf("Options:-\n");
1572 printf("portmask: %x\n", options->portmask);
1573 printf("ports per lcore: %u\n", options->nb_ports_per_lcore);
1574 printf("refresh period : %u\n", options->refresh_period);
1575 printf("single lcore mode: %s\n",
1576 options->single_lcore ? "enabled" : "disabled");
1577 printf("stats_printing: %s\n",
1578 options->refresh_period == 0 ? "disabled" : "enabled");
1580 printf("sessionless crypto: %s\n",
1581 options->sessionless ? "enabled" : "disabled");
1583 if (options->ckey_param && (options->ckey_random_size != -1))
1584 printf("Cipher key already parsed, ignoring size of random key\n");
1586 if (options->akey_param && (options->akey_random_size != -1))
1587 printf("Auth key already parsed, ignoring size of random key\n");
1589 if (options->cipher_iv_param && (options->cipher_iv_random_size != -1))
1590 printf("Cipher IV already parsed, ignoring size of random IV\n");
1592 if (options->auth_iv_param && (options->auth_iv_random_size != -1))
1593 printf("Auth IV already parsed, ignoring size of random IV\n");
1595 if (options->aad_param && (options->aad_random_size != -1))
1596 printf("AAD already parsed, ignoring size of random AAD\n");
1598 printf("\nCrypto chain: ");
1599 switch (options->xform_chain) {
1600 case L2FWD_CRYPTO_AEAD:
1601 printf("Input --> %s --> Output\n", string_aead_op);
1602 display_aead_info(options);
1604 case L2FWD_CRYPTO_CIPHER_HASH:
1605 printf("Input --> %s --> %s --> Output\n",
1606 string_cipher_op, string_auth_op);
1607 display_cipher_info(options);
1608 display_auth_info(options);
1610 case L2FWD_CRYPTO_HASH_CIPHER:
1611 printf("Input --> %s --> %s --> Output\n",
1612 string_auth_op, string_cipher_op);
1613 display_cipher_info(options);
1614 display_auth_info(options);
1616 case L2FWD_CRYPTO_HASH_ONLY:
1617 printf("Input --> %s --> Output\n", string_auth_op);
1618 display_auth_info(options);
1620 case L2FWD_CRYPTO_CIPHER_ONLY:
1621 printf("Input --> %s --> Output\n", string_cipher_op);
1622 display_cipher_info(options);
1627 /* Parse the argument given in the command line of the application */
1629 l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
1630 int argc, char **argv)
1632 int opt, retval, option_index;
1633 char **argvopt = argv, *prgname = argv[0];
1635 static struct option lgopts[] = {
1636 { "sessionless", no_argument, 0, 0 },
1638 { "cdev_type", required_argument, 0, 0 },
1639 { "chain", required_argument, 0, 0 },
1641 { "cipher_algo", required_argument, 0, 0 },
1642 { "cipher_op", required_argument, 0, 0 },
1643 { "cipher_key", required_argument, 0, 0 },
1644 { "cipher_key_random_size", required_argument, 0, 0 },
1645 { "cipher_iv", required_argument, 0, 0 },
1646 { "cipher_iv_random_size", required_argument, 0, 0 },
1648 { "auth_algo", required_argument, 0, 0 },
1649 { "auth_op", required_argument, 0, 0 },
1650 { "auth_key", required_argument, 0, 0 },
1651 { "auth_key_random_size", required_argument, 0, 0 },
1652 { "auth_iv", required_argument, 0, 0 },
1653 { "auth_iv_random_size", required_argument, 0, 0 },
1655 { "aead_algo", required_argument, 0, 0 },
1656 { "aead_op", required_argument, 0, 0 },
1657 { "aead_key", required_argument, 0, 0 },
1658 { "aead_key_random_size", required_argument, 0, 0 },
1659 { "aead_iv", required_argument, 0, 0 },
1660 { "aead_iv_random_size", required_argument, 0, 0 },
1662 { "aad", required_argument, 0, 0 },
1663 { "aad_random_size", required_argument, 0, 0 },
1665 { "digest_size", required_argument, 0, 0 },
1667 { "sessionless", no_argument, 0, 0 },
1668 { "cryptodev_mask", required_argument, 0, 0},
1670 { "mac-updating", no_argument, 0, 0},
1671 { "no-mac-updating", no_argument, 0, 0},
1676 l2fwd_crypto_default_options(options);
1678 while ((opt = getopt_long(argc, argvopt, "p:q:sT:", lgopts,
1679 &option_index)) != EOF) {
1683 retval = l2fwd_crypto_parse_args_long_options(options,
1684 lgopts, option_index);
1686 l2fwd_crypto_usage(prgname);
1693 retval = l2fwd_crypto_parse_portmask(options, optarg);
1695 l2fwd_crypto_usage(prgname);
1702 retval = l2fwd_crypto_parse_nqueue(options, optarg);
1704 l2fwd_crypto_usage(prgname);
1711 options->single_lcore = 1;
1717 retval = l2fwd_crypto_parse_timer_period(options,
1720 l2fwd_crypto_usage(prgname);
1726 l2fwd_crypto_usage(prgname);
1733 argv[optind-1] = prgname;
1736 optind = 1; /* reset getopt lib */
1741 /* Check the link status of all ports for up to 9s, then print the final status */
1743 check_all_ports_link_status(uint32_t port_mask)
1745 #define CHECK_INTERVAL 100 /* 100ms */
1746 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1748 uint8_t count, all_ports_up, print_flag = 0;
1749 struct rte_eth_link link;
1751 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
1753 printf("\nChecking link status");
1755 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1757 RTE_ETH_FOREACH_DEV(portid) {
1758 if ((port_mask & (1 << portid)) == 0)
1760 memset(&link, 0, sizeof(link));
1761 ret = rte_eth_link_get_nowait(portid, &link);
1764 if (print_flag == 1)
1765 printf("Port %u link get failed: %s\n",
1766 portid, rte_strerror(-ret));
1769 /* print link status if flag set */
1770 if (print_flag == 1) {
1771 rte_eth_link_to_str(link_status_text,
1772 sizeof(link_status_text), &link);
1773 printf("Port %d %s\n", portid,
1777 /* clear all_ports_up flag if any link down */
1778 if (link.link_status == ETH_LINK_DOWN) {
1783 /* after finally printing all link status, get out */
1784 if (print_flag == 1)
1787 if (all_ports_up == 0) {
1790 rte_delay_ms(CHECK_INTERVAL);
1793 /* set the print_flag if all ports up or timeout */
1794 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1801 /* Check whether the device matches the requested type (HW, SW or any) */
1803 check_type(const struct l2fwd_crypto_options *options,
1804 const struct rte_cryptodev_info *dev_info)
1806 if (options->type == CDEV_TYPE_HW &&
1807 (dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
1809 if (options->type == CDEV_TYPE_SW &&
1810 !(dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
1812 if (options->type == CDEV_TYPE_ANY)
1818 static const struct rte_cryptodev_capabilities *
1819 check_device_support_cipher_algo(const struct l2fwd_crypto_options *options,
1820 const struct rte_cryptodev_info *dev_info,
1824 const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
1825 enum rte_crypto_cipher_algorithm cap_cipher_algo;
1826 enum rte_crypto_cipher_algorithm opt_cipher_algo =
1827 options->cipher_xform.cipher.algo;
1829 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1830 cap_cipher_algo = cap->sym.cipher.algo;
1831 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1832 if (cap_cipher_algo == opt_cipher_algo) {
1833 if (check_type(options, dev_info) == 0)
1837 cap = &dev_info->capabilities[++i];
1840 if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1841 printf("Algorithm %s not supported by cryptodev %u"
1842 " or device not of preferred type (%s)\n",
1843 rte_crypto_cipher_algorithm_strings[opt_cipher_algo],
1845 options->string_type);
1852 static const struct rte_cryptodev_capabilities *
1853 check_device_support_auth_algo(const struct l2fwd_crypto_options *options,
1854 const struct rte_cryptodev_info *dev_info,
1858 const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
1859 enum rte_crypto_auth_algorithm cap_auth_algo;
1860 enum rte_crypto_auth_algorithm opt_auth_algo =
1861 options->auth_xform.auth.algo;
1863 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1864 cap_auth_algo = cap->sym.auth.algo;
1865 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1866 if (cap_auth_algo == opt_auth_algo) {
1867 if (check_type(options, dev_info) == 0)
1871 cap = &dev_info->capabilities[++i];
1874 if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1875 printf("Algorithm %s not supported by cryptodev %u"
1876 " or device not of preferred type (%s)\n",
1877 rte_crypto_auth_algorithm_strings[opt_auth_algo],
1879 options->string_type);
1886 static const struct rte_cryptodev_capabilities *
1887 check_device_support_aead_algo(const struct l2fwd_crypto_options *options,
1888 const struct rte_cryptodev_info *dev_info,
1892 const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
1893 enum rte_crypto_aead_algorithm cap_aead_algo;
1894 enum rte_crypto_aead_algorithm opt_aead_algo =
1895 options->aead_xform.aead.algo;
1897 while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1898 cap_aead_algo = cap->sym.aead.algo;
1899 if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
1900 if (cap_aead_algo == opt_aead_algo) {
1901 if (check_type(options, dev_info) == 0)
1905 cap = &dev_info->capabilities[++i];
1908 if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
1909 printf("Algorithm %s not supported by cryptodev %u"
1910 " or device not of preferred type (%s)\n",
1911 rte_crypto_aead_algorithm_strings[opt_aead_algo],
1913 options->string_type);
1920 /* Check if the device is enabled by cryptodev_mask */
1922 check_cryptodev_mask(struct l2fwd_crypto_options *options,
1925 if (options->cryptodev_mask & (1 << cdev_id))
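/*
 * Return 0 if 'length' matches one of the sizes allowed by the capability
 * range (min/max/increment), -1 otherwise.
 */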
1932 check_supported_size(uint16_t length, uint16_t min, uint16_t max,
1938 if (increment == 0) {
1945 /* Range of values */
1946 for (supp_size = min; supp_size <= max; supp_size += increment) {
1947 if (length == supp_size)
1955 check_iv_param(const struct rte_crypto_param_range *iv_range_size,
1956 unsigned int iv_param, int iv_random_size,
1960 * Check if length of provided IV is supported
1961 * by the algorithm chosen.
1964 if (check_supported_size(iv_length,
1967 iv_range_size->increment)
1971 * Check if length of IV to be randomly generated
1972 * is supported by the algorithm chosen.
1974 } else if (iv_random_size != -1) {
1975 if (check_supported_size(iv_random_size,
1978 iv_range_size->increment)
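/*
 * Verify that the selected algorithms and key/IV/AAD/digest sizes are all
 * supported by crypto device cdev_id; return -1 otherwise.
 */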
1987 check_capabilities(struct l2fwd_crypto_options *options, uint8_t cdev_id)
1989 struct rte_cryptodev_info dev_info;
1990 const struct rte_cryptodev_capabilities *cap;
1992 rte_cryptodev_info_get(cdev_id, &dev_info);
1994 /* Set AEAD parameters */
1995 if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
1996 /* Check if device supports AEAD algo */
1997 cap = check_device_support_aead_algo(options, &dev_info,
2002 if (check_iv_param(&cap->sym.aead.iv_size,
2003 options->aead_iv_param,
2004 options->aead_iv_random_size,
2005 options->aead_iv.length) != 0) {
2006 RTE_LOG(DEBUG, USER1,
2007 "Device %u does not support IV length\n",
2013 * Check if length of provided AEAD key is supported
2014 * by the algorithm chosen.
2016 if (options->aead_key_param) {
2017 if (check_supported_size(
2018 options->aead_xform.aead.key.length,
2019 cap->sym.aead.key_size.min,
2020 cap->sym.aead.key_size.max,
2021 cap->sym.aead.key_size.increment)
2023 RTE_LOG(DEBUG, USER1,
2024 "Device %u does not support "
2025 "AEAD key length\n",
2030 * Check if length of the aead key to be randomly generated
2031 * is supported by the algorithm chosen.
2033 } else if (options->aead_key_random_size != -1) {
2034 if (check_supported_size(options->aead_key_random_size,
2035 cap->sym.aead.key_size.min,
2036 cap->sym.aead.key_size.max,
2037 cap->sym.aead.key_size.increment)
2039 RTE_LOG(DEBUG, USER1,
2040 "Device %u does not support "
2041 "AEAD key length\n",
2049 * Check if length of provided AAD is supported
2050 * by the algorithm chosen.
2052 if (options->aad_param) {
2053 if (check_supported_size(options->aad.length,
2054 cap->sym.aead.aad_size.min,
2055 cap->sym.aead.aad_size.max,
2056 cap->sym.aead.aad_size.increment)
2058 RTE_LOG(DEBUG, USER1,
2059 "Device %u does not support "
2065 * Check if length of AAD to be randomly generated
2066 * is supported by the algorithm chosen.
2068 } else if (options->aad_random_size != -1) {
2069 if (check_supported_size(options->aad_random_size,
2070 cap->sym.aead.aad_size.min,
2071 cap->sym.aead.aad_size.max,
2072 cap->sym.aead.aad_size.increment)
2074 RTE_LOG(DEBUG, USER1,
2075 "Device %u does not support "
2082 /* Check if digest size is supported by the algorithm. */
2083 if (options->digest_size != -1) {
2084 if (check_supported_size(options->digest_size,
2085 cap->sym.aead.digest_size.min,
2086 cap->sym.aead.digest_size.max,
2087 cap->sym.aead.digest_size.increment)
2089 RTE_LOG(DEBUG, USER1,
2090 "Device %u does not support "
2098 /* Set cipher parameters */
2099 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
2100 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
2101 options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
2102 /* Check if device supports cipher algo */
2103 cap = check_device_support_cipher_algo(options, &dev_info,
2108 if (check_iv_param(&cap->sym.cipher.iv_size,
2109 options->cipher_iv_param,
2110 options->cipher_iv_random_size,
2111 options->cipher_iv.length) != 0) {
2112 RTE_LOG(DEBUG, USER1,
2113 "Device %u does not support IV length\n",
2119 * Check if length of provided cipher key is supported
2120 * by the algorithm chosen.
2122 if (options->ckey_param) {
2123 if (check_supported_size(
2124 options->cipher_xform.cipher.key.length,
2125 cap->sym.cipher.key_size.min,
2126 cap->sym.cipher.key_size.max,
2127 cap->sym.cipher.key_size.increment)
2129 if (dev_info.feature_flags &
2130 RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY) {
2131 RTE_LOG(DEBUG, USER1,
2132 "Key length does not match the device "
2133 "%u capability. Key may be wrapped\n",
2136 RTE_LOG(DEBUG, USER1,
2137 "Key length does not match the device "
2145 * Check if length of the cipher key to be randomly generated
2146 * is supported by the algorithm chosen.
2148 } else if (options->ckey_random_size != -1) {
2149 if (check_supported_size(options->ckey_random_size,
2150 cap->sym.cipher.key_size.min,
2151 cap->sym.cipher.key_size.max,
2152 cap->sym.cipher.key_size.increment)
2154 RTE_LOG(DEBUG, USER1,
2155 "Device %u does not support cipher "
2163 /* Set auth parameters */
2164 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
2165 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
2166 options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
2167 /* Check if device supports auth algo */
2168 cap = check_device_support_auth_algo(options, &dev_info,
2173 if (check_iv_param(&cap->sym.auth.iv_size,
2174 options->auth_iv_param,
2175 options->auth_iv_random_size,
2176 options->auth_iv.length) != 0) {
2177 RTE_LOG(DEBUG, USER1,
2178 "Device %u does not support IV length\n",
2183 * Check if length of provided auth key is supported
2184 * by the algorithm chosen.
2186 if (options->akey_param) {
2187 if (check_supported_size(
2188 options->auth_xform.auth.key.length,
2189 cap->sym.auth.key_size.min,
2190 cap->sym.auth.key_size.max,
2191 cap->sym.auth.key_size.increment)
2193 RTE_LOG(DEBUG, USER1,
2194 "Device %u does not support auth "
2200 * Check if length of the auth key to be randomly generated
2201 * is supported by the algorithm chosen.
2203 } else if (options->akey_random_size != -1) {
2204 if (check_supported_size(options->akey_random_size,
2205 cap->sym.auth.key_size.min,
2206 cap->sym.auth.key_size.max,
2207 cap->sym.auth.key_size.increment)
2209 RTE_LOG(DEBUG, USER1,
2210 "Device %u does not support auth "
2217 /* Check if digest size is supported by the algorithm. */
2218 if (options->digest_size != -1) {
2219 if (check_supported_size(options->digest_size,
2220 cap->sym.auth.digest_size.min,
2221 cap->sym.auth.digest_size.max,
2222 cap->sym.auth.digest_size.increment)
2224 RTE_LOG(DEBUG, USER1,
2225 "Device %u does not support "
2237 initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
2238 uint8_t *enabled_cdevs)
2240 uint8_t cdev_id, cdev_count, enabled_cdev_count = 0;
2241 const struct rte_cryptodev_capabilities *cap;
2242 unsigned int sess_sz, max_sess_sz = 0;
2243 uint32_t sessions_needed = 0;
2246 cdev_count = rte_cryptodev_count();
2247 if (cdev_count == 0) {
2248 printf("No crypto devices available\n");
2252 for (cdev_id = 0; cdev_id < cdev_count && enabled_cdev_count < nb_ports;
2254 if (check_cryptodev_mask(options, cdev_id) < 0)
2257 if (check_capabilities(options, cdev_id) < 0)
2260 sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
2261 if (sess_sz > max_sess_sz)
2262 max_sess_sz = sess_sz;
2264 l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
2266 enabled_cdevs[cdev_id] = 1;
2267 enabled_cdev_count++;
2270 for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
2271 struct rte_cryptodev_qp_conf qp_conf;
2272 struct rte_cryptodev_info dev_info;
2274 if (enabled_cdevs[cdev_id] == 0)
2277 if (check_cryptodev_mask(options, cdev_id) < 0)
2280 if (check_capabilities(options, cdev_id) < 0)
2283 retval = rte_cryptodev_socket_id(cdev_id);
2286 printf("Invalid crypto device id used\n");
2290 uint8_t socket_id = (uint8_t) retval;
2292 struct rte_cryptodev_config conf = {
2293 .nb_queue_pairs = 1,
2294 .socket_id = socket_id,
2295 .ff_disable = RTE_CRYPTODEV_FF_SECURITY,
2298 rte_cryptodev_info_get(cdev_id, &dev_info);
2301 * Two session objects are required for each session
2302 * (one for the header, one for the private data)
2304 if (!strcmp(dev_info.driver_name, "crypto_scheduler")) {
2305 #ifdef RTE_CRYPTO_SCHEDULER
2306 uint32_t nb_workers =
2307 rte_cryptodev_scheduler_workers_get(cdev_id,
2310 sessions_needed = enabled_cdev_count * nb_workers;
2313 sessions_needed = enabled_cdev_count;
2315 if (session_pool_socket[socket_id].priv_mp == NULL) {
2316 char mp_name[RTE_MEMPOOL_NAMESIZE];
2318 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2319 "priv_sess_mp_%u", socket_id);
2321 session_pool_socket[socket_id].priv_mp =
2322 rte_mempool_create(mp_name,
2325 0, 0, NULL, NULL, NULL,
2329 if (session_pool_socket[socket_id].priv_mp == NULL) {
2330 printf("Cannot create pool on socket %d\n",
2335 printf("Allocated pool \"%s\" on socket %d\n",
2336 mp_name, socket_id);
2339 if (session_pool_socket[socket_id].sess_mp == NULL) {
2340 char mp_name[RTE_MEMPOOL_NAMESIZE];
2341 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
2342 "sess_mp_%u", socket_id);
2344 session_pool_socket[socket_id].sess_mp =
2345 rte_cryptodev_sym_session_pool_create(
2348 0, 0, 0, socket_id);
2350 if (session_pool_socket[socket_id].sess_mp == NULL) {
2351 printf("Cannot create pool on socket %d\n",
2356 printf("Allocated pool \"%s\" on socket %d\n",
2357 mp_name, socket_id);
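/*
 * Sketch of how the two pools created above are consumed later
 * (the actual session setup lives elsewhere in this application;
 * "xform" stands for the transform chain built from the options):
 *
 *     struct rte_cryptodev_sym_session *sess =
 *             rte_cryptodev_sym_session_create(
 *                     session_pool_socket[socket_id].sess_mp);
 *     rte_cryptodev_sym_session_init(cdev_id, sess, &xform,
 *                     session_pool_socket[socket_id].priv_mp);
 *
 * The session header is allocated from sess_mp and the per-device
 * private data from priv_mp, which is why priv_mp is sized with
 * max_sess_sz.
 */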
2360 /* Set AEAD parameters */
2361 if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
2362 cap = check_device_support_aead_algo(options, &dev_info,
2365 options->block_size = cap->sym.aead.block_size;
2367 /* Set IV if not provided from command line */
2368 if (options->aead_iv_param == 0) {
2369 if (options->aead_iv_random_size != -1)
2370 options->aead_iv.length =
2371 options->aead_iv_random_size;
2372 /* No size provided, use minimum size. */
2374 options->aead_iv.length =
2375 cap->sym.aead.iv_size.min;
2378 /* Set key if not provided from command line */
2379 if (options->aead_key_param == 0) {
2380 if (options->aead_key_random_size != -1)
2381 options->aead_xform.aead.key.length =
2382 options->aead_key_random_size;
2383 /* No size provided, use minimum size. */
2385 options->aead_xform.aead.key.length =
2386 cap->sym.aead.key_size.min;
2388 generate_random_key(options->aead_key,
2389 options->aead_xform.aead.key.length);
2392 /* Set AAD if not provided from command line */
2393 if (options->aad_param == 0) {
2394 if (options->aad_random_size != -1)
2395 options->aad.length =
2396 options->aad_random_size;
2397 /* No size provided, use minimum size. */
2399 options->aad.length =
2400 cap->sym.aead.aad_size.min;
2403 options->aead_xform.aead.aad_length =
2404 options->aad.length;
2406 /* Set digest size if not provided from command line */
2407 if (options->digest_size != -1)
2408 options->aead_xform.aead.digest_length =
2409 options->digest_size;
2410 /* No size provided, use minimum size. */
2412 options->aead_xform.aead.digest_length =
2413 cap->sym.aead.digest_size.min;
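/*
 * For each AEAD parameter the precedence is: value given on the
 * command line, otherwise the requested random size, otherwise the
 * minimum advertised by the device. The cipher and auth branches
 * below apply the same rule.
 */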
2416 /* Set cipher parameters */
2417 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
2418 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
2419 options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
2420 cap = check_device_support_cipher_algo(options, &dev_info,
2422 options->block_size = cap->sym.cipher.block_size;
2424 /* Set IV if not provided from command line */
2425 if (options->cipher_iv_param == 0) {
2426 if (options->cipher_iv_random_size != -1)
2427 options->cipher_iv.length =
2428 options->cipher_iv_random_size;
2429 /* No size provided, use minimum size. */
2431 options->cipher_iv.length =
2432 cap->sym.cipher.iv_size.min;
2435 /* Set key if not provided from command line */
2436 if (options->ckey_param == 0) {
2437 if (options->ckey_random_size != -1)
2438 options->cipher_xform.cipher.key.length =
2439 options->ckey_random_size;
2440 /* No size provided, use minimum size. */
2442 options->cipher_xform.cipher.key.length =
2443 cap->sym.cipher.key_size.min;
2445 generate_random_key(options->cipher_key,
2446 options->cipher_xform.cipher.key.length);
2450 /* Set auth parameters */
2451 if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
2452 options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
2453 options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
2454 cap = check_device_support_auth_algo(options, &dev_info,
2457 /* Set IV if not provided from command line */
2458 if (options->auth_iv_param == 0) {
2459 if (options->auth_iv_random_size != -1)
2460 options->auth_iv.length =
2461 options->auth_iv_random_size;
2462 /* No size provided, use minimum size. */
2464 options->auth_iv.length =
2465 cap->sym.auth.iv_size.min;
2468 /* Set key if not provided from command line */
2469 if (options->akey_param == 0) {
2470 if (options->akey_random_size != -1)
2471 options->auth_xform.auth.key.length =
2472 options->akey_random_size;
2473 /* No size provided, use minimum size. */
2475 options->auth_xform.auth.key.length =
2476 cap->sym.auth.key_size.min;
2478 generate_random_key(options->auth_key,
2479 options->auth_xform.auth.key.length);
2482 /* Set digest size if not provided from command line */
2483 if (options->digest_size != -1)
2484 options->auth_xform.auth.digest_length =
2485 options->digest_size;
2486 /* No size provided, use minimum size. */
2488 options->auth_xform.auth.digest_length =
2489 cap->sym.auth.digest_size.min;
2492 retval = rte_cryptodev_configure(cdev_id, &conf);
2494 printf("Failed to configure cryptodev %u\n", cdev_id);
2498 qp_conf.nb_descriptors = 2048;
2499 qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
2500 qp_conf.mp_session_private =
2501 session_pool_socket[socket_id].priv_mp;
2503 retval = rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
2506 printf("Failed to setup queue pair %u on cryptodev %u\n",
2511 retval = rte_cryptodev_start(cdev_id);
2513 printf("Failed to start device %u: error %d\n",
2519 return enabled_cdev_count;
2523 initialize_ports(struct l2fwd_crypto_options *options)
2525 uint16_t last_portid = 0, portid;
2526 unsigned enabled_portcount = 0;
2527 unsigned nb_ports = rte_eth_dev_count_avail();
2529 if (nb_ports == 0) {
2530 printf("No Ethernet ports - bye\n");
2534 /* Reset l2fwd_dst_ports */
2535 for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
2536 l2fwd_dst_ports[portid] = 0;
2538 RTE_ETH_FOREACH_DEV(portid) {
2540 struct rte_eth_dev_info dev_info;
2541 struct rte_eth_rxconf rxq_conf;
2542 struct rte_eth_txconf txq_conf;
2543 struct rte_eth_conf local_port_conf = port_conf;
2545 /* Skip ports that are not enabled */
2546 if ((options->portmask & (1 << portid)) == 0)
2550 printf("Initializing port %u... ", portid);
2553 retval = rte_eth_dev_info_get(portid, &dev_info);
2555 printf("Error getting device (port %u) info: %s\n",
2556 portid, strerror(-retval));
2560 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
2561 local_port_conf.txmode.offloads |=
2562 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
2563 retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
2565 printf("Cannot configure device: err=%d, port=%u\n",
2570 retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
2573 printf("Cannot adjust number of descriptors: err=%d, port=%u\n",
2578 /* init one RX queue */
2580 rxq_conf = dev_info.default_rxconf;
2581 rxq_conf.offloads = local_port_conf.rxmode.offloads;
2582 retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
2583 rte_eth_dev_socket_id(portid),
2584 &rxq_conf, l2fwd_pktmbuf_pool);
2586 printf("rte_eth_rx_queue_setup:err=%d, port=%u\n",
2591 /* init one TX queue on each port */
2593 txq_conf = dev_info.default_txconf;
2594 txq_conf.offloads = local_port_conf.txmode.offloads;
2595 retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
2596 rte_eth_dev_socket_id(portid),
2599 printf("rte_eth_tx_queue_setup:err=%d, port=%u\n",
2606 retval = rte_eth_dev_start(portid);
2608 printf("rte_eth_dev_start:err=%d, port=%u\n",
2613 retval = rte_eth_promiscuous_enable(portid);
2615 printf("rte_eth_promiscuous_enable:err=%s, port=%u\n",
2616 rte_strerror(-retval), portid);
2620 retval = rte_eth_macaddr_get(portid,
2621 &l2fwd_ports_eth_addr[portid]);
2623 printf("rte_eth_macaddr_get:err=%d, port=%u\n",
2628 printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
2630 l2fwd_ports_eth_addr[portid].addr_bytes[0],
2631 l2fwd_ports_eth_addr[portid].addr_bytes[1],
2632 l2fwd_ports_eth_addr[portid].addr_bytes[2],
2633 l2fwd_ports_eth_addr[portid].addr_bytes[3],
2634 l2fwd_ports_eth_addr[portid].addr_bytes[4],
2635 l2fwd_ports_eth_addr[portid].addr_bytes[5]);
2637 /* initialize port stats */
2638 memset(&port_statistics, 0, sizeof(port_statistics));
2640 /* Setup port forwarding table */
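/*
 * Enabled ports are paired in the order they are initialized: every
 * second enabled port is cross-linked with the previous one so the
 * two forward to each other. A single enabled port forwards to
 * itself; an odd port count is rejected after the loop.
 */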
2641 if (enabled_portcount % 2) {
2642 l2fwd_dst_ports[portid] = last_portid;
2643 l2fwd_dst_ports[last_portid] = portid;
2645 last_portid = portid;
2648 l2fwd_enabled_port_mask |= (1 << portid);
2649 enabled_portcount++;
2652 if (enabled_portcount == 1) {
2653 l2fwd_dst_ports[last_portid] = last_portid;
2654 } else if (enabled_portcount % 2) {
2655 printf("Odd number of ports in portmask - bye\n");
2659 check_all_ports_link_status(l2fwd_enabled_port_mask);
2661 return enabled_portcount;
2665 reserve_key_memory(struct l2fwd_crypto_options *options)
2667 options->cipher_xform.cipher.key.data = options->cipher_key;
2669 options->auth_xform.auth.key.data = options->auth_key;
2671 options->aead_xform.aead.key.data = options->aead_key;
2673 options->cipher_iv.data = rte_malloc("cipher iv", MAX_KEY_SIZE, 0);
2674 if (options->cipher_iv.data == NULL)
2675 rte_exit(EXIT_FAILURE, "Failed to allocate memory for cipher IV");
2677 options->auth_iv.data = rte_malloc("auth iv", MAX_KEY_SIZE, 0);
2678 if (options->auth_iv.data == NULL)
2679 rte_exit(EXIT_FAILURE, "Failed to allocate memory for auth IV");
2681 options->aead_iv.data = rte_malloc("aead_iv", MAX_KEY_SIZE, 0);
2682 if (options->aead_iv.data == NULL)
2683 rte_exit(EXIT_FAILURE, "Failed to allocate memory for AEAD IV");
2685 options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
2686 if (options->aad.data == NULL)
2687 rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
2688 options->aad.phys_addr = rte_malloc_virt2iova(options->aad.data);
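/*
 * The IOVA recorded above is needed because the crypto device reads
 * the AAD through the IO address carried in each symmetric operation,
 * not through the virtual pointer used by the application.
 */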
2692 main(int argc, char **argv)
2694 struct lcore_queue_conf *qconf = NULL;
2695 struct l2fwd_crypto_options options;
2697 uint8_t nb_cryptodevs, cdev_id;
2699 unsigned lcore_id, rx_lcore_id = 0;
2700 int ret, enabled_cdevcount, enabled_portcount;
2701 uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = {0};
2704 ret = rte_eal_init(argc, argv);
2706 rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
2710 /* reserve memory for Cipher/Auth key and IV */
2711 reserve_key_memory(&options);
2713 /* parse application arguments (after the EAL ones) */
2714 ret = l2fwd_crypto_parse_args(&options, argc, argv);
2716 rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
2718 printf("MAC updating %s\n",
2719 options.mac_updating ? "enabled" : "disabled");
2721 /* create the mbuf pool */
2722 l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
2723 RTE_ALIGN(sizeof(struct rte_crypto_op),
2724 RTE_CACHE_LINE_SIZE),
2725 RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
2726 if (l2fwd_pktmbuf_pool == NULL)
2727 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
2729 /* create crypto op pool */
2730 l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
2731 RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, MAXIMUM_IV_LENGTH,
2733 if (l2fwd_crypto_op_pool == NULL)
2734 rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
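/*
 * Each crypto op carries MAXIMUM_IV_LENGTH bytes of private data
 * right after the symmetric op; per-packet IVs are written there at
 * IV_OFFSET. A sketch of how that area is typically addressed (the
 * per-packet code elsewhere in this file does the equivalent):
 *
 *     uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 *                     IV_OFFSET);
 *     rte_memcpy(iv_ptr, options->cipher_iv.data,
 *                     options->cipher_iv.length);
 */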
2736 /* Enable Ethernet ports */
2737 enabled_portcount = initialize_ports(&options);
2738 if (enabled_portcount < 1)
2739 rte_exit(EXIT_FAILURE, "Failed to initialize Ethernet ports\n");
2741 /* Initialize the port/queue configuration of each logical core */
2742 RTE_ETH_FOREACH_DEV(portid) {
2744 /* skip ports that are not enabled */
2745 if ((options.portmask & (1 << portid)) == 0)
2748 if (options.single_lcore && qconf == NULL) {
2749 while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
2751 if (rx_lcore_id >= RTE_MAX_LCORE)
2752 rte_exit(EXIT_FAILURE,
2753 "Not enough cores\n");
2755 } else if (!options.single_lcore) {
2756 /* get the lcore_id for this port */
2757 while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
2758 lcore_queue_conf[rx_lcore_id].nb_rx_ports ==
2759 options.nb_ports_per_lcore) {
2761 if (rx_lcore_id >= RTE_MAX_LCORE)
2762 rte_exit(EXIT_FAILURE,
2763 "Not enough cores\n");
2767 /* Assigned a new logical core in the loop above. */
2768 if (qconf != &lcore_queue_conf[rx_lcore_id])
2769 qconf = &lcore_queue_conf[rx_lcore_id];
2771 qconf->rx_port_list[qconf->nb_rx_ports] = portid;
2772 qconf->nb_rx_ports++;
2774 printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
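/*
 * RX ports are spread over the enabled lcores, at most
 * nb_ports_per_lcore per lcore; in single-lcore mode everything is
 * assigned to the first enabled lcore. The cryptodev-to-lcore loop
 * below applies the same policy.
 */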
2777 /* Enable Crypto devices */
2778 enabled_cdevcount = initialize_cryptodevs(&options, enabled_portcount,
2780 if (enabled_cdevcount < 0)
2781 rte_exit(EXIT_FAILURE, "Failed to initialize crypto devices\n");
2783 if (enabled_cdevcount < enabled_portcount)
2784 rte_exit(EXIT_FAILURE, "Number of capable crypto devices (%d) "
2785 "must be greater than or equal to the number of ports (%d)\n",
2786 enabled_cdevcount, enabled_portcount);
2788 nb_cryptodevs = rte_cryptodev_count();
2790 /* Initialize the port/cryptodev configuration of each logical core */
2791 for (rx_lcore_id = 0, qconf = NULL, cdev_id = 0;
2792 cdev_id < nb_cryptodevs && enabled_cdevcount;
2794 /* Skip crypto devices that were not enabled above */
2795 if (!enabled_cdevs[cdev_id])
2798 if (options.single_lcore && qconf == NULL) {
2799 while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
2801 if (rx_lcore_id >= RTE_MAX_LCORE)
2802 rte_exit(EXIT_FAILURE,
2803 "Not enough cores\n");
2805 } else if (!options.single_lcore) {
2806 /* get the lcore_id for this port */
2807 while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
2808 lcore_queue_conf[rx_lcore_id].nb_crypto_devs ==
2809 options.nb_ports_per_lcore) {
2811 if (rx_lcore_id >= RTE_MAX_LCORE)
2812 rte_exit(EXIT_FAILURE,
2813 "Not enough cores\n");
2817 /* Assigned a new logical core in the loop above. */
2818 if (qconf != &lcore_queue_conf[rx_lcore_id])
2819 qconf = &lcore_queue_conf[rx_lcore_id];
2821 qconf->cryptodev_list[qconf->nb_crypto_devs] = cdev_id;
2822 qconf->nb_crypto_devs++;
2824 enabled_cdevcount--;
2826 printf("Lcore %u: cryptodev %u\n", rx_lcore_id,
2830 /* launch per-lcore init on every lcore */
2831 rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)&options,
2833 RTE_LCORE_FOREACH_WORKER(lcore_id) {
2834 if (rte_eal_wait_lcore(lcore_id) < 0)
2838 /* clean up the EAL */