/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <getopt.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_ip.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_prefetch.h>
#include <rte_random.h>
#include <rte_hexdump.h>

#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1

enum cdev_type {
    CDEV_TYPE_ANY,
    CDEV_TYPE_HW,
    CDEV_TYPE_SW
};

#define MAX_STR_LEN 32
#define MAX_KEY_SIZE 128
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512

static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];

/* mask of enabled ports */
static uint64_t l2fwd_enabled_port_mask;
static uint64_t l2fwd_enabled_crypto_mask;

/* list of enabled ports */
static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];

struct pkt_buffer {
    unsigned len;
    struct rte_mbuf *buffer[MAX_PKT_BURST];
};

struct op_buffer {
    unsigned len;
    struct rte_crypto_op *buffer[MAX_PKT_BURST];
};

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16

enum l2fwd_crypto_xform_chain {
    L2FWD_CRYPTO_CIPHER_HASH,
    L2FWD_CRYPTO_HASH_CIPHER,
    L2FWD_CRYPTO_CIPHER_ONLY,
    L2FWD_CRYPTO_HASH_ONLY
};

struct l2fwd_key {
    uint8_t *data;
    uint32_t length;
    phys_addr_t phys_addr;
};

char supported_auth_algo[RTE_CRYPTO_AUTH_LIST_END][MAX_STR_LEN];
char supported_cipher_algo[RTE_CRYPTO_CIPHER_LIST_END][MAX_STR_LEN];

/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
    unsigned portmask;
    unsigned nb_ports_per_lcore;
    unsigned refresh_period;
    unsigned single_lcore:1;

    enum cdev_type type;
    unsigned sessionless:1;

    enum l2fwd_crypto_xform_chain xform_chain;

    struct rte_crypto_sym_xform cipher_xform;
    unsigned ckey_param;
    int ckey_random_size;

    struct l2fwd_key iv;
    unsigned iv_param;
    int iv_random_size;

    struct rte_crypto_sym_xform auth_xform;
    unsigned akey_param;
    int akey_random_size;

    struct l2fwd_key aad;
    unsigned aad_param;
    int aad_random_size;

    int digest_size;

    char string_type[MAX_STR_LEN];
};

/** l2fwd crypto lcore params */
struct l2fwd_crypto_params {
    uint8_t dev_id;
    uint8_t qp_id;

    unsigned digest_length;
    unsigned block_size;

    struct l2fwd_key iv;
    struct l2fwd_key aad;
    struct rte_cryptodev_sym_session *session;

    uint8_t do_cipher;
    uint8_t do_hash;
    uint8_t hash_verify;

    enum rte_crypto_cipher_algorithm cipher_algo;
    enum rte_crypto_auth_algorithm auth_algo;
};

/** lcore configuration */
struct lcore_queue_conf {
    unsigned nb_rx_ports;
    unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];

    unsigned nb_crypto_devs;
    unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];

    struct op_buffer op_buf[RTE_MAX_ETHPORTS];
    struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;

struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_NONE,
        .max_rx_pkt_len = ETHER_MAX_LEN,
        .header_split   = 0, /**< Header Split disabled */
        .hw_ip_checksum = 0, /**< IP checksum offload disabled */
        .hw_vlan_filter = 0, /**< VLAN filtering disabled */
        .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
        .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

struct rte_mempool *l2fwd_pktmbuf_pool;
struct rte_mempool *l2fwd_crypto_op_pool;

/* Per-port statistics struct */
struct l2fwd_port_statistics {
    uint64_t tx;
    uint64_t rx;

    uint64_t crypto_enqueued;
    uint64_t crypto_dequeued;

    uint64_t dropped;
} __rte_cache_aligned;

struct l2fwd_crypto_statistics {
    uint64_t enqueued;
    uint64_t dequeued;

    uint64_t errors;
} __rte_cache_aligned;

struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS];

/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 GHz */
#define MAX_TIMER_PERIOD 86400UL /* 1 day max */

/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;

/* Print out statistics on packets dropped */
static void
print_stats(void)
{
    uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
    uint64_t total_packets_enqueued, total_packets_dequeued,
            total_packets_errors;
    unsigned portid;
    uint64_t cdevid;

    total_packets_dropped = 0;
    total_packets_tx = 0;
    total_packets_rx = 0;
    total_packets_enqueued = 0;
    total_packets_dequeued = 0;
    total_packets_errors = 0;

    const char clr[] = { 27, '[', '2', 'J', '\0' };
    const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

    /* Clear screen and move to top left */
    printf("%s%s", clr, topLeft);

    printf("\nPort statistics ====================================");

    for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
        /* skip disabled ports */
        if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
            continue;
        printf("\nStatistics for port %u ------------------------------"
               "\nPackets sent: %32"PRIu64
               "\nPackets received: %28"PRIu64
               "\nPackets dropped: %29"PRIu64,
               portid,
               port_statistics[portid].tx,
               port_statistics[portid].rx,
               port_statistics[portid].dropped);

        total_packets_dropped += port_statistics[portid].dropped;
        total_packets_tx += port_statistics[portid].tx;
        total_packets_rx += port_statistics[portid].rx;
    }
    printf("\nCrypto statistics ==================================");

    for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) {
        /* skip disabled crypto devices */
        if ((l2fwd_enabled_crypto_mask & (1lu << cdevid)) == 0)
            continue;
        printf("\nStatistics for cryptodev %"PRIu64
               " -------------------------"
               "\nPackets enqueued: %28"PRIu64
               "\nPackets dequeued: %28"PRIu64
               "\nPackets errors: %30"PRIu64,
               cdevid,
               crypto_statistics[cdevid].enqueued,
               crypto_statistics[cdevid].dequeued,
               crypto_statistics[cdevid].errors);

        total_packets_enqueued += crypto_statistics[cdevid].enqueued;
        total_packets_dequeued += crypto_statistics[cdevid].dequeued;
        total_packets_errors += crypto_statistics[cdevid].errors;
    }
    printf("\nAggregate statistics ==============================="
           "\nTotal packets received: %22"PRIu64
           "\nTotal packets enqueued: %22"PRIu64
           "\nTotal packets dequeued: %22"PRIu64
           "\nTotal packets sent: %26"PRIu64
           "\nTotal packets dropped: %23"PRIu64
           "\nTotal packets crypto errors: %17"PRIu64,
           total_packets_rx,
           total_packets_enqueued,
           total_packets_dequeued,
           total_packets_tx,
           total_packets_dropped,
           total_packets_errors);
    printf("\n====================================================\n");
}
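
/** Fill the lookup tables with printable names for the supported auth/cipher algorithms */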
static void
fill_supported_algorithm_tables(void)
{
    unsigned i;

    for (i = 0; i < RTE_CRYPTO_AUTH_LIST_END; i++)
        strcpy(supported_auth_algo[i], "NOT_SUPPORTED");

    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GCM], "AES_GCM");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5_HMAC], "MD5_HMAC");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_NULL], "NULL");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_XCBC_MAC],
            "AES_XCBC_MAC");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1_HMAC], "SHA1_HMAC");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224_HMAC], "SHA224_HMAC");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256_HMAC], "SHA256_HMAC");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_ZUC_EIA3], "ZUC_EIA3");
    strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9");

    for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++)
        strcpy(supported_cipher_algo[i], "NOT_SUPPORTED");

    strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CBC], "AES_CBC");
    strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CTR], "AES_CTR");
    strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM");
    strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL");
    strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2");
    strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_ZUC_EEA3], "ZUC_EEA3");
    strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8");
}
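
/** Enqueue a burst of crypto ops to the crypto device; free the mbufs and ops that could not be enqueued */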
static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
        struct l2fwd_crypto_params *cparams)
{
    struct rte_crypto_op **op_buffer;
    unsigned ret;

    op_buffer = (struct rte_crypto_op **)
            qconf->op_buf[cparams->dev_id].buffer;

    ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
            cparams->qp_id, op_buffer, (uint16_t) n);

    crypto_statistics[cparams->dev_id].enqueued += ret;
    if (unlikely(ret < n)) {
        crypto_statistics[cparams->dev_id].errors += (n - ret);
        do {
            rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
            rte_crypto_op_free(op_buffer[ret]);
        } while (++ret < n);
    }

    return 0;
}
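
/** Buffer a crypto op for its device queue; send the burst once MAX_PKT_BURST ops are buffered */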
static int
l2fwd_crypto_enqueue(struct rte_crypto_op *op,
        struct l2fwd_crypto_params *cparams)
{
    unsigned lcore_id, len;
    struct lcore_queue_conf *qconf;

    lcore_id = rte_lcore_id();

    qconf = &lcore_queue_conf[lcore_id];
    len = qconf->op_buf[cparams->dev_id].len;
    qconf->op_buf[cparams->dev_id].buffer[len] = op;
    len++;

    /* enough ops to be sent */
    if (len == MAX_PKT_BURST) {
        l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
        len = 0;
    }

    qconf->op_buf[cparams->dev_id].len = len;
    return 0;
}
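
/** Build a symmetric crypto op for an IPv4 packet (cipher and/or hash over the IP payload) and enqueue it */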
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
        struct rte_crypto_op *op,
        struct l2fwd_crypto_params *cparams)
{
    struct ether_hdr *eth_hdr;
    struct ipv4_hdr *ip_hdr;

    unsigned ipdata_offset, pad_len, data_len;
    char *padding;

    eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    if (eth_hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_IPv4))
        return -1;

    ipdata_offset = sizeof(struct ether_hdr);

    ip_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
            ipdata_offset);

    ipdata_offset += (ip_hdr->version_ihl & IPV4_HDR_IHL_MASK)
            * IPV4_IHL_MULTIPLIER;

    /* Zero pad data to be crypto'd so it is block aligned */
    data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
    pad_len = data_len % cparams->block_size ? cparams->block_size -
            (data_len % cparams->block_size) : 0;

    if (pad_len) {
        padding = rte_pktmbuf_append(m, pad_len);
        if (unlikely(!padding))
            return -1;

        data_len += pad_len;
        memset(padding, 0, pad_len);
    }

    /* Set crypto operation data parameters */
    rte_crypto_op_attach_sym_session(op, cparams->session);

    if (cparams->do_hash) {
        if (!cparams->hash_verify) {
            /* Append space for digest to end of packet */
            op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
                    cparams->digest_length);
        } else {
            op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
                    cparams->digest_length);
        }

        op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
                rte_pktmbuf_pkt_len(m) - cparams->digest_length);
        op->sym->auth.digest.length = cparams->digest_length;

        /* For wireless algorithms, offset/length must be in bits */
        if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
                cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
                cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
            op->sym->auth.data.offset = ipdata_offset << 3;
            op->sym->auth.data.length = data_len << 3;
        } else {
            op->sym->auth.data.offset = ipdata_offset;
            op->sym->auth.data.length = data_len;
        }

        if (cparams->aad.length) {
            op->sym->auth.aad.data = cparams->aad.data;
            op->sym->auth.aad.phys_addr = cparams->aad.phys_addr;
            op->sym->auth.aad.length = cparams->aad.length;
        }
    }

    if (cparams->do_cipher) {
        op->sym->cipher.iv.data = cparams->iv.data;
        op->sym->cipher.iv.phys_addr = cparams->iv.phys_addr;
        op->sym->cipher.iv.length = cparams->iv.length;

        /* For wireless algorithms, offset/length must be in bits */
        if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
                cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
            op->sym->cipher.data.offset = ipdata_offset << 3;
            if (cparams->do_hash && cparams->hash_verify)
                /* Do not cipher the hash tag */
                op->sym->cipher.data.length = (data_len -
                        cparams->digest_length) << 3;
            else
                op->sym->cipher.data.length = data_len << 3;
        } else {
            op->sym->cipher.data.offset = ipdata_offset;
            if (cparams->do_hash && cparams->hash_verify)
                /* Do not cipher the hash tag */
                op->sym->cipher.data.length = data_len -
                        cparams->digest_length;
            else
                op->sym->cipher.data.length = data_len;
        }
    }

    op->sym->m_src = m;

    return l2fwd_crypto_enqueue(op, cparams);
}

/* Send the burst of packets on an output interface */
static int
l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
        uint8_t port)
{
    struct rte_mbuf **pkt_buffer;
    unsigned ret;

    pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;

    ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
    port_statistics[port].tx += ret;
    if (unlikely(ret < n)) {
        port_statistics[port].dropped += (n - ret);
        do {
            rte_pktmbuf_free(pkt_buffer[ret]);
        } while (++ret < n);
    }

    return 0;
}

/* Enqueue packets for TX and prepare them to be sent */
static int
l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
{
    unsigned lcore_id, len;
    struct lcore_queue_conf *qconf;

    lcore_id = rte_lcore_id();

    qconf = &lcore_queue_conf[lcore_id];
    len = qconf->pkt_buf[port].len;
    qconf->pkt_buf[port].buffer[len] = m;
    len++;

    /* enough pkts to be sent */
    if (unlikely(len == MAX_PKT_BURST)) {
        l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
        len = 0;
    }

    qconf->pkt_buf[port].len = len;
    return 0;
}
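
/** Update destination/source MAC addresses and transmit the packet on the paired output port */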
static void
l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
{
    struct ether_hdr *eth;
    void *tmp;
    unsigned dst_port;

    dst_port = l2fwd_dst_ports[portid];
    eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

    /* 02:00:00:00:00:xx */
    tmp = &eth->d_addr.addr_bytes[0];
    *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);

    /* src addr */
    ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);

    l2fwd_send_packet(m, (uint8_t) dst_port);
}

/** Generate random key */
static void
generate_random_key(uint8_t *key, unsigned length)
{
    int fd;
    int ret;

    fd = open("/dev/urandom", O_RDONLY);
    if (fd < 0)
        rte_exit(EXIT_FAILURE, "Failed to generate random key\n");

    ret = read(fd, key, length);
    close(fd);

    if (ret != (signed)length)
        rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
}

static struct rte_cryptodev_sym_session *
initialize_crypto_session(struct l2fwd_crypto_options *options,
        uint8_t cdev_id)
{
    struct rte_crypto_sym_xform *first_xform;

    if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
        first_xform = &options->cipher_xform;
        first_xform->next = &options->auth_xform;
    } else if (options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER) {
        first_xform = &options->auth_xform;
        first_xform->next = &options->cipher_xform;
    } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
        first_xform = &options->cipher_xform;
    } else {
        first_xform = &options->auth_xform;
    }

    /* Create the session on the device from the chained transforms */
    return rte_cryptodev_sym_session_create(cdev_id, first_xform);
}

static void
l2fwd_crypto_options_print(struct l2fwd_crypto_options *options);

/* main processing loop */
static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
    struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
    struct rte_crypto_op *ops_burst[MAX_PKT_BURST];

    unsigned lcore_id = rte_lcore_id();
    uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
    unsigned i, j, portid, nb_rx, len;
    struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
            US_PER_S * BURST_TX_DRAIN_US;
    struct l2fwd_crypto_params *cparams;
    struct l2fwd_crypto_params port_cparams[qconf->nb_crypto_devs];

    if (qconf->nb_rx_ports == 0) {
        RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
        return;
    }

    RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);

    for (i = 0; i < qconf->nb_rx_ports; i++) {
        portid = qconf->rx_port_list[i];
        RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
                portid);
    }

    for (i = 0; i < qconf->nb_crypto_devs; i++) {
        port_cparams[i].do_cipher = 0;
        port_cparams[i].do_hash = 0;

        switch (options->xform_chain) {
        case L2FWD_CRYPTO_CIPHER_HASH:
        case L2FWD_CRYPTO_HASH_CIPHER:
            port_cparams[i].do_cipher = 1;
            port_cparams[i].do_hash = 1;
            break;
        case L2FWD_CRYPTO_HASH_ONLY:
            port_cparams[i].do_hash = 1;
            break;
        case L2FWD_CRYPTO_CIPHER_ONLY:
            port_cparams[i].do_cipher = 1;
            break;
        }

        port_cparams[i].dev_id = qconf->cryptodev_list[i];
        port_cparams[i].qp_id = 0;

        port_cparams[i].block_size = options->block_size;

        if (port_cparams[i].do_hash) {
            port_cparams[i].digest_length =
                    options->auth_xform.auth.digest_length;
            if (options->auth_xform.auth.add_auth_data_length) {
                port_cparams[i].aad.data = options->aad.data;
                port_cparams[i].aad.length =
                    options->auth_xform.auth.add_auth_data_length;
                port_cparams[i].aad.phys_addr = options->aad.phys_addr;
                if (!options->aad_param)
                    generate_random_key(port_cparams[i].aad.data,
                            port_cparams[i].aad.length);
            }

            if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
                port_cparams[i].hash_verify = 1;
            else
                port_cparams[i].hash_verify = 0;

            port_cparams[i].auth_algo = options->auth_xform.auth.algo;
        }

        if (port_cparams[i].do_cipher) {
            port_cparams[i].iv.data = options->iv.data;
            port_cparams[i].iv.length = options->iv.length;
            port_cparams[i].iv.phys_addr = options->iv.phys_addr;
            if (!options->iv_param)
                generate_random_key(port_cparams[i].iv.data,
                        port_cparams[i].iv.length);

            port_cparams[i].cipher_algo = options->cipher_xform.cipher.algo;
        }

        port_cparams[i].session = initialize_crypto_session(options,
                port_cparams[i].dev_id);

        if (port_cparams[i].session == NULL)
            return;
        RTE_LOG(INFO, L2FWD, " -- lcoreid=%u cryptoid=%u\n", lcore_id,
                port_cparams[i].dev_id);
    }

    l2fwd_crypto_options_print(options);

    /*
     * Initialize previous tsc timestamp before the loop,
     * to avoid showing the port statistics immediately,
     * so user can see the crypto information.
     */
    prev_tsc = rte_rdtsc();
    while (1) {

        cur_tsc = rte_rdtsc();

        /*
         * Crypto device/TX burst queue drain
         */
        diff_tsc = cur_tsc - prev_tsc;
        if (unlikely(diff_tsc > drain_tsc)) {
            /* Enqueue all crypto ops remaining in buffers */
            for (i = 0; i < qconf->nb_crypto_devs; i++) {
                cparams = &port_cparams[i];
                len = qconf->op_buf[cparams->dev_id].len;
                l2fwd_crypto_send_burst(qconf, len, cparams);
                qconf->op_buf[cparams->dev_id].len = 0;
            }
            /* Transmit all packets remaining in buffers */
            for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
                if (qconf->pkt_buf[portid].len == 0)
                    continue;
                l2fwd_send_burst(&lcore_queue_conf[lcore_id],
                        qconf->pkt_buf[portid].len,
                        (uint8_t) portid);
                qconf->pkt_buf[portid].len = 0;
            }

            /* if timer is enabled */
            if (timer_period > 0) {

                /* advance the timer */
                timer_tsc += diff_tsc;

                /* if timer has reached its timeout */
                if (unlikely(timer_tsc >=
                        (uint64_t)timer_period)) {

                    /* do this only on master core */
                    if (lcore_id == rte_get_master_lcore()
                            && options->refresh_period) {
                        print_stats();
                        timer_tsc = 0;
                    }
                }
            }

            prev_tsc = cur_tsc;
        }

        /*
         * Read packet from RX queues
         */
        for (i = 0; i < qconf->nb_rx_ports; i++) {
            portid = qconf->rx_port_list[i];

            cparams = &port_cparams[i];

            nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
                    pkts_burst, MAX_PKT_BURST);

            port_statistics[portid].rx += nb_rx;

            if (nb_rx) {
                /*
                 * If we can't allocate a crypto_ops, then drop
                 * the rest of the burst and dequeue and
                 * process the packets to free offload structs
                 */
                if (rte_crypto_op_bulk_alloc(
                        l2fwd_crypto_op_pool,
                        RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                        ops_burst, nb_rx) != nb_rx) {
                    for (j = 0; j < nb_rx; j++)
                        rte_pktmbuf_free(pkts_burst[j]);

                    nb_rx = 0;
                }

                /* Enqueue packets to the Crypto device */
                for (j = 0; j < nb_rx; j++) {
                    m = pkts_burst[j];

                    l2fwd_simple_crypto_enqueue(m,
                            ops_burst[j], cparams);
                }
            }

            /* Dequeue packets from Crypto device */
            do {
                nb_rx = rte_cryptodev_dequeue_burst(
                        cparams->dev_id, cparams->qp_id,
                        ops_burst, MAX_PKT_BURST);

                crypto_statistics[cparams->dev_id].dequeued +=
                        nb_rx;

                /* Forward crypto'd packets */
                for (j = 0; j < nb_rx; j++) {
                    m = ops_burst[j]->sym->m_src;

                    rte_crypto_op_free(ops_burst[j]);
                    l2fwd_simple_forward(m, portid);
                }
            } while (nb_rx == MAX_PKT_BURST);
        }
    }
}
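
/** Per-lcore launch wrapper for rte_eal_mp_remote_launch() */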
static int
l2fwd_launch_one_lcore(void *arg)
{
    l2fwd_main_loop((struct l2fwd_crypto_options *)arg);
    return 0;
}

/* Display command line arguments usage */
static void
l2fwd_crypto_usage(const char *prgname)
{
    printf("%s [EAL options] --\n"
        "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
        "  -q NQ: number of queues (=ports) per lcore (default is 1)\n"
        "  -s manage all ports from a single lcore\n"
        "  -T PERIOD: statistics will be refreshed each PERIOD seconds"
        " (0 to disable, 10 default, 86400 maximum)\n"

        "  --cdev_type HW / SW / ANY\n"
        "  --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY / HASH_ONLY\n"

        "  --cipher_algo ALGO\n"
        "  --cipher_op ENCRYPT / DECRYPT\n"
        "  --cipher_key KEY (bytes separated with \":\")\n"
        "  --cipher_key_random_size SIZE: size of cipher key when generated randomly\n"
        "  --iv IV (bytes separated with \":\")\n"
        "  --iv_random_size SIZE: size of IV when generated randomly\n"

        "  --auth_algo ALGO\n"
        "  --auth_op GENERATE / VERIFY\n"
        "  --auth_key KEY (bytes separated with \":\")\n"
        "  --auth_key_random_size SIZE: size of auth key when generated randomly\n"
        "  --aad AAD (bytes separated with \":\")\n"
        "  --aad_random_size SIZE: size of AAD when generated randomly\n"
        "  --digest_size SIZE: size of digest to be generated/verified\n"

        "  --sessionless\n",
           prgname);
}

/** Parse crypto device type command line argument */
static int
parse_cryptodev_type(enum cdev_type *type, char *optarg)
{
    if (strcmp("HW", optarg) == 0) {
        *type = CDEV_TYPE_HW;
        return 0;
    } else if (strcmp("SW", optarg) == 0) {
        *type = CDEV_TYPE_SW;
        return 0;
    } else if (strcmp("ANY", optarg) == 0) {
        *type = CDEV_TYPE_ANY;
        return 0;
    }

    return -1;
}

/** Parse crypto chain xform command line argument */
static int
parse_crypto_opt_chain(struct l2fwd_crypto_options *options, char *optarg)
{
    if (strcmp("CIPHER_HASH", optarg) == 0) {
        options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
        return 0;
    } else if (strcmp("HASH_CIPHER", optarg) == 0) {
        options->xform_chain = L2FWD_CRYPTO_HASH_CIPHER;
        return 0;
    } else if (strcmp("CIPHER_ONLY", optarg) == 0) {
        options->xform_chain = L2FWD_CRYPTO_CIPHER_ONLY;
        return 0;
    } else if (strcmp("HASH_ONLY", optarg) == 0) {
        options->xform_chain = L2FWD_CRYPTO_HASH_ONLY;
        return 0;
    }

    return -1;
}

/** Parse crypto cipher algo option command line argument */
static int
parse_cipher_algo(enum rte_crypto_cipher_algorithm *algo, char *optarg)
{
    unsigned i;

    for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++) {
        if (!strcmp(supported_cipher_algo[i], optarg)) {
            *algo = (enum rte_crypto_cipher_algorithm)i;
            return 0;
        }
    }

    printf("Cipher algorithm not supported!\n");
    return -1;
}

/** Parse crypto cipher operation command line argument */
static int
parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
{
    if (strcmp("ENCRYPT", optarg) == 0) {
        *op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
        return 0;
    } else if (strcmp("DECRYPT", optarg) == 0) {
        *op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
        return 0;
    }

    printf("Cipher operation not supported!\n");
    return -1;
}

/** Parse crypto key command line argument */
static int
parse_key(uint8_t *data, char *input_arg)
{
    unsigned byte_count;
    char *token;

    for (byte_count = 0, token = strtok(input_arg, ":");
            (byte_count < MAX_KEY_SIZE) && (token != NULL);
            token = strtok(NULL, ":")) {

        int number = (int)strtol(token, NULL, 16);

        if (errno == EINVAL || errno == ERANGE || number > 0xFF)
            return -1;

        data[byte_count++] = (uint8_t)number;
    }

    return byte_count;
}

/** Parse size param */
static int
parse_size(int *size, const char *q_arg)
{
    char *end = NULL;
    unsigned long n;

    /* parse decimal string */
    n = strtoul(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
        n = 0;

    if (n == 0) {
        printf("invalid size\n");
        return -1;
    }

    *size = n;
    return 0;
}

/** Parse crypto auth algo option command line argument */
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
    unsigned i;

    for (i = 0; i < RTE_CRYPTO_AUTH_LIST_END; i++) {
        if (!strcmp(supported_auth_algo[i], optarg)) {
            *algo = (enum rte_crypto_auth_algorithm)i;
            return 0;
        }
    }

    printf("Specified authentication algorithm not supported!\n");
    return -1;
}
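
/** Parse crypto auth operation command line argument */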
static int
parse_auth_op(enum rte_crypto_auth_operation *op, char *optarg)
{
    if (strcmp("VERIFY", optarg) == 0) {
        *op = RTE_CRYPTO_AUTH_OP_VERIFY;
        return 0;
    } else if (strcmp("GENERATE", optarg) == 0) {
        *op = RTE_CRYPTO_AUTH_OP_GENERATE;
        return 0;
    }

    printf("Specified authentication operation not supported!\n");
    return -1;
}

/** Parse long options */
static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
        struct option *lgopts, int option_index)
{
    int retval;

    if (strcmp(lgopts[option_index].name, "cdev_type") == 0) {
        retval = parse_cryptodev_type(&options->type, optarg);
        if (retval == 0)
            snprintf(options->string_type, MAX_STR_LEN,
                    "%s", optarg);
        return retval;
    }

    else if (strcmp(lgopts[option_index].name, "chain") == 0)
        return parse_crypto_opt_chain(options, optarg);

    /* Cipher options */
    else if (strcmp(lgopts[option_index].name, "cipher_algo") == 0)
        return parse_cipher_algo(&options->cipher_xform.cipher.algo,
                optarg);

    else if (strcmp(lgopts[option_index].name, "cipher_op") == 0)
        return parse_cipher_op(&options->cipher_xform.cipher.op,
                optarg);

    else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
        options->ckey_param = 1;
        options->cipher_xform.cipher.key.length =
            parse_key(options->cipher_xform.cipher.key.data, optarg);
        if (options->cipher_xform.cipher.key.length > 0)
            return 0;
        else
            return -1;
    }

    else if (strcmp(lgopts[option_index].name, "cipher_key_random_size") == 0)
        return parse_size(&options->ckey_random_size, optarg);

    else if (strcmp(lgopts[option_index].name, "iv") == 0) {
        options->iv_param = 1;
        options->iv.length =
            parse_key(options->iv.data, optarg);
        if (options->iv.length > 0)
            return 0;
        else
            return -1;
    }

    else if (strcmp(lgopts[option_index].name, "iv_random_size") == 0)
        return parse_size(&options->iv_random_size, optarg);

    /* Authentication options */
    else if (strcmp(lgopts[option_index].name, "auth_algo") == 0) {
        return parse_auth_algo(&options->auth_xform.auth.algo,
                optarg);
    }

    else if (strcmp(lgopts[option_index].name, "auth_op") == 0)
        return parse_auth_op(&options->auth_xform.auth.op,
                optarg);

    else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
        options->akey_param = 1;
        options->auth_xform.auth.key.length =
            parse_key(options->auth_xform.auth.key.data, optarg);
        if (options->auth_xform.auth.key.length > 0)
            return 0;
        else
            return -1;
    }

    else if (strcmp(lgopts[option_index].name, "auth_key_random_size") == 0) {
        return parse_size(&options->akey_random_size, optarg);
    }

    else if (strcmp(lgopts[option_index].name, "aad") == 0) {
        options->aad_param = 1;
        options->aad.length =
            parse_key(options->aad.data, optarg);
        if (options->aad.length > 0)
            return 0;
        else
            return -1;
    }

    else if (strcmp(lgopts[option_index].name, "aad_random_size") == 0) {
        return parse_size(&options->aad_random_size, optarg);
    }

    else if (strcmp(lgopts[option_index].name, "digest_size") == 0) {
        return parse_size(&options->digest_size, optarg);
    }

    else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
        options->sessionless = 1;
        return 0;
    }

    return -1;
}

/** Parse port mask */
static int
l2fwd_crypto_parse_portmask(struct l2fwd_crypto_options *options,
        const char *q_arg)
{
    char *end = NULL;
    unsigned long pm;

    /* parse hexadecimal string */
    pm = strtoul(q_arg, &end, 16);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
        pm = 0;

    options->portmask = pm;
    if (options->portmask == 0) {
        printf("invalid portmask specified\n");
        return -1;
    }

    return 0;
}

/** Parse number of queues */
static int
l2fwd_crypto_parse_nqueue(struct l2fwd_crypto_options *options,
        const char *q_arg)
{
    char *end = NULL;
    unsigned long n;

    /* parse decimal string */
    n = strtoul(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
        n = 0;
    else if (n >= MAX_RX_QUEUE_PER_LCORE)
        n = 0;

    options->nb_ports_per_lcore = n;
    if (options->nb_ports_per_lcore == 0) {
        printf("invalid number of ports selected\n");
        return -1;
    }

    return 0;
}

/** Parse timer period */
static int
l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
        const char *q_arg)
{
    char *end = NULL;
    unsigned long n;

    /* parse number string */
    n = (unsigned)strtol(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
        n = 0;

    if (n >= MAX_TIMER_PERIOD) {
        printf("Warning: refresh period %lu is greater than "
                "max value %lu, using max value\n",
                n, MAX_TIMER_PERIOD);
        n = MAX_TIMER_PERIOD;
    }

    options->refresh_period = n * 1000 * TIMER_MILLISECOND;

    return 0;
}

/** Generate default options for application */
static void
l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
{
    options->portmask = 0xffffffff;
    options->nb_ports_per_lcore = 1;
    options->refresh_period = 10000;
    options->single_lcore = 0;
    options->sessionless = 0;

    options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;

    /* Cipher Data */
    options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
    options->cipher_xform.next = NULL;
    options->ckey_param = 0;
    options->ckey_random_size = -1;
    options->cipher_xform.cipher.key.length = 0;
    options->iv_param = 0;
    options->iv_random_size = -1;
    options->iv.length = 0;

    options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
    options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;

    /* Authentication Data */
    options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
    options->auth_xform.next = NULL;
    options->akey_param = 0;
    options->akey_random_size = -1;
    options->auth_xform.auth.key.length = 0;
    options->aad_param = 0;
    options->aad_random_size = -1;
    options->aad.length = 0;
    options->digest_size = -1;

    options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
    options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;

    options->type = CDEV_TYPE_ANY;
}

static void
display_cipher_info(struct l2fwd_crypto_options *options)
{
    printf("\n---- Cipher information ---\n");
    printf("Algorithm: %s\n",
        supported_cipher_algo[options->cipher_xform.cipher.algo]);
    rte_hexdump(stdout, "Cipher key:",
            options->cipher_xform.cipher.key.data,
            options->cipher_xform.cipher.key.length);
    rte_hexdump(stdout, "IV:", options->iv.data, options->iv.length);
}

static void
display_auth_info(struct l2fwd_crypto_options *options)
{
    printf("\n---- Authentication information ---\n");
    printf("Algorithm: %s\n",
        supported_auth_algo[options->auth_xform.auth.algo]);
    rte_hexdump(stdout, "Auth key:",
            options->auth_xform.auth.key.data,
            options->auth_xform.auth.key.length);
    rte_hexdump(stdout, "AAD:", options->aad.data, options->aad.length);
}

static void
l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
{
    char string_cipher_op[MAX_STR_LEN];
    char string_auth_op[MAX_STR_LEN];

    if (options->cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
        strcpy(string_cipher_op, "Encrypt");
    else
        strcpy(string_cipher_op, "Decrypt");

    if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
        strcpy(string_auth_op, "Auth generate");
    else
        strcpy(string_auth_op, "Auth verify");

    printf("Options:-\n");
    printf("portmask: %x\n", options->portmask);
    printf("ports per lcore: %u\n", options->nb_ports_per_lcore);
    printf("refresh period : %u\n", options->refresh_period);
    printf("single lcore mode: %s\n",
            options->single_lcore ? "enabled" : "disabled");
    printf("stats_printing: %s\n",
            options->refresh_period == 0 ? "disabled" : "enabled");

    printf("sessionless crypto: %s\n",
            options->sessionless ? "enabled" : "disabled");

    if (options->ckey_param && (options->ckey_random_size != -1))
        printf("Cipher key already parsed, ignoring size of random key\n");

    if (options->akey_param && (options->akey_random_size != -1))
        printf("Auth key already parsed, ignoring size of random key\n");

    if (options->iv_param && (options->iv_random_size != -1))
        printf("IV already parsed, ignoring size of random IV\n");

    if (options->aad_param && (options->aad_random_size != -1))
        printf("AAD already parsed, ignoring size of random AAD\n");

    printf("\nCrypto chain: ");
    switch (options->xform_chain) {
    case L2FWD_CRYPTO_CIPHER_HASH:
        printf("Input --> %s --> %s --> Output\n",
                string_cipher_op, string_auth_op);
        display_cipher_info(options);
        display_auth_info(options);
        break;
    case L2FWD_CRYPTO_HASH_CIPHER:
        printf("Input --> %s --> %s --> Output\n",
                string_auth_op, string_cipher_op);
        display_cipher_info(options);
        display_auth_info(options);
        break;
    case L2FWD_CRYPTO_HASH_ONLY:
        printf("Input --> %s --> Output\n", string_auth_op);
        display_auth_info(options);
        break;
    case L2FWD_CRYPTO_CIPHER_ONLY:
        printf("Input --> %s --> Output\n", string_cipher_op);
        display_cipher_info(options);
        break;
    }
}

/* Parse the argument given in the command line of the application */
static int
l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
        int argc, char **argv)
{
    int opt, retval, option_index;
    char **argvopt = argv, *prgname = argv[0];

    static struct option lgopts[] = {
            { "cdev_type", required_argument, 0, 0 },
            { "chain", required_argument, 0, 0 },

            { "cipher_algo", required_argument, 0, 0 },
            { "cipher_op", required_argument, 0, 0 },
            { "cipher_key", required_argument, 0, 0 },
            { "cipher_key_random_size", required_argument, 0, 0 },

            { "auth_algo", required_argument, 0, 0 },
            { "auth_op", required_argument, 0, 0 },
            { "auth_key", required_argument, 0, 0 },
            { "auth_key_random_size", required_argument, 0, 0 },

            { "iv", required_argument, 0, 0 },
            { "iv_random_size", required_argument, 0, 0 },
            { "aad", required_argument, 0, 0 },
            { "aad_random_size", required_argument, 0, 0 },
            { "digest_size", required_argument, 0, 0 },

            { "sessionless", no_argument, 0, 0 },

            { NULL, 0, 0, 0 }
    };

    l2fwd_crypto_default_options(options);

    while ((opt = getopt_long(argc, argvopt, "p:q:sT:", lgopts,
            &option_index)) != EOF) {
        switch (opt) {
        /* long options */
        case 0:
            retval = l2fwd_crypto_parse_args_long_options(options,
                    lgopts, option_index);
            if (retval < 0) {
                l2fwd_crypto_usage(prgname);
                return -1;
            }
            break;

        /* portmask */
        case 'p':
            retval = l2fwd_crypto_parse_portmask(options, optarg);
            if (retval < 0) {
                l2fwd_crypto_usage(prgname);
                return -1;
            }
            break;

        /* nqueue */
        case 'q':
            retval = l2fwd_crypto_parse_nqueue(options, optarg);
            if (retval < 0) {
                l2fwd_crypto_usage(prgname);
                return -1;
            }
            break;

        /* single lcore */
        case 's':
            options->single_lcore = 1;
            break;

        /* timer period */
        case 'T':
            retval = l2fwd_crypto_parse_timer_period(options,
                    optarg);
            if (retval < 0) {
                l2fwd_crypto_usage(prgname);
                return -1;
            }
            break;

        /* unknown option */
        default:
            l2fwd_crypto_usage(prgname);
            return -1;
        }
    }

    if (optind >= 0)
        argv[optind-1] = prgname;

    retval = optind-1;
    optind = 0; /* reset getopt lib */

    return retval;
}

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
    uint8_t portid, count, all_ports_up, print_flag = 0;
    struct rte_eth_link link;

    printf("\nChecking link status");
    fflush(stdout);
    for (count = 0; count <= MAX_CHECK_TIME; count++) {
        all_ports_up = 1;
        for (portid = 0; portid < port_num; portid++) {
            if ((port_mask & (1 << portid)) == 0)
                continue;
            memset(&link, 0, sizeof(link));
            rte_eth_link_get_nowait(portid, &link);
            /* print link status if flag set */
            if (print_flag == 1) {
                if (link.link_status)
                    printf("Port %d Link Up - speed %u "
                        "Mbps - %s\n", (uint8_t)portid,
                        (unsigned)link.link_speed,
                        (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
                else
                    printf("Port %d Link Down\n",
                        (uint8_t)portid);
                continue;
            }
            /* clear all_ports_up flag if any link down */
            if (link.link_status == ETH_LINK_DOWN) {
                all_ports_up = 0;
                break;
            }
        }
        /* after finally printing all link status, get out */
        if (print_flag == 1)
            break;

        if (all_ports_up == 0) {
            printf(".");
            fflush(stdout);
            rte_delay_ms(CHECK_INTERVAL);
        }

        /* set the print_flag if all ports up or timeout */
        if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
            print_flag = 1;
            printf("done\n");
        }
    }
}

/* Check if device has to be HW/SW or any */
static int
check_type(struct l2fwd_crypto_options *options, struct rte_cryptodev_info *dev_info)
{
    if (options->type == CDEV_TYPE_HW &&
            (dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
        return 0;
    if (options->type == CDEV_TYPE_SW &&
            !(dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
        return 0;
    if (options->type == CDEV_TYPE_ANY)
        return 0;

    return -1;
}
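
/** Check if a length is supported: either it equals min (increment 0) or it lies on the min..max range at the given increment */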
static int
check_supported_size(uint16_t length, uint16_t min, uint16_t max,
        uint16_t increment)
{
    uint16_t supp_size;

    /* Single value */
    if (increment == 0) {
        if (length == min)
            return 0;
        else
            return -1;
    }

    /* Range of values */
    for (supp_size = min; supp_size <= max; supp_size += increment) {
        if (length == supp_size)
            return 0;
    }

    return -1;
}
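
/**
 * Check which crypto devices support the configured cipher/auth algorithms
 * and key/IV/AAD/digest sizes, then configure and start up to nb_ports of them.
 */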
static int
initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
        uint8_t *enabled_cdevs)
{
    unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
    const struct rte_cryptodev_capabilities *cap;
    enum rte_crypto_auth_algorithm cap_auth_algo;
    enum rte_crypto_auth_algorithm opt_auth_algo;
    enum rte_crypto_cipher_algorithm cap_cipher_algo;
    enum rte_crypto_cipher_algorithm opt_cipher_algo;
    int retval;

    cdev_count = rte_cryptodev_count();
    if (cdev_count == 0) {
        printf("No crypto devices available\n");
        return -1;
    }

    for (cdev_id = 0; cdev_id < cdev_count && enabled_cdev_count < nb_ports;
            cdev_id++) {
        struct rte_cryptodev_qp_conf qp_conf;
        struct rte_cryptodev_info dev_info;

        struct rte_cryptodev_config conf = {
            .nb_queue_pairs = 1,
            .socket_id = SOCKET_ID_ANY,
        };

        rte_cryptodev_info_get(cdev_id, &dev_info);

        /* Set cipher parameters */
        if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
                options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
                options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
            /* Check if device supports cipher algo */
            i = 0;
            opt_cipher_algo = options->cipher_xform.cipher.algo;
            cap = &dev_info.capabilities[i];
            while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
                cap_cipher_algo = cap->sym.cipher.algo;
                if (cap->sym.xform_type ==
                        RTE_CRYPTO_SYM_XFORM_CIPHER) {
                    if (cap_cipher_algo == opt_cipher_algo) {
                        if (check_type(options, &dev_info) == 0)
                            break;
                    }
                }
                cap = &dev_info.capabilities[++i];
            }

            if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
                printf("Algorithm %s not supported by cryptodev %u"
                    " or device not of preferred type (%s)\n",
                    supported_cipher_algo[opt_cipher_algo],
                    cdev_id,
                    options->string_type);
                continue;
            }

            options->block_size = cap->sym.cipher.block_size;
            /*
             * Check if length of provided IV is supported
             * by the algorithm chosen.
             */
            if (options->iv_param) {
                if (check_supported_size(options->iv.length,
                        cap->sym.cipher.iv_size.min,
                        cap->sym.cipher.iv_size.max,
                        cap->sym.cipher.iv_size.increment)
                        != 0) {
                    printf("Unsupported IV length\n");
                    return -1;
                }
            /*
             * Check if length of IV to be randomly generated
             * is supported by the algorithm chosen.
             */
            } else if (options->iv_random_size != -1) {
                if (check_supported_size(options->iv_random_size,
                        cap->sym.cipher.iv_size.min,
                        cap->sym.cipher.iv_size.max,
                        cap->sym.cipher.iv_size.increment)
                        != 0) {
                    printf("Unsupported IV length\n");
                    return -1;
                }
                options->iv.length = options->iv_random_size;
            /* No size provided, use minimum size. */
            } else
                options->iv.length = cap->sym.cipher.iv_size.min;

            /*
             * Check if length of provided cipher key is supported
             * by the algorithm chosen.
             */
            if (options->ckey_param) {
                if (check_supported_size(
                        options->cipher_xform.cipher.key.length,
                        cap->sym.cipher.key_size.min,
                        cap->sym.cipher.key_size.max,
                        cap->sym.cipher.key_size.increment)
                        != 0) {
                    printf("Unsupported cipher key length\n");
                    return -1;
                }
            /*
             * Check if length of the cipher key to be randomly generated
             * is supported by the algorithm chosen.
             */
            } else if (options->ckey_random_size != -1) {
                if (check_supported_size(options->ckey_random_size,
                        cap->sym.cipher.key_size.min,
                        cap->sym.cipher.key_size.max,
                        cap->sym.cipher.key_size.increment)
                        != 0) {
                    printf("Unsupported cipher key length\n");
                    return -1;
                }
                options->cipher_xform.cipher.key.length =
                        options->ckey_random_size;
            /* No size provided, use minimum size. */
            } else
                options->cipher_xform.cipher.key.length =
                        cap->sym.cipher.key_size.min;

            if (!options->ckey_param)
                generate_random_key(
                    options->cipher_xform.cipher.key.data,
                    options->cipher_xform.cipher.key.length);
        }

        /* Set auth parameters */
        if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
                options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
                options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
            /* Check if device supports auth algo */
            i = 0;
            opt_auth_algo = options->auth_xform.auth.algo;
            cap = &dev_info.capabilities[i];
            while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
                cap_auth_algo = cap->sym.auth.algo;
                if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
                        (cap_auth_algo == opt_auth_algo) &&
                        (check_type(options, &dev_info) == 0)) {
                    break;
                }
                cap = &dev_info.capabilities[++i];
            }

            if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
                printf("Algorithm %s not supported by cryptodev %u"
                    " or device not of preferred type (%s)\n",
                    supported_auth_algo[opt_auth_algo],
                    cdev_id,
                    options->string_type);
                continue;
            }

            options->block_size = cap->sym.auth.block_size;
            /*
             * Check if length of provided AAD is supported
             * by the algorithm chosen.
             */
            if (options->aad_param) {
                if (check_supported_size(options->aad.length,
                        cap->sym.auth.aad_size.min,
                        cap->sym.auth.aad_size.max,
                        cap->sym.auth.aad_size.increment)
                        != 0) {
                    printf("Unsupported AAD length\n");
                    return -1;
                }
            /*
             * Check if length of AAD to be randomly generated
             * is supported by the algorithm chosen.
             */
            } else if (options->aad_random_size != -1) {
                if (check_supported_size(options->aad_random_size,
                        cap->sym.auth.aad_size.min,
                        cap->sym.auth.aad_size.max,
                        cap->sym.auth.aad_size.increment)
                        != 0) {
                    printf("Unsupported AAD length\n");
                    return -1;
                }
                options->aad.length = options->aad_random_size;
            /* No size provided, use minimum size. */
            } else
                options->aad.length = cap->sym.auth.aad_size.min;

            options->auth_xform.auth.add_auth_data_length =
                    options->aad.length;

            /*
             * Check if length of provided auth key is supported
             * by the algorithm chosen.
             */
            if (options->akey_param) {
                if (check_supported_size(
                        options->auth_xform.auth.key.length,
                        cap->sym.auth.key_size.min,
                        cap->sym.auth.key_size.max,
                        cap->sym.auth.key_size.increment)
                        != 0) {
                    printf("Unsupported auth key length\n");
                    return -1;
                }
            /*
             * Check if length of the auth key to be randomly generated
             * is supported by the algorithm chosen.
             */
            } else if (options->akey_random_size != -1) {
                if (check_supported_size(options->akey_random_size,
                        cap->sym.auth.key_size.min,
                        cap->sym.auth.key_size.max,
                        cap->sym.auth.key_size.increment)
                        != 0) {
                    printf("Unsupported auth key length\n");
                    return -1;
                }
                options->auth_xform.auth.key.length =
                        options->akey_random_size;
            /* No size provided, use minimum size. */
            } else
                options->auth_xform.auth.key.length =
                        cap->sym.auth.key_size.min;

            if (!options->akey_param)
                generate_random_key(
                    options->auth_xform.auth.key.data,
                    options->auth_xform.auth.key.length);

            /* Check if digest size is supported by the algorithm. */
            if (options->digest_size != -1) {
                if (check_supported_size(options->digest_size,
                        cap->sym.auth.digest_size.min,
                        cap->sym.auth.digest_size.max,
                        cap->sym.auth.digest_size.increment)
                        != 0) {
                    printf("Unsupported digest length\n");
                    return -1;
                }
                options->auth_xform.auth.digest_length =
                        options->digest_size;
            /* No size provided, use minimum size. */
            } else
                options->auth_xform.auth.digest_length =
                        cap->sym.auth.digest_size.min;
        }

        retval = rte_cryptodev_configure(cdev_id, &conf);
        if (retval < 0) {
            printf("Failed to configure cryptodev %u\n", cdev_id);
            return -1;
        }

        qp_conf.nb_descriptors = 2048;

        retval = rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
                SOCKET_ID_ANY);
        if (retval < 0) {
            printf("Failed to setup queue pair %u on cryptodev %u\n",
                    0, cdev_id);
            return -1;
        }

        retval = rte_cryptodev_start(cdev_id);
        if (retval < 0) {
            printf("Failed to start device %u: error %d\n",
                    cdev_id, retval);
            return -1;
        }

        l2fwd_enabled_crypto_mask |= (1 << cdev_id);

        enabled_cdevs[cdev_id] = 1;
        enabled_cdev_count++;
    }

    return enabled_cdev_count;
}
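
/** Configure, start and pair up the enabled Ethernet ports; returns the number of ports enabled */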
static int
initialize_ports(struct l2fwd_crypto_options *options)
{
    uint8_t last_portid, portid;
    unsigned enabled_portcount = 0;
    unsigned nb_ports = rte_eth_dev_count();

    if (nb_ports == 0) {
        printf("No Ethernet ports - bye\n");
        return -1;
    }

    /* Reset l2fwd_dst_ports */
    for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
        l2fwd_dst_ports[portid] = 0;

    for (last_portid = 0, portid = 0; portid < nb_ports; portid++) {
        int retval;

        /* Skip ports that are not enabled */
        if ((options->portmask & (1 << portid)) == 0)
            continue;

        /* init port */
        printf("Initializing port %u... ", (unsigned) portid);
        fflush(stdout);
        retval = rte_eth_dev_configure(portid, 1, 1, &port_conf);
        if (retval < 0) {
            printf("Cannot configure device: err=%d, port=%u\n",
                    retval, (unsigned) portid);
            return -1;
        }

        /* init one RX queue */
        fflush(stdout);
        retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
                rte_eth_dev_socket_id(portid),
                NULL, l2fwd_pktmbuf_pool);
        if (retval < 0) {
            printf("rte_eth_rx_queue_setup:err=%d, port=%u\n",
                    retval, (unsigned) portid);
            return -1;
        }

        /* init one TX queue on each port */
        fflush(stdout);
        retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
                rte_eth_dev_socket_id(portid),
                NULL);
        if (retval < 0) {
            printf("rte_eth_tx_queue_setup:err=%d, port=%u\n",
                    retval, (unsigned) portid);
            return -1;
        }

        /* Start device */
        retval = rte_eth_dev_start(portid);
        if (retval < 0) {
            printf("rte_eth_dev_start:err=%d, port=%u\n",
                    retval, (unsigned) portid);
            return -1;
        }

        rte_eth_promiscuous_enable(portid);

        rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);

        printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
                (unsigned) portid,
                l2fwd_ports_eth_addr[portid].addr_bytes[0],
                l2fwd_ports_eth_addr[portid].addr_bytes[1],
                l2fwd_ports_eth_addr[portid].addr_bytes[2],
                l2fwd_ports_eth_addr[portid].addr_bytes[3],
                l2fwd_ports_eth_addr[portid].addr_bytes[4],
                l2fwd_ports_eth_addr[portid].addr_bytes[5]);

        /* initialize port stats */
        memset(&port_statistics, 0, sizeof(port_statistics));

        /* Setup port forwarding table */
        if (enabled_portcount % 2) {
            l2fwd_dst_ports[portid] = last_portid;
            l2fwd_dst_ports[last_portid] = portid;
        } else {
            last_portid = portid;
        }

        l2fwd_enabled_port_mask |= (1 << portid);
        enabled_portcount++;
    }

    if (enabled_portcount == 1) {
        l2fwd_dst_ports[last_portid] = last_portid;
    } else if (enabled_portcount % 2) {
        printf("odd number of ports in portmask - bye\n");
        return -1;
    }

    check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);

    return enabled_portcount;
}
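
/** Reserve DPDK memory for the cipher/auth keys, the IV and the AAD, and record their physical addresses */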
static void
reserve_key_memory(struct l2fwd_crypto_options *options)
{
    options->cipher_xform.cipher.key.data = rte_malloc("crypto key",
            MAX_KEY_SIZE, 0);
    if (options->cipher_xform.cipher.key.data == NULL)
        rte_exit(EXIT_FAILURE, "Failed to allocate memory for cipher key");

    options->auth_xform.auth.key.data = rte_malloc("auth key",
            MAX_KEY_SIZE, 0);
    if (options->auth_xform.auth.key.data == NULL)
        rte_exit(EXIT_FAILURE, "Failed to allocate memory for auth key");

    options->iv.data = rte_malloc("iv", MAX_KEY_SIZE, 0);
    if (options->iv.data == NULL)
        rte_exit(EXIT_FAILURE, "Failed to allocate memory for IV");
    options->iv.phys_addr = rte_malloc_virt2phy(options->iv.data);

    options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
    if (options->aad.data == NULL)
        rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
    options->aad.phys_addr = rte_malloc_virt2phy(options->aad.data);
}
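
/** Application entry point: init EAL, pools, ports and crypto devices, then launch the forwarding loop on each lcore */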
int
main(int argc, char **argv)
{
    struct lcore_queue_conf *qconf;
    struct l2fwd_crypto_options options;

    uint8_t nb_ports, nb_cryptodevs, portid, cdev_id;
    unsigned lcore_id, rx_lcore_id;
    int ret, enabled_cdevcount, enabled_portcount;
    uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = {0};

    /* init EAL */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
    argc -= ret;
    argv += ret;

    /* reserve memory for Cipher/Auth key and IV */
    reserve_key_memory(&options);

    /* fill out the supported algorithm tables */
    fill_supported_algorithm_tables();

    /* parse application arguments (after the EAL ones) */
    ret = l2fwd_crypto_parse_args(&options, argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");

    /* create the mbuf pool */
    l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
            sizeof(struct rte_crypto_op),
            RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (l2fwd_pktmbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

    /* create crypto op pool */
    l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
            RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
            rte_socket_id());
    if (l2fwd_crypto_op_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");

    /* Enable Ethernet ports */
    enabled_portcount = initialize_ports(&options);
    if (enabled_portcount < 1)
        rte_exit(EXIT_FAILURE, "Failed to initialize Ethernet ports\n");

    nb_ports = rte_eth_dev_count();
    /* Initialize the port/queue configuration of each logical core */
    for (rx_lcore_id = 0, qconf = NULL, portid = 0;
            portid < nb_ports; portid++) {

        /* skip ports that are not enabled */
        if ((options.portmask & (1 << portid)) == 0)
            continue;

        if (options.single_lcore && qconf == NULL) {
            while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
                rx_lcore_id++;
                if (rx_lcore_id >= RTE_MAX_LCORE)
                    rte_exit(EXIT_FAILURE,
                            "Not enough cores\n");
            }
        } else if (!options.single_lcore) {
            /* get the lcore_id for this port */
            while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
                    lcore_queue_conf[rx_lcore_id].nb_rx_ports ==
                    options.nb_ports_per_lcore) {
                rx_lcore_id++;
                if (rx_lcore_id >= RTE_MAX_LCORE)
                    rte_exit(EXIT_FAILURE,
                            "Not enough cores\n");
            }
        }

        /* Assigned a new logical core in the loop above. */
        if (qconf != &lcore_queue_conf[rx_lcore_id])
            qconf = &lcore_queue_conf[rx_lcore_id];

        qconf->rx_port_list[qconf->nb_rx_ports] = portid;
        qconf->nb_rx_ports++;

        printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned)portid);
    }

    /* Enable Crypto devices */
    enabled_cdevcount = initialize_cryptodevs(&options, enabled_portcount,
            enabled_cdevs);
    if (enabled_cdevcount < 0)
        rte_exit(EXIT_FAILURE, "Failed to initialize crypto devices\n");

    if (enabled_cdevcount < enabled_portcount)
        rte_exit(EXIT_FAILURE, "Number of capable crypto devices (%d) "
                "has to be greater than or equal to the number of ports (%d)\n",
                enabled_cdevcount, enabled_portcount);

    nb_cryptodevs = rte_cryptodev_count();

    /* Initialize the port/cryptodev configuration of each logical core */
    for (rx_lcore_id = 0, qconf = NULL, cdev_id = 0;
            cdev_id < nb_cryptodevs && enabled_cdevcount;
            cdev_id++) {
        /* Crypto op not supported by crypto device */
        if (!enabled_cdevs[cdev_id])
            continue;

        if (options.single_lcore && qconf == NULL) {
            while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
                rx_lcore_id++;
                if (rx_lcore_id >= RTE_MAX_LCORE)
                    rte_exit(EXIT_FAILURE,
                            "Not enough cores\n");
            }
        } else if (!options.single_lcore) {
            /* get the lcore_id for this cryptodev */
            while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
                    lcore_queue_conf[rx_lcore_id].nb_crypto_devs ==
                    options.nb_ports_per_lcore) {
                rx_lcore_id++;
                if (rx_lcore_id >= RTE_MAX_LCORE)
                    rte_exit(EXIT_FAILURE,
                            "Not enough cores\n");
            }
        }

        /* Assigned a new logical core in the loop above. */
        if (qconf != &lcore_queue_conf[rx_lcore_id])
            qconf = &lcore_queue_conf[rx_lcore_id];

        qconf->cryptodev_list[qconf->nb_crypto_devs] = cdev_id;
        qconf->nb_crypto_devs++;

        enabled_cdevcount--;

        printf("Lcore %u: cryptodev %u\n", rx_lcore_id,
                (unsigned)cdev_id);
    }

    /* launch per-lcore init on every lcore */
    rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)&options,
            CALL_MASTER);

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    return 0;
}