X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fl2fwd-crypto%2Fmain.c;h=43fef59e5295f8e9b4a2ff963797f9513aa50102;hb=8ca2c96faa57c363e943589c0925c608f2fe1892;hp=35171d12280c1018ab9e9e4d3f9725f4c04b60fa;hpb=09419f235e099ecb265a590778fe64a685a2a241;p=dpdk.git

diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 35171d1228..43fef59e52 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -45,6 +45,8 @@
 #include
 #include
 #include
+#include
+#include
 
 #include
 #include
@@ -70,7 +72,6 @@
 #include
 #include
 #include
-#include
 #include
 
 enum cdev_type {
@@ -243,7 +244,7 @@ struct l2fwd_crypto_statistics {
 } __rte_cache_aligned;
 
 struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
-struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
+struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS];
 
 /* A tsc-based timer responsible for triggering statistics printout */
 #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
@@ -339,22 +340,38 @@ fill_supported_algorithm_tables(void)
 		strcpy(supported_auth_algo[i], "NOT_SUPPORTED");
 
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GCM], "AES_GCM");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GMAC], "AES_GMAC");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5_HMAC], "MD5_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5], "MD5");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_NULL], "NULL");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_XCBC_MAC],
+		"AES_XCBC_MAC");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1_HMAC], "SHA1_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1], "SHA1");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224_HMAC], "SHA224_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224], "SHA224");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256_HMAC], "SHA256_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256], "SHA256");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384], "SHA384");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512], "SHA512");
 	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_ZUC_EIA3], "ZUC_EIA3");
+	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9");
 
 	for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++)
 		strcpy(supported_cipher_algo[i], "NOT_SUPPORTED");
 
 	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CBC], "AES_CBC");
+	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CTR], "AES_CTR");
 	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM");
 	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL");
 	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2");
+	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_ZUC_EEA3], "ZUC_EEA3");
+	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8");
+	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CTR], "3DES_CTR");
+	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CBC], "3DES_CBC");
 }
 
@@ -434,6 +451,10 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 
 	/* Zero pad data to be crypto'd so it is block aligned */
 	data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
+
+	if (cparams->do_hash && cparams->hash_verify)
+		data_len -= cparams->digest_length;
+
 	pad_len = data_len % cparams->block_size ?
 			cparams->block_size - (data_len % cparams->block_size) : 0;
 
@@ -455,16 +476,18 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 			op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
 				cparams->digest_length);
 		} else {
-			op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
-				cparams->digest_length);
+			op->sym->auth.digest.data = rte_pktmbuf_mtod(m,
+					uint8_t *) + ipdata_offset + data_len;
 		}
 
 		op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
 				rte_pktmbuf_pkt_len(m) - cparams->digest_length);
 		op->sym->auth.digest.length = cparams->digest_length;
 
-		/* For SNOW3G algorithms, offset/length must be in bits */
-		if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+		/* For wireless algorithms, offset/length must be in bits */
+		if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+				cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
+				cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
 			op->sym->auth.data.offset = ipdata_offset << 3;
 			op->sym->auth.data.length = data_len << 3;
 		} else {
@@ -484,24 +507,15 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 		op->sym->cipher.iv.phys_addr = cparams->iv.phys_addr;
 		op->sym->cipher.iv.length = cparams->iv.length;
 
-		/* For SNOW3G algorithms, offset/length must be in bits */
-		if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2) {
+		/* For wireless algorithms, offset/length must be in bits */
+		if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+				cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
+				cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
 			op->sym->cipher.data.offset = ipdata_offset << 3;
-			if (cparams->do_hash && cparams->hash_verify)
-				/* Do not cipher the hash tag */
-				op->sym->cipher.data.length = (data_len -
-					cparams->digest_length) << 3;
-			else
-				op->sym->cipher.data.length = data_len << 3;
-
+			op->sym->cipher.data.length = data_len << 3;
 		} else {
 			op->sym->cipher.data.offset = ipdata_offset;
-			if (cparams->do_hash && cparams->hash_verify)
-				/* Do not cipher the hash tag */
-				op->sym->cipher.data.length = data_len -
-					cparams->digest_length;
-			else
-				op->sym->cipher.data.length = data_len;
+			op->sym->cipher.data.length = data_len;
 		}
 	}
 
@@ -581,10 +595,18 @@ l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
 static void
 generate_random_key(uint8_t *key, unsigned length)
 {
-	unsigned i;
+	int fd;
+	int ret;
+
+	fd = open("/dev/urandom", O_RDONLY);
+	if (fd < 0)
+		rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
 
-	for (i = 0; i < length; i++)
-		key[i] = rand() % 0xff;
+	ret = read(fd, key, length);
+	close(fd);
+
+	if (ret != (signed)length)
+		rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
 }
 
 static struct rte_cryptodev_sym_session *
@@ -621,7 +643,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 
 	unsigned lcore_id = rte_lcore_id();
 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
-	unsigned i, j, portid, nb_rx;
+	unsigned i, j, portid, nb_rx, len;
 	struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
 			US_PER_S * BURST_TX_DRAIN_US;
@@ -720,10 +742,18 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
 		cur_tsc = rte_rdtsc();
 
 		/*
-		 * TX burst queue drain
+		 * Crypto device/TX burst queue drain
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
 		if (unlikely(diff_tsc > drain_tsc)) {
+			/* Enqueue all crypto ops remaining in buffers */
+			for (i = 0; i < qconf->nb_crypto_devs; i++) {
+				cparams = &port_cparams[i];
+				len = qconf->op_buf[cparams->dev_id].len;
+				l2fwd_crypto_send_burst(qconf, len, cparams);
+				qconf->op_buf[cparams->dev_id].len = 0;
+			}
+			/* Transmit all packets remaining in buffers */
 			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
 				if (qconf->pkt_buf[portid].len == 0)
 					continue;
@@ -902,7 +932,7 @@ parse_cipher_algo(enum rte_crypto_cipher_algorithm *algo, char *optarg)
 
 	for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++) {
 		if (!strcmp(supported_cipher_algo[i], optarg)) {
-			*algo = i;
+			*algo = (enum rte_crypto_cipher_algorithm)i;
 			return 0;
 		}
 	}
@@ -978,7 +1008,7 @@ parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
 
 	for (i = 0; i < RTE_CRYPTO_AUTH_LIST_END; i++) {
 		if (!strcmp(supported_auth_algo[i], optarg)) {
-			*algo = i;
+			*algo = (enum rte_crypto_auth_algorithm)i;
 			return 0;
 		}
 	}
@@ -1012,7 +1042,8 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
 	if (strcmp(lgopts[option_index].name, "cdev_type") == 0) {
 		retval = parse_cryptodev_type(&options->type, optarg);
 		if (retval == 0)
-			strcpy(options->string_type, optarg);
+			snprintf(options->string_type, MAX_STR_LEN,
+					"%s", optarg);
 		return retval;
 	}
 
@@ -1179,8 +1210,6 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
 static void
 l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
 {
-	srand(time(NULL));
-
 	options->portmask = 0xffffffff;
 	options->nb_ports_per_lcore = 1;
 	options->refresh_period = 10000;
@@ -1485,6 +1514,15 @@ check_supported_size(uint16_t length, uint16_t min, uint16_t max,
 {
 	uint16_t supp_size;
 
+	/* Single value */
+	if (increment == 0) {
+		if (length == min)
+			return 0;
+		else
+			return -1;
+	}
+
+	/* Range of values */
 	for (supp_size = min; supp_size <= max; supp_size += increment) {
 		if (length == supp_size)
 			return 0;
@@ -1763,6 +1801,13 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
 			return -1;
 		}
 
+		retval = rte_cryptodev_start(cdev_id);
+		if (retval < 0) {
+			printf("Failed to start device %u: error %d\n",
+					cdev_id, retval);
+			return -1;
+		}
+
 		l2fwd_enabled_crypto_mask |= (1 << cdev_id);
 
 		enabled_cdevs[cdev_id] = 1;
@@ -1784,9 +1829,6 @@ initialize_ports(struct l2fwd_crypto_options *options)
 		return -1;
 	}
 
-	if (nb_ports > RTE_MAX_ETHPORTS)
-		nb_ports = RTE_MAX_ETHPORTS;
-
 	/* Reset l2fwd_dst_ports */
 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
 		l2fwd_dst_ports[portid] = 0;
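A note on the generate_random_key() hunk above: the patch drops the rand()-based loop in favour of reading the key bytes from /dev/urandom and aborting on a short read. The fragment below is a minimal standalone sketch of that same pattern, not the DPDK code itself; it swaps rte_exit() for fprintf()/exit() and adds a small main() so it builds and runs without DPDK headers.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Fill 'key' with 'length' bytes from /dev/urandom. Same flow as the
 * patched generate_random_key(), but with rte_exit() replaced by
 * fprintf()/exit() so the sketch has no DPDK dependency.
 */
static void
generate_random_key(uint8_t *key, unsigned int length)
{
	int fd;
	ssize_t ret;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "Failed to open /dev/urandom\n");
		exit(EXIT_FAILURE);
	}

	ret = read(fd, key, length);
	close(fd);

	if (ret != (ssize_t)length) {
		fprintf(stderr, "Failed to read %u random bytes\n", length);
		exit(EXIT_FAILURE);
	}
}

int
main(void)
{
	uint8_t key[16];
	unsigned int i;

	generate_random_key(key, sizeof(key));

	/* Print the generated key as hex */
	for (i = 0; i < sizeof(key); i++)
		printf("%02x", key[i]);
	printf("\n");

	return 0;
}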
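The check_supported_size() hunk adds an early branch for capabilities that advertise exactly one supported size (increment == 0), which the old range loop could not handle: with increment == 0 the loop index never advances, so a non-matching length would never terminate the loop. Below is a self-contained sketch of the patched logic; the test values in main() are illustrative only, not taken from any device capability table.

#include <stdint.h>
#include <stdio.h>

/*
 * Accept 'length' if it matches a capability advertising a single size
 * (increment == 0) or any size in [min, max] stepped by 'increment'.
 * Returns 0 on match, -1 otherwise.
 */
static int
check_supported_size(uint16_t length, uint16_t min, uint16_t max,
		uint16_t increment)
{
	uint16_t supp_size;

	/* Single value */
	if (increment == 0)
		return (length == min) ? 0 : -1;

	/* Range of values */
	for (supp_size = min; supp_size <= max; supp_size += increment) {
		if (length == supp_size)
			return 0;
	}

	return -1;
}

int
main(void)
{
	/* Single supported size: min == max, increment == 0 */
	printf("16 in {16}:     %d\n", check_supported_size(16, 16, 16, 0));
	printf("20 in {16}:     %d\n", check_supported_size(20, 16, 16, 0));
	/* Range of sizes: 16..32 in steps of 8 */
	printf("24 in 16..32/8: %d\n", check_supported_size(24, 16, 32, 8));
	printf("20 in 16..32/8: %d\n", check_supported_size(20, 16, 32, 8));
	return 0;
}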