unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_CRYPTO_MAX_DEVS];
struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
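The op_buf array is indexed by cryptodev id, not by Ethernet port, so it must be sized for the cryptodev id space; on a system with more crypto devices than ports, the old RTE_MAX_ETHPORTS bound could be overrun. A sketch of the indexing pattern this fixes (variable names are illustrative, not the exact app code):

    struct lcore_queue_conf *qconf = &lcore_queue_conf[rte_lcore_id()];
    /* cryptodev ids range over 0 .. RTE_CRYPTO_MAX_DEVS - 1 */
    unsigned cdev_id = qconf->cryptodev_list[i];
    /* in bounds only now that op_buf is sized by RTE_CRYPTO_MAX_DEVS */
    struct op_buffer *ops = &qconf->op_buf[cdev_id];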
for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) {
/* skip disabled cryptodevs */
- if ((l2fwd_enabled_crypto_mask & (1lu << cdevid)) == 0)
+ if ((l2fwd_enabled_crypto_mask & (((uint64_t)1) << cdevid)) == 0)
continue;
printf("\nStatistics for cryptodev %"PRIu64
" -------------------------"
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ip_hdr;
- unsigned ipdata_offset, pad_len, data_len;
+ uint32_t ipdata_offset, data_len;
+ uint32_t pad_len = 0;
char *padding;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
/* Zero-pad data to be encrypted so it is block-aligned */
data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
- pad_len = data_len % cparams->block_size ? cparams->block_size -
- (data_len % cparams->block_size) : 0;
- if (pad_len) {
- padding = rte_pktmbuf_append(m, pad_len);
- if (unlikely(!padding))
- return -1;
+ if (cparams->do_hash && cparams->hash_verify)
+ data_len -= cparams->digest_length;
+
+ if (cparams->do_cipher) {
+ /*
+ * The following algorithms are block ciphers
+ * and may require padding
+ */
+ switch (cparams->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ if (data_len % cparams->block_size)
+ pad_len = cparams->block_size -
+ (data_len % cparams->block_size);
+ break;
+ default:
+ pad_len = 0;
+ }
- data_len += pad_len;
- memset(padding, 0, pad_len);
+ if (pad_len) {
+ padding = rte_pktmbuf_append(m, pad_len);
+ if (unlikely(!padding))
+ return -1;
+
+ data_len += pad_len;
+ memset(padding, 0, pad_len);
+ }
}
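Only the CBC/ECB modes listed above are true block ciphers; stream and bit-oriented algorithms (AES-CTR, SNOW 3G, KASUMI, ZUC, ...) accept arbitrary lengths and fall through to pad_len = 0. For the block ciphers, the pad rounds the cipher region up to a block multiple. A standalone check of the same arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* Bytes needed to reach the next block boundary (0 if aligned). */
    static uint32_t block_pad_len(uint32_t data_len, uint32_t block_size)
    {
            uint32_t rem = data_len % block_size;
            return rem ? block_size - rem : 0;
    }

    int main(void)
    {
            assert(block_pad_len(60, 16) == 4);  /* 60-byte payload, AES block */
            assert(block_pad_len(64, 16) == 0);  /* already block-aligned */
            return 0;
    }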
/* Set crypto operation data parameters */
op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
} else {
- op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
- cparams->digest_length);
+ op->sym->auth.digest.data = rte_pktmbuf_mtod(m,
+ uint8_t *) + ipdata_offset + data_len;
}
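On the generate path the application still appends digest_length bytes of tailroom for the PMD to write into; on the verify path the digest already sits at the tail of the received packet (data_len was reduced by digest_length above), so the pointer is derived from the existing mbuf contents rather than growing the mbuf. The layout this relies on (illustrative):

    /*
     * | eth/ip headers | payload + pad (data_len) | digest |
     * ^                ^                          ^
     * mtod(m)          + ipdata_offset            + ipdata_offset + data_len
     */
    uint8_t *digest = rte_pktmbuf_mtod(m, uint8_t *) + ipdata_offset + data_len;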
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
op->sym->cipher.data.offset = ipdata_offset << 3;
- if (cparams->do_hash && cparams->hash_verify)
- /* Do not cipher the hash tag */
- op->sym->cipher.data.length = (data_len -
- cparams->digest_length) << 3;
- else
- op->sym->cipher.data.length = data_len << 3;
-
+ op->sym->cipher.data.length = data_len << 3;
} else {
op->sym->cipher.data.offset = ipdata_offset;
- if (cparams->do_hash && cparams->hash_verify)
- /* Do not cipher the hash tag */
- op->sym->cipher.data.length = data_len -
- cparams->digest_length;
- else
- op->sym->cipher.data.length = data_len;
+ op->sym->cipher.data.length = data_len;
}
}
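SNOW 3G UEA2, KASUMI F8 and ZUC EEA3 take the cipher offset and length in bits, hence the << 3 conversions; and because data_len was already reduced by digest_length on the verify path earlier in the function, the per-branch "do not cipher the hash tag" subtraction became redundant and is dropped. A small standalone check of the byte-to-bit conversion:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t ipdata_offset = 14 + 20;     /* Ethernet + IPv4 headers */
            uint32_t data_len = 40;               /* payload bytes */

            assert((ipdata_offset << 3) == 272);  /* offset in bits */
            assert((data_len << 3) == 320);       /* length in bits */
            return 0;
    }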
argv[optind-1] = prgname;
retval = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return retval;
}
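POSIX specifies optind = 1 as the portable way to restart option scanning; 0 is a glibc extension that additionally reinitialises getopt's internal state, so 1 is the correct reset before the next parsing pass. A minimal, hypothetical illustration of the reset-and-reparse pattern:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char *argv[] = { "prog", "-a", "-b", "x", NULL };
            int argc = 4, opt, pass;

            for (pass = 0; pass < 2; pass++) {
                    while ((opt = getopt(argc, argv, "ab:")) != -1)
                            printf("pass %d: -%c %s\n", pass, opt,
                                            optarg ? optarg : "");
                    optind = 1;  /* reset getopt for the next pass */
            }
            return 0;
    }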
return -1;
}
- l2fwd_enabled_crypto_mask |= (1 << cdev_id);
+ l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
enabled_cdevs[cdev_id] = 1;
enabled_cdev_count++;