-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
*/
#include <stdio.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
+#include <rte_security.h>
#include "ipsec.h"
#include "parser.h"
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1
-#define OPTION_CONFIG "config"
-#define OPTION_SINGLE_SA "single-sa"
-
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 4
/*
* Configurable number of RX/TX ring descriptors
*/
-#define IPSEC_SECGW_RX_DESC_DEFAULT 128
-#define IPSEC_SECGW_TX_DESC_DEFAULT 512
+#define IPSEC_SECGW_RX_DESC_DEFAULT 1024
+#define IPSEC_SECGW_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
+#define CMD_LINE_OPT_CONFIG "config"
+#define CMD_LINE_OPT_SINGLE_SA "single-sa"
+#define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
+
+enum {
+	/* long option values returned by getopt_long() */
+
+	/* The first long-only option value must be >= 256, so that it
+	 * does not conflict with short options.
+	 */
+ CMD_LINE_OPT_MIN_NUM = 256,
+ CMD_LINE_OPT_CONFIG_NUM,
+ CMD_LINE_OPT_SINGLE_SA_NUM,
+ CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
+};
+
+static const struct option lgopts[] = {
+ {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
+ {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
+ {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
+ {NULL, 0, 0, 0}
+};
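+
+/* getopt_long() returns the val (fourth) field of the matching lgopts
+ * entry, i.e. one of the CMD_LINE_OPT_*_NUM values above, so the long
+ * options can be handled in the same switch statement as the short
+ * options in parse_args().
+ */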
+
/* mask of enabled ports */
static uint32_t enabled_port_mask;
+static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
+static uint32_t frame_size;
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 1, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP,
+ .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
+ .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS),
},
};
RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
rte_pktmbuf_free(pkt);
}
+
+ /* Check if the packet has been processed inline. For inline protocol
+ * processed packets, the metadata in the mbuf can be used to identify
+ * the security processing done on the packet. The metadata will be
+ * used to retrieve the application registered userdata associated
+ * with the security session.
+ */
+
+ if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+ struct ipsec_sa *sa;
+ struct ipsec_mbuf_metadata *priv;
+		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+					rte_eth_dev_get_sec_ctx(pkt->port);
+
+		/* Retrieve the registered userdata. In this application the
+		 * userdata registered with the session is the SA pointer.
+		 */
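+		/* (The userdata was registered by this application when the
+		 * inline security session was created, presumably via
+		 * sess_conf.userdata = (void *)sa; the session creation code
+		 * is not part of this patch.)
+		 */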
+
+ sa = (struct ipsec_sa *)
+ rte_security_get_userdata(ctx, pkt->udata64);
+
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return;
+ }
+
+ /* Save SA as priv member in mbuf. This will be used in the
+		 * IPsec selector (SP-SA) check.
+ */
+
+ priv = get_priv(pkt);
+ priv->sa = sa;
+ }
}
static inline void
}
static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
{
struct ip *ip;
struct ether_hdr *ethhdr;
}
static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
{
int32_t i;
const int32_t prefetch_offset = 2;
/* Send burst of packets on an output interface */
static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int32_t ret;
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
uint16_t len;
ip->pkts[j++] = m;
continue;
}
- if (res & DISCARD || i < lim) {
+ if (res & DISCARD) {
rte_pktmbuf_free(m);
continue;
}
+
/* Only check SPI match for processed IPSec packets */
+ if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
sa_idx = ip->res[i] & PROTECT_MASK;
- if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
+ if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
+ !inbound_sa_check(sa, m, sa_idx)) {
rte_pktmbuf_free(m);
continue;
}
for (i = 0; i < ip->num; i++) {
m = ip->pkts[i];
sa_idx = ip->res[i] & PROTECT_MASK;
- if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
+ if (ip->res[i] & DISCARD)
rte_pktmbuf_free(m);
- else if (sa_idx != 0) {
+ else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
ipsec->res[ipsec->num] = sa_idx;
ipsec->pkts[ipsec->num++] = m;
} else /* BYPASS */
traffic->ip6.num = nb_pkts_out;
}
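+
+/* For inline-offloaded packets the output port comes from the SA rather
+ * than from an LPM lookup. To let route4_pkts() and route6_pkts() handle
+ * both cases uniformly, the IPv4 path returns the port id with
+ * RTE_LPM_LOOKUP_SUCCESS set (0 on failure), while the IPv6 path returns
+ * a plain port id (-1 on failure), mirroring the rte_lpm/rte_lpm6 bulk
+ * lookup conventions.
+ */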
+static inline int32_t
+get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
+{
+ struct ipsec_mbuf_metadata *priv;
+ struct ipsec_sa *sa;
+
+ priv = get_priv(pkt);
+
+ sa = priv->sa;
+ if (unlikely(sa == NULL)) {
+ RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
+ goto fail;
+ }
+
+ if (is_ipv6)
+ return sa->portid;
+
+ /* else */
+ return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
+
+fail:
+ if (is_ipv6)
+ return -1;
+
+ /* else */
+ return 0;
+}
+
static inline void
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
uint32_t hop[MAX_PKT_BURST * 2];
uint32_t dst_ip[MAX_PKT_BURST * 2];
+ int32_t pkt_hop = 0;
uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
if (nb_pkts == 0)
return;
+	/* An LPM lookup is needed only for non-inline packets; inline
+	 * packets carry the output port ID in their SA.
+	 */
+
for (i = 0; i < nb_pkts; i++) {
- offset = offsetof(struct ip, ip_dst);
- dst_ip[i] = *rte_pktmbuf_mtod_offset(pkts[i],
- uint32_t *, offset);
- dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]);
+ if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip, ip_dst);
+ dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+ uint32_t *, offset);
+ dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
+ lpm_pkts++;
+ }
}
- rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_pkts);
+ rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
+
+ lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) {
+ if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
rte_pktmbuf_free(pkts[i]);
continue;
}
- send_single_packet(pkts[i], hop[i] & 0xff);
+ send_single_packet(pkts[i], pkt_hop & 0xff);
}
}
static inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
- int16_t hop[MAX_PKT_BURST * 2];
+ int32_t hop[MAX_PKT_BURST * 2];
uint8_t dst_ip[MAX_PKT_BURST * 2][16];
uint8_t *ip6_dst;
+ int32_t pkt_hop = 0;
uint16_t i, offset;
+ uint16_t lpm_pkts = 0;
if (nb_pkts == 0)
return;
+	/* An LPM lookup is needed only for non-inline packets; inline
+	 * packets carry the output port ID in their SA.
+	 */
+
for (i = 0; i < nb_pkts; i++) {
- offset = offsetof(struct ip6_hdr, ip6_dst);
- ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset);
- memcpy(&dst_ip[i][0], ip6_dst, 16);
+ if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+ /* Security offload not enabled. So an LPM lookup is
+ * required to get the hop
+ */
+ offset = offsetof(struct ip6_hdr, ip6_dst);
+ ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+ offset);
+ memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
+ lpm_pkts++;
+ }
}
- rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip,
- hop, nb_pkts);
+ rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
+ lpm_pkts);
+
+ lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (hop[i] == -1) {
+ if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+ /* Read hop from the SA */
+ pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+ } else {
+ /* Need to use hop returned by lookup */
+ pkt_hop = hop[lpm_pkts++];
+ }
+
+ if (pkt_hop == -1) {
rte_pktmbuf_free(pkts[i]);
continue;
}
- send_single_packet(pkts[i], hop[i] & 0xff);
+ send_single_packet(pkts[i], pkt_hop & 0xff);
}
}
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
- uint8_t nb_pkts, uint8_t portid)
+ uint8_t nb_pkts, uint16_t portid)
{
struct ipsec_traffic traffic;
uint32_t lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int32_t i, nb_rx;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
struct lcore_conf *qconf;
int32_t socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
qconf->inbound.cdev_map = cdev_map_in;
+ qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
qconf->outbound.cdev_map = cdev_map_out;
+ qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
if (qconf->nb_rx_queue == 0) {
RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
static int32_t
check_params(void)
{
- uint8_t lcore, portid, nb_ports;
+ uint8_t lcore;
+ uint16_t portid;
uint16_t i;
int32_t socket_id;
return -1;
}
- nb_ports = rte_eth_dev_count();
-
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
printf("port %u is not enabled in port mask\n", portid);
return -1;
}
- if (portid >= nb_ports) {
+ if (!rte_eth_dev_is_valid_port(portid)) {
printf("port %u is not present on the board\n", portid);
return -1;
}
}
static uint8_t
-get_port_nb_rx_queues(const uint8_t port)
+get_port_nb_rx_queues(const uint16_t port)
{
int32_t queue = -1;
uint16_t i;
static void
print_usage(const char *prgname)
{
- printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK"
- " --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore]"
- " --single-sa SAIDX -f CONFIG_FILE\n"
- " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
- " -P : enable promiscuous mode\n"
- " -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
- " --"OPTION_CONFIG": (port,queue,lcore): "
- "rx queues configuration\n"
- " --single-sa SAIDX: use single SA index for outbound, "
- "bypassing the SP\n"
- " -f CONFIG_FILE: Configuration file path\n",
+ fprintf(stderr, "%s [EAL options] --"
+ " -p PORTMASK"
+ " [-P]"
+ " [-u PORTMASK]"
+ " [-j FRAMESIZE]"
+ " -f CONFIG_FILE"
+ " --config (port,queue,lcore)[,(port,queue,lcore)]"
+ " [--single-sa SAIDX]"
+ " [--cryptodev_mask MASK]"
+ "\n\n"
+ " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
+ " -P : Enable promiscuous mode\n"
+ " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
+ " -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n"
+ " packet size\n"
+ " -f CONFIG_FILE: Configuration file\n"
+ " --config (port,queue,lcore): Rx queue configuration\n"
+ " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
+ " bypassing the SP\n"
+ " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
+ " devices to configure\n"
+ "\n",
prgname);
}
return 0;
}
-#define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt)))
-static int32_t
-parse_args_long_options(struct option *lgopts, int32_t option_index)
-{
- int32_t ret = -1;
- const char *optname = lgopts[option_index].name;
-
- if (__STRNCMP(optname, OPTION_CONFIG)) {
- ret = parse_config(optarg);
- if (ret)
- printf("invalid config\n");
- }
-
- if (__STRNCMP(optname, OPTION_SINGLE_SA)) {
- ret = parse_decimal(optarg);
- if (ret != -1) {
- single_sa = 1;
- single_sa_idx = ret;
- printf("Configured with single SA index %u\n",
- single_sa_idx);
- ret = 0;
- }
- }
-
- return ret;
-}
-#undef __STRNCMP
-
static int32_t
parse_args(int32_t argc, char **argv)
{
char **argvopt;
int32_t option_index;
char *prgname = argv[0];
- static struct option lgopts[] = {
- {OPTION_CONFIG, 1, 0, 0},
- {OPTION_SINGLE_SA, 1, 0, 0},
- {NULL, 0, 0, 0}
- };
int32_t f_present = 0;
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "p:Pu:f:",
+ while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:",
lgopts, &option_index)) != EOF) {
switch (opt) {
}
f_present = 1;
break;
- case 0:
- if (parse_args_long_options(lgopts, option_index)) {
+ case 'j':
+ {
+			int32_t size = parse_decimal(optarg);
+
+			if (size < 0) {
+				printf("Invalid jumbo frame size\n");
+				print_usage(prgname);
+				return -1;
+			}
+			if (size <= 1518) {
+				printf("Invalid jumbo frame size %d, "
+					"using default value 9000\n", size);
+				frame_size = 9000;
+			} else {
+				frame_size = size;
+			}
+		}
+		printf("Enabled jumbo frames, max size %u\n", frame_size);
+ break;
+ case CMD_LINE_OPT_CONFIG_NUM:
+ ret = parse_config(optarg);
+ if (ret) {
+ printf("Invalid config\n");
+ print_usage(prgname);
+ return -1;
+ }
+ break;
+ case CMD_LINE_OPT_SINGLE_SA_NUM:
+ ret = parse_decimal(optarg);
+ if (ret == -1) {
+ printf("Invalid argument[sa_idx]\n");
+ print_usage(prgname);
+ return -1;
+ }
+
+ /* else */
+ single_sa = 1;
+ single_sa_idx = ret;
+ printf("Configured with single SA index %u\n",
+ single_sa_idx);
+ break;
+ case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
+ ret = parse_portmask(optarg);
+ if (ret == -1) {
+ printf("Invalid argument[portmask]\n");
print_usage(prgname);
return -1;
}
+
+ /* else */
+ enabled_cryptodev_mask = ret;
break;
default:
print_usage(prgname);
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (uint32_t)link.link_speed,
+					printf(
+					"Port %d Link Up - speed %u Mbps - %s\n",
+					portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
uint16_t qp, struct lcore_params *params,
struct ipsec_ctx *ipsec_ctx,
const struct rte_cryptodev_capabilities *cipher,
- const struct rte_cryptodev_capabilities *auth)
+ const struct rte_cryptodev_capabilities *auth,
+ const struct rte_cryptodev_capabilities *aead)
{
int32_t ret = 0;
unsigned long i;
key.cipher_algo = cipher->sym.cipher.algo;
if (auth)
key.auth_algo = auth->sym.auth.algo;
+ if (aead)
+ key.aead_algo = aead->sym.aead.algo;
ret = rte_hash_lookup(map, &key);
if (ret != -ENOENT)
if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
continue;
+ if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret |= add_mapping(map, str, cdev_id, qp, params,
+ ipsec_ctx, NULL, NULL, i);
+ continue;
+ }
+
if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
continue;
continue;
ret |= add_mapping(map, str, cdev_id, qp, params,
- ipsec_ctx, i, j);
+ ipsec_ctx, i, j, NULL);
}
}
return ret;
}
+/* Check if the device is enabled by cryptodev_mask */
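+/* (For example, "--cryptodev_mask 0x3" keeps crypto devices 0 and 1 and
+ * skips the rest; the default mask UINT64_MAX enables every device.)
+ */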
+static int
+check_cryptodev_mask(uint8_t cdev_id)
+{
+	if (enabled_cryptodev_mask & (UINT64_C(1) << cdev_id))
+ return 0;
+
+ return -1;
+}
+
static int32_t
cryptodevs_init(void)
{
struct rte_cryptodev_config dev_conf;
struct rte_cryptodev_qp_conf qp_conf;
uint16_t idx, max_nb_qps, qp, i;
- int16_t cdev_id;
+ int16_t cdev_id, port_id;
struct rte_hash_parameters params = { 0 };
params.entries = CDEV_MAP_ENTRIES;
printf("lcore/cryptodev/qp mappings:\n");
+ uint32_t max_sess_sz = 0, sess_sz;
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ sess_sz = rte_cryptodev_get_private_session_size(cdev_id);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+ }
+ RTE_ETH_FOREACH_DEV(port_id) {
+ void *sec_ctx;
+
+ if ((enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+ if (sec_ctx == NULL)
+ continue;
+
+ sess_sz = rte_security_session_get_size(sec_ctx);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+ }
+
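+	/* One session mempool per socket (created below) is shared by all
+	 * crypto devices and security-capable ethdevs on that socket, so
+	 * its element size must fit the largest private session size
+	 * computed above.
+	 */
+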
idx = 0;
- /* Start from last cdev id to give HW priority */
- for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
struct rte_cryptodev_info cdev_info;
+ if (check_cryptodev_mask((uint8_t)cdev_id))
+ continue;
+
rte_cryptodev_info_get(cdev_id, &cdev_info);
if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
dev_conf.nb_queue_pairs = qp;
- dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;
- dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;
+
+ if (!socket_ctx[dev_conf.socket_id].session_pool) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", dev_conf.socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ max_sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, dev_conf.socket_id,
+ 0);
+ if (sess_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create session pool on socket %d\n",
+ dev_conf.socket_id);
+ else
+ printf("Allocated session pool on socket %d\n",
+ dev_conf.socket_id);
+ socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
+ }
if (rte_cryptodev_configure(cdev_id, &dev_conf))
- rte_panic("Failed to initialize crypodev %u\n",
+ rte_panic("Failed to initialize cryptodev %u\n",
cdev_id);
qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
- &qp_conf, dev_conf.socket_id))
+ &qp_conf, dev_conf.socket_id,
+ socket_ctx[dev_conf.socket_id].session_pool))
rte_panic("Failed to setup queue %u for "
"cdev_id %u\n", 0, cdev_id);
cdev_id);
}
+ /* create session pools for eth devices that implement security */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((enabled_port_mask & (1 << port_id)) &&
+ rte_eth_dev_get_sec_ctx(port_id)) {
+ int socket_id = rte_eth_dev_socket_id(port_id);
+
+ if (!socket_ctx[socket_id].session_pool) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ max_sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+ if (sess_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create session pool "
+ "on socket %d\n", socket_id);
+ else
+ printf("Allocated session pool "
+ "on socket %d\n", socket_id);
+ socket_ctx[socket_id].session_pool = sess_mp;
+ }
+ }
+ }
+
printf("\n");
return 0;
}
static void
-port_init(uint8_t portid)
+port_init(uint16_t portid)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
int32_t ret, socket_id;
struct lcore_conf *qconf;
struct ether_addr ethaddr;
+ struct rte_eth_conf local_port_conf = port_conf;
rte_eth_dev_info_get(portid, &dev_info);
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
+ if (frame_size) {
+ local_port_conf.rxmode.max_rx_pkt_len = frame_size;
+ local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
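+	/* (For example, "-j 9000" sets max_rx_pkt_len to 9000 here, and
+	 * pool_init() sizes the mbuf data room to frame_size plus
+	 * RTE_PKTMBUF_HEADROOM to match.)
+	 */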
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+ local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+ local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
- &port_conf);
+ &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%d\n", ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
+ "err=%d, port=%d\n", ret, portid);
+
/* init one TX queue per lcore */
tx_queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = 0;
+ txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
+ txconf->offloads = local_port_conf.txmode.offloads;
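+	/* ETH_TXQ_FLAGS_IGNORE tells the PMD to take the Tx queue
+	 * configuration from txconf->offloads (set above) instead of the
+	 * legacy txq_flags bitmask.
+	 */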
ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
socket_id, txconf);
/* init RX queues */
for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
+ struct rte_eth_rxconf rxq_conf;
+
if (portid != qconf->rx_queue_list[queue].port_id)
continue;
printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
socket_id);
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = local_port_conf.rxmode.offloads;
ret = rte_eth_rx_queue_setup(portid, rx_queueid,
- nb_rxd, socket_id, NULL,
+ nb_rxd, socket_id, &rxq_conf,
socket_ctx[socket_id].mbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE,
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
char s[64];
+ uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
+ RTE_MBUF_DEFAULT_BUF_SIZE;
+
snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
- RTE_MBUF_DEFAULT_BUF_SIZE,
+ buff_size,
socket_id);
if (ctx->mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
printf("Allocated mbuf pool on socket %d\n", socket_id);
}
+static inline int
+inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
+{
+ struct ipsec_sa *sa;
+
+ /* For inline protocol processing, the metadata in the event will
+ * uniquely identify the security session which raised the event.
+ * Application would then need the userdata it had registered with the
+ * security session to process the event.
+ */
+
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
+
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return -1;
+ }
+
+	/* Sequence number overflow. The SA needs to be re-established. */
+ RTE_SET_USED(sa);
+ return 0;
+}
+
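+/* Device event callback for RTE_ETH_EVENT_IPSEC; registered for each
+ * enabled port in main() via rte_eth_dev_callback_register().
+ */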
+static int
+inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ void *param, void *ret_param)
+{
+ uint64_t md;
+ struct rte_eth_event_ipsec_desc *event_desc = NULL;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(port_id);
+
+ RTE_SET_USED(param);
+
+ if (type != RTE_ETH_EVENT_IPSEC)
+ return -1;
+
+ event_desc = ret_param;
+ if (event_desc == NULL) {
+ printf("Event descriptor not set\n");
+ return -1;
+ }
+
+ md = event_desc->metadata;
+
+ if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
+ return inline_ipsec_event_esn_overflow(ctx, md);
+ else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
+ printf("Invalid IPsec event reported\n");
+ return -1;
+ }
+
+ return -1;
+}
+
int32_t
main(int32_t argc, char **argv)
{
int32_t ret;
- uint32_t lcore_id, nb_ports;
- uint8_t portid, socket_id;
+ uint32_t lcore_id;
+ uint8_t socket_id;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
unprotected_port_mask);
- nb_ports = rte_eth_dev_count();
-
if (check_params() < 0)
rte_exit(EXIT_FAILURE, "check_params failed\n");
nb_lcores = rte_lcore_count();
- /* Replicate each contex per socket */
+ /* Replicate each context per socket */
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
}
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
cryptodevs_init();
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
*/
if (promiscuous_on)
rte_eth_promiscuous_enable(portid);
+
+ rte_eth_dev_callback_register(portid,
+ RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);