#define CMD_LINE_OPT_EVENT_VECTOR "event-vector"
#define CMD_LINE_OPT_VECTOR_SIZE "vector-size"
#define CMD_LINE_OPT_VECTOR_TIMEOUT "vector-tmo"
+#define CMD_LINE_OPT_VECTOR_POOL_SZ "vector-pool-sz"
+#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
#define CMD_LINE_ARG_EVENT "event"
#define CMD_LINE_ARG_POLL "poll"
CMD_LINE_OPT_EVENT_VECTOR_NUM,
CMD_LINE_OPT_VECTOR_SIZE_NUM,
CMD_LINE_OPT_VECTOR_TIMEOUT_NUM,
+ CMD_LINE_OPT_VECTOR_POOL_SZ_NUM,
+ CMD_LINE_OPT_PER_PORT_POOL_NUM,
};
static const struct option lgopts[] = {
{CMD_LINE_OPT_EVENT_VECTOR, 0, 0, CMD_LINE_OPT_EVENT_VECTOR_NUM},
{CMD_LINE_OPT_VECTOR_SIZE, 1, 0, CMD_LINE_OPT_VECTOR_SIZE_NUM},
{CMD_LINE_OPT_VECTOR_TIMEOUT, 1, 0, CMD_LINE_OPT_VECTOR_TIMEOUT_NUM},
+ {CMD_LINE_OPT_VECTOR_POOL_SZ, 1, 0, CMD_LINE_OPT_VECTOR_POOL_SZ_NUM},
+ {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PER_PORT_POOL_NUM},
{NULL, 0, 0, 0}
};
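
Each lgopts entry is a struct option of the form {name, has_arg, flag, val};
with flag set to 0, getopt_long() returns val directly, so the new *_NUM
enum values surface as cases in the option-parsing switch further down. A
minimal sketch of that dispatch (the real loop in ipsec-secgw.c also handles
short options and error reporting):

    while ((opt = getopt_long(argc, argv, "", lgopts, &opt_idx)) != -1) {
        switch (opt) {
        case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
            /* optarg holds the value given after --vector-pool-sz */
            break;
        case CMD_LINE_OPT_PER_PORT_POOL_NUM:
            /* flag-style option, takes no argument */
            break;
        }
    }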
struct rt_ctx *rt6_ctx;
struct {
struct rte_ip_frag_tbl *tbl;
- struct rte_mempool *pool_dir;
struct rte_mempool *pool_indir;
struct rte_ip_frag_death_row dr;
} frag;
struct socket_ctx socket_ctx[NB_SOCKETS];
+bool per_port_pool;
+
/*
- * Determine is multi-segment support required:
- * - either frame buffer size is smaller then mtu
- * - or reassmeble support is requested
+ * Determine if multi-segment support is required:
+ * - either frame buffer size is smaller than mtu
+ * - or reassemble support is requested
*/
static int
multi_seg_required(void)
if (proto == IPPROTO_IP)
rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, qconf->frag.pool_dir,
- qconf->frag.pool_indir);
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
else
rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
- n, mtu_size, qconf->frag.pool_dir,
- qconf->frag.pool_indir);
+ n, mtu_size, m->pool, qconf->frag.pool_indir);
if (rc >= 0)
len += rc;
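
The pool_dir field can be dropped because every rte_mbuf carries a pointer to
the mempool it was allocated from: with per-port pools there is no single
"direct" pool per lcore any more, but m->pool always names the right one for
the packet being fragmented. For reference, the librte_ip_frag prototype
called here (from rte_ip_frag.h):

    int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
            struct rte_mbuf **pkts_out, uint16_t nb_pkts_out,
            uint16_t mtu_size, struct rte_mempool *pool_direct,
            struct rte_mempool *pool_indirect);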
qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
qconf->outbound.session_priv_pool =
socket_ctx[socket_id].session_priv_pool;
- qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
" --vector-size Max vector size (default value: 16)\n"
" --vector-tmo Max vector timeout in nanoseconds"
" (default value: 102400)\n"
+ " --" CMD_LINE_OPT_PER_PORT_POOL " Enable per port mbuf pool\n"
+ " --" CMD_LINE_OPT_VECTOR_POOL_SZ " Vector pool size\n"
+ " (default value is based on mbuf count)\n"
"\n",
prgname);
}
em_conf = eh_conf->mode_params;
em_conf->vector_tmo_ns = ret;
break;
+ case CMD_LINE_OPT_VECTOR_POOL_SZ_NUM:
+ ret = parse_decimal(optarg);
+
+ em_conf = eh_conf->mode_params;
+ em_conf->vector_pool_sz = ret;
+ break;
+ case CMD_LINE_OPT_PER_PORT_POOL_NUM:
+ per_port_pool = true;
+ break;
default:
print_usage(prgname);
return -1;
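
A possible invocation exercising both new options (illustrative only; the EAL
core list, port masks and SA configuration file are placeholders):

    ./dpdk-ipsec-secgw -l 1-2 -n 4 -- -p 0x3 -u 0x1 -f /path/to/ep0.cfg \
            --transfer-mode event --event-vector \
            --vector-pool-sz 2048 --per-port-pool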
ret = rte_hash_add_key_data(map, &key, (void *)i);
if (ret < 0) {
- printf("Faled to insert cdev mapping for (lcore %u, "
+ printf("Failed to insert cdev mapping for (lcore %u, "
"cdev %u, qp %u), errno %d\n",
key.lcore_id, ipsec_ctx->tbl[i].id,
ipsec_ctx->tbl[i].qp, ret);
str = "Inbound";
}
- /* Required cryptodevs with operation chainning */
+ /* Required cryptodevs with operation chaining */
if (!(dev_info->feature_flags &
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
return ret;
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- /* limit allowed HW offloafs, as user requested */
+ /* limit allowed HW offloads, as user requested */
dev_info.rx_offload_capa &= dev_rx_offload;
dev_info.tx_offload_capa &= dev_tx_offload;
local_port_conf.rxmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required RX offloads: 0x%" PRIx64
- ", avaialbe RX offloads: 0x%" PRIx64 "\n",
+ ", available RX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
dev_info.rx_offload_capa);
local_port_conf.txmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required TX offloads: 0x%" PRIx64
- ", avaialbe TX offloads: 0x%" PRIx64 "\n",
+ ", available TX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.txmode.offloads,
dev_info.tx_offload_capa);
if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
- printf("port %u configurng rx_offloads=0x%" PRIx64
+ printf("port %u configuring rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
local_port_conf.txmode.offloads);
/* init RX queues */
for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
struct rte_eth_rxconf rxq_conf;
+ struct rte_mempool *pool;
if (portid != qconf->rx_queue_list[queue].port_id)
continue;
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = local_port_conf.rxmode.offloads;
+
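+ /* Pick this port's own pool when per-port pools are enabled;
+  * otherwise all ports share the single pool kept at index 0. */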
+ if (per_port_pool)
+ pool = socket_ctx[socket_id].mbuf_pool[portid];
+ else
+ pool = socket_ctx[socket_id].mbuf_pool[0];
+
ret = rte_eth_rx_queue_setup(portid, rx_queueid,
- nb_rxd, socket_id, &rxq_conf,
- socket_ctx[socket_id].mbuf_pool);
+ nb_rxd, socket_id, &rxq_conf, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
}
static void
-pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
+pool_init(struct socket_ctx *ctx, int32_t socket_id, int portid,
+ uint32_t nb_mbuf)
{
char s[64];
int32_t ms;
- snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
- ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
- MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
- frame_buf_size, socket_id);
+
+ /* skip if an earlier pool_init() call already created this pool */
+ if (ctx->mbuf_pool[portid])
+ return;
+
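+ /* Pool names encode socket and port; mempool names must be
+  * unique system-wide for rte_pktmbuf_pool_create() to succeed. */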
+ snprintf(s, sizeof(s), "mbuf_pool_%d_%d", socket_id, portid);
+ ctx->mbuf_pool[portid] = rte_pktmbuf_pool_create(s, nb_mbuf,
+ MEMPOOL_CACHE_SIZE,
+ ipsec_metadata_size(),
+ frame_buf_size,
+ socket_id);
/*
* if multi-segment support is enabled, then create a pool
- * for indirect mbufs.
+ * for indirect mbufs. This pool is per-socket, shared by all ports.
*/
ms = multi_seg_required();
- if (ms != 0) {
+ if (ms != 0 && !ctx->mbuf_pool_indir) {
snprintf(s, sizeof(s), "mbuf_pool_indir_%d", socket_id);
ctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,
MEMPOOL_CACHE_SIZE, 0, 0, socket_id);
}
- if (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))
+ if (ctx->mbuf_pool[portid] == NULL ||
+ (ms != 0 && ctx->mbuf_pool_indir == NULL))
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
socket_id);
else
rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
struct rte_ipv6_hdr *iph;
- struct ipv6_extension_fragment *fh;
+ struct rte_ipv6_fragment_ext *fh;
iph = (struct rte_ipv6_hdr *)(eth + 1);
fh = rte_ipv6_frag_get_ipv6_fragment_header(iph);
struct rte_tel_data *spd4_data = rte_tel_data_alloc();
struct rte_tel_data *spd6_data = rte_tel_data_alloc();
struct rte_tel_data *sad_data = rte_tel_data_alloc();
-
unsigned int coreid = UINT32_MAX;
+ int rc = 0;
/* verify allocated telemetry data structures */
- if (!spd4_data || !spd6_data || !sad_data)
- return -ENOMEM;
+ if (!spd4_data || !spd6_data || !sad_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
/* initialize telemetry data structs as dicts */
rte_tel_data_start_dict(data);
if (params) {
coreid = (uint32_t)atoi(params);
- if (rte_lcore_is_enabled(coreid) == 0)
- return -EINVAL;
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
}
update_statistics(&total_stats, coreid);
rte_tel_data_add_dict_container(data, "sad", sad_data, 0);
- return 0;
+exit:
+ if (rc) {
+ rte_tel_data_free(spd4_data);
+ rte_tel_data_free(spd6_data);
+ rte_tel_data_free(sad_data);
+ }
+ return rc;
}
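
The same allocate/attach/exit shape recurs in the two handlers below. In
isolation the pattern looks like this (a sketch, not code from the patch;
upstream rte_tel_data_free() is a thin wrapper around free(), so passing a
NULL pointer from a failed allocation is safe):

    static int
    handle_cmd(const char *cmd __rte_unused, const char *params __rte_unused,
            struct rte_tel_data *data)
    {
        struct rte_tel_data *child = rte_tel_data_alloc();
        int rc = 0;

        if (child == NULL) {
            rc = -ENOMEM;
            goto exit;
        }
        rte_tel_data_start_dict(data);
        /* ...populate child... */
        /* keep == 0: telemetry frees child after the response is sent */
        rte_tel_data_add_dict_container(data, "child", child, 0);
    exit:
        if (rc)
            rte_tel_data_free(child);
        return rc;
    }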
static int
struct rte_tel_data *spd4_data = rte_tel_data_alloc();
struct rte_tel_data *spd6_data = rte_tel_data_alloc();
struct rte_tel_data *sad_data = rte_tel_data_alloc();
-
unsigned int coreid = UINT32_MAX;
+ int rc = 0;
/* verify allocated telemetry data structures */
- if (!spd4_data || !spd6_data || !sad_data)
- return -ENOMEM;
+ if (!spd4_data || !spd6_data || !sad_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
/* initialize telemetry data structs as dicts */
rte_tel_data_start_dict(data);
if (params) {
coreid = (uint32_t)atoi(params);
- if (rte_lcore_is_enabled(coreid) == 0)
- return -EINVAL;
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
}
update_statistics(&total_stats, coreid);
rte_tel_data_add_dict_container(data, "spd6", spd6_data, 0);
- return 0;
+exit:
+ if (rc) {
+ rte_tel_data_free(spd4_data);
+ rte_tel_data_free(spd6_data);
+ rte_tel_data_free(sad_data);
+ }
+ return rc;
}
static int
struct rte_tel_data *lpm4_data = rte_tel_data_alloc();
struct rte_tel_data *lpm6_data = rte_tel_data_alloc();
-
unsigned int coreid = UINT32_MAX;
+ int rc = 0;
+
+ /* verify allocated telemetry data structures */
+ if (!lpm4_data || !lpm6_data) {
+ rc = -ENOMEM;
+ goto exit;
+ }
/* initialize telemetry data structs as dicts */
rte_tel_data_start_dict(data);
if (params) {
coreid = (uint32_t)atoi(params);
- if (rte_lcore_is_enabled(coreid) == 0)
- return -EINVAL;
+ if (rte_lcore_is_enabled(coreid) == 0) {
+ rc = -EINVAL;
+ goto exit;
+ }
}
update_statistics(&total_stats, coreid);
rte_tel_data_add_dict_container(data, "IPv6 LPM", lpm6_data, 0);
- return 0;
+exit:
+ if (rc) {
+ rte_tel_data_free(lpm4_data);
+ rte_tel_data_free(lpm6_data);
+ }
+ return rc;
}
static void
rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
unprotected_port_mask);
+ if (unprotected_port_mask && !nb_sa_in)
+ rte_exit(EXIT_FAILURE, "Cannot use unprotected portmask without configured SA inbound\n");
+
if (check_poll_mode_params(eh_conf) < 0)
rte_exit(EXIT_FAILURE, "check_poll_mode_params failed\n");
else
socket_id = 0;
- /* mbuf_pool is initialised by the pool_init() function*/
- if (socket_ctx[socket_id].mbuf_pool)
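+ /* One mbuf pool per enabled port when --per-port-pool is given;
+  * otherwise a single shared pool lives at index 0. */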
+ if (per_port_pool) {
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ pool_init(&socket_ctx[socket_id], socket_id,
+ portid, nb_bufs_in_pool);
+ }
+ } else {
+ pool_init(&socket_ctx[socket_id], socket_id, 0,
+ nb_bufs_in_pool);
+ }
+
+ if (socket_ctx[socket_id].session_pool)
continue;
- pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool);
session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
session_priv_pool_init(&socket_ctx[socket_id], socket_id,
sess_sz);
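
In the hunk below, creation of the default inline-crypto flow moves from
before rte_eth_dev_start() to after it, presumably because some PMDs do not
accept (or do not retain) rte_flow rules installed on a stopped port.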
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
- /* Create flow before starting the device */
- create_default_ipsec_flow(portid, req_rx_offloads[portid]);
-
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
"err=%d, port=%d\n", ret, portid);
+
+ /* Create flow after starting the device */
+ create_default_ipsec_flow(portid, req_rx_offloads[portid]);
+
/*
* If enabled, put device in promiscuous mode.
* This allows IO forwarding mode to forward packets
/* Replicate each context per socket */
for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
socket_id = rte_socket_id_by_idx(i);
- if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
+ if ((socket_ctx[socket_id].session_pool != NULL) &&
(socket_ctx[socket_id].sa_in == NULL) &&
(socket_ctx[socket_id].sa_out == NULL)) {
sa_init(&socket_ctx[socket_id], socket_id);