X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fipsec-secgw.c;h=6bbb920f97f03b8de44f3d04f80a7860c9b1bddc;hb=91b7b40806b6ea9910d6686fddcf7c970771fa72;hp=6201d850ae84d0113add16d0f472faed3b5b6191;hpb=ec17993a145ac18614b3a6e481574e0e47cd4b11;p=dpdk.git

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 6201d850ae..6bbb920f97 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
  */
 
 #include
@@ -57,7 +28,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -90,6 +60,7 @@
 
 #define OPTION_CONFIG		"config"
 #define OPTION_SINGLE_SA	"single-sa"
+#define OPTION_CRYPTODEV_MASK	"cryptodev_mask"
 
 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@@ -107,8 +78,8 @@
 /*
  * Configurable number of RX/TX ring descriptors
  */
-#define IPSEC_SECGW_RX_DESC_DEFAULT 128
-#define IPSEC_SECGW_TX_DESC_DEFAULT 512
+#define IPSEC_SECGW_RX_DESC_DEFAULT 1024
+#define IPSEC_SECGW_TX_DESC_DEFAULT 1024
 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
@@ -155,6 +126,7 @@ struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
 
 /* mask of enabled ports */
 static uint32_t enabled_port_mask;
+static uint64_t enabled_cryptodev_mask = UINT64_MAX;
 static uint32_t unprotected_port_mask;
 static int32_t promiscuous_on = 1;
 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
@@ -218,6 +190,8 @@ static struct rte_eth_conf port_conf = {
 	},
 	.txmode = {
 		.mq_mode = ETH_MQ_TX_NONE,
+		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			     DEV_TX_OFFLOAD_MULTI_SEGS),
 	},
 };
 
@@ -266,6 +240,40 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 		RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
 		rte_pktmbuf_free(pkt);
 	}
+
+	/* Check if the packet has been processed inline. For inline protocol
+	 * processed packets, the metadata in the mbuf can be used to identify
+	 * the security processing done on the packet. The metadata will be
+	 * used to retrieve the application registered userdata associated
+	 * with the security session.
+	 */
+
+	if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+		struct ipsec_sa *sa;
+		struct ipsec_mbuf_metadata *priv;
+		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+				rte_eth_dev_get_sec_ctx(
+				pkt->port);
+
+		/* Retrieve the userdata registered. Here, the userdata
+		 * registered is the SA pointer.
+		 */
+
+		sa = (struct ipsec_sa *)
+				rte_security_get_userdata(ctx, pkt->udata64);
+
+		if (sa == NULL) {
+			/* userdata could not be retrieved */
+			return;
+		}
+
+		/* Save SA as priv member in mbuf. This will be used in the
+		 * IPsec selector(SP-SA) check.
+		 */
+
+		priv = get_priv(pkt);
+		priv->sa = sa;
+	}
 }
 
 static inline void
@@ -402,13 +410,20 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
 			ip->pkts[j++] = m;
 			continue;
 		}
-		if (res & DISCARD || i < lim) {
+		if (res & DISCARD) {
 			rte_pktmbuf_free(m);
 			continue;
 		}
+
+		/* Only check SPI match for processed IPSec packets */
+		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
+			rte_pktmbuf_free(m);
+			continue;
+		}
+
 		sa_idx = ip->res[i] & PROTECT_MASK;
-		if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
+		if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
+				!inbound_sa_check(sa, m, sa_idx)) {
 			rte_pktmbuf_free(m);
 			continue;
 		}
@@ -473,9 +488,9 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
 	for (i = 0; i < ip->num; i++) {
 		m = ip->pkts[i];
 		sa_idx = ip->res[i] & PROTECT_MASK;
-		if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
+		if (ip->res[i] & DISCARD)
 			rte_pktmbuf_free(m);
-		else if (sa_idx != 0) {
+		else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
 			ipsec->res[ipsec->num] = sa_idx;
 			ipsec->pkts[ipsec->num++] = m;
 		} else /* BYPASS */
@@ -586,31 +601,81 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
 	traffic->ip6.num = nb_pkts_out;
 }
 
+static inline int32_t
+get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
+{
+	struct ipsec_mbuf_metadata *priv;
+	struct ipsec_sa *sa;
+
+	priv = get_priv(pkt);
+
+	sa = priv->sa;
+	if (unlikely(sa == NULL)) {
+		RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
+		goto fail;
+	}
+
+	if (is_ipv6)
+		return sa->portid;
+
+	/* else */
+	return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);
+
+fail:
+	if (is_ipv6)
+		return -1;
+
+	/* else */
+	return 0;
+}
+
 static inline void
 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
 {
 	uint32_t hop[MAX_PKT_BURST * 2];
 	uint32_t dst_ip[MAX_PKT_BURST * 2];
+	int32_t pkt_hop = 0;
 	uint16_t i, offset;
+	uint16_t lpm_pkts = 0;
 
 	if (nb_pkts == 0)
 		return;
 
+	/* Need to do an LPM lookup for non-inline packets. Inline packets will
+	 * have port ID in the SA
+	 */
+
 	for (i = 0; i < nb_pkts; i++) {
-		offset = offsetof(struct ip, ip_dst);
-		dst_ip[i] = *rte_pktmbuf_mtod_offset(pkts[i],
-				uint32_t *, offset);
-		dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]);
+		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+			/* Security offload not enabled. So an LPM lookup is
+			 * required to get the hop
+			 */
+			offset = offsetof(struct ip, ip_dst);
+			dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
+					uint32_t *, offset);
+			dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
+			lpm_pkts++;
+		}
 	}
 
-	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_pkts);
+	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);
+
+	lpm_pkts = 0;
 
 	for (i = 0; i < nb_pkts; i++) {
-		if ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) {
+		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+			/* Read hop from the SA */
+			pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
+		} else {
+			/* Need to use hop returned by lookup */
+			pkt_hop = hop[lpm_pkts++];
+		}
+
+		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
 			rte_pktmbuf_free(pkts[i]);
 			continue;
 		}
-		send_single_packet(pkts[i], hop[i] & 0xff);
+		send_single_packet(pkts[i], pkt_hop & 0xff);
 	}
 }
 
@@ -620,26 +685,49 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
 	int32_t hop[MAX_PKT_BURST * 2];
 	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
 	uint8_t *ip6_dst;
+	int32_t pkt_hop = 0;
 	uint16_t i, offset;
+	uint16_t lpm_pkts = 0;
 
 	if (nb_pkts == 0)
 		return;
 
+	/* Need to do an LPM lookup for non-inline packets. Inline packets will
+	 * have port ID in the SA
+	 */
+
 	for (i = 0; i < nb_pkts; i++) {
-		offset = offsetof(struct ip6_hdr, ip6_dst);
-		ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset);
-		memcpy(&dst_ip[i][0], ip6_dst, 16);
+		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+			/* Security offload not enabled. So an LPM lookup is
+			 * required to get the hop
+			 */
+			offset = offsetof(struct ip6_hdr, ip6_dst);
+			ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
+					offset);
+			memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
+			lpm_pkts++;
+		}
 	}
 
-	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip,
-			hop, nb_pkts);
+	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
+			lpm_pkts);
+
+	lpm_pkts = 0;
 
 	for (i = 0; i < nb_pkts; i++) {
-		if (hop[i] == -1) {
+		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+			/* Read hop from the SA */
+			pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
+		} else {
+			/* Need to use hop returned by lookup */
+			pkt_hop = hop[lpm_pkts++];
+		}
+
+		if (pkt_hop == -1) {
 			rte_pktmbuf_free(pkts[i]);
 			continue;
 		}
-		send_single_packet(pkts[i], hop[i] & 0xff);
+		send_single_packet(pkts[i], pkt_hop & 0xff);
 	}
 }
 
@@ -760,7 +848,7 @@ static int32_t
 check_params(void)
 {
 	uint8_t lcore;
-	uint16_t portid, nb_ports;
+	uint16_t portid;
 	uint16_t i;
 	int32_t socket_id;
 
@@ -769,8 +857,6 @@ check_params(void)
 		return -1;
 	}
 
-	nb_ports = rte_eth_dev_count();
-
 	for (i = 0; i < nb_lcore_params; ++i) {
 		lcore = lcore_params[i].lcore_id;
 		if (!rte_lcore_is_enabled(lcore)) {
@@ -789,7 +875,7 @@ check_params(void)
 			printf("port %u is not enabled in port mask\n", portid);
 			return -1;
 		}
-		if (portid >= nb_ports) {
+		if (!rte_eth_dev_is_valid_port(portid)) {
 			printf("port %u is not present on the board\n", portid);
 			return -1;
 		}
@@ -849,6 +935,8 @@ print_usage(const char *prgname)
 		"rx queues configuration\n"
 		"  --single-sa SAIDX: use single SA index for outbound, "
 		"bypassing the SP\n"
+		"  --cryptodev_mask MASK: hexadecimal bitmask of the "
+		"crypto devices to configure\n"
 		"  -f CONFIG_FILE: Configuration file path\n",
 		prgname);
 }
@@ -963,6 +1051,14 @@ parse_args_long_options(struct option *lgopts, int32_t option_index)
 		}
 	}
 
+	if (__STRNCMP(optname, OPTION_CRYPTODEV_MASK)) {
+		ret = parse_portmask(optarg);
+		if (ret != -1) {
+			enabled_cryptodev_mask = ret;
+			ret = 0;
+		}
+	}
+
 	return ret;
 }
 
 #undef __STRNCMP
 
@@ -977,6 +1073,7 @@ parse_args(int32_t argc, char **argv)
 	static struct option lgopts[] = {
 		{OPTION_CONFIG, 1, 0, 0},
 		{OPTION_SINGLE_SA, 1, 0, 0},
+		{OPTION_CRYPTODEV_MASK, 1, 0, 0},
 		{NULL, 0, 0, 0}
 	};
 	int32_t f_present = 0;
@@ -1074,7 +1171,7 @@ print_ethaddr(const char *name, const struct ether_addr *eth_addr)
 
 /* Check the link status of all ports in up to 9s, and print them finally */
 static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
 {
 #define CHECK_INTERVAL 100 /* 100ms */
 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -1086,7 +1183,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
 	fflush(stdout);
 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
 		all_ports_up = 1;
-		for (portid = 0; portid < port_num; portid++) {
+		RTE_ETH_FOREACH_DEV(portid) {
 			if ((port_mask & (1 << portid)) == 0)
 				continue;
 			memset(&link, 0, sizeof(link));
@@ -1239,13 +1336,23 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 	return ret;
 }
 
+/* Check if the device is enabled by cryptodev_mask */
+static int
+check_cryptodev_mask(uint8_t cdev_id)
+{
+	if (enabled_cryptodev_mask & (1 << cdev_id))
+		return 0;
+
+	return -1;
+}
+
 static int32_t
 cryptodevs_init(void)
 {
 	struct rte_cryptodev_config dev_conf;
 	struct rte_cryptodev_qp_conf qp_conf;
 	uint16_t idx, max_nb_qps, qp, i;
-	int16_t cdev_id;
+	int16_t cdev_id, port_id;
 	struct rte_hash_parameters params = { 0 };
 
 	params.entries = CDEV_MAP_ENTRIES;
@@ -1274,12 +1381,28 @@ cryptodevs_init(void)
 		if (sess_sz > max_sess_sz)
 			max_sess_sz = sess_sz;
 	}
+	RTE_ETH_FOREACH_DEV(port_id) {
+		void *sec_ctx;
+
+		if ((enabled_port_mask & (1 << port_id)) == 0)
+			continue;
+
+		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+		if (sec_ctx == NULL)
+			continue;
+
+		sess_sz = rte_security_session_get_size(sec_ctx);
+		if (sess_sz > max_sess_sz)
+			max_sess_sz = sess_sz;
+	}
 
 	idx = 0;
-	/* Start from last cdev id to give HW priority */
-	for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
+	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
 		struct rte_cryptodev_info cdev_info;
 
+		if (check_cryptodev_mask((uint8_t)cdev_id))
+			continue;
+
 		rte_cryptodev_info_get(cdev_id, &cdev_info);
 
 		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
@@ -1344,6 +1467,38 @@ cryptodevs_init(void)
 				cdev_id);
 	}
 
+	/* create session pools for eth devices that implement security */
+	RTE_ETH_FOREACH_DEV(port_id) {
+		if ((enabled_port_mask & (1 << port_id)) &&
+				rte_eth_dev_get_sec_ctx(port_id)) {
+			int socket_id = rte_eth_dev_socket_id(port_id);
+
+			if (!socket_ctx[socket_id].session_pool) {
+				char mp_name[RTE_MEMPOOL_NAMESIZE];
+				struct rte_mempool *sess_mp;
+
+				snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+						"sess_mp_%u", socket_id);
+				sess_mp = rte_mempool_create(mp_name,
+						CDEV_MP_NB_OBJS,
+						max_sess_sz,
+						CDEV_MP_CACHE_SZ,
+						0, NULL, NULL, NULL,
+						NULL, socket_id,
+						0);
+				if (sess_mp == NULL)
+					rte_exit(EXIT_FAILURE,
+						"Cannot create session pool "
+						"on socket %d\n", socket_id);
+				else
+					printf("Allocated session pool "
+						"on socket %d\n", socket_id);
+				socket_ctx[socket_id].session_pool = sess_mp;
+			}
+		}
+	}
+
 	printf("\n");
 
 	return 0;
@@ -1359,6 +1514,7 @@ port_init(uint16_t portid)
 	int32_t ret, socket_id;
 	struct lcore_conf *qconf;
 	struct ether_addr ethaddr;
+	struct rte_eth_conf local_port_conf = port_conf;
 
 	rte_eth_dev_info_get(portid, &dev_info);
 
@@ -1386,17 +1542,19 @@ port_init(uint16_t portid)
 			nb_rx_queue, nb_tx_queue);
 
 	if (frame_size) {
-		port_conf.rxmode.max_rx_pkt_len = frame_size;
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		local_port_conf.rxmode.max_rx_pkt_len = frame_size;
+		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 	}
 
 	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
-		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
+		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
 	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
-		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
-
+		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
+	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		local_port_conf.txmode.offloads |=
+			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
-			&port_conf);
+			&local_port_conf);
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Cannot configure device: "
 				"err=%d, port=%d\n", ret, portid);
@@ -1421,7 +1579,8 @@ port_init(uint16_t portid)
 		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
 
 		txconf = &dev_info.default_txconf;
-		txconf->txq_flags = 0;
+		txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
+		txconf->offloads = local_port_conf.txmode.offloads;
 
 		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
 				socket_id, txconf);
@@ -1435,6 +1594,8 @@ port_init(uint16_t portid)
 
 		/* init RX queues */
 		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
+			struct rte_eth_rxconf rxq_conf;
+
 			if (portid != qconf->rx_queue_list[queue].port_id)
 				continue;
 
@@ -1443,8 +1604,10 @@ port_init(uint16_t portid)
 			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
 					socket_id);
 
+			rxq_conf = dev_info.default_rxconf;
+			rxq_conf.offloads = local_port_conf.rxmode.offloads;
 			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
-					nb_rxd, socket_id, NULL,
+					nb_rxd, socket_id, &rxq_conf,
 					socket_ctx[socket_id].mbuf_pool);
 			if (ret < 0)
 				rte_exit(EXIT_FAILURE,
@@ -1481,7 +1644,7 @@ main(int32_t argc, char **argv)
 	int32_t ret;
 	uint32_t lcore_id;
 	uint8_t socket_id;
-	uint16_t portid, nb_ports;
+	uint16_t portid;
 
 	/* init EAL */
 	ret = rte_eal_init(argc, argv);
@@ -1500,8 +1663,6 @@ main(int32_t argc, char **argv)
 		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
 				unprotected_port_mask);
 
-	nb_ports = rte_eth_dev_count();
-
 	if (check_params() < 0)
 		rte_exit(EXIT_FAILURE, "check_params failed\n");
 
@@ -1535,7 +1696,7 @@ main(int32_t argc, char **argv)
 		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
 	}
 
-	for (portid = 0; portid < nb_ports; portid++) {
+	RTE_ETH_FOREACH_DEV(portid) {
 		if ((enabled_port_mask & (1 << portid)) == 0)
 			continue;
 
@@ -1545,7 +1706,7 @@ main(int32_t argc, char **argv)
 	cryptodevs_init();
 
 	/* start ports */
-	for (portid = 0; portid < nb_ports; portid++) {
+	RTE_ETH_FOREACH_DEV(portid) {
 		if ((enabled_port_mask & (1 << portid)) == 0)
 			continue;
 
@@ -1564,7 +1725,7 @@ main(int32_t argc, char **argv)
 		rte_eth_promiscuous_enable(portid);
 	}
 
-	check_all_ports_link_status(nb_ports, enabled_port_mask);
+	check_all_ports_link_status(enabled_port_mask);
 
 	/* launch per-lcore init on every lcore */
 	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
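
Editor's note on the --cryptodev_mask option added by this patch: cryptodevs_init() now skips any crypto device whose bit is clear in enabled_cryptodev_mask, a 64-bit mask parsed by parse_portmask(). The fragment below is a minimal standalone sketch of that filtering pattern, not part of the patch: it compiles without DPDK headers, the main() driver is purely illustrative, and it shifts 1ULL where the patch shifts a plain int (a plain int shift is only well defined for device ids below 31, while the mask itself is 64 bits wide).

#include <stdint.h>
#include <stdio.h>

/* Default matches the patch: all crypto devices enabled. */
static uint64_t enabled_cryptodev_mask = UINT64_MAX;

/* Return 0 when cdev_id is selected by the mask, -1 otherwise. */
static int
check_cryptodev_mask(uint8_t cdev_id)
{
	if (enabled_cryptodev_mask & (1ULL << cdev_id))
		return 0;

	return -1;
}

int
main(void)
{
	uint8_t id;

	/* e.g. --cryptodev_mask 0x5 keeps devices 0 and 2 */
	enabled_cryptodev_mask = 0x5;

	for (id = 0; id < 4; id++)
		printf("cryptodev %u: %s\n", (unsigned int)id,
			check_cryptodev_mask(id) == 0 ? "enabled" : "skipped");

	return 0;
}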