#include <rte_log.h>
#include <rte_eal.h>
#include <rte_launch.h>
-#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 16384
#define CDEV_MP_CACHE_SZ 64
+#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
#define MAX_QUEUE_PAIRS 1
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* application wide librte_ipsec/SA parameters */
struct app_sa_prm app_sa_prm = {
.enable = 0,
- .cache_sz = SA_CACHE_SZ
+ .cache_sz = SA_CACHE_SZ,
+ .udp_encap = 0
};
static const char *cfgfile;
static struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+ .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
- ETH_RSS_TCP | ETH_RSS_SCTP,
+ .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP |
+ RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP,
},
},
.txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
+ .mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
const struct rte_ether_hdr *eth;
const struct rte_ipv4_hdr *iph4;
const struct rte_ipv6_hdr *iph6;
+ const struct rte_udp_hdr *udp;
+ uint16_t ip4_hdr_len;
+ uint16_t nat_port;
eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
RTE_ETHER_HDR_LEN);
adjust_ipv4_pktlen(pkt, iph4, 0);
- if (iph4->next_proto_id == IPPROTO_ESP)
+ switch (iph4->next_proto_id) {
+ case IPPROTO_ESP:
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- else {
+ break;
+ case IPPROTO_UDP:
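+ /* UDP may carry ESP encapsulated per RFC 3948 (NAT traversal);
+  * if so, steer the packet to the IPsec path below.
+  */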
+ if (app_sa_prm.udp_encap == 1) {
+ ip4_hdr_len = ((iph4->version_ihl &
+ RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, ip4_hdr_len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port) {
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
return;
}
- if (next_proto == IPPROTO_ESP)
+ switch (next_proto) {
+ case IPPROTO_ESP:
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- else {
+ break;
+ case IPPROTO_UDP:
+ if (app_sa_prm.udp_encap == 1) {
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+ if (udp->src_port == nat_port ||
+ udp->dst_port == nat_port) {
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
t->ip6.data[t->ip6.num] = &iph6->proto;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
* with the security session.
*/
- if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+ if ((pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) &&
+ rte_security_dynfield_is_registered()) {
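+ /* The per-packet security metadata lives in the rte_security
+  * dynamic mbuf field, so it is only read once that field has
+  * been registered.
+  */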
struct ipsec_sa *sa;
struct ipsec_mbuf_metadata *priv;
struct rte_security_ctx *ctx = (struct rte_security_ctx *)
/* Retrieve the userdata registered. Here, the userdata
* registered is the SA pointer.
*/
-
- sa = (struct ipsec_sa *)
- rte_security_get_userdata(ctx, pkt->udata64);
-
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
+ *rte_security_dynfield(pkt));
if (sa == NULL) {
/* userdata could not be retrieved */
return;
ip->ip_sum = 0;
/* calculate IPv4 cksum in SW */
- if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
+ if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
}
- memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
sizeof(struct rte_ether_addr));
- memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
sizeof(struct rte_ether_addr));
}
}
/* Only check SPI match for processed IPSec packets */
- if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
+ if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
free_pkts(&m, 1);
continue;
}
struct rte_mbuf *m;
uint32_t nb_pkts_in, i, idx;
- /* Drop any IPv4 traffic from unprotected ports */
- free_pkts(traffic->ip4.pkts, traffic->ip4.num);
-
- traffic->ip4.num = 0;
-
- /* Drop any IPv6 traffic from unprotected ports */
- free_pkts(traffic->ip6.pkts, traffic->ip6.num);
-
- traffic->ip6.num = 0;
-
if (app_sa_prm.enable == 0) {
nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
} else {
*/
for (i = 0; i < nb_pkts; i++) {
- if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+ if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
/* Security offload not enabled. So an LPM lookup is
* required to get the hop
*/
lpm_pkts = 0;
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
+ if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
/* Read hop from the SA */
pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
} else {
" \"parallel\" : Parallel\n"
" --" CMD_LINE_OPT_RX_OFFLOAD
": bitmask of the RX HW offload capabilities to enable/use\n"
- " (DEV_RX_OFFLOAD_*)\n"
+ " (RTE_ETH_RX_OFFLOAD_*)\n"
" --" CMD_LINE_OPT_TX_OFFLOAD
": bitmask of the TX HW offload capabilities to enable/use\n"
- " (DEV_TX_OFFLOAD_*)\n"
+ " (RTE_ETH_TX_OFFLOAD_*)\n"
" --" CMD_LINE_OPT_REASSEMBLE " NUM"
": max number of entries in reassemble(fragment) table\n"
" (zero (default value) disables reassembly)\n"
char *end = NULL;
unsigned long pm;
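+ /* strtoul() reports an out-of-range value only through errno,
+  * so clear it before the conversion.
+  */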
+ errno = 0;
+
/* parse hexadecimal string */
pm = strtoul(portmask, &end, 16);
if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
+ if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
- uint32_t frame_size;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
uint16_t nb_tx_queue, nb_rx_queue;
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
- frame_size = MTU_TO_FRAMELEN(mtu_size);
- if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- local_port_conf.rxmode.max_rx_pkt_len = frame_size;
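+ /* The PMD derives the maximum Rx frame length from the configured MTU. */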
+ local_port_conf.rxmode.mtu = mtu_size;
if (multi_seg_required()) {
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ local_port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
+ local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
local_port_conf.rxmode.offloads |= req_rx_offloads;
portid, local_port_conf.txmode.offloads,
dev_info.tx_offload_capa);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
- local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
printf("port %u configurng rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
qconf->tx_queue_id[portid] = tx_queueid;
/* Pre-populate pkt offloads based on capabilities */
- qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
- qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
- if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
- qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;
+ qconf->outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4;
+ qconf->outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6;
+ if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ qconf->outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM;
tx_queueid++;
snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"sess_mp_%u", socket_id);
- /*
- * Doubled due to rte_security_session_create() uses one mempool for
- * session and for session private data.
- */
nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
- rte_lcore_count()) * 2;
+ rte_lcore_count());
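+ /* Mempool creation fails when the cache flush threshold
+  * (cache size * 1.5, see rte_mempool.c) exceeds the object count,
+  * so keep the pool at least that large.
+  */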
+ nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
+ CDEV_MP_CACHE_MULTIPLIER);
sess_mp = rte_cryptodev_sym_session_pool_create(
mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
socket_id);
snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"sess_mp_priv_%u", socket_id);
- /*
- * Doubled due to rte_security_session_create() uses one mempool for
- * session and for session private data.
- */
nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
- rte_lcore_count()) * 2;
+ rte_lcore_count());
+ nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
+ CDEV_MP_CACHE_MULTIPLIER);
sess_mp = rte_mempool_create(mp_name,
nb_sess,
sess_sz,
struct rte_flow *flow;
int ret;
- if (!(rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY))
return;
/* Add the default rte_flow to enable SECURITY for all ESP packets */
#endif /* STATS_INTERVAL */
/* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
rte_eth_dev_close(portid);
printf(" Done\n");
}
+
+ /* clean up the EAL */
+ rte_eal_cleanup();
printf("Bye...\n");
return 0;