#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 16384
#define CDEV_MP_CACHE_SZ 64
+#define CDEV_MP_CACHE_MULTIPLIER 1.5 /* CACHE_FLUSHTHRESH_MULTIPLIER in rte_mempool.c */
#define MAX_QUEUE_PAIRS 1
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* application-wide librte_ipsec/SA parameters */
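+/*
+ * udp_encap stays 0 unless UDP-encapsulated ESP (RFC 3948 NAT-T) is
+ * enabled at configuration time; it gates the port-4500 classification
+ * in the fast path below.
+ */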
struct app_sa_prm app_sa_prm = {
.enable = 0,
- .cache_sz = SA_CACHE_SZ
+ .cache_sz = SA_CACHE_SZ,
+ .udp_encap = 0
};
static const char *cfgfile;
const struct rte_ether_hdr *eth;
const struct rte_ipv4_hdr *iph4;
const struct rte_ipv6_hdr *iph6;
+ const struct rte_udp_hdr *udp;
+ uint16_t ip4_hdr_len;
+ uint16_t nat_port;
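+	/* nat_port caches IPSEC_NAT_T_PORT (UDP port 4500, RFC 3948) in
+	 * network byte order for the ESP-in-UDP checks below.
+	 */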
eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *);
if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
	RTE_ETHER_HDR_LEN);
adjust_ipv4_pktlen(pkt, iph4, 0);
- if (iph4->next_proto_id == IPPROTO_ESP)
+ switch (iph4->next_proto_id) {
+ case IPPROTO_ESP:
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- else {
+ break;
+ case IPPROTO_UDP:
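+		/*
+		 * Possible NAT-T traffic: ESP tunneled in UDP (RFC 3948).
+		 * If either UDP port matches, classify the packet as IPsec
+		 * and mark the mbuf so later stages know a UDP header
+		 * precedes the ESP header.
+		 */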
+ if (app_sa_prm.udp_encap == 1) {
+ ip4_hdr_len = ((iph4->version_ihl &
+ RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER);
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, ip4_hdr_len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+			if (udp->src_port == nat_port ||
+			    udp->dst_port == nat_port) {
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
return;
}
- if (next_proto == IPPROTO_ESP)
+ switch (next_proto) {
+ case IPPROTO_ESP:
t->ipsec.pkts[(t->ipsec.num)++] = pkt;
- else {
+ break;
+ case IPPROTO_UDP:
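+		/* Same ESP-in-UDP detection as in the IPv4 branch above */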
+ if (app_sa_prm.udp_encap == 1) {
+ udp = rte_pktmbuf_mtod_offset(pkt,
+ struct rte_udp_hdr *, l3len);
+ nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT);
+			if (udp->src_port == nat_port ||
+			    udp->dst_port == nat_port) {
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ pkt->packet_type |=
+ MBUF_PTYPE_TUNNEL_ESP_IN_UDP;
+ break;
+ }
+ }
+ /* Fall through */
+ default:
t->ip6.data[t->ip6.num] = &iph6->proto;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
* with the security session.
*/
- if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+	if ((pkt->ol_flags & PKT_RX_SEC_OFFLOAD) &&
+			rte_security_dynfield_is_registered()) {
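+		/* Without a registered dynfield the mbuf carries no valid
+		 * session metadata, so the packet could not be matched
+		 * back to its SA.
+		 */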
struct ipsec_sa *sa;
struct ipsec_mbuf_metadata *priv;
struct rte_security_ctx *ctx = (struct rte_security_ctx *)
	rte_eth_dev_get_sec_ctx(pkt->port);
/* Retrieve the registered userdata, which in this application
 * is the SA pointer, via the rte_security dynamic mbuf field
 * (the replacement for the old udata64 slot).
 */
-
- sa = (struct ipsec_sa *)
- rte_security_get_userdata(ctx, pkt->udata64);
-
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx,
+ *rte_security_dynfield(pkt));
if (sa == NULL) {
/* userdata could not be retrieved */
return;
"sess_mp_%u", socket_id);
nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
rte_lcore_count());
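+	/*
+	 * rte_mempool_create() rejects pools with fewer than
+	 * cache_size * CACHE_FLUSHTHRESH_MULTIPLIER (1.5) elements, so
+	 * enforce that floor to keep pool creation from failing when
+	 * only a handful of sessions are configured.
+	 */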
+ nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
+ CDEV_MP_CACHE_MULTIPLIER);
sess_mp = rte_cryptodev_sym_session_pool_create(
mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
socket_id);
"sess_mp_priv_%u", socket_id);
nb_sess = (get_nb_crypto_sessions() + CDEV_MP_CACHE_SZ *
rte_lcore_count());
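+	/* Apply the same sizing floor as the session pool above */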
+ nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
+ CDEV_MP_CACHE_MULTIPLIER);
sess_mp = rte_mempool_create(mp_name,
nb_sess,
sess_sz,
#endif /* STATS_INTERVAL */
/* launch per-lcore init on every lcore */
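+	/* CALL_MAIN and RTE_LCORE_FOREACH_WORKER are the DPDK 20.11
+	 * replacements for the retired CALL_MASTER and
+	 * RTE_LCORE_FOREACH_SLAVE names; behaviour is unchanged.
+	 */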
- rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MAIN);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
return -1;
}
rte_eth_dev_close(portid);
printf(" Done\n");
}
+
+ /* clean up the EAL */
+ rte_eal_cleanup();
printf("Bye...\n");
return 0;