#include "ipsec-secgw.h"
#include "ipsec_worker.h"
+struct port_drv_mode_data {
+ struct rte_security_session *sess;
+ struct rte_security_ctx *ctx;
+};
+
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
struct rte_ether_hdr *eth;
+ uint32_t ptype = pkt->packet_type;
eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+ rte_prefetch0(eth);
+
+ if (RTE_ETH_IS_IPV4_HDR(ptype)) {
*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
offsetof(struct ip, ip_p));
- if (**nlp == IPPROTO_ESP)
+ if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
return PKT_TYPE_IPSEC_IPV4;
else
return PKT_TYPE_PLAIN_IPV4;
- } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+ } else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
offsetof(struct ip6_hdr, ip6_nxt));
- if (**nlp == IPPROTO_ESP)
+ if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
return PKT_TYPE_IPSEC_IPV6;
else
return PKT_TYPE_PLAIN_IPV6;
struct rte_ether_hdr *ethhdr;
ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
- memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
+ memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
+ memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}
static inline void
rte_event_eth_tx_adapter_txq_set(m, 0);
}
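+/*
+ * Initialize vector event attributes: mark them valid and set the port
+ * to an invalid value; the first packet routed records the real one.
+ */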
+static inline void
+ev_vector_attr_init(struct rte_event_vector *vec)
+{
+ vec->attr_valid = 1;
+ vec->port = 0xFFFF;
+ vec->queue = 0;
+}
+
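+/*
+ * Record the Tx port of the first packet; if a later packet targets a
+ * different port, clear attr_valid so per-mbuf ports are used instead.
+ */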
+static inline void
+ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
+{
+ if (vec->port == 0xFFFF) {
+ vec->port = pkt->port;
+ return;
+ }
+ if (vec->attr_valid && (vec->port != pkt->port))
+ vec->attr_valid = 0;
+}
+
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
- struct rte_security_session **sess_tbl, uint16_t size)
+ struct port_drv_mode_data *data,
+ uint16_t size)
{
struct rte_ipsec_session *pri_sess;
struct ipsec_sa *sa;
}
/* Use only first inline session found for a given port */
- if (sess_tbl[sa->portid])
+ if (data[sa->portid].sess)
continue;
- sess_tbl[sa->portid] = pri_sess->security.ses;
+ data[sa->portid].sess = pri_sess->security.ses;
+ data[sa->portid].ctx = pri_sess->security.ctx;
}
}
return 1;
}
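+/*
+ * Bulk outbound SP (ACL) lookup. DISCARD frees the packet, BYPASS keeps
+ * it in the plain-traffic list, and any other result is an SA index
+ * (res - 1), moving the packet to the IPsec list for protection.
+ */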
+static inline void
+check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
+ struct traffic_type *ipsec)
+{
+ uint32_t i, j, res;
+ struct rte_mbuf *m;
+
+ if (unlikely(sp == NULL || ip->num == 0))
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
+ DEFAULT_MAX_CATEGORIES);
+
+ j = 0;
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ res = ip->res[i];
+ if (unlikely(res == DISCARD))
+ free_pkts(&m, 1);
+ else if (res == BYPASS)
+ ip->pkts[j++] = m;
+ else {
+ ipsec->res[ipsec->num] = res - 1;
+ ipsec->pkts[ipsec->num++] = m;
+ }
+ }
+ ip->num = j;
+}
+
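+/*
+ * Bulk inbound SP (ACL) lookup. Packets matching a protect rule must
+ * carry the SA pointer set by the inline device in the security dynfield,
+ * and that SA's SPI must match the SA entry the rule points to.
+ */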
+static inline void
+check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
+ struct traffic_type *ip)
+{
+ struct ipsec_sa *sa;
+ uint32_t i, j, res;
+ struct rte_mbuf *m;
+
+ if (unlikely(sp == NULL || ip->num == 0))
+ return;
+
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
+ DEFAULT_MAX_CATEGORIES);
+
+ j = 0;
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ res = ip->res[i];
+ if (unlikely(res == DISCARD))
+ free_pkts(&m, 1);
+ else if (res == BYPASS)
+ ip->pkts[j++] = m;
+ else {
+ sa = *(struct ipsec_sa **)rte_security_dynfield(m);
+ if (sa == NULL) {
+ free_pkts(&m, 1);
+ continue;
+ }
+
+ /* SPI on the packet should match with the one in SA */
+ if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
+ free_pkts(&m, 1);
+ continue;
+ }
+
+ ip->pkts[j++] = m;
+ }
+ }
+ ip->num = j;
+}
+
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
switch (type) {
case PKT_TYPE_PLAIN_IPV4:
- if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
if (unlikely(pkt->ol_flags &
- PKT_RX_SEC_OFFLOAD_FAILED)) {
+ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
RTE_LOG(ERR, IPSEC,
"Inbound security offload failed\n");
goto drop_pkt_and_exit;
}
- sa = pkt->userdata;
+ sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
}
/* Check if we have a match */
break;
case PKT_TYPE_PLAIN_IPV6:
- if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
if (unlikely(pkt->ol_flags &
- PKT_RX_SEC_OFFLOAD_FAILED)) {
+ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
RTE_LOG(ERR, IPSEC,
"Inbound security offload failed\n");
goto drop_pkt_and_exit;
}
- sa = pkt->userdata;
+ sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
}
/* Check if we have a match */
break;
default:
- RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
+ RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
+ type);
goto drop_pkt_and_exit;
}
}
/* Validate sa_idx */
- if (sa_idx >= ctx->sa_ctx->nb_sa)
+ if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
goto drop_pkt_and_exit;
/* Else the packet has to be protected */
sess = ipsec_get_primary_session(sa);
/* Allow only inline protocol for now */
- if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+ if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
RTE_LOG(ERR, IPSEC, "SA type not supported\n");
goto drop_pkt_and_exit;
}
- if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
- pkt->userdata = sess->security.ses;
+ rte_security_set_pkt_metadata(sess->security.ctx,
+ sess->security.ses, pkt, NULL);
/* Mark the packet for Tx security offload */
- pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
/* Get the port to which this pkt needs to be submitted */
port_id = sa->portid;
send_pkt:
+ /* Provide L2 len for Outbound processing */
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
/* Update mac addresses */
update_mac_addrs(pkt, port_id);
return PKT_DROPPED;
}
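+/*
+ * Route classified plain and ESP traffic, compacting surviving packets
+ * back into the event vector. Returns the number of packets to transmit.
+ */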
+static inline int
+ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
+ struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
+{
+ struct rte_ipsec_session *sess;
+ uint32_t sa_idx, i, j = 0;
+ uint16_t port_id = 0;
+ struct rte_mbuf *pkt;
+ struct ipsec_sa *sa;
+
+ /* Route IPv4 packets */
+ for (i = 0; i < t->ip4.num; i++) {
+ pkt = t->ip4.pkts[i];
+ port_id = route4_pkt(pkt, rt->rt4_ctx);
+ if (port_id != RTE_MAX_ETHPORTS) {
+ /* Update mac addresses */
+ update_mac_addrs(pkt, port_id);
+ /* Update the event with the dest port */
+ ipsec_event_pre_forward(pkt, port_id);
+ ev_vector_attr_update(vec, pkt);
+ vec->mbufs[j++] = pkt;
+ } else
+ free_pkts(&pkt, 1);
+ }
+
+ /* Route IPv6 packets */
+ for (i = 0; i < t->ip6.num; i++) {
+ pkt = t->ip6.pkts[i];
+ port_id = route6_pkt(pkt, rt->rt6_ctx);
+ if (port_id != RTE_MAX_ETHPORTS) {
+ /* Update mac addresses */
+ update_mac_addrs(pkt, port_id);
+ /* Update the event with the dest port */
+ ipsec_event_pre_forward(pkt, port_id);
+ ev_vector_attr_update(vec, pkt);
+ vec->mbufs[j++] = pkt;
+ } else
+ free_pkts(&pkt, 1);
+ }
+
+ /* Route ESP packets */
+ for (i = 0; i < t->ipsec.num; i++) {
+ /* Validate sa_idx */
+ sa_idx = t->ipsec.res[i];
+ pkt = t->ipsec.pkts[i];
+ if (unlikely(sa_idx >= sa_ctx->nb_sa))
+ free_pkts(&pkt, 1);
+ else {
+ /* Else the packet has to be protected */
+ sa = &(sa_ctx->sa[sa_idx]);
+ /* Get IPsec session */
+ sess = ipsec_get_primary_session(sa);
+ /* Allow only inline protocol for now */
+ if (unlikely(sess->type !=
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
+ RTE_LOG(ERR, IPSEC, "SA type not supported\n");
+ free_pkts(&pkt, 1);
+ continue;
+ }
+ rte_security_set_pkt_metadata(sess->security.ctx,
+ sess->security.ses, pkt, NULL);
+
+ pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
+ port_id = sa->portid;
+ update_mac_addrs(pkt, port_id);
+ ipsec_event_pre_forward(pkt, port_id);
+ ev_vector_attr_update(vec, pkt);
+ vec->mbufs[j++] = pkt;
+ }
+ }
+
+ return j;
+}
+
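+/* Sort a packet into the IPv4/IPv6 lists of the traffic structure */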
+static inline void
+classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
+{
+ enum pkt_type type;
+ uint8_t *nlp;
+
+ /* Check the packet type */
+ type = process_ipsec_get_pkt_type(pkt, &nlp);
+
+ switch (type) {
+ case PKT_TYPE_PLAIN_IPV4:
+ t->ip4.data[t->ip4.num] = nlp;
+ t->ip4.pkts[(t->ip4.num)++] = pkt;
+ break;
+ case PKT_TYPE_PLAIN_IPV6:
+ t->ip6.data[t->ip6.num] = nlp;
+ t->ip6.pkts[(t->ip6.num)++] = pkt;
+ break;
+ default:
+ RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
+ type);
+ free_pkts(&pkt, 1);
+ break;
+ }
+}
+
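+/*
+ * Inbound vector processing: drop inline-offload failures, classify the
+ * rest, verify them against SP/SA in bulk and route the survivors.
+ */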
+static inline int
+process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
+ struct rte_event_vector *vec)
+{
+ struct ipsec_traffic t;
+ struct rte_mbuf *pkt;
+ int i;
+
+ t.ip4.num = 0;
+ t.ip6.num = 0;
+ t.ipsec.num = 0;
+
+ for (i = 0; i < vec->nb_elem; i++) {
+ /* Get pkt from event */
+ pkt = vec->mbufs[i];
+
+ if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
+ if (unlikely(pkt->ol_flags &
+ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
+ RTE_LOG(ERR, IPSEC,
+ "Inbound security offload failed\n");
+ free_pkts(&pkt, 1);
+ continue;
+ }
+ }
+
+ classify_pkt(pkt, &t);
+ }
+
+ check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
+ check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
+
+ return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+}
+
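+/*
+ * Outbound vector processing: classify packets, run the bulk SP lookup
+ * and route both bypassed and to-be-protected traffic.
+ */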
+static inline int
+process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
+ struct rte_event_vector *vec)
+{
+ struct ipsec_traffic t;
+ struct rte_mbuf *pkt;
+ uint32_t i;
+
+ t.ip4.num = 0;
+ t.ip6.num = 0;
+ t.ipsec.num = 0;
+
+ for (i = 0; i < vec->nb_elem; i++) {
+ /* Get pkt from event */
+ pkt = vec->mbufs[i];
+
+ classify_pkt(pkt, &t);
+
+ /* Provide L2 len for Outbound processing */
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+ }
+
+ check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
+ check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
+
+ return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+}
+
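+/*
+ * Driver mode outbound vector processing: no SP/SA lookup; attach the
+ * pre-selected per-port session and mark packets for Tx security offload.
+ */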
+static inline int
+process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
+ struct port_drv_mode_data *data)
+{
+ struct rte_mbuf *pkt;
+ int16_t port_id;
+ uint32_t i;
+ int j = 0;
+
+ for (i = 0; i < vec->nb_elem; i++) {
+ pkt = vec->mbufs[i];
+ port_id = pkt->port;
+
+ if (unlikely(!data[port_id].sess)) {
+ free_pkts(&pkt, 1);
+ continue;
+ }
+ ipsec_event_pre_forward(pkt, port_id);
+ /* Save security session */
+ rte_security_set_pkt_metadata(data[port_id].ctx,
+ data[port_id].sess, pkt,
+ NULL);
+
+ /* Mark the packet for Tx security offload */
+ pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
+
+ /* Provide L2 len for Outbound processing */
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+ vec->mbufs[j++] = pkt;
+ }
+
+ return j;
+}
+
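+/*
+ * App mode vector event handler: choose inbound or outbound processing
+ * from the first packet's port, then enqueue the vector to the Tx adapter
+ * or return it to its mempool if no packet survived.
+ */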
+static inline void
+ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+ struct eh_event_link_info *links,
+ struct rte_event *ev)
+{
+ struct rte_event_vector *vec = ev->vec;
+ struct rte_mbuf *pkt;
+ int ret;
+
+ pkt = vec->mbufs[0];
+
+ ev_vector_attr_init(vec);
+ if (is_unprotected_port(pkt->port))
+ ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
+ &lconf->rt, vec);
+ else
+ ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
+ &lconf->rt, vec);
+
+ if (likely(ret > 0)) {
+ vec->nb_elem = ret;
+ rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+ links[0].event_port_id,
+ ev, 1, 0);
+ } else {
+ rte_mempool_put(rte_mempool_from_obj(vec), vec);
+ }
+}
+
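+/*
+ * Driver mode vector event handler: packets on protected ports get the
+ * per-port session attached; unprotected-port traffic is forwarded as-is.
+ */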
+static inline void
+ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
+ struct rte_event *ev,
+ struct port_drv_mode_data *data)
+{
+ struct rte_event_vector *vec = ev->vec;
+ struct rte_mbuf *pkt;
+
+ pkt = vec->mbufs[0];
+
+ if (!is_unprotected_port(pkt->port))
+ vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
+ data);
+ if (vec->nb_elem > 0)
+ rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+ links[0].event_port_id,
+ ev, 1, 0);
+ else
+ rte_mempool_put(rte_mempool_from_obj(vec), vec);
+}
+
/*
* Event mode exposes various operating modes depending on the
* capabilities of the event device and the operating mode
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
uint8_t nb_links)
{
- struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL };
+ struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
unsigned int nb_rx = 0;
struct rte_mbuf *pkt;
struct rte_event ev;
return;
}
+ memset(&data, 0, sizeof(struct port_drv_mode_data));
+
/* Get core ID */
lcore_id = rte_lcore_id();
* Prepare security sessions table. In outbound driver mode
* we always use first session configured for a given port
*/
- prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl,
- RTE_MAX_ETHPORTS);
+ prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
+ RTE_MAX_ETHPORTS);
RTE_LOG(INFO, IPSEC,
"Launching event mode worker (non-burst - Tx internal port - "
if (nb_rx == 0)
continue;
+ switch (ev.event_type) {
+ case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
+ case RTE_EVENT_TYPE_ETHDEV_VECTOR:
+ ipsec_ev_vector_drv_mode_process(links, &ev, data);
+ continue;
+ case RTE_EVENT_TYPE_ETHDEV:
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
+ ev.event_type);
+ continue;
+ }
+
pkt = ev.mbuf;
port_id = pkt->port;
if (!is_unprotected_port(port_id)) {
- if (unlikely(!sess_tbl[port_id])) {
+ if (unlikely(!data[port_id].sess)) {
rte_pktmbuf_free(pkt);
continue;
}
/* Save security session */
- pkt->userdata = sess_tbl[port_id];
+ rte_security_set_pkt_metadata(data[port_id].ctx,
+ data[port_id].sess, pkt,
+ NULL);
/* Mark the packet for Tx security offload */
- pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
+
+ /* Provide L2 len for Outbound processing */
+ pkt->l2_len = RTE_ETHER_HDR_LEN;
}
/*
if (nb_rx == 0)
continue;
- if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
+ switch (ev.event_type) {
+ case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
+ case RTE_EVENT_TYPE_ETHDEV_VECTOR:
+ ipsec_ev_vector_process(&lconf, links, &ev);
+ continue;
+ case RTE_EVENT_TYPE_ETHDEV:
+ break;
+ default:
RTE_LOG(ERR, IPSEC, "Invalid event type %u",
ev.event_type);
-
continue;
}