X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fipsec-secgw.c;h=3911e6a60b385b89a8ecf868d56d0e6d02fe6e57;hb=5908e7e837a62fe62c48094d8c2e947242f8fac3;hp=b253eea2bfe94ecc19716588815614bebdadc432;hpb=da7a540e1dc64495785d4f0d6e6b20b27bf28580;p=dpdk.git

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index b253eea2bf..3911e6a60b 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 
 #include "ipsec.h"
 #include "parser.h"
@@ -211,7 +212,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.mq_mode	= ETH_MQ_RX_RSS,
-		.max_rx_pkt_len = ETHER_MAX_LEN,
+		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
 		.split_hdr_size = 0,
 		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
 	},
@@ -233,11 +234,11 @@ static inline void
 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 {
 	uint8_t *nlp;
-	struct ether_hdr *eth;
+	struct rte_ether_hdr *eth;
 
-	eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
-	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
-		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
 		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
 		if (*nlp == IPPROTO_ESP)
 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
@@ -247,17 +248,41 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 		}
 		pkt->l2_len = 0;
 		pkt->l3_len = sizeof(struct ip);
-	} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
-		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
-		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
-		if (*nlp == IPPROTO_ESP)
+	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		int next_proto;
+		size_t l3len, ext_len;
+		struct rte_ipv6_hdr *v6h;
+		uint8_t *p;
+
+		/* get protocol type */
+		v6h = (struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
+			RTE_ETHER_HDR_LEN);
+		next_proto = v6h->proto;
+
+		/* determine l3 header size up to ESP extension */
+		l3len = sizeof(struct ip6_hdr);
+		p = rte_pktmbuf_mtod(pkt, uint8_t *);
+		while (next_proto != IPPROTO_ESP && l3len < pkt->data_len &&
+			(next_proto = rte_ipv6_get_next_ext(p + l3len,
+						next_proto, &ext_len)) >= 0)
+			l3len += ext_len;
+
+		/* drop packet when IPv6 header exceeds first segment length */
+		if (unlikely(l3len > pkt->data_len)) {
+			rte_pktmbuf_free(pkt);
+			return;
+		}
+
+		if (next_proto == IPPROTO_ESP)
 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
 		else {
-			t->ip6.data[t->ip6.num] = nlp;
+			t->ip6.data[t->ip6.num] = rte_pktmbuf_mtod_offset(pkt,
+				uint8_t *,
+				offsetof(struct rte_ipv6_hdr, proto));
 			t->ip6.pkts[(t->ip6.num)++] = pkt;
 		}
 		pkt->l2_len = 0;
-		pkt->l3_len = sizeof(struct ip6_hdr);
+		pkt->l3_len = l3len;
 	} else {
 		/* Unknown/Unsupported type, drop the packet */
 		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
@@ -325,36 +350,37 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
 	const struct lcore_conf *qconf)
 {
 	struct ip *ip;
-	struct ether_hdr *ethhdr;
+	struct rte_ether_hdr *ethhdr;
 
 	ip = rte_pktmbuf_mtod(pkt, struct ip *);
 
-	ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
+	ethhdr = (struct rte_ether_hdr *)
+		rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
 
 	if (ip->ip_v == IPVERSION) {
 		pkt->ol_flags |= qconf->outbound.ipv4_offloads;
 		pkt->l3_len = sizeof(struct ip);
-		pkt->l2_len = ETHER_HDR_LEN;
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
 
 		ip->ip_sum = 0;
 
 		/* calculate IPv4 cksum in SW */
 		if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
-			ip->ip_sum = rte_ipv4_cksum((struct ipv4_hdr *)ip);
+			ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
 
-		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 	} else {
 		pkt->ol_flags |= qconf->outbound.ipv6_offloads;
 		pkt->l3_len = sizeof(struct ip6_hdr);
-		pkt->l2_len = ETHER_HDR_LEN;
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
 
-		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
 	}
 
 	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
-			sizeof(struct ether_addr));
+			sizeof(struct rte_ether_addr));
 	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
-			sizeof(struct ether_addr));
+			sizeof(struct rte_ether_addr));
 }
 
 static inline void
@@ -438,11 +464,11 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
 	for (i = 0; i < ip->num; i++) {
 		m = ip->pkts[i];
 		res = ip->res[i];
-		if (res & BYPASS) {
+		if (res == BYPASS) {
 			ip->pkts[j++] = m;
 			continue;
 		}
-		if (res & DISCARD) {
+		if (res == DISCARD) {
 			rte_pktmbuf_free(m);
 			continue;
 		}
@@ -453,9 +479,8 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
 			continue;
 		}
 
-		sa_idx = ip->res[i] & PROTECT_MASK;
-		if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
-				!inbound_sa_check(sa, m, sa_idx)) {
+		sa_idx = SPI2IDX(res);
+		if (!inbound_sa_check(sa, m, sa_idx)) {
 			rte_pktmbuf_free(m);
 			continue;
 		}
@@ -541,16 +566,15 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
 	j = 0;
 	for (i = 0; i < ip->num; i++) {
 		m = ip->pkts[i];
-		sa_idx = ip->res[i] & PROTECT_MASK;
-		if (ip->res[i] & DISCARD)
+		sa_idx = SPI2IDX(ip->res[i]);
+		if (ip->res[i] == DISCARD)
 			rte_pktmbuf_free(m);
-		else if (ip->res[i] & BYPASS)
+		else if (ip->res[i] == BYPASS)
 			ip->pkts[j++] = m;
-		else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
+		else {
 			ipsec->res[ipsec->num] = sa_idx;
 			ipsec->pkts[ipsec->num++] = m;
-		} else /* invalid SA idx */
-			rte_pktmbuf_free(m);
+		}
 	}
 	ip->num = j;
 }
@@ -1426,10 +1450,10 @@ parse_args(int32_t argc, char **argv)
 }
 
 static void
-print_ethaddr(const char *name, const struct ether_addr *eth_addr)
+print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
 {
-	char buf[ETHER_ADDR_FMT_SIZE];
-	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+	char buf[RTE_ETHER_ADDR_FMT_SIZE];
+	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
 	printf("%s%s", name, buf);
 }
 
@@ -1437,9 +1461,9 @@ print_ethaddr(const char *name, const struct ether_addr *eth_addr)
  * Update destination ethaddr for the port.
  */
 int
-add_dst_ethaddr(uint16_t port, const struct ether_addr *addr)
+add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr)
 {
-	if (port > RTE_DIM(ethaddr_tbl))
+	if (port >= RTE_DIM(ethaddr_tbl))
 		return -EINVAL;
 
 	ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
@@ -1721,6 +1745,7 @@ cryptodevs_init(void)
 
 		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
 		dev_conf.nb_queue_pairs = qp;
+		dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
 
 		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
 		if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
@@ -1793,7 +1818,7 @@ cryptodevs_init(void)
 				rte_eth_dev_get_sec_ctx(port_id)) {
 			int socket_id = rte_eth_dev_socket_id(port_id);
 
-			if (!socket_ctx[socket_id].session_pool) {
+			if (!socket_ctx[socket_id].session_priv_pool) {
 				char mp_name[RTE_MEMPOOL_NAMESIZE];
 				struct rte_mempool *sess_mp;
 
@@ -1813,7 +1838,8 @@ cryptodevs_init(void)
 				else
 					printf("Allocated session pool "
							"on socket %d\n", socket_id);
-				socket_ctx[socket_id].session_pool = sess_mp;
+				socket_ctx[socket_id].session_priv_pool =
+						sess_mp;
 			}
 		}
 	}
@@ -1833,7 +1859,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
 	int32_t ret, socket_id;
 	struct lcore_conf *qconf;
-	struct ether_addr ethaddr;
+	struct rte_ether_addr ethaddr;
 	struct rte_eth_conf local_port_conf = port_conf;
 
 	rte_eth_dev_info_get(portid, &dev_info);