X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=examples%2Fl3fwd%2Fl3fwd_em_sequential.h;h=b231b9994e19f0838be6d5c6e62f4c6730ad7b99;hb=251691f107a565807be613439a431a6072edba15;hp=35cf5eac0983a6b52d42a1d444427937716f7bd0;hpb=3998e2a07220844d3f3c17f76a781ced3efe0de0;p=dpdk.git

diff --git a/examples/l3fwd/l3fwd_em_sequential.h b/examples/l3fwd/l3fwd_em_sequential.h
index 35cf5eac09..b231b9994e 100644
--- a/examples/l3fwd/l3fwd_em_sequential.h
+++ b/examples/l3fwd/l3fwd_em_sequential.h
@@ -25,8 +25,8 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 		uint16_t portid)
 {
 	uint8_t next_hop;
-	struct ipv4_hdr *ipv4_hdr;
-	struct ipv6_hdr *ipv6_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
 	uint32_t tcp_or_udp;
 	uint32_t l3_ptypes;
 
@@ -36,8 +36,8 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 	if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
 
 		/* Handle IPv4 headers.*/
-		ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
-				sizeof(struct ether_hdr));
+		ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
+				sizeof(struct rte_ether_hdr));
 
 		next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
 						qconf->ipv4_lookup_struct);
@@ -51,8 +51,8 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
 	} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
 
 		/* Handle IPv6 headers.*/
-		ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
-				sizeof(struct ether_hdr));
+		ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *,
+				sizeof(struct rte_ether_hdr));
 
 		next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
 						qconf->ipv6_lookup_struct);
@@ -81,17 +81,44 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
 
 	if (nb_rx > 0) {
 		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[0],
-			       struct ether_hdr *) + 1);
+			       struct rte_ether_hdr *) + 1);
 	}
 
 	for (i = 1, j = 0; j < nb_rx; i++, j++) {
 		if (i < nb_rx) {
-			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i],
-				       struct ether_hdr *) + 1);
+			rte_prefetch0(rte_pktmbuf_mtod(
+					pkts_burst[i],
+					struct rte_ether_hdr *) + 1);
 		}
 		dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
 	}
 
 	send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
 }
+
+/*
+ * Buffer optimized handling of events, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_em_process_events(int nb_rx, struct rte_event **events,
+			 struct lcore_conf *qconf)
+{
+	int32_t i, j;
+
+	rte_prefetch0(rte_pktmbuf_mtod(events[0]->mbuf,
+		       struct rte_ether_hdr *) + 1);
+
+	for (i = 1, j = 0; j < nb_rx; i++, j++) {
+		struct rte_mbuf *mbuf = events[j]->mbuf;
+
+		if (i < nb_rx) {
+			rte_prefetch0(rte_pktmbuf_mtod(
+					events[i]->mbuf,
+					struct rte_ether_hdr *) + 1);
+		}
+		mbuf->port = em_get_dst_port(qconf, mbuf, mbuf->port);
+		process_packet(mbuf, &mbuf->port);
+	}
+}
 #endif /* __L3FWD_EM_SEQUENTIAL_H__ */
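
For context on how the new l3fwd_em_process_events() helper could be driven, the sketch below shows a minimal eventdev worker loop around it. This is illustrative only and not part of the patch: the names em_event_loop_sketch, event_d_id, event_p_id and EM_EVENT_BURST, the stop flag, and the re-enqueue step are assumptions, and the code is meant to live next to the existing exact-match code (e.g. in l3fwd_em.c, after this header has been pulled in) so that l3fwd_em_process_events() and struct lcore_conf are visible.

/*
 * Hypothetical worker-loop sketch (not part of this patch).  It assumes
 * an event device "event_d_id" and a worker port "event_p_id" have been
 * configured elsewhere, and that it is compiled where the exact-match
 * helpers from l3fwd_em_sequential.h are already in scope.
 */
#include <stdbool.h>
#include <rte_eventdev.h>

#include "l3fwd.h"

#define EM_EVENT_BURST 32	/* assumed burst size, not from the patch */

static void
em_event_loop_sketch(uint8_t event_d_id, uint8_t event_p_id,
		     struct lcore_conf *qconf, volatile bool *stop)
{
	struct rte_event ev[EM_EVENT_BURST];
	struct rte_event *ev_ptrs[EM_EVENT_BURST];
	uint16_t nb_rx, sent, i;

	while (!*stop) {
		/* Pull a burst of events from this worker's port. */
		nb_rx = rte_event_dequeue_burst(event_d_id, event_p_id,
						ev, EM_EVENT_BURST, 0);
		if (nb_rx == 0)
			continue;

		/*
		 * l3fwd_em_process_events() takes an array of event
		 * pointers, so build one over the dequeued burst.
		 */
		for (i = 0; i < nb_rx; i++)
			ev_ptrs[i] = &ev[i];

		/*
		 * Exact-match lookup plus header rewrite; the helper
		 * writes the egress port back into each mbuf->port.
		 */
		l3fwd_em_process_events(nb_rx, ev_ptrs, qconf);

		/*
		 * Hand the events to the next stage (e.g. a Tx adapter
		 * queue).  Updating ev[i].queue_id/op for that stage is
		 * omitted; only the retry on partial enqueue is shown.
		 */
		sent = 0;
		while (sent < nb_rx)
			sent += rte_event_enqueue_burst(event_d_id,
					event_p_id, ev + sent,
					nb_rx - sent);
	}
}

The pointer-array step exists only because this version of the helper takes struct rte_event **; a caller that already holds an array of rte_event structures cannot pass it directly.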