/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
-parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
+parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
struct tcp_hdr *tcp_hdr;
/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
static void
-parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
+parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
struct tcp_hdr *tcp_hdr;
- info->l3_len = sizeof(struct ipv6_hdr);
+ info->l3_len = sizeof(struct rte_ipv6_hdr);
info->l4_proto = ipv6_hdr->proto;
/* only fill l4_len for TCP, it's useful for TSO */
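For context, the code this comment introduces (untouched by the rename; a hedged sketch from testpmd's csum engine, not part of the hunk):

	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else
		info->l4_len = 0;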
static void
parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
info->l2_len = sizeof(struct rte_ether_hdr);
info->ethertype = eth_hdr->ether_type;
switch (info->ethertype) {
case _htons(RTE_ETHER_TYPE_IPv4):
- ipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->l2_len);
+ ipv4_hdr = (struct rte_ipv4_hdr *) ((char *)eth_hdr + info->l2_len);
parse_ipv4(ipv4_hdr, info);
break;
case _htons(RTE_ETHER_TYPE_IPv6):
- ipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->l2_len);
+ ipv6_hdr = (struct rte_ipv6_hdr *) ((char *)eth_hdr + info->l2_len);
parse_ipv6(ipv6_hdr, info);
break;
default:
struct testpmd_offload_info *info)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);
info->outer_l3_len = info->l3_len;
info->outer_l4_proto = info->l4_proto;
- ipv4_hdr = (struct ipv4_hdr *)((char *)vxlan_gpe_hdr +
+ ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
vxlan_gpe_len);
parse_ipv4(ipv4_hdr, info);
info->outer_l3_len = info->l3_len;
info->outer_l4_proto = info->l4_proto;
- ipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr +
+ ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
vxlan_gpe_len);
info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
uint8_t gre_len = 0;
gre_len += sizeof(struct simple_gre_hdr);
info->outer_l3_len = info->l3_len;
info->outer_l4_proto = info->l4_proto;
- ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);
+ ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);
parse_ipv4(ipv4_hdr, info);
info->ethertype = _htons(RTE_ETHER_TYPE_IPv4);
info->outer_l3_len = info->l3_len;
info->outer_l4_proto = info->l4_proto;
- ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);
+ ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);
info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
parse_ipv6(ipv6_hdr, info);
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
- struct ipv4_hdr *ipv4_hdr = encap_ip;
- struct ipv6_hdr *ipv6_hdr = encap_ip;
+ struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
+ struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
uint8_t ip_version;
ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
uint64_t tx_offloads)
{
- struct ipv4_hdr *ipv4_hdr = l3_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr = l3_hdr;
struct udp_hdr *udp_hdr;
struct tcp_hdr *tcp_hdr;
struct sctp_hdr *sctp_hdr;
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
uint64_t tx_offloads, int tso_enabled)
{
- struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
- struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr;
struct udp_hdr *udp_hdr;
uint64_t ol_flags = 0;
struct rte_mempool *mbp;
struct rte_mbuf *pkt;
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ip_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
struct udp_hdr *udp_hdr;
uint16_t vlan_tci, vlan_tci_outer;
uint64_t ol_flags;
eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
/* Initialize IP header. */
- ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
memset(ip_hdr, 0, sizeof(*ip_hdr));
ip_hdr->version_ihl = IP_VHL_DEF;
ip_hdr->type_of_service = 0;
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
pkt->l2_len = sizeof(struct rte_ether_hdr);
- pkt->l3_len = sizeof(struct ipv4_hdr);
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
next_flow = (next_flow + 1) % cfg_n_flows;
}
static uint16_t
-ipv4_hdr_cksum(struct ipv4_hdr *ip_h)
+ipv4_hdr_cksum(struct rte_ipv4_hdr *ip_h)
{
uint16_t *v16_h;
uint32_t ip_cksum;
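The rest of this helper is a plain one's-complement sum over the ten 16-bit words of an option-less 20-byte header; a hedged sketch (word index 5, the checksum field itself, is skipped):

	v16_h = (uint16_t *)ip_h;
	ip_cksum = v16_h[0] + v16_h[1] + v16_h[2] + v16_h[3] +
		   v16_h[4] + v16_h[6] + v16_h[7] + v16_h[8] + v16_h[9];

	/* fold the carries into 16 bits and complement */
	ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16);
	ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16);
	return (uint16_t)(~ip_cksum & 0xffff);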
struct rte_ether_hdr *eth_h;
struct rte_vlan_hdr *vlan_h;
struct rte_arp_hdr *arp_h;
- struct ipv4_hdr *ip_h;
+ struct rte_ipv4_hdr *ip_h;
struct rte_icmp_hdr *icmp_h;
struct rte_ether_addr eth_addr;
uint32_t retry;
rte_pktmbuf_free(pkt);
continue;
}
- ip_h = (struct ipv4_hdr *) ((char *)eth_h + l2_len);
+ ip_h = (struct rte_ipv4_hdr *) ((char *)eth_h + l2_len);
if (verbose_level > 0) {
ipv4_addr_dump(" IPV4: src=", ip_h->src_addr);
ipv4_addr_dump(" dst=", ip_h->dst_addr);
 * Check if packet is an ICMP echo request.
*/
icmp_h = (struct rte_icmp_hdr *) ((char *)ip_h +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
if (! ((ip_h->next_proto_id == IPPROTO_ICMP) &&
(icmp_h->icmp_type == RTE_IP_ICMP_ECHO_REQUEST) &&
(icmp_h->icmp_code == 0))) {
mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct rte_ether_hdr);
- mb->l3_len = sizeof(struct ipv4_hdr);
+ mb->l3_len = sizeof(struct rte_ipv4_hdr);
mb->vlan_tci = txp->tx_vlan_id;
mb->vlan_tci_outer = txp->tx_vlan_id_outer;
}
mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct rte_ether_hdr);
- mb->l3_len = sizeof(struct ipv4_hdr);
+ mb->l3_len = sizeof(struct rte_ipv4_hdr);
mb->vlan_tci = txp->tx_vlan_id;
mb->vlan_tci_outer = txp->tx_vlan_id_outer;
}
#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
-static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
+static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
static void
}
static void
-setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
+setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
struct udp_hdr *udp_hdr,
uint16_t pkt_data_len)
{
/*
* Initialize IP header.
*/
- pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr));
+ pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
ip_hdr->version_ihl = IP_VHL_DEF;
ip_hdr->type_of_service = 0;
ip_hdr->fragment_offset = 0;
sizeof(struct rte_ether_hdr));
copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
sizeof(struct rte_ether_hdr) +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
/*
* Complete first mbuf of packet and append it to the
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
pkt->l2_len = sizeof(struct rte_ether_hdr);
- pkt->l3_len = sizeof(struct ipv4_hdr);
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
uint16_t pkt_data_len;
pkt_data_len = (uint16_t) (tx_pkt_length - (sizeof(struct rte_ether_hdr) +
- sizeof(struct ipv4_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
sizeof(struct udp_hdr)));
setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
}
if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK)
printf(" - inner_l4_len=%d", hdr_lens.inner_l4_len);
if (is_encapsulation) {
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
struct udp_hdr *udp_hdr;
uint8_t l2_len;
uint8_t l3_len;
/* Do not support ipv4 option field */
if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
- l3_len = sizeof(struct ipv4_hdr);
+ l3_len = sizeof(struct rte_ipv4_hdr);
ipv4_hdr = rte_pktmbuf_mtod_offset(mb,
- struct ipv4_hdr *,
+ struct rte_ipv4_hdr *,
l2_len);
l4_proto = ipv4_hdr->next_proto_id;
} else {
- l3_len = sizeof(struct ipv6_hdr);
+ l3_len = sizeof(struct rte_ipv6_hdr);
ipv6_hdr = rte_pktmbuf_mtod_offset(mb,
- struct ipv6_hdr *,
+ struct rte_ipv6_hdr *,
l2_len);
l4_proto = ipv6_hdr->proto;
}
.. code-block:: c
-    struct ipv6_hdr {
+    struct rte_ipv6_hdr {
uint32_t vtc_flow; /* IP version, traffic class & flow label. */
uint16_t payload_len; /* IP packet length - includes sizeof(ip_header). */
uint8_t proto; /* Protocol, next header. */
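The remaining fields of this header, as defined in rte_ip.h at this point in the tree (shown for context; untouched by the rename):

	uint8_t  hop_limits;   /* Hop limits. */
	uint8_t  src_addr[16]; /* IP address of source host(s). */
	uint8_t  dst_addr[16]; /* IP address of destination host(s). */
	} __attribute__((__packed__));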
.size = sizeof (uint8_t),
.field_index = 0,
.input_index = 0,
- .offset = offsetof (struct ipv6_hdr, proto),
+ .offset = offsetof (struct rte_ipv6_hdr, proto),
},
{
.size = sizeof (uint32_t),
.field_index = 1,
.input_index = 1,
- .offset = offsetof (struct ipv6_hdr, src_addr[0]),
+ .offset = offsetof (struct rte_ipv6_hdr, src_addr[0]),
},
{
.size = sizeof (uint32_t),
.field_index = 2,
.input_index = 2,
- .offset = offsetof (struct ipv6_hdr, src_addr[4]),
+ .offset = offsetof (struct rte_ipv6_hdr, src_addr[4]),
},
{
.size = sizeof (uint32_t),
.field_index = 3,
.input_index = 3,
- .offset = offsetof (struct ipv6_hdr, src_addr[8]),
+ .offset = offsetof (struct rte_ipv6_hdr, src_addr[8]),
},
{
.size = sizeof (uint32_t),
.field_index = 4,
.input_index = 4,
- .offset = offsetof (struct ipv6_hdr, src_addr[12]),
+ .offset = offsetof (struct rte_ipv6_hdr, src_addr[12]),
},
};
.field_index = PROTO_FIELD_IPV4,
.input_index = PROTO_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, next_proto_id),
+ offsetof(struct rte_ipv4_hdr, next_proto_id),
},
/* next input field (IPv4 source address) - 4 consecutive bytes. */
{
.field_index = SRC_FIELD_IPV4,
.input_index = SRC_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, src_addr),
+ offsetof(struct rte_ipv4_hdr, src_addr),
},
/* next input field (IPv4 destination address) - 4 consecutive bytes. */
{
.field_index = DST_FIELD_IPV4,
.input_index = DST_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, dst_addr),
+ offsetof(struct rte_ipv4_hdr, dst_addr),
},
/*
* Next 2 fields (src & dst ports) form 4 consecutive bytes.
.field_index = SRCP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct ipv4_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, src_port),
},
{
.field_index = DSTP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct ipv4_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, dst_port),
},
};
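As a sanity check on these offsets: with an untagged frame, the TCP source port of an IPv4 packet starts 14 + 20 = 34 bytes into the frame. A hypothetical compile-time assertion (not part of the patch) that pins this down:

	_Static_assert(sizeof(struct rte_ether_hdr) +
		       sizeof(struct rte_ipv4_hdr) +
		       offsetof(struct tcp_hdr, src_port) == 34,
		       "unexpected 5-tuple key layout");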
/* Remove the Ethernet header from the input packet */
- iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, sizeof(struct rte_ether_hdr));
+ iphdr = (struct rte_ipv4_hdr *)rte_pktmbuf_adj(m, sizeof(struct rte_ether_hdr));
RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
int ret = 0;
union ipv4_5tuple_host key;
- ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
+ ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct rte_ipv4_hdr, time_to_live);
__m128i data = _mm_loadu_si128((__m128i *)(ipv4_hdr));
{
// ...
- data[0] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[0], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
- data[1] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[1], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
- data[2] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[2], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
- data[3] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[3], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
+ data[0] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[0], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct rte_ipv4_hdr, time_to_live)));
+ data[1] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[1], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct rte_ipv4_hdr, time_to_live)));
+ data[2] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[2], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct rte_ipv4_hdr, time_to_live)));
+ data[3] = _mm_loadu_si128((__m128i *)(rte_pktmbuf_mtod(m[3], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct rte_ipv4_hdr, time_to_live)));
key[0].xmm = _mm_and_si128(data[0], mask0);
key[1].xmm = _mm_and_si128(data[1], mask0);
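Why the loads above start at time_to_live (a hedged note on the layout):

	/* A 16-byte load at offsetof(struct rte_ipv4_hdr, time_to_live)
	 * covers TTL + next_proto_id (2 B), hdr_checksum (2 B), src_addr
	 * (4 B), dst_addr (4 B), and the 4 bytes of L4 src/dst ports that
	 * immediately follow the 20-byte header; mask0 then zeroes
	 * everything but the 5-tuple fields before hashing. */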
.. code-block:: c
static inline uint16_t
- get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
+ get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint8_t next_hop;
efd_value_t data[EFD_BURST_MAX];
const void *key_ptrs[EFD_BURST_MAX];
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst_ip[EFD_BURST_MAX];
for (i = 0; i < rx_count; i++) {
/* Handle IPv4 header.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
key_ptrs[i] = (void *)&ipv4_dst_ip[i];
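The collected keys then typically feed a burst lookup; a hedged sketch following the call shape of the EFD server/node sample (efd_table here stands in for the application's table handle):

	rte_efd_lookup_multi(efd_table, rte_socket_id(), rx_count,
			     (const void **)key_ptrs, data);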
static inline void
handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst_ip[PKT_READ_SIZE];
const void *key_ptrs[PKT_READ_SIZE];
unsigned int i;
for (i = 0; i < num_packets; i++) {
/* Handle IPv4 header.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
key_ptrs[i] = &ipv4_dst_ip[i];
mode6_debug(const char __attribute__((unused)) *info, struct rte_ether_hdr *eth_h,
uint16_t port, uint32_t __attribute__((unused)) *burstnumber)
{
- struct ipv4_hdr *ipv4_h;
+ struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
struct rte_arp_hdr *arp_h;
char dst_ip[16];
#endif
if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
- ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
+ ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
}
static inline uint32_t
-ipv4_hash(struct ipv4_hdr *ipv4_hdr)
+ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}
static inline uint32_t
-ipv6_hash(struct ipv6_hdr *ipv6_hdr)
+ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
unaligned_uint32_t *word_src_addr =
(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
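For context, the remainder of this helper (untouched by the rename) XORs the four 32-bit words of both addresses, roughly:

	unaligned_uint32_t *word_dst_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
	       (word_src_addr[1] ^ word_dst_addr[1]) ^
	       (word_src_addr[2] ^ word_dst_addr[2]) ^
	       (word_src_addr[3] ^ word_dst_addr[3]);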
vlan_offset = get_vlan_offset(eth_hdr, &proto);
if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
- struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv4_hash(ipv4_hdr);
} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
- struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
}
l4hash = 0;
if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
- struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
size_t ip_hdr_offset;
}
}
} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
- struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
{
struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
- struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
- struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
RTE_PTYPE_L3_IPV4_EXT)) {
- ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
ipv4_hdr->hdr_checksum = 0;
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
RTE_PTYPE_L3_IPV6) ||
((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
RTE_PTYPE_L3_IPV6_EXT))
- ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+ ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
/* setup IPCS* fields */
ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
- offsetof(struct ipv4_hdr, hdr_checksum));
+ offsetof(struct rte_ipv4_hdr, hdr_checksum));
/*
* When doing checksum or TCP segmentation with IPv6 headers,
uint32_t i;
struct rte_mbuf *m;
struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
- struct ipv4_hdr *ip_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
uint64_t ol_flags;
uint16_t frag_field;
if (unlikely(m->l2_len == 0))
m->l2_len = sizeof(struct rte_ether_hdr);
- ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
m->l2_len);
frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
- struct ipv4_hdr ip4_mask, ip4_val;
- memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
- memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+ struct rte_ipv4_hdr ip4_mask, ip4_val;
+ memset(&ip4_mask, 0, sizeof(struct rte_ipv4_hdr));
+ memset(&ip4_val, 0, sizeof(struct rte_ipv4_hdr));
if (input->flow.ip4_flow.tos) {
ip4_mask.type_of_service = masks->ipv4_mask.tos;
}
enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
- &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+ &ip4_mask, &ip4_val, sizeof(struct rte_ipv4_hdr));
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
- struct ipv6_hdr ipv6_mask, ipv6_val;
- memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
- memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+ struct rte_ipv6_hdr ipv6_mask, ipv6_val;
+ memset(&ipv6_mask, 0, sizeof(struct rte_ipv6_hdr));
+ memset(&ipv6_val, 0, sizeof(struct rte_ipv6_hdr));
if (input->flow.ipv6_flow.proto) {
ipv6_mask.proto = masks->ipv6_mask.proto;
}
enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
- &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+ &ipv6_mask, &ipv6_val, sizeof(struct rte_ipv6_hdr));
}
}
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
- struct ipv4_hdr supported_mask = {
+ struct rte_ipv4_hdr supported_mask = {
.src_addr = 0xffffffff,
.dst_addr = 0xffffffff,
};
mask = &rte_flow_item_ipv4_mask;
memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
} else {
/* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
+ if ((*inner_ofst + sizeof(struct rte_ipv4_hdr)) >
FILTER_GENERIC_1_KEY_LEN)
return ENOTSUP;
memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct ipv4_hdr));
+ mask, sizeof(struct rte_ipv4_hdr));
memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct ipv4_hdr));
- *inner_ofst += sizeof(struct ipv4_hdr);
+ spec, sizeof(struct rte_ipv4_hdr));
+ *inner_ofst += sizeof(struct rte_ipv4_hdr);
}
return 0;
}
if (*inner_ofst == 0) {
memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
- sizeof(struct ipv6_hdr));
+ sizeof(struct rte_ipv6_hdr));
memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
- sizeof(struct ipv6_hdr));
+ sizeof(struct rte_ipv6_hdr));
} else {
/* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
- if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
+ if ((*inner_ofst + sizeof(struct rte_ipv6_hdr)) >
FILTER_GENERIC_1_KEY_LEN)
return ENOTSUP;
memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
- mask, sizeof(struct ipv6_hdr));
+ mask, sizeof(struct rte_ipv6_hdr));
memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
- spec, sizeof(struct ipv6_hdr));
- *inner_ofst += sizeof(struct ipv6_hdr);
+ spec, sizeof(struct rte_ipv6_hdr));
+ *inner_ofst += sizeof(struct rte_ipv6_hdr);
}
return 0;
}
static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
uint16_t *ether_type;
uint8_t len = 2 * sizeof(struct rte_ether_addr);
- struct ipv4_hdr *ip;
- struct ipv6_hdr *ip6;
+ struct rte_ipv4_hdr *ip;
+ struct rte_ipv6_hdr *ip6;
static const uint8_t next_proto[] = {
[RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
case RTE_ETH_FLOW_FRAG_IPV4:
- ip = (struct ipv4_hdr *)raw_pkt;
+ ip = (struct rte_ipv4_hdr *)raw_pkt;
*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
*/
ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
- len += sizeof(struct ipv4_hdr);
+ len += sizeof(struct rte_ipv4_hdr);
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
case RTE_ETH_FLOW_FRAG_IPV6:
- ip6 = (struct ipv6_hdr *)raw_pkt;
+ ip6 = (struct rte_ipv6_hdr *)raw_pkt;
*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
ip6->vtc_flow =
rte_memcpy(&(ip6->dst_addr),
&(fdir_input->flow.ipv6_flow.src_ip),
IPV6_ADDR_LEN);
- len += sizeof(struct ipv6_hdr);
+ len += sizeof(struct rte_ipv6_hdr);
break;
default:
PMD_DRV_LOG(ERR, "unknown flow type %u.",
static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
uint16_t *ether_type;
uint8_t len = 2 * sizeof(struct rte_ether_addr);
- struct ipv4_hdr *ip;
- struct ipv6_hdr *ip6;
+ struct rte_ipv4_hdr *ip;
+ struct rte_ipv6_hdr *ip6;
uint8_t pctype = fdir_input->pctype;
bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
static const uint8_t next_proto[] = {
pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
is_customized_pctype) {
- ip = (struct ipv4_hdr *)raw_pkt;
+ ip = (struct rte_ipv4_hdr *)raw_pkt;
*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
cus_pctype->index == I40E_CUSTOMIZED_GTPU)
ip->next_proto_id = IPPROTO_UDP;
- len += sizeof(struct ipv4_hdr);
+ len += sizeof(struct rte_ipv4_hdr);
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
- ip6 = (struct ipv6_hdr *)raw_pkt;
+ ip6 = (struct rte_ipv6_hdr *)raw_pkt;
*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
ip6->vtc_flow =
rte_memcpy(&ip6->dst_addr,
&fdir_input->flow.ipv6_flow.src_ip,
IPV6_ADDR_LEN);
- len += sizeof(struct ipv6_hdr);
+ len += sizeof(struct rte_ipv6_hdr);
} else {
PMD_DRV_LOG(ERR, "unknown pctype %u.",
fdir_input->pctype);
struct tcp_hdr *tcp;
struct sctp_hdr *sctp;
struct rte_flow_item_gtp *gtp;
- struct ipv4_hdr *gtp_ipv4;
- struct ipv6_hdr *gtp_ipv6;
+ struct rte_ipv4_hdr *gtp_ipv4;
+ struct rte_ipv6_hdr *gtp_ipv6;
uint8_t size, dst = 0;
uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
int len;
if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
- gtp_ipv4 = (struct ipv4_hdr *)
+ gtp_ipv4 = (struct rte_ipv4_hdr *)
((unsigned char *)gtp +
sizeof(struct rte_flow_item_gtp));
gtp_ipv4->version_ihl =
rte_cpu_to_be_16(
I40E_FDIR_INNER_IP_DEFAULT_LEN);
payload = (unsigned char *)gtp_ipv4 +
- sizeof(struct ipv4_hdr);
+ sizeof(struct rte_ipv4_hdr);
} else if (cus_pctype->index ==
I40E_CUSTOMIZED_GTPU_IPV6) {
gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
- gtp_ipv6 = (struct ipv6_hdr *)
+ gtp_ipv6 = (struct rte_ipv6_hdr *)
((unsigned char *)gtp +
sizeof(struct rte_flow_item_gtp));
gtp_ipv6->vtc_flow =
gtp_ipv6->hop_limits =
I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
payload = (unsigned char *)gtp_ipv6 +
- sizeof(struct ipv6_hdr);
+ sizeof(struct rte_ipv6_hdr);
} else
payload = (unsigned char *)gtp +
sizeof(struct rte_flow_item_gtp);
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
.src_addr = input->flow.ip4_flow.src_ip,
.dst_addr = input->flow.ip4_flow.dst_ip,
.time_to_live = input->flow.ip4_flow.ttl,
.type_of_service = input->flow.ip4_flow.tos,
};
- attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+ attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
.src_addr = mask->ipv4_mask.src_ip,
.dst_addr = mask->ipv4_mask.dst_ip,
.time_to_live = mask->ipv4_mask.ttl,
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
.hop_limits = input->flow.ipv6_flow.hop_limits,
.proto = input->flow.ipv6_flow.proto,
};
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
p_parser->keys[idx].off =
- offsetof(struct ipv4_hdr, time_to_live);
+ offsetof(struct rte_ipv4_hdr, time_to_live);
}
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
p_parser->keys[idx].off =
- offsetof(struct ipv6_hdr, hop_limits);
+ offsetof(struct rte_ipv6_hdr, hop_limits);
}
if (actions->type == RTE_FLOW_ACTION_TYPE_DEC_TTL) {
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_ADD;
int keys = NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
int off_base =
actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
- offsetof(struct ipv6_hdr, src_addr) :
- offsetof(struct ipv6_hdr, dst_addr);
+ offsetof(struct rte_ipv6_hdr, src_addr) :
+ offsetof(struct rte_ipv6_hdr, dst_addr);
const struct rte_flow_action_set_ipv6 *conf =
(const struct rte_flow_action_set_ipv6 *)actions->conf;
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
p_parser->keys[idx].off =
actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
- offsetof(struct ipv4_hdr, src_addr) :
- offsetof(struct ipv4_hdr, dst_addr);
+ offsetof(struct rte_ipv4_hdr, src_addr) :
+ offsetof(struct rte_ipv4_hdr, dst_addr);
p_parser->keys[idx].mask = ~UINT32_MAX;
p_parser->keys[idx].val =
((const struct rte_flow_action_set_ipv4 *)
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
- struct ipv6_hdr zero;
+ struct rte_ipv6_hdr zero;
uint32_t flow_mask;
int ret;
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
uint16_t *ether_type;
uint8_t *raw_pkt;
- struct ipv4_hdr *ip;
- struct ipv6_hdr *ip6;
+ struct rte_ipv4_hdr *ip;
+ struct rte_ipv6_hdr *ip6;
struct udp_hdr *udp;
struct tcp_hdr *tcp;
uint16_t len;
*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
switch (arfs->tuple.eth_proto) {
case RTE_ETHER_TYPE_IPv4:
- ip = (struct ipv4_hdr *)raw_pkt;
+ ip = (struct rte_ipv4_hdr *)raw_pkt;
ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
- ip->total_length = sizeof(struct ipv4_hdr);
+ ip->total_length = sizeof(struct rte_ipv4_hdr);
ip->next_proto_id = arfs->tuple.ip_proto;
ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
ip->dst_addr = arfs->tuple.dst_ipv4;
ip->src_addr = arfs->tuple.src_ipv4;
- len += sizeof(struct ipv4_hdr);
+ len += sizeof(struct rte_ipv4_hdr);
params->ipv4 = true;
raw_pkt = (uint8_t *)buff;
}
break;
case RTE_ETHER_TYPE_IPv6:
- ip6 = (struct ipv6_hdr *)raw_pkt;
+ ip6 = (struct rte_ipv6_hdr *)raw_pkt;
ip6->proto = arfs->tuple.ip_proto;
ip6->vtc_flow =
rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
IPV6_ADDR_LEN);
rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
IPV6_ADDR_LEN);
- len += sizeof(struct ipv6_hdr);
+ len += sizeof(struct rte_ipv6_hdr);
params->ipv6 = true;
raw_pkt = (uint8_t *)buff;
{
uint32_t packet_type = RTE_PTYPE_UNKNOWN;
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
struct rte_vlan_hdr *vlan_hdr;
uint16_t ethertype;
bool vlan_tagged = 0;
if (ethertype == RTE_ETHER_TYPE_IPv4) {
packet_type |= RTE_PTYPE_L3_IPV4;
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, len);
if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L4_TCP;
else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
packet_type |= RTE_PTYPE_L4_UDP;
} else if (ethertype == RTE_ETHER_TYPE_IPv6) {
packet_type |= RTE_PTYPE_L3_IPV6;
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, len);
if (ipv6_hdr->proto == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L4_TCP;
else if (ipv6_hdr->proto == IPPROTO_UDP)
static inline uint8_t
qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
{
- struct ipv4_hdr *ip;
+ struct rte_ipv4_hdr *ip;
uint16_t pkt_csum;
uint16_t calc_csum;
uint16_t val;
if (unlikely(val)) {
m->packet_type = qede_rx_cqe_to_pkt_type(flag);
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
pkt_csum = ip->hdr_checksum;
ip->hdr_checksum = 0;
switch (first_m_seg->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) {
case PKT_TX_IPV4: {
- const struct ipv4_hdr *iphe4;
+ const struct rte_ipv4_hdr *iphe4;
- iphe4 = (const struct ipv4_hdr *)(hdr_addr + iph_off);
+ iphe4 = (const struct rte_ipv4_hdr *)(hdr_addr + iph_off);
rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
packet_id = rte_be_to_cpu_16(packet_id);
break;
/* Handle IP header */
if (m->ol_flags & PKT_TX_IPV4) {
- const struct ipv4_hdr *iphe4;
+ const struct rte_ipv4_hdr *iphe4;
- iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
+ iphe4 = (const struct rte_ipv4_hdr *)(tsoh + nh_off);
rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
packet_id = rte_be_to_cpu_16(packet_id);
} else if (m->ol_flags & PKT_TX_IPV6) {
.size = sizeof(uint8_t),
.field_index = 0,
.input_index = 0,
- .offset = offsetof(struct ipv4_hdr, next_proto_id),
+ .offset = offsetof(struct rte_ipv4_hdr, next_proto_id),
},
/* Source IP address (IPv4) */
.size = sizeof(uint32_t),
.field_index = 1,
.input_index = 1,
- .offset = offsetof(struct ipv4_hdr, src_addr),
+ .offset = offsetof(struct rte_ipv4_hdr, src_addr),
},
/* Destination IP address (IPv4) */
.size = sizeof(uint32_t),
.field_index = 2,
.input_index = 2,
- .offset = offsetof(struct ipv4_hdr, dst_addr),
+ .offset = offsetof(struct rte_ipv4_hdr, dst_addr),
},
/* Source Port */
.size = sizeof(uint16_t),
.field_index = 3,
.input_index = 3,
- .offset = sizeof(struct ipv4_hdr) +
+ .offset = sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, src_port),
},
.size = sizeof(uint16_t),
.field_index = 4,
.input_index = 3,
- .offset = sizeof(struct ipv4_hdr) +
+ .offset = sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, dst_port),
},
};
.size = sizeof(uint8_t),
.field_index = 0,
.input_index = 0,
- .offset = offsetof(struct ipv6_hdr, proto),
+ .offset = offsetof(struct rte_ipv6_hdr, proto),
},
/* Source IP address (IPv6) */
.size = sizeof(uint32_t),
.field_index = 1,
.input_index = 1,
- .offset = offsetof(struct ipv6_hdr, src_addr[0]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr[0]),
},
[2] = {
.size = sizeof(uint32_t),
.field_index = 2,
.input_index = 2,
- .offset = offsetof(struct ipv6_hdr, src_addr[4]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr[4]),
},
[3] = {
.size = sizeof(uint32_t),
.field_index = 3,
.input_index = 3,
- .offset = offsetof(struct ipv6_hdr, src_addr[8]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr[8]),
},
[4] = {
.size = sizeof(uint32_t),
.field_index = 4,
.input_index = 4,
- .offset = offsetof(struct ipv6_hdr, src_addr[12]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr[12]),
},
/* Destination IP address (IPv6) */
.size = sizeof(uint32_t),
.field_index = 5,
.input_index = 5,
- .offset = offsetof(struct ipv6_hdr, dst_addr[0]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr[0]),
},
[6] = {
.size = sizeof(uint32_t),
.field_index = 6,
.input_index = 6,
- .offset = offsetof(struct ipv6_hdr, dst_addr[4]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr[4]),
},
[7] = {
.size = sizeof(uint32_t),
.field_index = 7,
.input_index = 7,
- .offset = offsetof(struct ipv6_hdr, dst_addr[8]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr[8]),
},
[8] = {
.size = sizeof(uint32_t),
.field_index = 8,
.input_index = 8,
- .offset = offsetof(struct ipv6_hdr, dst_addr[12]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr[12]),
},
/* Source Port */
.size = sizeof(uint16_t),
.field_index = 9,
.input_index = 9,
- .offset = sizeof(struct ipv6_hdr) +
+ .offset = sizeof(struct rte_ipv6_hdr) +
offsetof(struct tcp_hdr, src_port),
},
.size = sizeof(uint16_t),
.field_index = 10,
.input_index = 9,
- .offset = sizeof(struct ipv6_hdr) +
+ .offset = sizeof(struct rte_ipv6_hdr) +
offsetof(struct tcp_hdr, dst_port),
},
};
else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
l2_len += 8;
/* Don't verify checksum for packets with discontinuous L2 header */
- if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
+ if (unlikely(l2_len + sizeof(struct rte_ipv4_hdr) >
rte_pktmbuf_data_len(mbuf)))
return;
l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
- struct ipv4_hdr *iph = l3_hdr;
+ struct rte_ipv4_hdr *iph = l3_hdr;
/* ihl contains the number of 4-byte words in the header */
l3_len = 4 * (iph->version_ihl & 0xf);
PKT_RX_IP_CKSUM_BAD :
PKT_RX_IP_CKSUM_GOOD;
} else if (l3 == RTE_PTYPE_L3_IPV6) {
- l3_len = sizeof(struct ipv6_hdr);
+ l3_len = sizeof(struct rte_ipv6_hdr);
} else {
/* IPv6 extensions are not supported */
return;
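A worked example of the IHL arithmetic above:

	/* version_ihl == 0x45 -> version 4, IHL = 5 32-bit words,
	 * so l3_len = 4 * 5 = 20 bytes (a header without options) */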
void *l3_hdr = packet + l2_len;
if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
- struct ipv4_hdr *iph = l3_hdr;
+ struct rte_ipv4_hdr *iph = l3_hdr;
uint16_t cksum;
iph->hdr_checksum = 0;
/* common case: header is not fragmented */
if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
m->l4_len)) {
- struct ipv4_hdr *iph;
- struct ipv6_hdr *ip6h;
+ struct rte_ipv4_hdr *iph;
+ struct rte_ipv6_hdr *ip6h;
struct tcp_hdr *th;
uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
uint32_t tmp;
- iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);
th = RTE_PTR_ADD(iph, m->l3_len);
if ((iph->version_ihl >> 4) == 4) {
iph->hdr_checksum = 0;
ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
m->l3_len);
} else {
- ip6h = (struct ipv6_hdr *)iph;
+ ip6h = (struct rte_ipv6_hdr *)iph;
ip_paylen = ip6h->payload_len;
}
struct rte_mbuf *rxm)
{
uint32_t hlen, slen;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
struct tcp_hdr *tcp_hdr;
char *ptr;
hlen = sizeof(struct rte_ether_hdr);
if (rcd->v4) {
- if (unlikely(slen < hlen + sizeof(struct ipv4_hdr)))
- return hw->mtu - sizeof(struct ipv4_hdr)
+ if (unlikely(slen < hlen + sizeof(struct rte_ipv4_hdr)))
+ return hw->mtu - sizeof(struct rte_ipv4_hdr)
- sizeof(struct tcp_hdr);
- ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen);
hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
IPV4_IHL_MULTIPLIER;
} else if (rcd->v6) {
- if (unlikely(slen < hlen + sizeof(struct ipv6_hdr)))
- return hw->mtu - sizeof(struct ipv6_hdr) -
+ if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr)))
+ return hw->mtu - sizeof(struct rte_ipv6_hdr) -
sizeof(struct tcp_hdr);
- ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen);
- hlen += sizeof(struct ipv6_hdr);
+ ipv6_hdr = (struct rte_ipv6_hdr *)(ptr + hlen);
+ hlen += sizeof(struct rte_ipv6_hdr);
if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
int frag;
struct rte_ether_hdr *eth_hdr;
struct rte_arp_hdr *arp_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint16_t ether_type, offset;
uint16_t rx_cnt;
global_flag_stru_p->port_packets[2]++;
rte_spinlock_unlock(&global_flag_stru_p->lock);
}
- ipv4_hdr = (struct ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
+ ipv4_hdr = (struct rte_ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
if (ipv4_hdr->dst_addr == bond_ip) {
rte_ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
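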
.field_index = PROTO_FIELD_IPV4,
.input_index = PROTO_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, next_proto_id),
+ offsetof(struct rte_ipv4_hdr, next_proto_id),
},
/* next input field (IPv4 source address) - 4 consecutive bytes. */
{
.field_index = SRC_FIELD_IPV4,
.input_index = SRC_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, src_addr),
+ offsetof(struct rte_ipv4_hdr, src_addr),
},
/* next input field (IPv4 destination address) - 4 consecutive bytes. */
{
.field_index = DST_FIELD_IPV4,
.input_index = DST_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, dst_addr),
+ offsetof(struct rte_ipv4_hdr, dst_addr),
},
/*
* Next 2 fields (src & dst ports) form 4 consecutive bytes.
.field_index = SRCP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct ipv4_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, src_port),
},
{
.field_index = DSTP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct ipv4_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, dst_port),
},
};
/*
* Default payload in bytes for the IPv6 packet.
*/
-#define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct ipv4_hdr))
-#define IPV6_DEFAULT_PAYLOAD (IPV6_MTU_DEFAULT - sizeof(struct ipv6_hdr))
+#define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct rte_ipv4_hdr))
+#define IPV6_DEFAULT_PAYLOAD (IPV6_MTU_DEFAULT - sizeof(struct rte_ipv6_hdr))
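The arithmetic behind the two payload macros, assuming both MTU macros equal the standard 1500-byte Ethernet MTU:

	/* IPV4_DEFAULT_PAYLOAD = 1500 - 20 = 1480 bytes
	 * IPV6_DEFAULT_PAYLOAD = 1500 - 40 = 1460 bytes */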
/*
* Max number of fragments per packet expected - defined by config file.
/* if this is an IPv4 packet */
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- struct ipv4_hdr *ip_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
uint32_t ip_dst;
/* Read the lookup key (i.e. ip_dst) from the input packet */
- ip_hdr = rte_pktmbuf_mtod(m, struct ipv4_hdr *);
+ ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
/* Find destination port */
}
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* if this is an IPv6 packet */
- struct ipv6_hdr *ip_hdr;
+ struct rte_ipv6_hdr *ip_hdr;
ipv6 = 1;
/* Read the lookup key (i.e. ip_dst) from the input packet */
- ip_hdr = rte_pktmbuf_mtod(m, struct ipv6_hdr *);
+ ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);
/* Find destination port */
if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
.size = sizeof(uint8_t),
.field_index = 0,
.input_index = 0,
- .offset = offsetof(struct ipv4_hdr, next_proto_id),
+ .offset = offsetof(struct rte_ipv4_hdr, next_proto_id),
},
/* Source IP address (IPv4) */
.size = sizeof(uint32_t),
.field_index = 1,
.input_index = 1,
- .offset = offsetof(struct ipv4_hdr, src_addr),
+ .offset = offsetof(struct rte_ipv4_hdr, src_addr),
},
/* Destination IP address (IPv4) */
.size = sizeof(uint32_t),
.field_index = 2,
.input_index = 2,
- .offset = offsetof(struct ipv4_hdr, dst_addr),
+ .offset = offsetof(struct rte_ipv4_hdr, dst_addr),
},
/* Source Port */
.size = sizeof(uint16_t),
.field_index = 3,
.input_index = 3,
- .offset = sizeof(struct ipv4_hdr) +
+ .offset = sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, src_port),
},
.size = sizeof(uint16_t),
.field_index = 4,
.input_index = 3,
- .offset = sizeof(struct ipv4_hdr) +
+ .offset = sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, dst_port),
},
};
.size = sizeof(uint8_t),
.field_index = 0,
.input_index = 0,
- .offset = offsetof(struct ipv6_hdr, proto),
+ .offset = offsetof(struct rte_ipv6_hdr, proto),
},
/* Source IP address (IPv6) */
.size = sizeof(uint32_t),
.field_index = 1,
.input_index = 1,
- .offset = offsetof(struct ipv6_hdr, src_addr[0]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr[0]),
},
[2] = {
.size = sizeof(uint32_t),
.field_index = 2,
.input_index = 2,
- .offset = offsetof(struct ipv6_hdr, src_addr[4]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr[4]),
},
[3] = {
.size = sizeof(uint32_t),
.field_index = 3,
.input_index = 3,
- .offset = offsetof(struct ipv6_hdr, src_addr[8]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr[8]),
},
[4] = {
.size = sizeof(uint32_t),
.field_index = 4,
.input_index = 4,
- .offset = offsetof(struct ipv6_hdr, src_addr[12]),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr[12]),
},
/* Destination IP address (IPv6) */
.size = sizeof(uint32_t),
.field_index = 5,
.input_index = 5,
- .offset = offsetof(struct ipv6_hdr, dst_addr[0]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr[0]),
},
[6] = {
.size = sizeof(uint32_t),
.field_index = 6,
.input_index = 6,
- .offset = offsetof(struct ipv6_hdr, dst_addr[4]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr[4]),
},
[7] = {
.size = sizeof(uint32_t),
.field_index = 7,
.input_index = 7,
- .offset = offsetof(struct ipv6_hdr, dst_addr[8]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr[8]),
},
[8] = {
.size = sizeof(uint32_t),
.field_index = 8,
.input_index = 8,
- .offset = offsetof(struct ipv6_hdr, dst_addr[12]),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr[12]),
},
/* Source Port */
.size = sizeof(uint16_t),
.field_index = 9,
.input_index = 9,
- .offset = sizeof(struct ipv6_hdr) +
+ .offset = sizeof(struct rte_ipv6_hdr) +
offsetof(struct tcp_hdr, src_port),
},
.size = sizeof(uint16_t),
.field_index = 10,
.input_index = 9,
- .offset = sizeof(struct ipv6_hdr) +
+ .offset = sizeof(struct rte_ipv6_hdr) +
offsetof(struct tcp_hdr, dst_port),
},
};
/* if packet is IPv4 */
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- struct ipv4_hdr *ip_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
uint32_t ip_dst;
- ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
/* if it is a fragmented packet, then try to reassemble. */
if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
m = mo;
eth_hdr = rte_pktmbuf_mtod(m,
struct rte_ether_hdr *);
- ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
}
}
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* if packet is IPv6 */
struct ipv6_extension_fragment *frag_hdr;
- struct ipv6_hdr *ip_hdr;
+ struct rte_ipv6_hdr *ip_hdr;
- ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+ ip_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(ip_hdr);
if (mo != m) {
m = mo;
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+ ip_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
}
}
mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
{
struct rte_mbuf *mc;
- struct ipv4_hdr *iphdr;
+ struct rte_ipv4_hdr *iphdr;
uint32_t dest_addr, port_mask, port_num, use_clone;
int32_t hash;
uint16_t port;
} dst_eth_addr;
/* Remove the Ethernet header from the input packet */
- iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, (uint16_t)sizeof(struct rte_ether_hdr));
+ iphdr = (struct rte_ipv4_hdr *)rte_pktmbuf_adj(m, (uint16_t)sizeof(struct rte_ether_hdr));
RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
struct l2fwd_crypto_params *cparams)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ip_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
uint32_t ipdata_offset, data_len;
uint32_t pad_len = 0;
ipdata_offset = sizeof(struct rte_ether_hdr);
- ip_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
+ ip_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
ipdata_offset);
ipdata_offset += (ip_hdr->version_ihl & IPV4_HDR_IHL_MASK)
/***********************start of ACL part******************************/
#ifdef DO_RFC_1812_CHECKS
static inline int
-is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len);
+is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len);
#endif
static inline void
send_single_packet(struct rte_mbuf *m, uint16_t port);
*d = (unsigned char)(ip & 0xff);\
} while (0)
#define OFF_ETHHEAD (sizeof(struct rte_ether_hdr))
-#define OFF_IPV42PROTO (offsetof(struct ipv4_hdr, next_proto_id))
-#define OFF_IPV62PROTO (offsetof(struct ipv6_hdr, proto))
+#define OFF_IPV42PROTO (offsetof(struct rte_ipv4_hdr, next_proto_id))
+#define OFF_IPV62PROTO (offsetof(struct rte_ipv6_hdr, proto))
#define MBUF_IPV4_2PROTO(m) \
rte_pktmbuf_mtod_offset((m), uint8_t *, OFF_ETHHEAD + OFF_IPV42PROTO)
#define MBUF_IPV6_2PROTO(m) \
.size = sizeof(uint32_t),
.field_index = SRC_FIELD_IPV4,
.input_index = RTE_ACL_IPV4VLAN_SRC,
- .offset = offsetof(struct ipv4_hdr, src_addr) -
- offsetof(struct ipv4_hdr, next_proto_id),
+ .offset = offsetof(struct rte_ipv4_hdr, src_addr) -
+ offsetof(struct rte_ipv4_hdr, next_proto_id),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST_FIELD_IPV4,
.input_index = RTE_ACL_IPV4VLAN_DST,
- .offset = offsetof(struct ipv4_hdr, dst_addr) -
- offsetof(struct ipv4_hdr, next_proto_id),
+ .offset = offsetof(struct rte_ipv4_hdr, dst_addr) -
+ offsetof(struct rte_ipv4_hdr, next_proto_id),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = SRCP_FIELD_IPV4,
.input_index = RTE_ACL_IPV4VLAN_PORTS,
- .offset = sizeof(struct ipv4_hdr) -
- offsetof(struct ipv4_hdr, next_proto_id),
+ .offset = sizeof(struct rte_ipv4_hdr) -
+ offsetof(struct rte_ipv4_hdr, next_proto_id),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = DSTP_FIELD_IPV4,
.input_index = RTE_ACL_IPV4VLAN_PORTS,
- .offset = sizeof(struct ipv4_hdr) -
- offsetof(struct ipv4_hdr, next_proto_id) +
+ .offset = sizeof(struct rte_ipv4_hdr) -
+ offsetof(struct rte_ipv4_hdr, next_proto_id) +
sizeof(uint16_t),
},
};
.size = sizeof(uint32_t),
.field_index = SRC1_FIELD_IPV6,
.input_index = SRC1_FIELD_IPV6,
- .offset = offsetof(struct ipv6_hdr, src_addr) -
- offsetof(struct ipv6_hdr, proto),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr) -
+ offsetof(struct rte_ipv6_hdr, proto),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = SRC2_FIELD_IPV6,
.input_index = SRC2_FIELD_IPV6,
- .offset = offsetof(struct ipv6_hdr, src_addr) -
- offsetof(struct ipv6_hdr, proto) + sizeof(uint32_t),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr) -
+ offsetof(struct rte_ipv6_hdr, proto) + sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = SRC3_FIELD_IPV6,
.input_index = SRC3_FIELD_IPV6,
- .offset = offsetof(struct ipv6_hdr, src_addr) -
- offsetof(struct ipv6_hdr, proto) + 2 * sizeof(uint32_t),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr) -
+ offsetof(struct rte_ipv6_hdr, proto) + 2 * sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = SRC4_FIELD_IPV6,
.input_index = SRC4_FIELD_IPV6,
- .offset = offsetof(struct ipv6_hdr, src_addr) -
- offsetof(struct ipv6_hdr, proto) + 3 * sizeof(uint32_t),
+ .offset = offsetof(struct rte_ipv6_hdr, src_addr) -
+ offsetof(struct rte_ipv6_hdr, proto) + 3 * sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST1_FIELD_IPV6,
.input_index = DST1_FIELD_IPV6,
- .offset = offsetof(struct ipv6_hdr, dst_addr)
- - offsetof(struct ipv6_hdr, proto),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr)
+ - offsetof(struct rte_ipv6_hdr, proto),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST2_FIELD_IPV6,
.input_index = DST2_FIELD_IPV6,
- .offset = offsetof(struct ipv6_hdr, dst_addr) -
- offsetof(struct ipv6_hdr, proto) + sizeof(uint32_t),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr) -
+ offsetof(struct rte_ipv6_hdr, proto) + sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST3_FIELD_IPV6,
.input_index = DST3_FIELD_IPV6,
- .offset = offsetof(struct ipv6_hdr, dst_addr) -
- offsetof(struct ipv6_hdr, proto) + 2 * sizeof(uint32_t),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr) -
+ offsetof(struct rte_ipv6_hdr, proto) + 2 * sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST4_FIELD_IPV6,
.input_index = DST4_FIELD_IPV6,
- .offset = offsetof(struct ipv6_hdr, dst_addr) -
- offsetof(struct ipv6_hdr, proto) + 3 * sizeof(uint32_t),
+ .offset = offsetof(struct rte_ipv6_hdr, dst_addr) -
+ offsetof(struct rte_ipv6_hdr, proto) + 3 * sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = SRCP_FIELD_IPV6,
.input_index = SRCP_FIELD_IPV6,
- .offset = sizeof(struct ipv6_hdr) -
- offsetof(struct ipv6_hdr, proto),
+ .offset = sizeof(struct rte_ipv6_hdr) -
+ offsetof(struct rte_ipv6_hdr, proto),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = DSTP_FIELD_IPV6,
.input_index = SRCP_FIELD_IPV6,
- .offset = sizeof(struct ipv6_hdr) -
- offsetof(struct ipv6_hdr, proto) + sizeof(uint16_t),
+ .offset = sizeof(struct rte_ipv6_hdr) -
+ offsetof(struct rte_ipv6_hdr, proto) + sizeof(uint16_t),
},
};
{
uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
unsigned char a, b, c, d;
- struct ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
- struct ipv4_hdr *,
+ struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
uint32_t_to_char(rte_bswap32(ipv4_hdr->src_addr), &a, &b, &c, &d);
{
unsigned i;
uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
- struct ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
- struct ipv6_hdr *,
+ struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
printf("Packet Src");
prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
int index)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct rte_mbuf *pkt = pkts_in[index];
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
- ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
/* Check to make sure the packet is valid (RFC1812) */
#ifdef DO_RFC_1812_CHECKS
static inline int
-is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
+is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
{
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
/*
* 1. The packet length reported by the Link Layer must be large
* enough to hold the minimum length legal IP datagram (20 bytes).
*/
- if (link_len < sizeof(struct ipv4_hdr))
+ if (link_len < sizeof(struct rte_ipv4_hdr))
return -1;
/* 2. The IP checksum must be correct. */
* datagram header, whose length is specified in the IP header length
* field.
*/
- if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
+ if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
return -5;
return 0;
#ifdef DO_RFC_1812_CHECKS
static inline int
-is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
+is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
{
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
/*
* 1. The packet length reported by the Link Layer must be large
* enough to hold the minimum length legal IP datagram (20 bytes).
*/
- if (link_len < sizeof(struct ipv4_hdr))
+ if (link_len < sizeof(struct rte_ipv4_hdr))
return -1;
/* 2. The IP checksum must be correct. */
* datagram header, whose length is specified in the IP header length
* field.
*/
- if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
+ if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
return -5;
return 0;
}
static inline uint16_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t * ipv4_l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
switch (ipv4_hdr->next_proto_id) {
case IPPROTO_TCP:
tcp = (struct tcp_hdr *)((unsigned char *)ipv4_hdr +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
key.port_src = rte_be_to_cpu_16(tcp->src_port);
break;
case IPPROTO_UDP:
udp = (struct udp_hdr *)((unsigned char *)ipv4_hdr +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
key.port_src = rte_be_to_cpu_16(udp->src_port);
break;
}
static inline uint16_t
-get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint16_t portid,
+get_ipv6_dst_port(struct rte_ipv6_hdr *ipv6_hdr, uint16_t portid,
lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
struct ipv6_5tuple key;
switch (ipv6_hdr->proto) {
case IPPROTO_TCP:
tcp = (struct tcp_hdr *)((unsigned char *) ipv6_hdr +
- sizeof(struct ipv6_hdr));
+ sizeof(struct rte_ipv6_hdr));
key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
key.port_src = rte_be_to_cpu_16(tcp->src_port);
break;
case IPPROTO_UDP:
udp = (struct udp_hdr *)((unsigned char *) ipv6_hdr +
- sizeof(struct ipv6_hdr));
+ sizeof(struct rte_ipv6_hdr));
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
key.port_src = rte_be_to_cpu_16(udp->src_port);
break;
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static inline uint16_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint32_t next_hop;
struct lcore_conf *qconf)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
void *d_addr_bytes;
uint16_t dst_port;
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
/* Handle IPv4 headers.*/
ipv4_hdr =
- rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
#ifdef DO_RFC_1812_CHECKS
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* Handle IPv6 headers.*/
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
ipv6_hdr =
- rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
dst_port = get_ipv6_dst_port(ipv6_hdr, portid,
#ifdef DO_RFC_1812_CHECKS
static inline int
-is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
+is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
{
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
/*
* 1. The packet length reported by the Link Layer must be large
* enough to hold the minimum length legal IP datagram (20 bytes).
*/
- if (link_len < sizeof(struct ipv4_hdr))
+ if (link_len < sizeof(struct rte_ipv4_hdr))
return -1;
/* 2. The IP checksum must be correct. */
* datagram header, whose length is specified in the IP header length
* field.
*/
- if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
+ if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
return -5;
return 0;
}
static inline uint16_t
-get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+get_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t *l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
switch (ipv4_hdr->next_proto_id) {
case IPPROTO_TCP:
tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
key.port_src = rte_be_to_cpu_16(tcp->src_port);
break;
case IPPROTO_UDP:
udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
key.port_src = rte_be_to_cpu_16(udp->src_port);
break;
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static inline uint32_t
-get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+get_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t *l3fwd_lookup_struct)
{
uint32_t next_hop;
lookup_struct_t *l3fwd_lookup_struct)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
void *tmp;
uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
#ifdef DO_RFC_1812_CHECKS
#ifdef DO_RFC_1812_CHECKS
static inline int
-is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
+is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
{
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
/*
* 1. The packet length reported by the Link Layer must be large
* enough to hold the minimum length legal IP datagram (20 bytes).
*/
- if (link_len < sizeof(struct ipv4_hdr))
+ if (link_len < sizeof(struct rte_ipv4_hdr))
return -1;
/* 2. The IP checksum must be correct. */
* datagram header, whose length is specified in the IP header length
* field.
*/
- if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
+ if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
return -5;
return 0;
*p[2] = te[2];
*p[3] = te[3];
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
}
te = *(vector unsigned int *)eth_hdr;
ve = (vector unsigned int)val_eth[dst_port[0]];
- rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
+ rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
pkt->packet_type);
/* dynamically vec_sel te and ve for MASK_ETH (0x3f) */
#define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
/* Minimum value of IPV4 total length (20B) in network byte order. */
-#define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
+#define IPV4_MIN_LEN_BE (sizeof(struct rte_ipv4_hdr) << 8)
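/*
 * Why the shift above yields network byte order (note, not part of this
 * patch): sizeof(struct rte_ipv4_hdr) is 20 (0x14), which fits in one
 * byte, so placing it in the high byte of a 16-bit word gives 0x1400 --
 * the big-endian encoding of 20 as seen from a little-endian host. On
 * such hosts it equals rte_cpu_to_be_16(sizeof(struct rte_ipv4_hdr)).
 */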
/*
* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
* to BAD_PORT value.
*/
static __rte_always_inline void
-rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
+rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
{
uint8_t ihl;
struct rte_hash *ipv4_l3fwd_lookup_struct =
(struct rte_hash *)lookup_struct;
- ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
+ ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct rte_ipv4_hdr, time_to_live);
/*
* Get 5 tuple: dst port, src port, dst IP address,
struct rte_hash *ipv6_l3fwd_lookup_struct =
(struct rte_hash *)lookup_struct;
- ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
+ ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct rte_ipv6_hdr, payload_len);
void *data0 = ipv6_hdr;
void *data1 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t);
void *data2 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t) + sizeof(xmm_t);
uint16_t ether_type;
void *l3;
int hdr_len;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
l3 = (uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr);
if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
- ipv4_hdr = (struct ipv4_hdr *)l3;
+ ipv4_hdr = (struct rte_ipv4_hdr *)l3;
hdr_len = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
IPV4_IHL_MULTIPLIER;
- if (hdr_len == sizeof(struct ipv4_hdr)) {
+ if (hdr_len == sizeof(struct rte_ipv4_hdr)) {
packet_type |= RTE_PTYPE_L3_IPV4;
if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L4_TCP;
} else
packet_type |= RTE_PTYPE_L3_IPV4_EXT;
} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
- ipv6_hdr = (struct ipv6_hdr *)l3;
+ ipv6_hdr = (struct rte_ipv6_hdr *)l3;
if (ipv6_hdr->proto == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
else if (ipv6_hdr->proto == IPPROTO_UDP)
struct lcore_conf *qconf)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint16_t dst_port;
uint32_t tcp_or_udp;
uint32_t l3_ptypes;
if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
/* Handle IPv4 headers.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
#ifdef DO_RFC_1812_CHECKS
send_single_packet(qconf, m, dst_port);
} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
/* Handle IPv6 headers.*/
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
dst_port = em_get_ipv6_dst_port(ipv6_hdr, portid,
uint16_t portid)
{
uint16_t next_hop;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
uint32_t tcp_or_udp;
uint32_t l3_ptypes;
if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
/* Handle IPv4 headers.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
/* Handle IPv6 headers.*/
- ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
+ ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
{
int32x4_t tmpdata0 = vld1q_s32(rte_pktmbuf_mtod_offset(m0, int32_t *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
key->xmm = vandq_s32(tmpdata0, mask0);
}
int32x4_t tmpdata0 = vld1q_s32(
rte_pktmbuf_mtod_offset(m0, int *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv6_hdr, payload_len)));
+ offsetof(struct rte_ipv6_hdr, payload_len)));
int32x4_t tmpdata1 = vld1q_s32(
rte_pktmbuf_mtod_offset(m0, int *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv6_hdr, payload_len) + 8));
+ offsetof(struct rte_ipv6_hdr, payload_len) + 8));
int32x4_t tmpdata2 = vld1q_s32(
rte_pktmbuf_mtod_offset(m0, int *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv6_hdr, payload_len) + 16));
+ offsetof(struct rte_ipv6_hdr, payload_len) + 16));
key->xmm[0] = vandq_s32(tmpdata0, mask0);
key->xmm[1] = tmpdata1;
__m128i tmpdata0 = _mm_loadu_si128(
rte_pktmbuf_mtod_offset(m0, __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
key->xmm = _mm_and_si128(tmpdata0, mask0);
}
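/*
 * Why the loads above start at offsetof(struct rte_ipv4_hdr, time_to_live)
 * (note, not part of this patch): the 16 bytes covered are
 * ttl(1) | proto(1) | hdr_checksum(2) | src_addr(4) | dst_addr(4) |
 * L4 src_port(2) | L4 dst_port(2), i.e. the whole 5-tuple in one unaligned
 * load. mask0 then zeroes ttl and hdr_checksum; a sketch of such a mask,
 * following the l3fwd exact-match convention:
 */
#define ALL_32_BITS 0xffffffff
#define BIT_8_TO_15 0x0000ff00	/* keep only the protocol byte */
mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15);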
__m128i tmpdata0 = _mm_loadu_si128(
rte_pktmbuf_mtod_offset(m0, __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv6_hdr, payload_len)));
+ offsetof(struct rte_ipv6_hdr, payload_len)));
__m128i tmpdata1 = _mm_loadu_si128(
rte_pktmbuf_mtod_offset(m0, __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv6_hdr, payload_len) +
+ offsetof(struct rte_ipv6_hdr, payload_len) +
sizeof(__m128i)));
__m128i tmpdata2 = _mm_loadu_si128(
rte_pktmbuf_mtod_offset(m0, __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv6_hdr, payload_len) +
+ offsetof(struct rte_ipv6_hdr, payload_len) +
sizeof(__m128i) + sizeof(__m128i)));
key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
uint16_t portid)
{
uint8_t next_hop;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
uint32_t tcp_or_udp;
uint32_t l3_ptypes;
if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
/* Handle IPv4 headers.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
/* Handle IPv6 headers.*/
- ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
+ ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,
(struct rte_lpm *)lookup_struct;
return (uint16_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
- rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
+ rte_be_to_cpu_32(((struct rte_ipv4_hdr *)ipv4_hdr)->dst_addr),
&next_hop) == 0) ? next_hop : portid);
}
(struct rte_lpm6 *)lookup_struct;
return (uint16_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
- ((struct ipv6_hdr *)ipv6_hdr)->dst_addr,
+ ((struct rte_ipv6_hdr *)ipv6_hdr)->dst_addr,
&next_hop) == 0) ? next_hop : portid);
}
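/*
 * Setup sketch, not part of this patch: the lookups above resolve a
 * destination address to a next-hop id installed at init time. The table
 * name, sizes and the route below are assumptions.
 */
#include <rte_lpm.h>

struct rte_lpm_config config = { .max_rules = 1024, .number_tbl8s = 256 };
struct rte_lpm *lpm = rte_lpm_create("l3fwd_lpm", rte_socket_id(), &config);

/* 198.51.100.0/24 -> next hop 1 (addresses in host byte order) */
rte_lpm_add(lpm, 0xc6336400, 24, 1);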
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint16_t portid)
{
- struct ipv6_hdr *ipv6_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
return lpm_get_ipv4_dst_port(ipv4_hdr, portid,
qconf->ipv4_lookup_struct);
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+ ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
return lpm_get_ipv6_dst_port(ipv6_hdr, portid,
qconf->ipv6_lookup_struct);
uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
struct rte_ether_hdr *eth_hdr;
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+ ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
ipv6_hdr->dst_addr, &next_hop) == 0)
struct lcore_conf *qconf)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
/* Handle IPv4 headers.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
#ifdef DO_RFC_1812_CHECKS
send_single_packet(qconf, m, dst_port);
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* Handle IPv6 headers.*/
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
dst_port = lpm_get_ipv6_dst_port(ipv6_hdr, portid,
vector unsigned int *dip,
uint32_t *ipv4_flag)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;
uint32_t x0, x1, x2, x3;
eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x0 = ipv4_hdr->dst_addr;
ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
rte_compiler_barrier();
eth_hdr = rte_pktmbuf_mtod(pkt[1], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x1 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[1]->packet_type;
rte_compiler_barrier();
eth_hdr = rte_pktmbuf_mtod(pkt[2], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x2 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[2]->packet_type;
rte_compiler_barrier();
eth_hdr = rte_pktmbuf_mtod(pkt[3], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x3 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[3]->packet_type;
int32x4_t *dip,
uint32_t *ipv4_flag)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;
int32_t dst[FWDSTEP];
eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
dst[0] = ipv4_hdr->dst_addr;
ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
eth_hdr = rte_pktmbuf_mtod(pkt[1], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
dst[1] = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[1]->packet_type;
eth_hdr = rte_pktmbuf_mtod(pkt[2], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
dst[2] = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[2]->packet_type;
eth_hdr = rte_pktmbuf_mtod(pkt[3], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
dst[3] = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[3]->packet_type;
__m128i *dip,
uint32_t *ipv4_flag)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;
uint32_t x0, x1, x2, x3;
eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x0 = ipv4_hdr->dst_addr;
ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
eth_hdr = rte_pktmbuf_mtod(pkt[1], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x1 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[1]->packet_type;
eth_hdr = rte_pktmbuf_mtod(pkt[2], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x2 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[2]->packet_type;
eth_hdr = rte_pktmbuf_mtod(pkt[3], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x3 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[3]->packet_type;
vst1q_u32(p[2], ve[2]);
vst1q_u32(p[3], ve[3]);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
}
ve = vreinterpretq_u32_s32(val_eth[dst_port[0]]);
- rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
+ rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
pkt->packet_type);
ve = vcopyq_laneq_u32(ve, 3, te, 3);
_mm_storeu_si128(p[2], te[2]);
_mm_storeu_si128(p[3], te[3]);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
}
te = _mm_loadu_si128((__m128i *)eth_hdr);
ve = val_eth[dst_port[0]];
- rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
+ rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
pkt->packet_type);
te = _mm_blend_epi16(te, ve, MASK_ETH);
for (j = 0; j < bsz_rd; j ++) {
struct rte_mbuf *pkt;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst, pos;
uint32_t port;
pkt = lp->mbuf_in.array[j];
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt,
- struct ipv4_hdr *,
+ struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
#ifdef DO_RFC_1812_CHECKS
static inline int
-is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
+is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
{
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
/*
* 1. The packet length reported by the Link Layer must be large
* enough to hold the minimum length legal IP datagram (20 bytes).
*/
- if (link_len < sizeof(struct ipv4_hdr))
+ if (link_len < sizeof(struct rte_ipv4_hdr))
return -1;
/* 2. The IP checksum must be correct. */
* datagram header, whose length is specified in the IP header length
* field.
*/
- if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
+ if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
return -5;
return 0;
int ret = 0;
union ipv4_5tuple_host key;
- ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
+ ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct rte_ipv4_hdr, time_to_live);
__m128i data = _mm_loadu_si128((__m128i *)(ipv4_hdr));
/* Get 5 tuple: dst port, src port, dst IP address, src IP address and
protocol */
int ret = 0;
union ipv6_5tuple_host key;
- ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
+ ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct rte_ipv6_hdr, payload_len);
__m128i data0 = _mm_loadu_si128((__m128i *)(ipv6_hdr));
__m128i data1 = _mm_loadu_si128((__m128i *)(((uint8_t *)ipv6_hdr) +
sizeof(__m128i)));
uint32_t next_hop;
return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
- rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
+ rte_be_to_cpu_32(((struct rte_ipv4_hdr *)ipv4_hdr)->dst_addr),
&next_hop) == 0) ? next_hop : portid);
}
uint32_t next_hop;
return ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
- ((struct ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
+ ((struct rte_ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
next_hop : portid);
}
#endif
simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
{
struct rte_ether_hdr *eth_hdr[8];
- struct ipv4_hdr *ipv4_hdr[8];
+ struct rte_ipv4_hdr *ipv4_hdr[8];
uint16_t dst_port[8];
int32_t ret[8];
union ipv4_5tuple_host key[8];
eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct rte_ether_hdr *);
/* Handle IPv4 headers.*/
- ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
+ ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
- ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
+ ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
- ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
+ ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
- ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
+ ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
- ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
+ ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
- ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
+ ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
- ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
+ ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
- ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
+ ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
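/*
 * Note, not part of this patch: the 8-packet variant exists so that the
 * eight 5-tuple keys extracted from the headers above can be resolved with
 * one bulk lookup (rte_hash_lookup_bulk()) instead of eight scalar
 * lookups, amortizing the hash computation and memory latency.
 */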
#ifdef DO_RFC_1812_CHECKS
data[0] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[0], __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
data[1] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[1], __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
data[2] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[2], __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
data[3] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[3], __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
data[4] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[4], __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
data[5] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[5], __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
data[6] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[6], __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
data[7] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[7], __m128i *,
sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, time_to_live)));
+ offsetof(struct rte_ipv4_hdr, time_to_live)));
key[0].xmm = _mm_and_si128(data[0], mask0);
key[1].xmm = _mm_and_si128(data[1], mask0);
{
__m128i tmpdata0 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0,
__m128i *, sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv6_hdr, payload_len)));
+ offsetof(struct rte_ipv6_hdr, payload_len)));
__m128i tmpdata1 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0,
__m128i *, sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i)));
+ offsetof(struct rte_ipv6_hdr, payload_len) + sizeof(__m128i)));
__m128i tmpdata2 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0,
__m128i *, sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i) +
+ offsetof(struct rte_ipv6_hdr, payload_len) + sizeof(__m128i) +
sizeof(__m128i)));
key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
key->xmm[1] = tmpdata1;
struct rte_ether_hdr *eth_hdr[8];
union ipv6_5tuple_host key[8];
- __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8];
+ __attribute__((unused)) struct rte_ipv6_hdr *ipv6_hdr[8];
eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct rte_ether_hdr *);
eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct rte_ether_hdr *);
eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct rte_ether_hdr *);
/* Handle IPv6 headers.*/
- ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
+ ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
- ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
+ ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
- ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
+ ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
- ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
+ ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
- ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
+ ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
- ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
+ ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
- ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
+ ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
- ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
+ ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
get_ipv6_5tuple(m[0], mask1, mask2, &key[0]);
l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
/* Handle IPv4 headers.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
#ifdef DO_RFC_1812_CHECKS
send_single_packet(m, dst_port);
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* Handle IPv6 headers.*/
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
dst_port = get_ipv6_dst_port(ipv6_hdr, portid,
#define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
/* Minimum value of IPV4 total length (20B) in network byte order. */
-#define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
+#define IPV4_MIN_LEN_BE (sizeof(struct rte_ipv4_hdr) << 8)
/*
* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
* to BAD_PORT value.
*/
static __rte_always_inline void
-rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
+rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
{
uint8_t ihl;
get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
struct rte_ether_hdr *eth_hdr;
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+ ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
return (uint16_t) ((rte_lpm6_lookup(
RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint16_t portid)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint32_t dst_ipv4;
uint16_t dp;
__m128i te, ve;
eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
dst_ipv4 = ipv4_hdr->dst_addr;
dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
__m128i *dip,
uint32_t *ipv4_flag)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;
uint32_t x0, x1, x2, x3;
eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x0 = ipv4_hdr->dst_addr;
ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
eth_hdr = rte_pktmbuf_mtod(pkt[1], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x1 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[1]->packet_type;
eth_hdr = rte_pktmbuf_mtod(pkt[2], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x2 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[2]->packet_type;
eth_hdr = rte_pktmbuf_mtod(pkt[3], struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x3 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[3]->packet_type;
_mm_store_si128(p[2], te[2]);
_mm_store_si128(p[3], te[3]);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->packet_type);
- rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
+ rfc1812_process((struct rte_ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
}
static inline void
handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst_ip[PKT_READ_SIZE];
const void *key_ptrs[PKT_READ_SIZE];
unsigned int i;
for (i = 0; i < num_packets; i++) {
/* Handle IPv4 header.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
key_ptrs[i] = &ipv4_dst_ip[i];
efd_value_t data[RTE_EFD_BURST_MAX];
const void *key_ptrs[RTE_EFD_BURST_MAX];
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst_ip[RTE_EFD_BURST_MAX];
for (i = 0; i < rx_count; i++) {
/* Handle IPv4 header.*/
- ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
key_ptrs[i] = (void *)&ipv4_dst_ip[i];
parse_ethernet(struct rte_ether_hdr *eth_hdr, union tunnel_offload_info *info,
uint8_t *l4_proto)
{
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
uint16_t ethertype;
info->outer_l2_len = sizeof(struct rte_ether_hdr);
switch (ethertype) {
case RTE_ETHER_TYPE_IPv4:
- ipv4_hdr = (struct ipv4_hdr *)
+ ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)eth_hdr + info->outer_l2_len);
- info->outer_l3_len = sizeof(struct ipv4_hdr);
+ info->outer_l3_len = sizeof(struct rte_ipv4_hdr);
*l4_proto = ipv4_hdr->next_proto_id;
break;
case RTE_ETHER_TYPE_IPv6:
- ipv6_hdr = (struct ipv6_hdr *)
+ ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)eth_hdr + info->outer_l2_len);
- info->outer_l3_len = sizeof(struct ipv6_hdr);
+ info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
*l4_proto = ipv6_hdr->proto;
break;
default:
void *l3_hdr = NULL;
uint8_t l4_proto;
uint16_t ethertype;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
struct udp_hdr *udp_hdr;
struct tcp_hdr *tcp_hdr;
struct sctp_hdr *sctp_hdr;
l3_hdr = (char *)eth_hdr + info->l2_len;
if (ethertype == RTE_ETHER_TYPE_IPv4) {
- ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_IPV4;
ol_flags |= PKT_TX_IP_CKSUM;
- info->l3_len = sizeof(struct ipv4_hdr);
+ info->l3_len = sizeof(struct rte_ipv4_hdr);
l4_proto = ipv4_hdr->next_proto_id;
} else if (ethertype == RTE_ETHER_TYPE_IPv6) {
- ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
- info->l3_len = sizeof(struct ipv6_hdr);
+ ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
+ info->l3_len = sizeof(struct rte_ipv6_hdr);
l4_proto = ipv6_hdr->proto;
ol_flags |= PKT_TX_IPV6;
} else
/*Allocate space for new ethernet, IPv4, UDP and VXLAN headers*/
struct rte_ether_hdr *pneth = (struct rte_ether_hdr *) rte_pktmbuf_prepend(m,
- sizeof(struct rte_ether_hdr) + sizeof(struct ipv4_hdr)
+ sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr)
+ sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr));
- struct ipv4_hdr *ip = (struct ipv4_hdr *) &pneth[1];
+ struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *) &pneth[1];
struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
struct rte_vxlan_hdr *vxlan = (struct rte_vxlan_hdr *) &udp[1];
/* copy in IP header */
ip = rte_memcpy(ip, &app_ip_hdr[vport_id],
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
ip->total_length = rte_cpu_to_be_16(m->pkt_len
- sizeof(struct rte_ether_hdr));
}
m->outer_l2_len = sizeof(struct rte_ether_hdr);
- m->outer_l3_len = sizeof(struct ipv4_hdr);
+ m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
ol_flags |= PKT_TX_TUNNEL_VXLAN;
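/*
 * For reference (note, not part of this patch): the encapsulation above
 * prepends sizeof(struct rte_ether_hdr) (14) + sizeof(struct rte_ipv4_hdr)
 * (20) + sizeof(struct udp_hdr) (8) + sizeof(struct rte_vxlan_hdr) (8)
 * = 50 bytes of outer headers; PKT_TX_TUNNEL_VXLAN plus the
 * outer_l2_len/outer_l3_len set just above tell the NIC where the inner
 * frame starts for checksum and TSO offload.
 */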
#define VXLAN_HF_VNI 0x08000000
#define DEFAULT_VXLAN_PORT 4789
-extern struct ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
+extern struct rte_ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
extern struct rte_ether_hdr app_l2_hdr[VXLAN_N_PORTS];
extern uint8_t tx_checksum;
extern uint16_t tso_segsz;
/* VXLAN device */
struct vxlan_conf vxdev;
-struct ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
+struct rte_ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
struct rte_ether_hdr app_l2_hdr[VXLAN_N_PORTS];
/* local VTEP IP address */
int i, ret;
struct rte_ether_hdr *pkt_hdr;
uint64_t portid = vdev->vid;
- struct ipv4_hdr *ip;
+ struct rte_ipv4_hdr *ip;
struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
static void virtio_tx_offload(struct rte_mbuf *m)
{
void *l3_hdr;
- struct ipv4_hdr *ipv4_hdr = NULL;
+ struct rte_ipv4_hdr *ipv4_hdr = NULL;
struct tcp_hdr *tcp_hdr = NULL;
struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
* Note: IPv4 options are handled by dedicated pattern items.
*/
struct rte_flow_item_ipv4 {
- struct ipv4_hdr hdr; /**< IPv4 header definition. */
+ struct rte_ipv4_hdr hdr; /**< IPv4 header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_IPV4. */
* RTE_FLOW_ITEM_TYPE_IPV6_EXT.
*/
struct rte_flow_item_ipv6 {
- struct ipv6_hdr hdr; /**< IPv6 header definition. */
+ struct rte_ipv6_hdr hdr; /**< IPv6 header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_IPV6. */
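/*
 * Illustrative use, not part of this patch: a pattern matching one
 * destination address through the renamed header. The address is an
 * assumption; attr/actions and the rte_flow_create() call are elided.
 */
struct rte_flow_item_ipv4 ip_spec = {
	.hdr.dst_addr = RTE_BE32(0xc0a80001), /* 192.168.0.1 */
};
struct rte_flow_item_ipv4 ip_mask = {
	.hdr.dst_addr = RTE_BE32(0xffffffff),
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec, .mask = &ip_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};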
}
static inline void
-rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
- struct ipv6_hdr **ipv6_hdr)
+rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
+ struct rte_ipv6_hdr **ipv6_hdr)
{
struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
struct rte_vlan_hdr *vlan_hdr;
switch (eth_hdr->ether_type) {
case RTE_BE16(RTE_ETHER_TYPE_IPv4):
- *ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ *ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
break;
case RTE_BE16(RTE_ETHER_TYPE_IPv6):
- *ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+ *ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
break;
case RTE_BE16(RTE_ETHER_TYPE_VLAN):
vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
switch (vlan_hdr->eth_proto) {
case RTE_BE16(RTE_ETHER_TYPE_IPv4):
- *ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);
+ *ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
break;
case RTE_BE16(RTE_ETHER_TYPE_IPv6):
- *ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);
+ *ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
break;
default:
break;
void *tuple;
struct rte_ipv4_tuple ipv4_tuple;
struct rte_ipv6_tuple ipv6_tuple;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
static inline void
update_header(struct gro_tcp4_item *item)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct rte_mbuf *pkt = item->firstseg;
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
pkt->l2_len);
ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
pkt->l2_len);
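/*
 * Note, not part of this patch: after GRO coalesces TCP segments,
 * pkt->pkt_len covers the merged packet, so total_length must be
 * recomputed as above; the other header fields are reused from the first
 * segment.
 */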
uint64_t start_time)
{
struct rte_ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t tcp_dl, ip_id, hdr_len, frag_off;
uint8_t find;
eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- ipv4_hdr = (struct ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
+ ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;
uint8_t is_atomic)
{
struct rte_mbuf *pkt_orig = item->firstseg;
- struct ipv4_hdr *iph_orig;
+ struct rte_ipv4_hdr *iph_orig;
struct tcp_hdr *tcph_orig;
uint16_t len, tcp_hl_orig;
- iph_orig = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt_orig, char *) +
+ iph_orig = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt_orig, char *) +
l2_offset + pkt_orig->l2_len);
tcph_orig = (struct tcp_hdr *)((char *)iph_orig + pkt_orig->l3_len);
tcp_hl_orig = pkt_orig->l4_len;
static inline void
update_vxlan_header(struct gro_vxlan_tcp4_item *item)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct udp_hdr *udp_hdr;
struct rte_mbuf *pkt = item->inner_item.firstseg;
uint16_t len;
/* Update the outer IPv4 header. */
len = pkt->pkt_len - pkt->outer_l2_len;
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
pkt->outer_l2_len);
ipv4_hdr->total_length = rte_cpu_to_be_16(len);
/* Update the inner IPv4 header. */
len -= pkt->l2_len;
- ipv4_hdr = (struct ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
+ ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
ipv4_hdr->total_length = rte_cpu_to_be_16(len);
}
uint64_t start_time)
{
struct rte_ether_hdr *outer_eth_hdr, *eth_hdr;
- struct ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
+ struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
struct tcp_hdr *tcp_hdr;
struct udp_hdr *udp_hdr;
struct rte_vxlan_hdr *vxlan_hdr;
uint8_t find;
outer_eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
- outer_ipv4_hdr = (struct ipv4_hdr *)((char *)outer_eth_hdr +
+ outer_ipv4_hdr = (struct rte_ipv4_hdr *)((char *)outer_eth_hdr +
pkt->outer_l2_len);
udp_hdr = (struct udp_hdr *)((char *)outer_ipv4_hdr +
pkt->outer_l3_len);
sizeof(struct udp_hdr));
eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr +
sizeof(struct rte_vxlan_hdr));
- ipv4_hdr = (struct ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
+ ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
/*
static inline void
update_ipv4_header(struct rte_mbuf *pkt, uint16_t l3_offset, uint16_t id)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
l3_offset);
ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len - l3_offset);
ipv4_hdr->packet_id = rte_cpu_to_be_16(id);
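/*
 * Note, not part of this patch: each GSO output segment gets a fresh
 * total_length as above, and packet_id advances per segment unless the
 * GSO context sets RTE_GSO_FLAG_IPID_FIXED, in which case the id is kept
 * the same across all segments.
 */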
update_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
struct rte_mbuf **segs, uint16_t nb_segs)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t id, tail_idx, i;
uint16_t l3_offset = pkt->l2_len;
uint16_t l4_offset = l3_offset + pkt->l3_len;
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char*) +
+ ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char*) +
l3_offset);
tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
struct rte_mbuf **pkts_out,
uint16_t nb_pkts_out)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint16_t pyld_unit_size, hdr_offset;
uint16_t frag_off;
int ret;
/* Don't process the fragmented packet */
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
pkt->l2_len);
frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
if (unlikely(IS_FRAGMENTED(frag_off))) {
update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
struct rte_mbuf **segs, uint16_t nb_segs)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t outer_id, inner_id, tail_idx, i;
tcp_offset = inner_ipv4_offset + pkt->l3_len;
/* Outer IPv4 header. */
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
outer_ipv4_offset);
outer_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
/* Inner IPv4 header. */
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
inner_ipv4_offset);
inner_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
struct rte_mbuf **pkts_out,
uint16_t nb_pkts_out)
{
- struct ipv4_hdr *inner_ipv4_hdr;
+ struct rte_ipv4_hdr *inner_ipv4_hdr;
uint16_t pyld_unit_size, hdr_offset, frag_off;
int ret = 1;
hdr_offset = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len;
- inner_ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ inner_ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
hdr_offset);
/*
* Don't process the packet whose MF bit or offset in the inner
update_ipv4_udp_headers(struct rte_mbuf *pkt, struct rte_mbuf **segs,
uint16_t nb_segs)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint16_t frag_offset = 0, is_mf;
uint16_t l2_hdrlen = pkt->l2_len, l3_hdrlen = pkt->l3_len;
uint16_t tail_idx = nb_segs - 1, length, i;
* length.
*/
for (i = 0; i < nb_segs; i++) {
- ipv4_hdr = rte_pktmbuf_mtod_offset(segs[i], struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(segs[i], struct rte_ipv4_hdr *,
l2_hdrlen);
length = segs[i]->pkt_len - l2_hdrlen;
ipv4_hdr->total_length = rte_cpu_to_be_16(length);
struct rte_mbuf **pkts_out,
uint16_t nb_pkts_out)
{
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
uint16_t pyld_unit_size, hdr_offset;
uint16_t frag_off;
int ret;
/* Don't process the fragmented packet */
- ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
pkt->l2_len);
frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
if (unlikely(IS_FRAGMENTED(frag_off))) {
/* Minimum GSO segment size for TCP based packets. */
#define RTE_GSO_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \
- sizeof(struct ipv4_hdr) + sizeof(struct tcp_hdr) + 1)
+ sizeof(struct rte_ipv4_hdr) + sizeof(struct tcp_hdr) + 1)
/* Minimum GSO segment size for UDP based packets. */
#define RTE_GSO_UDP_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \
- sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr) + 1)
+ sizeof(struct rte_ipv4_hdr) + sizeof(struct udp_hdr) + 1)
/* GSO flags for rte_gso_ctx. */
#define RTE_GSO_FLAG_IPID_FIXED (1ULL << 0)
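/*
 * Worked out (note, not part of this patch):
 * RTE_GSO_SEG_SIZE_MIN     = 14 (ether) + 20 (ipv4) + 20 (tcp) + 1 = 55
 * RTE_GSO_UDP_SEG_SIZE_MIN = 14 (ether) + 20 (ipv4) +  8 (udp) + 1 = 43
 * i.e. gso_size must leave room for at least one payload byte per segment.
 */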
* Pointer to rte_ipv6_tuple structure
*/
static inline void
-rte_thash_load_v6_addrs(const struct ipv6_hdr *orig, union rte_thash_tuple *targ)
+rte_thash_load_v6_addrs(const struct rte_ipv6_hdr *orig, union rte_thash_tuple *targ)
{
#ifdef RTE_ARCH_X86
__m128i ipv6 = _mm_loadu_si128((const __m128i *)orig->src_addr);
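/*
 * Usage sketch, not part of this patch: computing a software Toeplitz
 * (RSS) hash over the renamed IPv6 header. tcp and rss_key_be are assumed
 * to exist; rss_key_be must have been prepared with rte_convert_rss_key().
 */
union rte_thash_tuple tuple;
uint32_t hash;

rte_thash_load_v6_addrs(ipv6_hdr, &tuple);
tuple.v6.sport = tcp->src_port;
tuple.v6.dport = tcp->dst_port;
hash = rte_softrss_be((uint32_t *)&tuple, RTE_THASH_V6_L4_LEN, rss_key_be);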
*/
struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr,
- struct rte_mbuf *mb, uint64_t tms, struct ipv6_hdr *ip_hdr,
+ struct rte_mbuf *mb, uint64_t tms, struct rte_ipv6_hdr *ip_hdr,
struct ipv6_extension_fragment *frag_hdr);
/**
* present.
*/
static inline struct ipv6_extension_fragment *
-rte_ipv6_frag_get_ipv6_fragment_header(struct ipv6_hdr *hdr)
+rte_ipv6_frag_get_ipv6_fragment_header(struct rte_ipv6_hdr *hdr)
{
if (hdr->proto == IPPROTO_FRAGMENT) {
return (struct ipv6_extension_fragment *) ++hdr;
*/
struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr,
- struct rte_mbuf *mb, uint64_t tms, struct ipv4_hdr *ip_hdr);
+ struct rte_mbuf *mb, uint64_t tms, struct rte_ipv4_hdr *ip_hdr);
/**
* Check if the IPv4 packet is fragmented
* 1 if fragmented, 0 if not fragmented
*/
static inline int
-rte_ipv4_frag_pkt_is_fragmented(const struct ipv4_hdr * hdr) {
+rte_ipv4_frag_pkt_is_fragmented(const struct rte_ipv4_hdr * hdr) {
uint16_t flag_offset, ip_flag, ip_ofs;
flag_offset = rte_be_to_cpu_16(hdr->fragment_offset);
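/*
 * Datapath sketch, not part of this patch: how the reassembly entry points
 * declared above fit together; tbl, dr and tms setup is assumed.
 */
if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
	mb->l2_len = sizeof(struct rte_ether_hdr);
	mb->l3_len = sizeof(struct rte_ipv4_hdr);
	mb = rte_ipv4_frag_reassemble_packet(tbl, &dr, mb, tms, ip_hdr);
	if (mb == NULL)	/* not all fragments seen yet */
		return;
}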
#define IPV4_HDR_FO_ALIGN (1 << IPV4_HDR_FO_SHIFT)
-static inline void __fill_ipv4hdr_frag(struct ipv4_hdr *dst,
- const struct ipv4_hdr *src, uint16_t len, uint16_t fofs,
+static inline void __fill_ipv4hdr_frag(struct rte_ipv4_hdr *dst,
+ const struct rte_ipv4_hdr *src, uint16_t len, uint16_t fofs,
uint16_t dofs, uint32_t mf)
{
rte_memcpy(dst, src, sizeof(*dst));
struct rte_mempool *pool_indirect)
{
struct rte_mbuf *in_seg = NULL;
- struct ipv4_hdr *in_hdr;
+ struct rte_ipv4_hdr *in_hdr;
uint32_t out_pkt_pos, in_seg_data_pos;
uint32_t more_in_segs;
uint16_t fragment_offset, flag_offset, frag_size;
* Ensure the IP payload length of all fragments is aligned to a
* multiple of 8 bytes as per RFC791 section 2.3.
*/
- frag_size = RTE_ALIGN_FLOOR((mtu_size - sizeof(struct ipv4_hdr)),
+ frag_size = RTE_ALIGN_FLOOR((mtu_size - sizeof(struct rte_ipv4_hdr)),
IPV4_HDR_FO_ALIGN);
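/*
 * Example, not part of this patch: with mtu_size = 1500,
 * frag_size = RTE_ALIGN_FLOOR(1500 - 20, 8) = 1480, already a multiple of
 * 8, so every non-final fragment carries 1480 payload bytes and fragment
 * offsets remain 8-byte aligned as RFC 791 requires.
 */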
- in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
+ in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
/* If Don't Fragment flag is set */
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely(frag_size * nb_pkts_out <
- (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
+ (uint16_t)(pkt_in->pkt_len - sizeof (struct rte_ipv4_hdr))))
return -EINVAL;
in_seg = pkt_in;
- in_seg_data_pos = sizeof(struct ipv4_hdr);
+ in_seg_data_pos = sizeof(struct rte_ipv4_hdr);
out_pkt_pos = 0;
fragment_offset = 0;
while (likely(more_in_segs)) {
struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
uint32_t more_out_segs;
- struct ipv4_hdr *out_hdr;
+ struct rte_ipv4_hdr *out_hdr;
/* Allocate direct buffer */
out_pkt = rte_pktmbuf_alloc(pool_direct);
}
/* Reserve space for the IP header that will be built later */
- out_pkt->data_len = sizeof(struct ipv4_hdr);
- out_pkt->pkt_len = sizeof(struct ipv4_hdr);
+ out_pkt->data_len = sizeof(struct rte_ipv4_hdr);
+ out_pkt->pkt_len = sizeof(struct rte_ipv4_hdr);
frag_bytes_remaining = frag_size;
out_seg_prev = out_pkt;
/* Build the IP header */
- out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv4_hdr *);
+ out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
__fill_ipv4hdr_frag(out_hdr, in_hdr,
(uint16_t)out_pkt->pkt_len,
flag_offset, fragment_offset, more_in_segs);
fragment_offset = (uint16_t)(fragment_offset +
- out_pkt->pkt_len - sizeof(struct ipv4_hdr));
+ out_pkt->pkt_len - sizeof(struct rte_ipv4_hdr));
out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
- out_pkt->l3_len = sizeof(struct ipv4_hdr);
+ out_pkt->l3_len = sizeof(struct rte_ipv4_hdr);
/* Write the fragment to the output list */
pkts_out[out_pkt_pos] = out_pkt;
struct rte_mbuf *
ipv4_frag_reassemble(struct ip_frag_pkt *fp)
{
- struct ipv4_hdr *ip_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
struct rte_mbuf *m, *prev;
uint32_t i, n, ofs, first_len;
uint32_t curr_idx = 0;
m->ol_flags |= PKT_TX_IP_CKSUM;
/* update ipv4 header for the reassembled packet */
- ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
m->l3_len));
struct rte_mbuf *
rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
- struct ipv4_hdr *ip_hdr)
+ struct rte_ipv4_hdr *ip_hdr)
{
struct ip_frag_pkt *fp;
struct ip_frag_key key;
*/
static inline void
-__fill_ipv6hdr_frag(struct ipv6_hdr *dst,
- const struct ipv6_hdr *src, uint16_t len, uint16_t fofs,
+__fill_ipv6hdr_frag(struct rte_ipv6_hdr *dst,
+ const struct rte_ipv6_hdr *src, uint16_t len, uint16_t fofs,
uint32_t mf)
{
struct ipv6_extension_fragment *fh;
struct rte_mempool *pool_indirect)
{
struct rte_mbuf *in_seg = NULL;
- struct ipv6_hdr *in_hdr;
+ struct rte_ipv6_hdr *in_hdr;
uint32_t out_pkt_pos, in_seg_data_pos;
uint32_t more_in_segs;
uint16_t fragment_offset, frag_size;
- frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
+ frag_size = (uint16_t)(mtu_size - sizeof(struct rte_ipv6_hdr));
/* Fragment size should be a multiple of 8. */
RTE_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
- (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv6_hdr))))
+ (uint16_t)(pkt_in->pkt_len - sizeof (struct rte_ipv6_hdr))))
return -EINVAL;
- in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv6_hdr *);
+ in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv6_hdr *);
in_seg = pkt_in;
- in_seg_data_pos = sizeof(struct ipv6_hdr);
+ in_seg_data_pos = sizeof(struct rte_ipv6_hdr);
out_pkt_pos = 0;
fragment_offset = 0;
while (likely(more_in_segs)) {
struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
uint32_t more_out_segs;
- struct ipv6_hdr *out_hdr;
+ struct rte_ipv6_hdr *out_hdr;
/* Allocate direct buffer */
out_pkt = rte_pktmbuf_alloc(pool_direct);
}
/* Reserve space for the IP header that will be built later */
- out_pkt->data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
- out_pkt->pkt_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+ out_pkt->data_len = sizeof(struct rte_ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+ out_pkt->pkt_len = sizeof(struct rte_ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
out_seg_prev = out_pkt;
more_out_segs = 1;
/* Build the IP header */
- out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv6_hdr *);
+ out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv6_hdr *);
__fill_ipv6hdr_frag(out_hdr, in_hdr,
- (uint16_t) out_pkt->pkt_len - sizeof(struct ipv6_hdr),
+ (uint16_t) out_pkt->pkt_len - sizeof(struct rte_ipv6_hdr),
fragment_offset, more_in_segs);
fragment_offset = (uint16_t)(fragment_offset +
- out_pkt->pkt_len - sizeof(struct ipv6_hdr)
+ out_pkt->pkt_len - sizeof(struct rte_ipv6_hdr)
- sizeof(struct ipv6_extension_fragment));
/* Write the fragment to the output list */
struct rte_mbuf *
ipv6_frag_reassemble(struct ip_frag_pkt *fp)
{
- struct ipv6_hdr *ip_hdr;
+ struct rte_ipv6_hdr *ip_hdr;
struct ipv6_extension_fragment *frag_hdr;
struct rte_mbuf *m, *prev;
uint32_t i, n, ofs, first_len;
m->ol_flags |= PKT_TX_IP_CKSUM;
/* update ipv6 header for the reassembled datagram */
- ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, m->l2_len);
+ ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, m->l2_len);
ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);
struct rte_mbuf *
rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
- struct ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)
+ struct rte_ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)
{
struct ip_frag_pkt *fp;
struct ip_frag_key key;
/**
* IPv4 Header
*/
-struct ipv4_hdr {
+struct rte_ipv4_hdr {
uint8_t version_ihl; /**< version and header length */
uint8_t type_of_service; /**< type of service */
uint16_t total_length; /**< length of packet */
* The complemented checksum to set in the IP packet.
*/
static inline uint16_t
-rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr)
+rte_ipv4_cksum(const struct rte_ipv4_hdr *ipv4_hdr)
{
uint16_t cksum;
- cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct ipv4_hdr));
+ cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct rte_ipv4_hdr));
return (cksum == 0xffff) ? cksum : (uint16_t)~cksum;
}
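/*
 * Usage sketch, not part of this patch: recomputing the header checksum
 * after a forwarding decrement; the field must be zeroed before the call.
 */
ipv4_hdr->time_to_live--;
ipv4_hdr->hdr_checksum = 0;
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);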
* The non-complemented checksum to set in the L4 header.
*/
static inline uint16_t
-rte_ipv4_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
+rte_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
{
struct ipv4_psd_header {
uint32_t src_addr; /* IP address of source host. */
} else {
psd_hdr.len = rte_cpu_to_be_16(
(uint16_t)(rte_be_to_cpu_16(ipv4_hdr->total_length)
- - sizeof(struct ipv4_hdr)));
+ - sizeof(struct rte_ipv4_hdr)));
}
return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr));
}
* The complemented checksum to set in the IP packet.
*/
static inline uint16_t
-rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr)
+rte_ipv4_udptcp_cksum(const struct rte_ipv4_hdr *ipv4_hdr, const void *l4_hdr)
{
uint32_t cksum;
uint32_t l4_len;
l4_len = (uint32_t)(rte_be_to_cpu_16(ipv4_hdr->total_length) -
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
cksum = rte_raw_cksum(l4_hdr, l4_len);
cksum += rte_ipv4_phdr_cksum(ipv4_hdr, 0);
/**
* IPv6 Header
*/
-struct ipv6_hdr {
+struct rte_ipv6_hdr {
uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */
uint8_t proto; /**< Protocol, next header. */
* The non-complemented checksum to set in the L4 header.
*/
static inline uint16_t
-rte_ipv6_phdr_cksum(const struct ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
+rte_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
{
uint32_t sum;
struct {
* The complemented checksum to set in the IP packet.
*/
static inline uint16_t
-rte_ipv6_udptcp_cksum(const struct ipv6_hdr *ipv6_hdr, const void *l4_hdr)
+rte_ipv6_udptcp_cksum(const struct rte_ipv6_hdr *ipv6_hdr, const void *l4_hdr)
{
uint32_t cksum;
uint32_t l4_len;
/* get the ipv4 header length */
static uint8_t
-ip4_hlen(const struct ipv4_hdr *hdr)
+ip4_hlen(const struct rte_ipv4_hdr *hdr)
{
return (hdr->version_ihl & 0xf) * 4;
}
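/*
 * Example, not part of this patch: without options version_ihl is 0x45,
 * so ip4_hlen() returns (0x45 & 0xf) * 4 = 20; with the maximum IHL of
 * 0xf it returns 60, the largest legal IPv4 header.
 */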
return pkt_type;
if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
- const struct ipv4_hdr *ip4h;
- struct ipv4_hdr ip4h_copy;
+ const struct rte_ipv4_hdr *ip4h;
+ struct rte_ipv4_hdr ip4h_copy;
ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
if (unlikely(ip4h == NULL))
proto = ip4h->next_proto_id;
pkt_type |= ptype_l4(proto);
} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
- const struct ipv6_hdr *ip6h;
- struct ipv6_hdr ip6h_copy;
+ const struct rte_ipv6_hdr *ip6h;
+ struct rte_ipv6_hdr ip6h_copy;
int frag = 0;
ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
return pkt_type;
if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
- const struct ipv4_hdr *ip4h;
- struct ipv4_hdr ip4h_copy;
+ const struct rte_ipv4_hdr *ip4h;
+ struct rte_ipv4_hdr ip4h_copy;
ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
if (unlikely(ip4h == NULL))
proto = ip4h->next_proto_id;
pkt_type |= ptype_inner_l4(proto);
} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
- const struct ipv6_hdr *ip6h;
- struct ipv6_hdr ip6h_copy;
+ const struct rte_ipv6_hdr *ip6h;
+ struct rte_ipv6_hdr ip6h_copy;
int frag = 0;
ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
static inline int
rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
{
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
struct tcp_hdr *tcp_hdr;
struct udp_hdr *udp_hdr;
uint64_t inner_l3_offset = m->l2_len;
inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
if (ol_flags & PKT_TX_IPV4) {
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
inner_l3_offset);
if (ol_flags & PKT_TX_IP_CKSUM)
udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
ol_flags);
} else {
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
inner_l3_offset);
/* non-TSO udp */
udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
ol_flags);
} else {
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
inner_l3_offset);
/* non-TSO tcp or TSO */
tcp_hdr = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *,
struct encap_vxlan_ipv4_data {
struct rte_ether_hdr ether;
- struct ipv4_hdr ipv4;
+ struct rte_ipv4_hdr ipv4;
struct udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
struct encap_vxlan_ipv4_vlan_data {
struct rte_ether_hdr ether;
struct rte_vlan_hdr vlan;
- struct ipv4_hdr ipv4;
+ struct rte_ipv4_hdr ipv4;
struct udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
struct encap_vxlan_ipv6_data {
struct rte_ether_hdr ether;
- struct ipv6_hdr ipv6;
+ struct rte_ipv6_hdr ipv6;
struct udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
struct encap_vxlan_ipv6_vlan_data {
struct rte_ether_hdr ether;
struct rte_vlan_hdr vlan;
- struct ipv6_hdr ipv6;
+ struct rte_ipv6_hdr ipv6;
struct udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
ipv4_total_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr) +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
rte_htons(ipv4_total_length));
udp_length = ether_length +
ipv4_total_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr) +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
rte_htons(ipv4_total_length));
udp_length = ether_length +
}
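To make the length bookkeeping concrete: sizeof(struct rte_ipv4_hdr) is 20 and the UDP and VXLAN headers are 8 bytes each, so the outer IPv4 total length exceeds the inner frame length by 36 bytes and the outer UDP length exceeds it by 16. A minimal sketch of the same sums, using the header types as named at this point in the series (ether_length is the inner frame length, as above):

#include <stdint.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_vxlan.h>

/* Outer IPv4 total_length for VXLAN encap: inner + 8 + 8 + 20. */
static uint16_t
vxlan_ipv4_total_length(uint16_t ether_length)
{
	return ether_length + (uint16_t)(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct udp_hdr) + sizeof(struct rte_ipv4_hdr));
}

/* Outer UDP dgram_len: inner + VXLAN(8) + UDP(8) = inner + 16. */
static uint16_t
vxlan_udp_length(uint16_t ether_length)
{
	return ether_length + (uint16_t)(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct udp_hdr));
}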
static __rte_always_inline void
-pkt_ipv4_work_nat(struct ipv4_hdr *ip,
+pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
struct nat_ipv4_data *data,
struct rte_table_action_nat_config *cfg)
{
}
static __rte_always_inline void
-pkt_ipv6_work_nat(struct ipv6_hdr *ip,
+pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
struct nat_ipv6_data *data,
struct rte_table_action_nat_config *cfg)
{
}
static __rte_always_inline uint64_t
-pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
+pkt_ipv4_work_ttl(struct rte_ipv4_hdr *ip,
struct ttl_data *data)
{
uint32_t drop;
}
static __rte_always_inline uint64_t
-pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
+pkt_ipv6_work_ttl(struct rte_ipv6_hdr *ip,
struct ttl_data *data)
{
uint32_t drop;
uint16_t total_length;
if (cfg->common.ip_version) {
- struct ipv4_hdr *hdr = ip;
+ struct rte_ipv4_hdr *hdr = ip;
dscp = hdr->type_of_service >> 2;
total_length = rte_ntohs(hdr->total_length);
} else {
- struct ipv6_hdr *hdr = ip;
+ struct rte_ipv6_hdr *hdr = ip;
dscp = (rte_ntohl(hdr->vtc_flow) & 0x0FC00000) >> 22;
total_length =
- rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
+ rte_ntohs(hdr->payload_len) + sizeof(struct rte_ipv6_hdr);
}
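In the IPv6 branch, vtc_flow in host byte order packs the version in bits 31:28, the traffic class in bits 27:20, and the flow label in bits 19:0; DSCP is the top six bits of the traffic class, i.e. bits 27:22, which is what the mask-and-shift above extracts. A minimal check (hypothetical value):

#include <assert.h>
#include <stdint.h>

/* DSCP = bits 27:22 of the host-order vtc_flow word. */
static uint8_t dscp_of(uint32_t vtc_flow)
{
	return (vtc_flow & 0x0FC00000) >> 22;
}

int main(void)
{
	/* version 6, traffic class 0xB8 (DSCP 46, EF), flow label 0 */
	assert(dscp_of(0x6B800000) == 46);
	return 0;
}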
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
uint16_t total_length0, total_length1, total_length2, total_length3;
if (cfg->common.ip_version) {
- struct ipv4_hdr *hdr0 = ip0;
- struct ipv4_hdr *hdr1 = ip1;
- struct ipv4_hdr *hdr2 = ip2;
- struct ipv4_hdr *hdr3 = ip3;
+ struct rte_ipv4_hdr *hdr0 = ip0;
+ struct rte_ipv4_hdr *hdr1 = ip1;
+ struct rte_ipv4_hdr *hdr2 = ip2;
+ struct rte_ipv4_hdr *hdr3 = ip3;
dscp0 = hdr0->type_of_service >> 2;
dscp1 = hdr1->type_of_service >> 2;
total_length2 = rte_ntohs(hdr2->total_length);
total_length3 = rte_ntohs(hdr3->total_length);
} else {
- struct ipv6_hdr *hdr0 = ip0;
- struct ipv6_hdr *hdr1 = ip1;
- struct ipv6_hdr *hdr2 = ip2;
- struct ipv6_hdr *hdr3 = ip3;
+ struct rte_ipv6_hdr *hdr0 = ip0;
+ struct rte_ipv6_hdr *hdr1 = ip1;
+ struct rte_ipv6_hdr *hdr2 = ip2;
+ struct rte_ipv6_hdr *hdr3 = ip3;
dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0FC00000) >> 22;
dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0FC00000) >> 22;
dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0FC00000) >> 22;
total_length0 =
- rte_ntohs(hdr0->payload_len) + sizeof(struct ipv6_hdr);
+ rte_ntohs(hdr0->payload_len) + sizeof(struct rte_ipv6_hdr);
total_length1 =
- rte_ntohs(hdr1->payload_len) + sizeof(struct ipv6_hdr);
+ rte_ntohs(hdr1->payload_len) + sizeof(struct rte_ipv6_hdr);
total_length2 =
- rte_ntohs(hdr2->payload_len) + sizeof(struct ipv6_hdr);
+ rte_ntohs(hdr2->payload_len) + sizeof(struct rte_ipv6_hdr);
total_length3 =
- rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
+ rte_ntohs(hdr3->payload_len) + sizeof(struct rte_ipv6_hdr);
}
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
process_ipv4(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
/* Assume there is no Ethernet header */
- struct ipv4_hdr *pkt_hdr = rte_pktmbuf_mtod(pkt, struct ipv4_hdr *);
+ struct rte_ipv4_hdr *pkt_hdr = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
/* Get "More fragments" flag and fragment offset */
uint16_t frag_field = rte_be_to_cpu_16(pkt_hdr->fragment_offset);
process_ipv6(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
/* Assume there is no Ethernet header */
- struct ipv6_hdr *pkt_hdr = rte_pktmbuf_mtod(pkt, struct ipv6_hdr *);
+ struct rte_ipv6_hdr *pkt_hdr = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
struct ipv6_extension_fragment *frag_hdr;
uint16_t frag_data = 0;
/* IP cksum verification cannot be bypassed, so calculate it here */
if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
- struct ipv4_hdr *ipv4_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
- ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
m_buf->l2_len);
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
}
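One subtlety behind the assignment above: rte_ipv4_cksum() sums the entire 20-byte header, including the hdr_checksum field itself, so that field must be zero when it runs; the PKT_TX_IP_CKSUM contract already obliges the application to clear it. A defensive sketch, assuming a header without options:

#include <rte_ip.h>

/* Software IPv4 header checksum; hdr_checksum must be 0 on input. */
static void
sw_ip_cksum(struct rte_ipv4_hdr *ipv4_hdr)
{
	ipv4_hdr->hdr_checksum = 0;
	ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
}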
static void
parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
{
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
void *l3_hdr = NULL;
struct rte_ether_hdr *eth_hdr;
uint16_t ethertype;
case RTE_ETHER_TYPE_IPv6:
ipv6_hdr = l3_hdr;
*l4_proto = ipv6_hdr->proto;
- m->l3_len = sizeof(struct ipv6_hdr);
+ m->l3_len = sizeof(struct rte_ipv6_hdr);
*l4_hdr = (char *)l3_hdr + m->l3_len;
m->ol_flags |= PKT_TX_IPV6;
break;
.field_index = PROTO_FIELD_IPV4,
.input_index = PROTO_FIELD_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, next_proto_id),
+ offsetof(struct rte_ipv4_hdr, next_proto_id),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.field_index = SRC_FIELD_IPV4,
.input_index = SRC_FIELD_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, src_addr),
+ offsetof(struct rte_ipv4_hdr, src_addr),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.field_index = DST_FIELD_IPV4,
.input_index = DST_FIELD_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, dst_addr),
+ offsetof(struct rte_ipv4_hdr, dst_addr),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = SRCP_FIELD_IPV4,
.input_index = SRCP_FIELD_IPV4,
- .offset = sizeof(struct rte_ether_hdr) + sizeof(struct ipv4_hdr),
+ .offset = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = DSTP_FIELD_IPV4,
.input_index = SRCP_FIELD_IPV4,
- .offset = sizeof(struct rte_ether_hdr) + sizeof(struct ipv4_hdr) +
+ .offset = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
sizeof(uint16_t),
},
};
for (j = 0; j < n_mbufs; j++) {
struct rte_mbuf *m;
uint8_t *m_data, *key;
- struct ipv4_hdr *ip_hdr;
- struct ipv6_hdr *ipv6_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
uint32_t ip_dst;
uint8_t *ipv6_dst;
uint32_t *signature, *k32;
APP_METADATA_OFFSET(32));
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- ip_hdr = (struct ipv4_hdr *)
+ ip_hdr = (struct rte_ipv4_hdr *)
&m_data[sizeof(struct rte_ether_hdr)];
ip_dst = ip_hdr->dst_addr;
k32 = (uint32_t *) key;
k32[0] = ip_dst & 0xFFFFFF00;
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
- ipv6_hdr = (struct ipv6_hdr *)
+ ipv6_hdr = (struct rte_ipv6_hdr *)
&m_data[sizeof(struct rte_ether_hdr)];
ipv6_dst = ipv6_hdr->dst_addr;
}
uint16_t
-initialize_ipv6_header(struct ipv6_hdr *ip_hdr, uint8_t *src_addr,
+initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
uint8_t *dst_addr, uint16_t pkt_data_len)
{
ip_hdr->vtc_flow = 0;
rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));
- return (uint16_t) (pkt_data_len + sizeof(struct ipv6_hdr));
+ return (uint16_t) (pkt_data_len + sizeof(struct rte_ipv6_hdr));
}
uint16_t
-initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+initialize_ipv4_header(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
uint32_t dst_addr, uint16_t pkt_data_len)
{
uint16_t pkt_len;
/*
* Initialize IP header.
*/
- pkt_len = (uint16_t) (pkt_data_len + sizeof(struct ipv4_hdr));
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));
ip_hdr->version_ihl = IP_VHL_DEF;
ip_hdr->type_of_service = 0;
}
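A hedged usage sketch of the initializer above, borrowing the IPV4_ADDR() convenience macro its callers use later in this patch (addresses and payload size are illustrative):

struct rte_ipv4_hdr ip_hdr;
uint16_t pkt_len;

/* 26 payload bytes + 20-byte header: returns 46, the same value the
 * initializer stores (byte-swapped) in ip_hdr->total_length. */
pkt_len = initialize_ipv4_header(&ip_hdr,
		IPV4_ADDR(10, 0, 0, 1),	/* src */
		IPV4_ADDR(10, 0, 0, 2),	/* dst */
		26);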
uint16_t
-initialize_ipv4_header_proto(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto)
{
uint16_t pkt_len;
/*
* Initialize IP header.
*/
- pkt_len = (uint16_t) (pkt_data_len + sizeof(struct ipv4_hdr));
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));
ip_hdr->version_ihl = IP_VHL_DEF;
ip_hdr->type_of_service = 0;
copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);
if (ipv4) {
- copy_buf_to_pkt(ip_hdr, sizeof(struct ipv4_hdr), pkt, eth_hdr_size);
+ copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr), pkt, eth_hdr_size);
copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt, eth_hdr_size +
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
} else {
- copy_buf_to_pkt(ip_hdr, sizeof(struct ipv6_hdr), pkt, eth_hdr_size);
+ copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr), pkt, eth_hdr_size);
copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt, eth_hdr_size +
- sizeof(struct ipv6_hdr));
+ sizeof(struct rte_ipv6_hdr));
}
/*
if (ipv4) {
pkt->vlan_tci = RTE_ETHER_TYPE_IPv4;
- pkt->l3_len = sizeof(struct ipv4_hdr);
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
} else {
pkt->vlan_tci = RTE_ETHER_TYPE_IPv6;
- pkt->l3_len = sizeof(struct ipv6_hdr);
+ pkt->l3_len = sizeof(struct rte_ipv6_hdr);
}
pkts_burst[nb_pkt] = pkt;
copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);
if (ipv4) {
- copy_buf_to_pkt(ip_hdr, sizeof(struct ipv4_hdr), pkt,
+ copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr), pkt,
eth_hdr_size);
switch (proto) {
case IPPROTO_UDP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct udp_hdr), pkt,
- eth_hdr_size + sizeof(struct ipv4_hdr));
+ eth_hdr_size + sizeof(struct rte_ipv4_hdr));
break;
case IPPROTO_TCP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct tcp_hdr), pkt,
- eth_hdr_size + sizeof(struct ipv4_hdr));
+ eth_hdr_size + sizeof(struct rte_ipv4_hdr));
break;
case IPPROTO_SCTP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct sctp_hdr), pkt,
- eth_hdr_size + sizeof(struct ipv4_hdr));
+ eth_hdr_size + sizeof(struct rte_ipv4_hdr));
break;
default:
break;
}
} else {
- copy_buf_to_pkt(ip_hdr, sizeof(struct ipv6_hdr), pkt,
+ copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr), pkt,
eth_hdr_size);
switch (proto) {
case IPPROTO_UDP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct udp_hdr), pkt,
- eth_hdr_size + sizeof(struct ipv6_hdr));
+ eth_hdr_size + sizeof(struct rte_ipv6_hdr));
break;
case IPPROTO_TCP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct tcp_hdr), pkt,
- eth_hdr_size + sizeof(struct ipv6_hdr));
+ eth_hdr_size + sizeof(struct rte_ipv6_hdr));
break;
case IPPROTO_SCTP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct sctp_hdr), pkt,
- eth_hdr_size + sizeof(struct ipv6_hdr));
+ eth_hdr_size + sizeof(struct rte_ipv6_hdr));
break;
default:
break;
if (ipv4) {
pkt->vlan_tci = RTE_ETHER_TYPE_IPv4;
- pkt->l3_len = sizeof(struct ipv4_hdr);
+ pkt->l3_len = sizeof(struct rte_ipv4_hdr);
} else {
pkt->vlan_tci = RTE_ETHER_TYPE_IPv6;
- pkt->l3_len = sizeof(struct ipv6_hdr);
+ pkt->l3_len = sizeof(struct rte_ipv6_hdr);
}
pkts_burst[nb_pkt] = pkt;
uint16_t dst_port, uint16_t pkt_data_len);
uint16_t
-initialize_ipv6_header(struct ipv6_hdr *ip_hdr, uint8_t *src_addr,
+initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
uint8_t *dst_addr, uint16_t pkt_data_len);
uint16_t
-initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+initialize_ipv4_header(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
uint32_t dst_addr, uint16_t pkt_data_len);
uint16_t
-initialize_ipv4_header_proto(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto);
int
.field_index = PROTO_FIELD_IPV4,
.input_index = PROTO_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, next_proto_id),
+ offsetof(struct rte_ipv4_hdr, next_proto_id),
},
/* next input field (IPv4 source address) - 4 consecutive bytes. */
{
.field_index = SRC_FIELD_IPV4,
.input_index = SRC_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, src_addr),
+ offsetof(struct rte_ipv4_hdr, src_addr),
},
/* next input field (IPv4 destination address) - 4 consecutive bytes. */
{
.field_index = DST_FIELD_IPV4,
.input_index = DST_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct ipv4_hdr, dst_addr),
+ offsetof(struct rte_ipv4_hdr, dst_addr),
},
/*
* Next 2 fields (src & dst ports) form 4 consecutive bytes.
.field_index = SRCP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct ipv4_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, src_port),
},
{
.field_index = DSTP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct ipv4_hdr) +
+ sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, dst_port),
},
};
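Spelled out, these offsets land on fixed byte positions in an untagged frame: the 14-byte Ethernet header puts next_proto_id at 14 + 9 = 23, src_addr at 14 + 12 = 26, dst_addr at 14 + 16 = 30, and the TCP ports at 34 and 36. A compile-time restatement of those assumptions:

#include <stddef.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* The ACL field offsets above rely on these layouts. */
_Static_assert(sizeof(struct rte_ether_hdr) == 14, "untagged header");
_Static_assert(offsetof(struct rte_ipv4_hdr, next_proto_id) == 9,
	"protocol byte at frame offset 23");
_Static_assert(offsetof(struct rte_ipv4_hdr, src_addr) == 12 &&
	offsetof(struct rte_ipv4_hdr, dst_addr) == 16,
	"addresses at frame offsets 26 and 30");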
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
- struct ipv4_hdr pkt_ipv4_hdr;
+ struct rte_ipv4_hdr pkt_ipv4_hdr;
struct udp_hdr pkt_udp_hdr;
uint32_t src_addr = IPV4_ADDR(2, 2, 2, 3);
uint32_t dst_addr = IPV4_ADDR(2, 2, 2, 7);
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
- struct ipv4_hdr pkt_ipv4_hdr;
+ struct rte_ipv4_hdr pkt_ipv4_hdr;
struct tcp_hdr pkt_tcp_hdr;
uint32_t src_addr = IPV4_ADDR(1, 2, 3, 4);
uint32_t dst_addr = IPV4_ADDR(5, 6, 7, 8);
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
- struct ipv4_hdr pkt_ipv4_hdr;
+ struct rte_ipv4_hdr pkt_ipv4_hdr;
struct sctp_hdr pkt_sctp_hdr;
uint32_t src_addr = IPV4_ADDR(11, 12, 13, 14);
uint32_t dst_addr = IPV4_ADDR(15, 16, 17, 18);
/* Packet Headers */
struct rte_ether_hdr *pkt_eth_hdr;
- struct ipv4_hdr *pkt_ipv4_hdr;
- struct ipv6_hdr *pkt_ipv6_hdr;
+ struct rte_ipv4_hdr *pkt_ipv4_hdr;
+ struct rte_ipv6_hdr *pkt_ipv6_hdr;
struct udp_hdr *pkt_udp_hdr;
};
-static struct ipv4_hdr pkt_ipv4_hdr;
-static struct ipv6_hdr pkt_ipv6_hdr;
+static struct rte_ipv4_hdr pkt_ipv4_hdr;
+static struct rte_ipv6_hdr pkt_ipv6_hdr;
static struct udp_hdr pkt_udp_hdr;
static struct link_bonding_unittest_params default_params = {
struct rte_ether_hdr pkt_eth_hdr;
struct udp_hdr pkt_udp_hdr;
union {
- struct ipv4_hdr v4;
- struct ipv6_hdr v6;
+ struct rte_ipv4_hdr v4;
+ struct rte_ipv6_hdr v6;
} pkt_ip_hdr;
int retval;
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
- struct ipv4_hdr pkt_ipv4_hdr;
+ struct rte_ipv4_hdr pkt_ipv4_hdr;
struct udp_hdr pkt_udp_hdr;
uint32_t pktlen;
static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
{
struct rte_ether_hdr *eth_hdr;
struct rte_vlan_hdr *vlan1, *vlan2;
- struct ipv4_hdr *ip_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
/* Simulate a classifier */
eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
vlan1 = (struct rte_vlan_hdr *)(&eth_hdr->ether_type);
vlan2 = (struct rte_vlan_hdr *)((uintptr_t)&eth_hdr->ether_type + sizeof(struct rte_vlan_hdr));
eth_hdr = (struct rte_ether_hdr *)((uintptr_t)&eth_hdr->ether_type + 2 * sizeof(struct rte_vlan_hdr));
- ip_hdr = (struct ipv4_hdr *)((uintptr_t)eth_hdr + sizeof(eth_hdr->ether_type));
+ ip_hdr = (struct rte_ipv4_hdr *)((uintptr_t)eth_hdr + sizeof(eth_hdr->ether_type));
vlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);
vlan2->vlan_tci = rte_cpu_to_be_16(PIPE);
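The pointer arithmetic above amounts to a fixed byte map, assuming the 14-byte rte_ether_hdr (ether_type at offset 12) and the 4-byte rte_vlan_hdr used elsewhere in this patch:

/* Byte map of the frame the simulated classifier writes:
 *   0..11   destination + source MAC
 *   12..15  first rte_vlan_hdr  (vlan1->vlan_tci = SUBPORT)
 *   16..19  second rte_vlan_hdr (vlan2->vlan_tci = PIPE)
 *   20..21  the re-aimed eth_hdr base, stepped over via
 *           sizeof(eth_hdr->ether_type)
 *   22..    rte_ipv4_hdr (ip_hdr)
 */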
union rte_thash_tuple tuple;
uint32_t rss_l3, rss_l3l4;
uint8_t rss_key_be[RTE_DIM(default_rss_key)];
- struct ipv6_hdr ipv6_hdr;
+ struct rte_ipv6_hdr ipv6_hdr;
/* Convert RSS key*/
rte_convert_rss_key((uint32_t *)&default_rss_key,