((char *)ipv4_hdr + info->l3_len);
info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
} else if (info->l4_proto == IPPROTO_UDP)
- info->l4_len = sizeof(struct udp_hdr);
+ info->l4_len = sizeof(struct rte_udp_hdr);
else
info->l4_len = 0;
}
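The shift above folds two steps into one: the high nibble of data_off holds the TCP data offset in 32-bit words, so masking with 0xf0 and shifting right by 2 (instead of shifting by 4, then multiplying by 4) yields the header length in bytes. A quick illustration, not part of the patch:

	/* data_off = 0x50: 5 words; (0x50 & 0xf0) >> 2 == 20 bytes,
	 * the minimum TCP header length. */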
((char *)ipv6_hdr + info->l3_len);
info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
} else if (info->l4_proto == IPPROTO_UDP)
- info->l4_len = sizeof(struct udp_hdr);
+ info->l4_len = sizeof(struct rte_udp_hdr);
else
info->l4_len = 0;
}
/* Parse a vxlan header */
static void
-parse_vxlan(struct udp_hdr *udp_hdr,
+parse_vxlan(struct rte_udp_hdr *udp_hdr,
struct testpmd_offload_info *info,
uint32_t pkt_type)
{
info->outer_l4_proto = info->l4_proto;
eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
- sizeof(struct udp_hdr) +
+ sizeof(struct rte_udp_hdr) +
sizeof(struct rte_vxlan_hdr));
parse_ethernet(eth_hdr, info);
/* Parse a vxlan-gpe header */
static void
-parse_vxlan_gpe(struct udp_hdr *udp_hdr,
+parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
struct testpmd_offload_info *info)
{
struct rte_ether_hdr *eth_hdr;
return;
vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
RTE_VXLAN_GPE_TYPE_IPV4) {
uint64_t tx_offloads)
{
struct rte_ipv4_hdr *ipv4_hdr = l3_hdr;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
struct rte_tcp_hdr *tcp_hdr;
struct rte_sctp_hdr *sctp_hdr;
uint64_t ol_flags = 0;
return 0; /* packet type not supported, nothing to do */
if (info->l4_proto == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
+ udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
udp_hdr->dgram_cksum = 0;
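For UDP over IPv4, a transmitted checksum of zero means "no checksum" (RFC 768), which is why the code above skips recalculation when the incoming checksum is already 0.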
{
struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
uint64_t ol_flags = 0;
if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPv4)) {
return ol_flags;
}
- udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);
+ udp_hdr = (struct rte_udp_hdr *)
+ ((char *)outer_l3_hdr + info->outer_l3_len);
/* outer UDP checksum is done in software. On the other hand, for
* UDP tunneling, like VXLAN or Geneve, outer UDP checksum can be
/* check if it's a supported tunnel */
if (txp->parse_tunnel) {
if (info.l4_proto == IPPROTO_UDP) {
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
- udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
- info.l3_len);
+ udp_hdr = (struct rte_udp_hdr *)
+ ((char *)l3_hdr + info.l3_len);
parse_vxlan_gpe(udp_hdr, &info);
if (info.is_tunnel) {
tx_ol_flags |= PKT_TX_TUNNEL_VXLAN_GPE;
struct rte_mbuf *pkt;
struct rte_ether_hdr *eth_hdr;
struct rte_ipv4_hdr *ip_hdr;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
uint16_t vlan_tci, vlan_tci_outer;
uint64_t ol_flags = 0;
uint16_t nb_rx;
sizeof(*ip_hdr));
/* Initialize UDP header. */
- udp_hdr = (struct udp_hdr *)(ip_hdr + 1);
+ udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1);
udp_hdr->src_port = rte_cpu_to_be_16(cfg_udp_src);
udp_hdr->dst_port = rte_cpu_to_be_16(cfg_udp_dst);
udp_hdr->dgram_cksum = 0; /* No UDP checksum. */
static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
-static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
+static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
static void
copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
static void
setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
- struct udp_hdr *udp_hdr,
+ struct rte_udp_hdr *udp_hdr,
uint16_t pkt_data_len)
{
uint16_t *ptr16;
/*
* Initialize UDP header.
*/
- pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
pkt_data_len = (uint16_t) (tx_pkt_length - (
sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
- sizeof(struct udp_hdr)));
+ sizeof(struct rte_udp_hdr)));
setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
}
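As a concrete check of the size math (illustrative, assuming testpmd's default 64-byte tx_pkt_length):

	/* 64 - (14 + 20 + 8) = 22 bytes of UDP payload */
	pkt_data_len = 64 - (sizeof(struct rte_ether_hdr) +  /* 14 */
			     sizeof(struct rte_ipv4_hdr) +   /* 20 */
			     sizeof(struct rte_udp_hdr));    /*  8 */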
if (is_encapsulation) {
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
uint8_t l2_len;
uint8_t l3_len;
uint8_t l4_len;
}
if (l4_proto == IPPROTO_UDP) {
udp_hdr = rte_pktmbuf_mtod_offset(mb,
- struct udp_hdr *,
+ struct rte_udp_hdr *,
l2_len + l3_len);
- l4_len = sizeof(struct udp_hdr);
+ l4_len = sizeof(struct rte_udp_hdr);
vxlan_hdr = rte_pktmbuf_mtod_offset(mb,
struct rte_vxlan_hdr *,
l2_len + l3_len + l4_len);
}
uint16_t
-initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port,
+initialize_udp_header(struct rte_udp_hdr *udp_hdr, uint16_t src_port,
uint16_t dst_port, uint16_t pkt_data_len)
{
uint16_t pkt_len;
- pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
udp_hdr->src_port = rte_cpu_to_be_16(src_port);
udp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
{
uint16_t pkt_len;
- pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
sctp_hdr->src_port = rte_cpu_to_be_16(src_port);
sctp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
- void *ip_hdr, uint8_t ipv4, struct udp_hdr *udp_hdr,
+ void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
{
int i, nb_pkt = 0;
switch (proto) {
case IPPROTO_UDP:
copy_buf_to_pkt(proto_hdr,
- sizeof(struct udp_hdr), pkt,
+ sizeof(struct rte_udp_hdr), pkt,
eth_hdr_size +
sizeof(struct rte_ipv4_hdr));
break;
switch (proto) {
case IPPROTO_UDP:
copy_buf_to_pkt(proto_hdr,
- sizeof(struct udp_hdr), pkt,
+ sizeof(struct rte_udp_hdr), pkt,
eth_hdr_size +
sizeof(struct rte_ipv6_hdr));
break;
uint32_t src_ip, uint32_t dst_ip, uint32_t opcode);
uint16_t
-initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port,
+initialize_udp_header(struct rte_udp_hdr *udp_hdr, uint16_t src_port,
uint16_t dst_port, uint16_t pkt_data_len);
uint16_t
int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
- void *ip_hdr, uint8_t ipv4, struct udp_hdr *udp_hdr,
+ void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
int
{
struct rte_ether_hdr pkt_eth_hdr;
struct rte_ipv4_hdr pkt_ipv4_hdr;
- struct udp_hdr pkt_udp_hdr;
+ struct rte_udp_hdr pkt_udp_hdr;
uint32_t src_addr = IPV4_ADDR(2, 2, 2, 3);
uint32_t dst_addr = IPV4_ADDR(2, 2, 2, 7);
uint16_t src_port = 32;
struct rte_ether_hdr *pkt_eth_hdr;
struct rte_ipv4_hdr *pkt_ipv4_hdr;
struct rte_ipv6_hdr *pkt_ipv6_hdr;
- struct udp_hdr *pkt_udp_hdr;
+ struct rte_udp_hdr *pkt_udp_hdr;
};
static struct rte_ipv4_hdr pkt_ipv4_hdr;
static struct rte_ipv6_hdr pkt_ipv6_hdr;
-static struct udp_hdr pkt_udp_hdr;
+static struct rte_udp_hdr pkt_udp_hdr;
static struct link_bonding_unittest_params default_params = {
.bonded_port_id = -1,
uint32_t ip_dst[4] = { [0 ... 2] = 0xFEEDFACE, [3] = RTE_IPv4(192, 168, 0, 2) };
struct rte_ether_hdr pkt_eth_hdr;
- struct udp_hdr pkt_udp_hdr;
+ struct rte_udp_hdr pkt_udp_hdr;
union {
struct rte_ipv4_hdr v4;
struct rte_ipv6_hdr v6;
{
struct rte_ether_hdr pkt_eth_hdr;
struct rte_ipv4_hdr pkt_ipv4_hdr;
- struct udp_hdr pkt_udp_hdr;
+ struct rte_udp_hdr pkt_udp_hdr;
uint32_t pktlen;
static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
size_t vlan_offset;
int i;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
struct rte_tcp_hdr *tcp_hdr;
uint32_t hash, l3hash, l4hash;
l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv4_hdr->next_proto_id ==
IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)
+ udp_hdr = (struct rte_udp_hdr *)
((char *)ipv4_hdr +
ip_hdr_offset);
if ((size_t)udp_hdr + sizeof(*udp_hdr)
tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv6_hdr->proto == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
+ udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
l4hash = HASH_L4_PORTS(udp_hdr);
}
}
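HASH_L4_PORTS is defined elsewhere in the bonding PMD and is unaffected by the rename; at this point in the tree it is a simple XOR of the two port fields. A sketch of the idea (verify against rte_eth_bond_pmd.c):

	#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)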
tcp_hdr);
} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
RTE_PTYPE_L4_UDP) {
- struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
+ struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr +
mbuf->l3_len);
udp_hdr->dgram_cksum = 0;
if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))
switch (flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
- offsetof(struct udp_hdr, dgram_cksum));
+ offsetof(struct rte_udp_hdr, dgram_cksum));
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
break;
case PKT_TX_TCP_CKSUM:
case PKT_TX_UDP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_udp_hdr)
+ << E1000_ADVTXD_L4LEN_SHIFT;
break;
case PKT_TX_TCP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
memset(gp, 0, sizeof(*gp));
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
- struct udp_hdr udp_mask, udp_val;
+ struct rte_udp_hdr udp_mask, udp_val;
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
- &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
- struct udp_hdr udp_mask, udp_val;
+ struct rte_udp_hdr udp_mask, udp_val;
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
udp_val.dst_port = input->flow.udp6_flow.dst_port;
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
- &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
- struct udp_hdr supported_mask = {
+ struct rte_udp_hdr supported_mask = {
.src_port = 0xffff,
.dst_port = 0xffff,
};
mask = &rte_flow_item_udp_mask;
/* Append udp header to L5 and set ip proto = udp */
return copy_inner_common(&arg->filter->u.generic_1, off,
- arg->item->spec, mask, sizeof(struct udp_hdr),
+ arg->item->spec, mask, sizeof(struct rte_udp_hdr),
arg->l3_proto_off, IPPROTO_UDP, 1);
}
mask = &rte_flow_item_udp_mask;
memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
return 0;
}
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
- struct udp_hdr *udp;
+ struct rte_udp_hdr *udp;
FLOW_TRACE();
*/
gp->mask_flags |= FILTER_GENERIC_1_UDP;
gp->val_flags |= FILTER_GENERIC_1_UDP;
- udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
+ udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
udp->dst_port = 0xffff;
- udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
+ udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
udp->dst_port = RTE_BE16(4789);
/* Match all if no spec */
if (!spec)
return EINVAL;
/* Need non-null pattern that fits within the NIC's filter pattern */
if (spec->length == 0 ||
- spec->length + sizeof(struct udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
+ spec->length + sizeof(struct rte_udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
!spec->pattern || !mask->pattern)
return EINVAL;
/*
*/
if (mask->length != 0 && mask->length < spec->length)
return EINVAL;
- memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
mask->pattern, spec->length);
- memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
spec->pattern, spec->length);
return 0;
return;
FLOW_TRACE();
vxlan = sizeof(struct rte_vxlan_hdr);
- memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
- memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
inner = inner_ofst - vxlan;
memset(layer, 0, sizeof(layer));
unsigned char *raw_pkt)
{
unsigned char *payload, *ptr;
- struct udp_hdr *udp;
+ struct rte_udp_hdr *udp;
struct rte_tcp_hdr *tcp;
struct rte_sctp_hdr *sctp;
uint8_t size, dst = 0;
/* fill the L4 header */
switch (fdir_input->flow_type) {
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- udp = (struct udp_hdr *)(raw_pkt + len);
- payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ udp = (struct rte_udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- udp = (struct udp_hdr *)(raw_pkt + len);
- payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ udp = (struct rte_udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
{
unsigned char *payload = NULL;
unsigned char *ptr;
- struct udp_hdr *udp;
+ struct rte_udp_hdr *udp;
struct rte_tcp_hdr *tcp;
struct rte_sctp_hdr *sctp;
struct rte_flow_item_gtp *gtp;
/* fill the L4 header */
if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
- payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ udp = (struct rte_udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
/**
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
payload = raw_pkt + len;
set_idx = I40E_FLXPLD_L3_IDX;
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
- payload = (unsigned char *)udp + sizeof(struct udp_hdr);
+ udp = (struct rte_udp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
/**
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
- udp = (struct udp_hdr *)(raw_pkt + len);
+ udp = (struct rte_udp_hdr *)(raw_pkt + len);
udp->dgram_len =
rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
gtp = (struct rte_flow_item_gtp *)
- ((unsigned char *)udp + sizeof(struct udp_hdr));
+ ((unsigned char *)udp +
+ sizeof(struct rte_udp_hdr));
gtp->msg_len =
rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
gtp->teid = fdir_input->flow.gtp_flow.teid;
break;
case PKT_TX_UDP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
- *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
default:
break;
case PKT_TX_UDP_CKSUM:
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
- *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
default:
break;
case PKT_TX_UDP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
- *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
default:
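In the i40e, iavf and ice hunks above, the descriptor encodes the L4 header length in 32-bit words, hence the right shift by two. Concretely:

	/* sizeof(struct rte_udp_hdr) == 8 bytes; 8 >> 2 == 2 words */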
case PKT_TX_UDP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_udp_hdr)
+ << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
/* Handle L4. */
switch (fdir_filter->input.flow_type) {
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- attributes->l4.udp.hdr = (struct udp_hdr){
+ attributes->l4.udp.hdr = (struct rte_udp_hdr){
.src_port = input->flow.udp4_flow.src_port,
.dst_port = input->flow.udp4_flow.dst_port,
};
- attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- attributes->l4.udp.hdr = (struct udp_hdr){
+ attributes->l4.udp.hdr = (struct rte_udp_hdr){
.src_port = input->flow.udp6_flow.src_port,
.dst_port = input->flow.udp6_flow.dst_port,
};
- attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};
struct rte_vlan_hdr *vlan = NULL;
struct rte_ipv4_hdr *ipv4 = NULL;
struct rte_ipv6_hdr *ipv6 = NULL;
- struct udp_hdr *udp = NULL;
+ struct rte_udp_hdr *udp = NULL;
struct rte_vxlan_hdr *vxlan = NULL;
struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
struct rte_gre_hdr *gre = NULL;
ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
- udp = (struct udp_hdr *)&buf[temp_size];
+ udp = (struct rte_udp_hdr *)&buf[temp_size];
if (!ipv4 && !ipv6)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
uint8_t *raw_pkt;
struct rte_ipv4_hdr *ip;
struct rte_ipv6_hdr *ip6;
- struct udp_hdr *udp;
+ struct rte_udp_hdr *udp;
struct rte_tcp_hdr *tcp;
uint16_t len;
raw_pkt = (uint8_t *)buff;
/* UDP */
if (arfs->tuple.ip_proto == IPPROTO_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
+ udp = (struct rte_udp_hdr *)(raw_pkt + len);
udp->dst_port = arfs->tuple.dst_port;
udp->src_port = arfs->tuple.src_port;
- udp->dgram_len = sizeof(struct udp_hdr);
- len += sizeof(struct udp_hdr);
+ udp->dgram_len = sizeof(struct rte_udp_hdr);
+ len += sizeof(struct rte_udp_hdr);
/* adjust ip total_length */
- ip->total_length += sizeof(struct udp_hdr);
+ ip->total_length += sizeof(struct rte_udp_hdr);
params->udp = true;
} else { /* TCP */
tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
raw_pkt = (uint8_t *)buff;
/* UDP */
if (arfs->tuple.ip_proto == IPPROTO_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
+ udp = (struct rte_udp_hdr *)(raw_pkt + len);
udp->src_port = arfs->tuple.src_port;
udp->dst_port = arfs->tuple.dst_port;
- len += sizeof(struct udp_hdr);
+ len += sizeof(struct rte_udp_hdr);
params->udp = true;
} else { /* TCP */
tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
l4_hdr = packet + l2_len + l3_len;
if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
- *l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
+ *l4_cksum = &((struct rte_udp_hdr *)l4_hdr)->dgram_cksum;
else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
*l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
else
switch (cookie->ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct udp_hdr,
+ hdr->csum_offset = offsetof(struct rte_udp_hdr,
dgram_cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
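The virtio-net header asks the device to compute a checksum starting at csum_start and store it at csum_start + csum_offset. For a plain IPv4/UDP packet this works out as follows (illustrative):

	/* csum_start  = l2_len + l3_len = 14 + 20 = 34
	 * csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum) = 6
	 * so the checksum lands at byte 40 of the frame */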
offsetof(struct rte_tcp_hdr, cksum);
break;
case PKT_TX_UDP_CKSUM:
- gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
+ gdesc->txd.msscof = gdesc->txd.hlen +
+ offsetof(struct rte_udp_hdr,
+ dgram_cksum);
break;
default:
PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
{
struct ipv4_5tuple key;
struct rte_tcp_hdr *tcp;
- struct udp_hdr *udp;
+ struct rte_udp_hdr *udp;
int ret = 0;
key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
break;
case IPPROTO_UDP:
- udp = (struct udp_hdr *)((unsigned char *)ipv4_hdr +
+ udp = (struct rte_udp_hdr *)((unsigned char *)ipv4_hdr +
sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
key.port_src = rte_be_to_cpu_16(udp->src_port);
{
struct ipv6_5tuple key;
struct rte_tcp_hdr *tcp;
- struct udp_hdr *udp;
+ struct rte_udp_hdr *udp;
int ret = 0;
memcpy(key.ip_dst, ipv6_hdr->dst_addr, IPV6_ADDR_LEN);
break;
case IPPROTO_UDP:
- udp = (struct udp_hdr *)((unsigned char *) ipv6_hdr +
+ udp = (struct rte_udp_hdr *)((unsigned char *) ipv6_hdr +
sizeof(struct rte_ipv6_hdr));
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
key.port_src = rte_be_to_cpu_16(udp->src_port);
{
struct ipv4_5tuple key;
struct rte_tcp_hdr *tcp;
- struct udp_hdr *udp;
+ struct rte_udp_hdr *udp;
int ret = 0;
key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
break;
case IPPROTO_UDP:
- udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr +
+ udp = (struct rte_udp_hdr *)((unsigned char *) ipv4_hdr +
sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
key.port_src = rte_be_to_cpu_16(udp->src_port);
uint16_t ethertype;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
struct rte_tcp_hdr *tcp_hdr;
struct rte_sctp_hdr *sctp_hdr;
uint64_t ol_flags = 0;
return 0; /* packet type not supported, nothing to do */
if (l4_proto == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
+ udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
ol_flags |= PKT_TX_UDP_CKSUM;
udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
ethertype, ol_flags);
{
uint8_t l4_proto = 0;
uint16_t outer_header_len;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
union tunnel_offload_info info = { .data = 0 };
struct rte_ether_hdr *phdr =
rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
if (l4_proto != IPPROTO_UDP)
return -1;
- udp_hdr = (struct udp_hdr *)((char *)phdr +
+ udp_hdr = (struct rte_udp_hdr *)((char *)phdr +
info.outer_l2_len + info.outer_l3_len);
/* check UDP destination port; 4789 is the default VXLAN port
(pkt->packet_type & RTE_PTYPE_TUNNEL_MASK) == 0)
return -1;
outer_header_len = info.outer_l2_len + info.outer_l3_len
- + sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr);
+ + sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr);
rte_pktmbuf_adj(pkt, outer_header_len);
struct rte_ether_hdr *pneth =
(struct rte_ether_hdr *) rte_pktmbuf_prepend(m,
sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr)
- + sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr));
+ + sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr));
struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *) &pneth[1];
- struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
struct rte_vxlan_hdr *vxlan = (struct rte_vxlan_hdr *) &udp[1];
/* convert TX queue ID to vport ID */
/* UDP header */
udp->dgram_cksum = 0;
udp->dgram_len = rte_cpu_to_be_16(old_len
- + sizeof(struct udp_hdr)
+ + sizeof(struct rte_udp_hdr)
+ sizeof(struct rte_vxlan_hdr));
udp->dst_port = rte_cpu_to_be_16(vxdev.dst_port);
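That is, the outer datagram length is the inner frame length (old_len) plus the 16 bytes of UDP and VXLAN encapsulation prepended just above.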
* Matches a UDP header.
*/
struct rte_flow_item_udp {
- struct udp_hdr hdr; /**< UDP header definition. */
+ struct rte_udp_hdr hdr; /**< UDP header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_UDP. */
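Application code matching UDP flows only needs the struct name updated; the item itself is unchanged. A minimal sketch of a pattern matching UDP destination port 4789 (illustrative; attributes, actions and error handling omitted):

	struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = RTE_BE16(4789) },
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .dst_port = RTE_BE16(0xffff) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};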
update_vxlan_header(struct gro_vxlan_tcp4_item *item)
{
struct rte_ipv4_hdr *ipv4_hdr;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
struct rte_mbuf *pkt = item->inner_item.firstseg;
uint16_t len;
/* Update the outer UDP header. */
len -= pkt->outer_l3_len;
- udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr + pkt->outer_l3_len);
+ udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + pkt->outer_l3_len);
udp_hdr->dgram_len = rte_cpu_to_be_16(len);
/* Update the inner IPv4 header. */
struct rte_ether_hdr *outer_eth_hdr, *eth_hdr;
struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
struct rte_tcp_hdr *tcp_hdr;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
struct rte_vxlan_hdr *vxlan_hdr;
uint32_t sent_seq;
int32_t tcp_dl;
outer_eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
outer_ipv4_hdr = (struct rte_ipv4_hdr *)((char *)outer_eth_hdr +
pkt->outer_l2_len);
- udp_hdr = (struct udp_hdr *)((char *)outer_ipv4_hdr +
+ udp_hdr = (struct rte_udp_hdr *)((char *)outer_ipv4_hdr +
pkt->outer_l3_len);
vxlan_hdr = (struct rte_vxlan_hdr *)((char *)udp_hdr +
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr +
sizeof(struct rte_vxlan_hdr));
ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
static inline void
update_udp_header(struct rte_mbuf *pkt, uint16_t udp_offset)
{
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
- udp_hdr = (struct udp_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ udp_hdr = (struct rte_udp_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
udp_offset);
udp_hdr->dgram_len = rte_cpu_to_be_16(pkt->pkt_len - udp_offset);
}
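The recomputed dgram_len covers the UDP header plus everything after it: for a 1514-byte segment whose UDP header starts at offset 34 (14-byte Ethernet + 20-byte IPv4), it becomes 1514 - 34 = 1480.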
/* Minimum GSO segment size for UDP-based packets. */
#define RTE_GSO_UDP_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \
- sizeof(struct rte_ipv4_hdr) + sizeof(struct udp_hdr) + 1)
+ sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr) + 1)
/* GSO flags for rte_gso_ctx. */
#define RTE_GSO_FLAG_IPID_FIXED (1ULL << 0)
#define RTE_ETHER_TYPE_MPLSM 0x8848 /**< MPLS multicast ethertype. */
#define RTE_ETHER_VXLAN_HLEN \
- (sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr))
+ (sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr))
/**< VXLAN tunnel header length. */
/**
#define RTE_VXLAN_GPE_TYPE_GBP 6 /**< GBP Protocol. */
#define RTE_VXLAN_GPE_TYPE_VBNG 7 /**< vBNG Protocol. */
-#define RTE_ETHER_VXLAN_GPE_HLEN (sizeof(struct udp_hdr) + \
+#define RTE_ETHER_VXLAN_GPE_HLEN (sizeof(struct rte_udp_hdr) + \
sizeof(struct rte_vxlan_gpe_hdr))
/**< VXLAN-GPE tunnel header length. */
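Both tunnel-header macros evaluate to 16 bytes: sizeof(struct rte_udp_hdr) is 8, and sizeof(struct rte_vxlan_hdr) and sizeof(struct rte_vxlan_gpe_hdr) are 8 each.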
}
if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) {
- hdr_lens->l4_len = sizeof(struct udp_hdr);
+ hdr_lens->l4_len = sizeof(struct rte_udp_hdr);
return pkt_type;
} else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
const struct rte_tcp_hdr *th;
}
if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP) {
- hdr_lens->inner_l4_len = sizeof(struct udp_hdr);
+ hdr_lens->inner_l4_len = sizeof(struct rte_udp_hdr);
} else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
RTE_PTYPE_INNER_L4_TCP) {
const struct rte_tcp_hdr *th;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct rte_tcp_hdr *tcp_hdr;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
uint64_t inner_l3_offset = m->l2_len;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
if (ol_flags & PKT_TX_IPV4) {
- udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
+ udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
m->l3_len);
udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
ol_flags);
ipv6_hdr = rte_pktmbuf_mtod_offset(m,
struct rte_ipv6_hdr *, inner_l3_offset);
/* non-TSO udp */
- udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
+ udp_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_udp_hdr *,
inner_l3_offset + m->l3_len);
udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
ol_flags);
/**
* UDP Header
*/
-struct udp_hdr {
+struct rte_udp_hdr {
uint16_t src_port; /**< UDP source port. */
uint16_t dst_port; /**< UDP destination port. */
uint16_t dgram_len; /**< UDP datagram length */
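A minimal sketch of reading the renamed header from a received mbuf (illustrative; assumes an untagged IPv4/UDP frame):

	struct rte_udp_hdr *uh;

	uh = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
			sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr));
	uint16_t dst_port = rte_be_to_cpu_16(uh->dst_port);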
struct encap_vxlan_ipv4_data {
struct rte_ether_hdr ether;
struct rte_ipv4_hdr ipv4;
- struct udp_hdr udp;
+ struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
struct rte_ether_hdr ether;
struct rte_vlan_hdr vlan;
struct rte_ipv4_hdr ipv4;
- struct udp_hdr udp;
+ struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
struct encap_vxlan_ipv6_data {
struct rte_ether_hdr ether;
struct rte_ipv6_hdr ipv6;
- struct udp_hdr udp;
+ struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
struct rte_ether_hdr ether;
struct rte_vlan_hdr vlan;
struct rte_ipv6_hdr ipv6;
- struct udp_hdr udp;
+ struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
ether_length = (uint16_t)mbuf->pkt_len;
ipv4_total_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
- sizeof(struct udp_hdr) +
+ sizeof(struct rte_udp_hdr) +
sizeof(struct rte_ipv4_hdr));
ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
rte_htons(ipv4_total_length));
udp_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
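Checking the length math above for a 100-byte inner Ethernet frame (illustrative):

	/* ipv4_total_length = 100 + 8 (VXLAN) + 8 (UDP) + 20 (IPv4) = 136
	 * udp_length        = 100 + 8 (VXLAN) + 8 (UDP)             = 116 */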
ether_length = (uint16_t)mbuf->pkt_len;
ipv4_total_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
- sizeof(struct udp_hdr) +
+ sizeof(struct rte_udp_hdr) +
sizeof(struct rte_ipv4_hdr));
ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
rte_htons(ipv4_total_length));
udp_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
ether_length = (uint16_t)mbuf->pkt_len;
ipv6_payload_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
udp_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
ether_length = (uint16_t)mbuf->pkt_len;
ipv6_payload_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
udp_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
tcp->src_port = data->port;
tcp->cksum = tcp_cksum;
} else {
- struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
uint16_t ip_cksum, udp_cksum;
ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
tcp->dst_port = data->port;
tcp->cksum = tcp_cksum;
} else {
- struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
uint16_t ip_cksum, udp_cksum;
ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
tcp->src_port = data->port;
tcp->cksum = tcp_cksum;
} else {
- struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
uint16_t udp_cksum;
udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
tcp->dst_port = data->port;
tcp->cksum = tcp_cksum;
} else {
- struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
uint16_t udp_cksum;
udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
cksum));
break;
case PKT_TX_UDP_CKSUM:
- net_hdr->csum_offset = (offsetof(struct udp_hdr,
+ net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
dgram_cksum));
break;
case PKT_TX_SCTP_CKSUM:
if (l4_proto == IPPROTO_TCP)
m->ol_flags |= PKT_TX_TCP_CKSUM;
break;
- case (offsetof(struct udp_hdr, dgram_cksum)):
+ case (offsetof(struct rte_udp_hdr, dgram_cksum)):
if (l4_proto == IPPROTO_UDP)
m->ol_flags |= PKT_TX_UDP_CKSUM;
break;
case VIRTIO_NET_HDR_GSO_UDP:
m->ol_flags |= PKT_TX_UDP_SEG;
m->tso_segsz = hdr->gso_size;
- m->l4_len = sizeof(struct udp_hdr);
+ m->l4_len = sizeof(struct rte_udp_hdr);
break;
default:
RTE_LOG(WARNING, VHOST_DATA,