static void
parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
info->l4_proto = ipv4_hdr->next_proto_id;
/* only fill l4_len for TCP; it's useful for TSO */
if (info->l4_proto == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
+ tcp_hdr = (struct rte_tcp_hdr *)
+ ((char *)ipv4_hdr + info->l3_len);
info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
} else if (info->l4_proto == IPPROTO_UDP)
info->l4_len = sizeof(struct udp_hdr);
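In struct rte_tcp_hdr the top four bits of data_off hold the header length in 32-bit words, so the combined mask-and-shift above yields bytes in one step. A minimal sketch of that arithmetic (hypothetical helper name, plain C):

    #include <stdint.h>

    /* Header length in bytes from data_off: the top nibble is a count of
     * 32-bit words, and (words << 2) == bytes, hence (v & 0xf0) >> 2. */
    static inline uint8_t
    tcp_hdr_len_bytes(uint8_t data_off)
    {
            return (data_off & 0xf0) >> 2; /* e.g. 0x50 -> 20 bytes */
    }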
static void
parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
info->l3_len = sizeof(struct rte_ipv6_hdr);
info->l4_proto = ipv6_hdr->proto;
/* only fill l4_len for TCP; it's useful for TSO */
if (info->l4_proto == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
+ tcp_hdr = (struct rte_tcp_hdr *)
+ ((char *)ipv6_hdr + info->l3_len);
info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
} else if (info->l4_proto == IPPROTO_UDP)
info->l4_len = sizeof(struct udp_hdr);
{
struct rte_ipv4_hdr *ipv4_hdr = l3_hdr;
struct udp_hdr *udp_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
struct rte_sctp_hdr *sctp_hdr;
uint64_t ol_flags = 0;
uint32_t max_pkt_len, tso_segsz = 0;
if (info->gso_enable)
ol_flags |= PKT_TX_UDP_SEG;
} else if (info->l4_proto == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
tcp_hdr->cksum = 0;
if (tso_segsz)
ol_flags |= PKT_TX_TCP_SEG;
}
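For context, the code above zeroes the checksum and requests TSO via PKT_TX_TCP_SEG when a segment size is set. A hedged sketch of the matching transmit-side mbuf setup (illustrative values, not part of the patch; lengths assume untagged Ethernet, IPv4 and TCP without options):

    #include <rte_mbuf.h>
    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_tcp.h>

    static void
    request_tso(struct rte_mbuf *m, uint16_t mss)
    {
            m->l2_len = sizeof(struct rte_ether_hdr);
            m->l3_len = sizeof(struct rte_ipv4_hdr);
            m->l4_len = sizeof(struct rte_tcp_hdr);
            m->tso_segsz = mss;
            /* PKT_TX_TCP_SEG implies a TCP checksum request; the l4_len
             * above is what lets the PMD locate the payload. */
            m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
    }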
uint16_t
-initialize_tcp_header(struct tcp_hdr *tcp_hdr, uint16_t src_port,
+initialize_tcp_header(struct rte_tcp_hdr *tcp_hdr, uint16_t src_port,
uint16_t dst_port, uint16_t pkt_data_len)
{
uint16_t pkt_len;
- pkt_len = (uint16_t) (pkt_data_len + sizeof(struct tcp_hdr));
+ pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_tcp_hdr));
- memset(tcp_hdr, 0, sizeof(struct tcp_hdr));
+ memset(tcp_hdr, 0, sizeof(struct rte_tcp_hdr));
tcp_hdr->src_port = rte_cpu_to_be_16(src_port);
tcp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
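A hedged usage sketch for this test helper (judging from the body above, the returned value is the segment length, i.e. payload plus the fixed 20-byte header, and the ports are stored big-endian):

    struct rte_tcp_hdr th;
    uint16_t len;

    len = initialize_tcp_header(&th, 1024, 2048, 64); /* len == 84 */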
break;
case IPPROTO_TCP:
copy_buf_to_pkt(proto_hdr,
- sizeof(struct tcp_hdr), pkt,
+ sizeof(struct rte_tcp_hdr), pkt,
eth_hdr_size +
sizeof(struct rte_ipv4_hdr));
break;
break;
case IPPROTO_TCP:
copy_buf_to_pkt(proto_hdr,
- sizeof(struct tcp_hdr), pkt,
+ sizeof(struct rte_tcp_hdr), pkt,
eth_hdr_size +
sizeof(struct rte_ipv6_hdr));
break;
uint16_t dst_port, uint16_t pkt_data_len);
uint16_t
-initialize_tcp_header(struct tcp_hdr *tcp_hdr, uint16_t src_port,
+initialize_tcp_header(struct rte_tcp_hdr *tcp_hdr, uint16_t src_port,
uint16_t dst_port, uint16_t pkt_data_len);
uint16_t
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, src_port),
+ offsetof(struct rte_tcp_hdr, src_port),
},
{
/* rte_flow uses a bit mask for protocol ports */
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, dst_port),
+ offsetof(struct rte_tcp_hdr, dst_port),
},
};
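As a worked check of these offsets (not part of the patch): with an untagged frame, sizeof(struct rte_ether_hdr) is 14 and an optionless sizeof(struct rte_ipv4_hdr) is 20, putting src_port at byte 34 and dst_port at byte 36 of the packet. The same arithmetic can be pinned down at compile time (C11 sketch):

    #include <stddef.h>
    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_tcp.h>

    _Static_assert(sizeof(struct rte_ether_hdr) +
                   sizeof(struct rte_ipv4_hdr) +
                   offsetof(struct rte_tcp_hdr, dst_port) == 36,
                   "dst_port offset in an untagged IPv4/TCP frame");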
{
struct rte_ether_hdr pkt_eth_hdr;
struct rte_ipv4_hdr pkt_ipv4_hdr;
- struct tcp_hdr pkt_tcp_hdr;
+ struct rte_tcp_hdr pkt_tcp_hdr;
uint32_t src_addr = IPV4_ADDR(1, 2, 3, 4);
uint32_t dst_addr = IPV4_ADDR(5, 6, 7, 8);
uint16_t src_port = 16;
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, src_port),
+ offsetof(struct rte_tcp_hdr, src_port),
},
{
/* rte_flow uses a bit mask for protocol ports */
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, dst_port),
+ offsetof(struct rte_tcp_hdr, dst_port),
},
};
int i;
struct udp_hdr *udp_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
uint32_t hash, l3hash, l4hash;
for (i = 0; i < nb_pkts; i++) {
RTE_IPV4_IHL_MULTIPLIER;
if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)
+ tcp_hdr = (struct rte_tcp_hdr *)
((char *)ipv4_hdr +
ip_hdr_offset);
if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
l3hash = ipv6_hash(ipv6_hdr);
if (ipv6_hdr->proto == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
+ tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv6_hdr->proto == IPPROTO_UDP) {
udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
- struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
+ struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr +
mbuf->l3_len);
tcp_hdr->cksum = 0;
if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))
break;
case PKT_TX_TCP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
- offsetof(struct tcp_hdr, cksum));
+ offsetof(struct rte_tcp_hdr, cksum));
cmd_len |= E1000_TXD_CMD_TCP;
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
break;
case PKT_TX_TCP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
+ << E1000_ADVTXD_L4LEN_SHIFT;
break;
case PKT_TX_SCTP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))
#define GET_L4_HDR_LEN(mbuf) \
- ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \
+ ((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *, \
mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
#define ENA_RX_RSS_TABLE_LOG_SIZE 7
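Note that GET_L4_HDR_LEN shifts data_off right by 4 without the byte conversion used elsewhere in this patch ((data_off & 0xf0) >> 2), so it reports the TCP header length in 32-bit words rather than bytes, presumably the unit the ENA descriptor expects.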
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
&udp_mask, &udp_val, sizeof(struct udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
- struct tcp_hdr tcp_mask, tcp_val;
+ struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));
memset(&tcp_val, 0, sizeof(tcp_val));
}
enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
- &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
struct rte_sctp_hdr sctp_mask, sctp_val;
memset(&sctp_mask, 0, sizeof(sctp_mask));
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
&udp_mask, &udp_val, sizeof(struct udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
- struct tcp_hdr tcp_mask, tcp_val;
+ struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));
memset(&tcp_val, 0, sizeof(tcp_val));
tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
}
enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
- &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
struct rte_sctp_hdr sctp_mask, sctp_val;
memset(&sctp_mask, 0, sizeof(sctp_mask));
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
- struct tcp_hdr supported_mask = {
+ struct rte_tcp_hdr supported_mask = {
.src_port = 0xffff,
.dst_port = 0xffff,
};
mask = &rte_flow_item_tcp_mask;
/* Append tcp header to L5 and set ip proto = tcp */
return copy_inner_common(&arg->filter->u.generic_1, off,
- arg->item->spec, mask, sizeof(struct tcp_hdr),
+ arg->item->spec, mask, sizeof(struct rte_tcp_hdr),
arg->l3_proto_off, IPPROTO_TCP, 1);
}
return ENOTSUP;
memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
- sizeof(struct tcp_hdr));
+ sizeof(struct rte_tcp_hdr));
memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
- sizeof(struct tcp_hdr));
+ sizeof(struct rte_tcp_hdr));
return 0;
}
{
unsigned char *payload, *ptr;
struct udp_hdr *udp;
- struct tcp_hdr *tcp;
+ struct rte_tcp_hdr *tcp;
struct rte_sctp_hdr *sctp;
uint8_t size, dst = 0;
uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use L4 by default */
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
unsigned char *payload = NULL;
unsigned char *ptr;
struct udp_hdr *udp;
- struct tcp_hdr *tcp;
+ struct rte_tcp_hdr *tcp;
struct rte_sctp_hdr *sctp;
struct rte_flow_item_gtp *gtp;
struct rte_ipv4_hdr *gtp_ipv4;
udp->dst_port = fdir_input->flow.udp4_flow.src_port;
udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
/**
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
udp->dst_port = fdir_input->flow.udp6_flow.src_port;
udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
+ tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
+ payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
/**
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
- *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_SCTP_CKSUM:
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
- *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_SCTP_CKSUM:
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
- *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
case PKT_TX_SCTP_CKSUM:
case PKT_TX_TCP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
+ << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- attributes->l4.tcp.hdr = (struct tcp_hdr){
+ attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
.src_port = input->flow.tcp4_flow.src_port,
.dst_port = input->flow.tcp4_flow.dst_port,
};
- attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- attributes->l4.tcp.hdr = (struct tcp_hdr){
+ attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
.src_port = input->flow.tcp6_flow.src_port,
.dst_port = input->flow.tcp6_flow.dst_port,
};
- attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};
/* offset of src/dst port is same for TCP and UDP */
p_parser->keys[idx].off =
actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
- offsetof(struct tcp_hdr, src_port) :
- offsetof(struct tcp_hdr, dst_port);
+ offsetof(struct rte_tcp_hdr, src_port) :
+ offsetof(struct rte_tcp_hdr, dst_port);
p_parser->keys[idx].mask = 0xFFFF0000;
p_parser->keys[idx].val =
(__u32)((const struct rte_flow_action_set_tp *)
struct rte_ipv4_hdr *ip;
struct rte_ipv6_hdr *ip6;
struct udp_hdr *udp;
- struct tcp_hdr *tcp;
+ struct rte_tcp_hdr *tcp;
uint16_t len;
raw_pkt = (uint8_t *)buff;
ip->total_length += sizeof(struct udp_hdr);
params->udp = true;
} else { /* TCP */
- tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
tcp->src_port = arfs->tuple.src_port;
tcp->dst_port = arfs->tuple.dst_port;
tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
- len += sizeof(struct tcp_hdr);
+ len += sizeof(struct rte_tcp_hdr);
/* adjust ip total_length */
- ip->total_length += sizeof(struct tcp_hdr);
+ ip->total_length += sizeof(struct rte_tcp_hdr);
params->tcp = true;
}
break;
len += sizeof(struct udp_hdr);
params->udp = true;
} else { /* TCP */
- tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
tcp->src_port = arfs->tuple.src_port;
tcp->dst_port = arfs->tuple.dst_port;
tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
- len += sizeof(struct tcp_hdr);
+ len += sizeof(struct rte_tcp_hdr);
params->tcp = true;
}
break;
size_t header_len = tcph_off + m_seg->l4_len;
/* Offset of the payload in the last segment that contains the header */
size_t in_off = 0;
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
uint16_t packet_id = 0;
uint16_t outer_packet_id = 0;
uint32_t sent_seq;
outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
first_m_seg->outer_l2_len);
- th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
+ th = (const struct rte_tcp_hdr *)(hdr_addr + tcph_off);
rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
sent_seq = rte_be_to_cpu_32(sent_seq);
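(The rte_memcpy() into a local sent_seq, rather than a direct load through th, is presumably there to avoid an unaligned 32-bit access, since the copied header buffer need not be 4-byte aligned.)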
unsigned int *pkt_descs, size_t *pkt_len)
{
uint8_t *tsoh;
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
efsys_dma_addr_t header_paddr;
uint16_t packet_id = 0;
uint32_t sent_seq;
packet_id = sfc_tso_ip4_get_ipid(tsoh, nh_off);
/* Handle TCP header */
- th = (const struct tcp_hdr *)(tsoh + tcph_off);
+ th = (const struct rte_tcp_hdr *)(tsoh + tcph_off);
rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
sent_seq = rte_be_to_cpu_32(sent_seq);
.field_index = 3,
.input_index = 3,
.offset = sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, src_port),
+ offsetof(struct rte_tcp_hdr, src_port),
},
/* Destination Port */
.field_index = 4,
.input_index = 3,
.offset = sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, dst_port),
+ offsetof(struct rte_tcp_hdr, dst_port),
},
};
.field_index = 9,
.input_index = 9,
.offset = sizeof(struct rte_ipv6_hdr) +
- offsetof(struct tcp_hdr, src_port),
+ offsetof(struct rte_tcp_hdr, src_port),
},
/* Destination Port */
.field_index = 10,
.input_index = 9,
.offset = sizeof(struct rte_ipv6_hdr) +
- offsetof(struct tcp_hdr, dst_port),
+ offsetof(struct rte_tcp_hdr, dst_port),
},
};
if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
*l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
- *l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
+ *l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
else
return;
**l4_cksum = 0;
m->l4_len)) {
struct rte_ipv4_hdr *iph;
struct rte_ipv6_hdr *ip6h;
- struct tcp_hdr *th;
+ struct rte_tcp_hdr *th;
uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
uint32_t tmp;
case PKT_TX_TCP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
switch (txm->ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
- gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
+ gdesc->txd.msscof = gdesc->txd.hlen +
+ offsetof(struct rte_tcp_hdr, cksum);
break;
case PKT_TX_UDP_CKSUM:
gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
uint32_t hlen, slen;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
char *ptr;
RTE_ASSERT(rcd->tcp);
if (rcd->v4) {
if (unlikely(slen < hlen + sizeof(struct rte_ipv4_hdr)))
return hw->mtu - sizeof(struct rte_ipv4_hdr)
- - sizeof(struct tcp_hdr);
+ - sizeof(struct rte_tcp_hdr);
ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen);
hlen += (ipv4_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
} else if (rcd->v6) {
if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr)))
return hw->mtu - sizeof(struct rte_ipv6_hdr) -
- sizeof(struct tcp_hdr);
+ sizeof(struct rte_tcp_hdr);
ipv6_hdr = (struct rte_ipv6_hdr *)(ptr + hlen);
hlen += sizeof(struct rte_ipv6_hdr);
}
}
- if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
- return hw->mtu - hlen - sizeof(struct tcp_hdr) +
+ if (unlikely(slen < hlen + sizeof(struct rte_tcp_hdr)))
+ return hw->mtu - hlen - sizeof(struct rte_tcp_hdr) +
sizeof(struct rte_ether_hdr);
- tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+ tcp_hdr = (struct rte_tcp_hdr *)(ptr + hlen);
hlen += (tcp_hdr->data_off & 0xf0) >> 2;
if (rxm->udata64 > 1)
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, src_port),
+ offsetof(struct rte_tcp_hdr, src_port),
},
{
/* rte_flow uses a bit mask for protocol ports */
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, dst_port),
+ offsetof(struct rte_tcp_hdr, dst_port),
},
};
.field_index = 3,
.input_index = 3,
.offset = sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, src_port),
+ offsetof(struct rte_tcp_hdr, src_port),
},
/* Destination Port */
.field_index = 4,
.input_index = 3,
.offset = sizeof(struct rte_ipv4_hdr) +
- offsetof(struct tcp_hdr, dst_port),
+ offsetof(struct rte_tcp_hdr, dst_port),
},
};
.field_index = 9,
.input_index = 9,
.offset = sizeof(struct rte_ipv6_hdr) +
- offsetof(struct tcp_hdr, src_port),
+ offsetof(struct rte_tcp_hdr, src_port),
},
/* Destination Port */
.field_index = 10,
.input_index = 9,
.offset = sizeof(struct rte_ipv6_hdr) +
- offsetof(struct tcp_hdr, dst_port),
+ offsetof(struct rte_tcp_hdr, dst_port),
},
};
lookup_struct_t * ipv4_l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
- struct tcp_hdr *tcp;
+ struct rte_tcp_hdr *tcp;
struct udp_hdr *udp;
int ret = 0;
switch (ipv4_hdr->next_proto_id) {
case IPPROTO_TCP:
- tcp = (struct tcp_hdr *)((unsigned char *)ipv4_hdr +
+ tcp = (struct rte_tcp_hdr *)((unsigned char *)ipv4_hdr +
sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
key.port_src = rte_be_to_cpu_16(tcp->src_port);
lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
struct ipv6_5tuple key;
- struct tcp_hdr *tcp;
+ struct rte_tcp_hdr *tcp;
struct udp_hdr *udp;
int ret = 0;
switch (ipv6_hdr->proto) {
case IPPROTO_TCP:
- tcp = (struct tcp_hdr *)((unsigned char *) ipv6_hdr +
+ tcp = (struct rte_tcp_hdr *)((unsigned char *) ipv6_hdr +
sizeof(struct rte_ipv6_hdr));
key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
key.port_src = rte_be_to_cpu_16(tcp->src_port);
lookup_struct_t *l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
- struct tcp_hdr *tcp;
+ struct rte_tcp_hdr *tcp;
struct udp_hdr *udp;
int ret = 0;
switch (ipv4_hdr->next_proto_id) {
case IPPROTO_TCP:
- tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr +
+ tcp = (struct rte_tcp_hdr *)((unsigned char *) ipv4_hdr +
sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
key.port_src = rte_be_to_cpu_16(tcp->src_port);
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct udp_hdr *udp_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
struct rte_sctp_hdr *sctp_hdr;
uint64_t ol_flags = 0;
udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
ethertype, ol_flags);
} else if (l4_proto == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
/* Set the PKT_TX_TCP_SEG bit before calling get_psd_sum(), because
* get_psd_sum() depends on PKT_TX_TCP_SEG to compute the
* pseudo-header checksum.
{
void *l3_hdr;
struct rte_ipv4_hdr *ipv4_hdr = NULL;
- struct tcp_hdr *tcp_hdr = NULL;
+ struct rte_tcp_hdr *tcp_hdr = NULL;
struct rte_ether_hdr *eth_hdr =
rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
m->ol_flags |= PKT_TX_IP_CKSUM;
}
- tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + m->l3_len);
tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
* Matches a TCP header.
*/
struct rte_flow_item_tcp {
- struct tcp_hdr hdr; /**< TCP header definition. */
+ struct rte_tcp_hdr hdr; /**< TCP header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_TCP. */
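With the item now embedding struct rte_tcp_hdr, a match is expressed by filling hdr in both spec and mask. A minimal sketch, assuming a hypothetical rule that matches TCP destination port 80:

    #include <rte_flow.h>
    #include <rte_byteorder.h>

    /* Match TCP dst_port == 80; every other header field is unmasked. */
    static const struct rte_flow_item_tcp tcp_spec = {
            .hdr.dst_port = RTE_BE16(80),
    };
    static const struct rte_flow_item_tcp tcp_mask = {
            .hdr.dst_port = RTE_BE16(0xffff),
    };
    static const struct rte_flow_item tcp_item = {
            .type = RTE_FLOW_ITEM_TYPE_TCP,
            .spec = &tcp_spec,
            .mask = &tcp_mask,
    };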
{
struct rte_ether_hdr *eth_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
uint32_t sent_seq;
int32_t tcp_dl;
uint16_t ip_id, hdr_len, frag_off;
eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
- tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;
/*
/* The maximum TCP header length */
#define MAX_TCP_HLEN 60
#define INVALID_TCP_HDRLEN(len) \
- (((len) < sizeof(struct tcp_hdr)) || ((len) > MAX_TCP_HLEN))
+ (((len) < sizeof(struct rte_tcp_hdr)) || ((len) > MAX_TCP_HLEN))
/* Header fields representing a TCP/IPv4 flow */
struct tcp4_flow_key {
*/
static inline int
check_seq_option(struct gro_tcp4_item *item,
- struct tcp_hdr *tcph,
+ struct rte_tcp_hdr *tcph,
uint32_t sent_seq,
uint16_t ip_id,
uint16_t tcp_hl,
{
struct rte_mbuf *pkt_orig = item->firstseg;
struct rte_ipv4_hdr *iph_orig;
- struct tcp_hdr *tcph_orig;
+ struct rte_tcp_hdr *tcph_orig;
uint16_t len, tcp_hl_orig;
iph_orig = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt_orig, char *) +
l2_offset + pkt_orig->l2_len);
- tcph_orig = (struct tcp_hdr *)((char *)iph_orig + pkt_orig->l3_len);
+ tcph_orig = (struct rte_tcp_hdr *)((char *)iph_orig + pkt_orig->l3_len);
tcp_hl_orig = pkt_orig->l4_len;
/* Check if TCP option fields equal */
- len = RTE_MAX(tcp_hl, tcp_hl_orig) - sizeof(struct tcp_hdr);
+ len = RTE_MAX(tcp_hl, tcp_hl_orig) - sizeof(struct rte_tcp_hdr);
if ((tcp_hl != tcp_hl_orig) || ((len > 0) &&
(memcmp(tcph + 1, tcph_orig + 1,
len) != 0)))
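(Here tcph + 1 points just past the fixed 20-byte header, so the memcmp() covers only the TCP option bytes: two segments are coalesced only when their header lengths match and their options are byte-for-byte identical.)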
static inline int
check_vxlan_seq_option(struct gro_vxlan_tcp4_item *item,
- struct tcp_hdr *tcp_hdr,
+ struct rte_tcp_hdr *tcp_hdr,
uint32_t sent_seq,
uint16_t outer_ip_id,
uint16_t ip_id,
{
struct rte_ether_hdr *outer_eth_hdr, *eth_hdr;
struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
struct udp_hdr *udp_hdr;
struct rte_vxlan_hdr *vxlan_hdr;
uint32_t sent_seq;
eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr +
sizeof(struct rte_vxlan_hdr));
ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
- tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
/*
* Don't process the packet which has FIN, SYN, RST, PSH, URG,
update_tcp_header(struct rte_mbuf *pkt, uint16_t l4_offset, uint32_t sent_seq,
uint8_t non_tail)
{
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
- tcp_hdr = (struct tcp_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+ tcp_hdr = (struct rte_tcp_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
l4_offset);
tcp_hdr->sent_seq = rte_cpu_to_be_32(sent_seq);
if (likely(non_tail))
struct rte_mbuf **segs, uint16_t nb_segs)
{
struct rte_ipv4_hdr *ipv4_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t id, tail_idx, i;
uint16_t l3_offset = pkt->l2_len;
ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char*) +
l3_offset);
- tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
tail_idx = nb_segs - 1;
struct rte_mbuf **segs, uint16_t nb_segs)
{
struct rte_ipv4_hdr *ipv4_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t outer_id, inner_id, tail_idx, i;
uint16_t outer_ipv4_offset, inner_ipv4_offset;
inner_ipv4_offset);
inner_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
- tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
tail_idx = nb_segs - 1;
/* Minimum GSO segment size for TCP based packets. */
#define RTE_GSO_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \
- sizeof(struct rte_ipv4_hdr) + sizeof(struct tcp_hdr) + 1)
+ sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_tcp_hdr) + 1)
/* Minimum GSO segment size for UDP based packets. */
#define RTE_GSO_UDP_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \
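Worked out, the TCP minimum is 14 (Ethernet) + 20 (IPv4) + 20 (TCP) + 1 byte of payload = 55 bytes; the rename leaves the arithmetic untouched since sizeof(struct rte_tcp_hdr) is still 20.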
hdr_lens->l4_len = sizeof(struct udp_hdr);
return pkt_type;
} else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
- const struct tcp_hdr *th;
- struct tcp_hdr th_copy;
+ const struct rte_tcp_hdr *th;
+ struct rte_tcp_hdr th_copy;
th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
if (unlikely(th == NULL))
hdr_lens->inner_l4_len = sizeof(struct udp_hdr);
} else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
RTE_PTYPE_INNER_L4_TCP) {
- const struct tcp_hdr *th;
- struct tcp_hdr th_copy;
+ const struct rte_tcp_hdr *th;
+ struct rte_tcp_hdr th_copy;
th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy);
if (unlikely(th == NULL))
{
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
- struct tcp_hdr *tcp_hdr;
+ struct rte_tcp_hdr *tcp_hdr;
struct udp_hdr *udp_hdr;
uint64_t inner_l3_offset = m->l2_len;
(ol_flags & PKT_TX_TCP_SEG)) {
if (ol_flags & PKT_TX_IPV4) {
/* non-TSO tcp or TSO */
- tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
+ tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
m->l3_len);
tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
ol_flags);
ipv6_hdr = rte_pktmbuf_mtod_offset(m,
struct rte_ipv6_hdr *, inner_l3_offset);
/* non-TSO tcp or TSO */
- tcp_hdr = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *,
+ tcp_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_tcp_hdr *,
inner_l3_offset + m->l3_len);
tcp_hdr->cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
ol_flags);
/**
* TCP Header
*/
-struct tcp_hdr {
+struct rte_tcp_hdr {
uint16_t src_port; /**< TCP source port. */
uint16_t dst_port; /**< TCP destination port. */
uint32_t sent_seq; /**< TX data sequence number. */
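With the renamed structure, a typical header lookup on a parsed mbuf reads as follows; a hedged sketch (hypothetical helper, assumes l2_len/l3_len were filled in by earlier parsing):

    #include <rte_mbuf.h>
    #include <rte_tcp.h>
    #include <rte_byteorder.h>

    static inline uint16_t
    mbuf_tcp_dst_port(const struct rte_mbuf *m)
    {
            const struct rte_tcp_hdr *th;

            th = rte_pktmbuf_mtod_offset(m, const struct rte_tcp_hdr *,
                                         m->l2_len + m->l3_len);
            return rte_be_to_cpu_16(th->dst_port);
    }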
{
if (cfg->source_nat) {
if (cfg->proto == 0x6) {
- struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
uint16_t ip_cksum, tcp_cksum;
ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
}
} else {
if (cfg->proto == 0x6) {
- struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
uint16_t ip_cksum, tcp_cksum;
ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
{
if (cfg->source_nat) {
if (cfg->proto == 0x6) {
- struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
uint16_t tcp_cksum;
tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
}
} else {
if (cfg->proto == 0x6) {
- struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
uint16_t tcp_cksum;
tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
switch (csum_l4) {
case PKT_TX_TCP_CKSUM:
- net_hdr->csum_offset = (offsetof(struct tcp_hdr,
+ net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
cksum));
break;
case PKT_TX_UDP_CKSUM:
{
uint16_t l4_proto = 0;
void *l4_hdr = NULL;
- struct tcp_hdr *tcp_hdr = NULL;
+ struct rte_tcp_hdr *tcp_hdr = NULL;
if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
return;
if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
if (hdr->csum_start == (m->l2_len + m->l3_len)) {
switch (hdr->csum_offset) {
- case (offsetof(struct tcp_hdr, cksum)):
+ case (offsetof(struct rte_tcp_hdr, cksum)):
if (l4_proto == IPPROTO_TCP)
m->ol_flags |= PKT_TX_TCP_CKSUM;
break;
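As a sanity check on that mapping: offsetof(struct rte_tcp_hdr, cksum) is 16 (two 16-bit ports, two 32-bit sequence fields, data_off, flags and the 16-bit window), which is exactly the csum_offset a virtio guest advertises for TCP checksum offload, so the switch above recovers PKT_TX_TCP_CKSUM for TCP packets.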