From: Olivier Matz
Date: Fri, 26 Oct 2018 07:04:05 +0000 (+0200)
Subject: net: add rte prefix to tcp structure
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=2fbf029d3afa6edee397cb7a6a7aaf1d3bcc7ffd;p=dpdk.git

net: add rte prefix to tcp structure

Add 'rte_' prefix to structures:
- rename struct tcp_hdr as struct rte_tcp_hdr.

Signed-off-by: Olivier Matz
---

diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c index d8b8929411..bcff65eeed 100644 --- a/app/test-pmd/csumonly.c +++ b/app/test-pmd/csumonly.c @@ -102,14 +102,14 @@ get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype) static void parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info) { - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4; info->l4_proto = ipv4_hdr->next_proto_id; /* only fill l4_len for TCP, it's useful for TSO */ if (info->l4_proto == IPPROTO_TCP) { - tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len); + tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + info->l3_len); info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2; } else info->l4_len = 0; @@ -119,14 +119,14 @@ parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info) static void parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info) { - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; info->l3_len = sizeof(struct rte_ipv6_hdr); info->l4_proto = ipv6_hdr->proto; /* only fill l4_len for TCP, it's useful for TSO */ if (info->l4_proto == IPPROTO_TCP) { - tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len); + tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv6_hdr + info->l3_len); info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2; } else info->l4_len = 0; @@ -360,7 +360,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, { struct rte_ipv4_hdr *ipv4_hdr = l3_hdr; struct udp_hdr *udp_hdr; - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; struct rte_sctp_hdr *sctp_hdr; uint64_t ol_flags = 0; uint32_t max_pkt_len, tso_segsz = 0; @@ -414,7 +414,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, if (info->gso_enable) ol_flags |= PKT_TX_UDP_SEG; } else if (info->l4_proto == IPPROTO_TCP) { - tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len); + tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len); tcp_hdr->cksum = 0; if (tso_segsz) ol_flags |= PKT_TX_TCP_SEG; diff --git a/doc/guides/sample_app_ug/flow_classify.rst b/doc/guides/sample_app_ug/flow_classify.rst index 8172a96c42..1b354c31ab 100644 --- a/doc/guides/sample_app_ug/flow_classify.rst +++ b/doc/guides/sample_app_ug/flow_classify.rst @@ -126,7 +126,7 @@ initialisation of the ``Flow Classify`` application.. .input_index = SRCP_DESTP_INPUT_IPV4, .offset = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, src_port), + offsetof(struct rte_tcp_hdr, src_port), }, { /* rte_flow uses a bit mask for protocol ports */ @@ -136,7 +136,7 @@ initialisation of the ``Flow Classify`` application..
.input_index = SRCP_DESTP_INPUT_IPV4, .offset = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, dst_port), + offsetof(struct rte_tcp_hdr, dst_port), }, }; diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c index 157744937c..d3a63e6c03 100644 --- a/drivers/net/avf/avf_rxtx.c +++ b/drivers/net/avf/avf_rxtx.c @@ -1417,7 +1417,7 @@ avf_txd_enable_checksum(uint64_t ol_flags, switch (ol_flags & PKT_TX_L4_MASK) { case PKT_TX_TCP_CKSUM: *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (sizeof(struct tcp_hdr) >> 2) << + *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case PKT_TX_SCTP_CKSUM: diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index dd7d3fb204..8a6d52ea9e 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -835,7 +835,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, int i; struct udp_hdr *udp_hdr; - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; uint32_t hash, l3hash, l4hash; for (i = 0; i < nb_pkts; i++) { @@ -860,7 +860,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, RTE_IPV4_IHL_MULTIPLIER; if (ipv4_hdr->next_proto_id == IPPROTO_TCP) { - tcp_hdr = (struct tcp_hdr *) + tcp_hdr = (struct rte_tcp_hdr *) ((char *)ipv4_hdr + ip_hdr_offset); l4hash = HASH_L4_PORTS(tcp_hdr); @@ -878,7 +878,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, l3hash = ipv6_hash(ipv6_hdr); if (ipv6_hdr->proto == IPPROTO_TCP) { - tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1); + tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1); l4hash = HASH_L4_PORTS(tcp_hdr); } else if (ipv6_hdr->proto == IPPROTO_UDP) { udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1); diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c index 9aa31ad426..fb7f3cd528 100644 --- a/drivers/net/dpaa/dpaa_rxtx.c +++ b/drivers/net/dpaa/dpaa_rxtx.c @@ -218,7 +218,7 @@ static inline void dpaa_checksum(struct rte_mbuf *mbuf) ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) { - struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr + + struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr + mbuf->l3_len); tcp_hdr->cksum = 0; if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4)) diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c index 84c0c537b0..27146607cc 100644 --- a/drivers/net/e1000/em_rxtx.c +++ b/drivers/net/e1000/em_rxtx.c @@ -247,7 +247,7 @@ em_set_xmit_ctx(struct em_tx_queue* txq, break; case PKT_TX_TCP_CKSUM: ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse + - offsetof(struct tcp_hdr, cksum)); + offsetof(struct rte_tcp_hdr, cksum)); cmd_len |= E1000_TXD_CMD_TCP; cmp_mask |= TX_MACIP_LEN_CMP_MASK; break; diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c index 8b6fddbd71..9046d21a94 100644 --- a/drivers/net/e1000/igb_rxtx.c +++ b/drivers/net/e1000/igb_rxtx.c @@ -290,7 +290,7 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq, case PKT_TX_TCP_CKSUM: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP | E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; break; case PKT_TX_SCTP_CKSUM: type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP | diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index eaeed9fee3..198d731c9e 100644 --- 
a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -74,7 +74,7 @@ #define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift)) #define GET_L4_HDR_LEN(mbuf) \ - ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \ + ((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *, \ mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) #define ENA_RX_RSS_TABLE_LOG_SIZE 7 diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c index 96a3aac873..c9d8980447 100644 --- a/drivers/net/enic/enic_clsf.c +++ b/drivers/net/enic/enic_clsf.c @@ -132,7 +132,7 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input, enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4, &udp_mask, &udp_val, sizeof(struct udp_hdr)); } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) { - struct tcp_hdr tcp_mask, tcp_val; + struct rte_tcp_hdr tcp_mask, tcp_val; memset(&tcp_mask, 0, sizeof(tcp_mask)); memset(&tcp_val, 0, sizeof(tcp_val)); @@ -146,7 +146,7 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input, } enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4, - &tcp_mask, &tcp_val, sizeof(struct tcp_hdr)); + &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr)); } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) { struct rte_sctp_hdr sctp_mask, sctp_val; memset(&sctp_mask, 0, sizeof(sctp_mask)); @@ -221,7 +221,7 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input, enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4, &udp_mask, &udp_val, sizeof(struct udp_hdr)); } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) { - struct tcp_hdr tcp_mask, tcp_val; + struct rte_tcp_hdr tcp_mask, tcp_val; memset(&tcp_mask, 0, sizeof(tcp_mask)); memset(&tcp_val, 0, sizeof(tcp_val)); @@ -234,7 +234,7 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input, tcp_val.dst_port = input->flow.tcp6_flow.dst_port; } enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4, - &tcp_mask, &tcp_val, sizeof(struct tcp_hdr)); + &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr)); } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) { struct rte_sctp_hdr sctp_mask, sctp_val; memset(&sctp_mask, 0, sizeof(sctp_mask)); diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c index d3cbbd4c75..fd5b2d1909 100644 --- a/drivers/net/enic/enic_flow.c +++ b/drivers/net/enic/enic_flow.c @@ -448,7 +448,7 @@ enic_copy_item_tcp_v1(const struct rte_flow_item *item, const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4; - struct tcp_hdr supported_mask = { + struct rte_tcp_hdr supported_mask = { .src_port = 0xffff, .dst_port = 0xffff, }; @@ -785,19 +785,19 @@ enic_copy_item_tcp_v2(const struct rte_flow_item *item, if (*inner_ofst == 0) { memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, - sizeof(struct tcp_hdr)); + sizeof(struct rte_tcp_hdr)); memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, - sizeof(struct tcp_hdr)); + sizeof(struct rte_tcp_hdr)); } else { /* Inner IPv6 header. 
Mask/Val start at *inner_ofst into L5 */ - if ((*inner_ofst + sizeof(struct tcp_hdr)) > + if ((*inner_ofst + sizeof(struct rte_tcp_hdr)) > FILTER_GENERIC_1_KEY_LEN) return ENOTSUP; memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst], - mask, sizeof(struct tcp_hdr)); + mask, sizeof(struct rte_tcp_hdr)); memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst], - spec, sizeof(struct tcp_hdr)); - *inner_ofst += sizeof(struct tcp_hdr); + spec, sizeof(struct rte_tcp_hdr)); + *inner_ofst += sizeof(struct rte_tcp_hdr); } return 0; } diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index 14a82426e1..46dfa79b7f 100644 --- a/drivers/net/i40e/i40e_fdir.c +++ b/drivers/net/i40e/i40e_fdir.c @@ -800,7 +800,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf, { unsigned char *payload, *ptr; struct udp_hdr *udp; - struct tcp_hdr *tcp; + struct rte_tcp_hdr *tcp; struct rte_sctp_hdr *sctp; uint8_t size, dst = 0; uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/ @@ -828,8 +828,8 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf, break; case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: - tcp = (struct tcp_hdr *)(raw_pkt + len); - payload = (unsigned char *)tcp + sizeof(struct tcp_hdr); + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr); /* * The source and destination fields in the transmitted packet * need to be presented in a reversed order with respect @@ -873,8 +873,8 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf, break; case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: - tcp = (struct tcp_hdr *)(raw_pkt + len); - payload = (unsigned char *)tcp + sizeof(struct tcp_hdr); + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr); /* * The source and destination fields in the transmitted packet * need to be presented in a reversed order with respect @@ -1090,7 +1090,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf, unsigned char *payload = NULL; unsigned char *ptr; struct udp_hdr *udp; - struct tcp_hdr *tcp; + struct rte_tcp_hdr *tcp; struct rte_sctp_hdr *sctp; struct rte_flow_item_gtp *gtp; struct rte_ipv4_hdr *gtp_ipv4; @@ -1127,8 +1127,8 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf, udp->dst_port = fdir_input->flow.udp4_flow.src_port; udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN); } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) { - tcp = (struct tcp_hdr *)(raw_pkt + len); - payload = (unsigned char *)tcp + sizeof(struct tcp_hdr); + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr); /** * The source and destination fields in the transmitted packet * need to be presented in a reversed order with respect @@ -1164,8 +1164,8 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf, udp->dst_port = fdir_input->flow.udp6_flow.src_port; udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) { - tcp = (struct tcp_hdr *)(raw_pkt + len); - payload = (unsigned char *)tcp + sizeof(struct tcp_hdr); + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); + payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr); /** * The source and destination fields in the transmitted packet * need to be presented in a reversed order with respect diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index 82bbac2ce9..e5f18538f4 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -298,7 +298,7 @@ i40e_txd_enable_checksum(uint64_t 
ol_flags, switch (ol_flags & PKT_TX_L4_MASK) { case PKT_TX_TCP_CKSUM: *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (sizeof(struct tcp_hdr) >> 2) << + *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case PKT_TX_SCTP_CKSUM: diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index eb41f5ed0d..036acaa14b 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -425,7 +425,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, case PKT_TX_TCP_CKSUM: type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; - mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= sizeof(struct rte_tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; tx_offload_mask.l2_len |= ~0; tx_offload_mask.l3_len |= ~0; break; diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index d99e2c5266..76af6a8b60 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -2563,11 +2563,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: - attributes->l4.tcp.hdr = (struct tcp_hdr){ + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ .src_port = input->flow.tcp4_flow.src_port, .dst_port = input->flow.tcp4_flow.dst_port, }; - attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; @@ -2593,11 +2593,11 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: - attributes->l4.tcp.hdr = (struct tcp_hdr){ + attributes->l4.tcp.hdr = (struct rte_tcp_hdr){ .src_port = input->flow.tcp6_flow.src_port, .dst_port = input->flow.tcp6_flow.dst_port, }; - attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){ .src_port = mask->src_port_mask, .dst_port = mask->dst_port_mask, }; diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c index f287762a45..7cfc72cdff 100644 --- a/drivers/net/mlx5/mlx5_flow_tcf.c +++ b/drivers/net/mlx5/mlx5_flow_tcf.c @@ -452,8 +452,8 @@ flow_tcf_pedit_key_set_tp_port(const struct rte_flow_action *actions, /* offset of src/dst port is same for TCP and UDP */ p_parser->keys[idx].off = actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ? 
- offsetof(struct tcp_hdr, src_port) : - offsetof(struct tcp_hdr, dst_port); + offsetof(struct rte_tcp_hdr, src_port) : + offsetof(struct rte_tcp_hdr, dst_port); p_parser->keys[idx].mask = 0xFFFF0000; p_parser->keys[idx].val = (__u32)((const struct rte_flow_action_set_tp *) diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c index 34299b9933..cd762cddb1 100644 --- a/drivers/net/qede/qede_filter.c +++ b/drivers/net/qede/qede_filter.c @@ -460,7 +460,7 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev, struct rte_ipv4_hdr *ip; struct rte_ipv6_hdr *ip6; struct udp_hdr *udp; - struct tcp_hdr *tcp; + struct rte_tcp_hdr *tcp; uint16_t len; raw_pkt = (uint8_t *)buff; @@ -496,13 +496,13 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev, ip->total_length += sizeof(struct udp_hdr); params->udp = true; } else { /* TCP */ - tcp = (struct tcp_hdr *)(raw_pkt + len); + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); tcp->src_port = arfs->tuple.src_port; tcp->dst_port = arfs->tuple.dst_port; tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF; - len += sizeof(struct tcp_hdr); + len += sizeof(struct rte_tcp_hdr); /* adjust ip total_length */ - ip->total_length += sizeof(struct tcp_hdr); + ip->total_length += sizeof(struct rte_tcp_hdr); params->tcp = true; } break; @@ -528,11 +528,11 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev, len += sizeof(struct udp_hdr); params->udp = true; } else { /* TCP */ - tcp = (struct tcp_hdr *)(raw_pkt + len); + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); tcp->src_port = arfs->tuple.src_port; tcp->dst_port = arfs->tuple.dst_port; tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF; - len += sizeof(struct tcp_hdr); + len += sizeof(struct rte_tcp_hdr); params->tcp = true; } break; diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c index e52de8ebd0..06dc5b6e41 100644 --- a/drivers/net/sfc/sfc_ef10_tx.c +++ b/drivers/net/sfc/sfc_ef10_tx.c @@ -329,7 +329,7 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg, size_t header_len = m_seg->l2_len + m_seg->l3_len + m_seg->l4_len; /* Offset of the payload in the last segment that contains the header */ size_t in_off = 0; - const struct tcp_hdr *th; + const struct rte_tcp_hdr *th; uint16_t packet_id; uint32_t sent_seq; uint8_t *hdr_addr; @@ -437,7 +437,7 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg, return EINVAL; } - th = (const struct tcp_hdr *)(hdr_addr + tcph_off); + th = (const struct rte_tcp_hdr *)(hdr_addr + tcph_off); rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t)); sent_seq = rte_be_to_cpu_32(sent_seq); diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c index cfa5c037a6..2675ee19e4 100644 --- a/drivers/net/sfc/sfc_tso.c +++ b/drivers/net/sfc/sfc_tso.c @@ -95,7 +95,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx, unsigned int *pkt_descs, size_t *pkt_len) { uint8_t *tsoh; - const struct tcp_hdr *th; + const struct rte_tcp_hdr *th; efsys_dma_addr_t header_paddr; uint16_t packet_id; uint32_t sent_seq; @@ -158,7 +158,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx, } /* Handle TCP header */ - th = (const struct tcp_hdr *)(tsoh + tcph_off); + th = (const struct rte_tcp_hdr *)(tsoh + tcph_off); rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t)); sent_seq = rte_be_to_cpu_32(sent_seq); diff --git a/drivers/net/softnic/rte_eth_softnic_pipeline.c b/drivers/net/softnic/rte_eth_softnic_pipeline.c index 4e6735c75e..9e85f461bc 100644 --- 
a/drivers/net/softnic/rte_eth_softnic_pipeline.c +++ b/drivers/net/softnic/rte_eth_softnic_pipeline.c @@ -620,7 +620,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = { .field_index = 3, .input_index = 3, .offset = sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, src_port), + offsetof(struct rte_tcp_hdr, src_port), }, /* Destination Port */ @@ -630,7 +630,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = { .field_index = 4, .input_index = 3, .offset = sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, dst_port), + offsetof(struct rte_tcp_hdr, dst_port), }, }; @@ -717,7 +717,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = { .field_index = 9, .input_index = 9, .offset = sizeof(struct rte_ipv6_hdr) + - offsetof(struct tcp_hdr, src_port), + offsetof(struct rte_tcp_hdr, src_port), }, /* Destination Port */ @@ -727,7 +727,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = { .field_index = 10, .input_index = 9, .offset = sizeof(struct rte_ipv6_hdr) + - offsetof(struct tcp_hdr, dst_port), + offsetof(struct rte_tcp_hdr, dst_port), }, }; diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c index 3d7f56a9ae..dd19f0119f 100644 --- a/drivers/net/tap/rte_eth_tap.c +++ b/drivers/net/tap/rte_eth_tap.c @@ -475,7 +475,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len, if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) *l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum; else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) - *l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum; + *l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum; else return; **l4_cksum = 0; diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index b364508713..429beded8f 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -318,7 +318,7 @@ virtio_tso_fix_cksum(struct rte_mbuf *m) m->l4_len)) { struct rte_ipv4_hdr *iph; struct rte_ipv6_hdr *ip6h; - struct tcp_hdr *th; + struct rte_tcp_hdr *th; uint16_t prev_cksum, new_cksum, ip_len, ip_paylen; uint32_t tmp; @@ -373,7 +373,7 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr, case PKT_TX_TCP_CKSUM: hdr->csum_start = cookie->l2_len + cookie->l3_len; - hdr->csum_offset = offsetof(struct tcp_hdr, cksum); + hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum); hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; break; diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c index 3bbf9adf71..8726eabb9a 100644 --- a/drivers/net/vmxnet3/vmxnet3_rxtx.c +++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -539,7 +539,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, switch (txm->ol_flags & PKT_TX_L4_MASK) { case PKT_TX_TCP_CKSUM: - gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum); + gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct rte_tcp_hdr, cksum); break; case PKT_TX_UDP_CKSUM: gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum); @@ -667,7 +667,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd, uint32_t hlen, slen; struct rte_ipv4_hdr *ipv4_hdr; struct rte_ipv6_hdr *ipv6_hdr; - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; char *ptr; RTE_ASSERT(rcd->tcp); @@ -679,7 +679,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd, if (rcd->v4) { if (unlikely(slen < hlen + sizeof(struct rte_ipv4_hdr))) return hw->mtu - sizeof(struct rte_ipv4_hdr) - 
- sizeof(struct tcp_hdr); + - sizeof(struct rte_tcp_hdr); ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen); hlen += (ipv4_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) * @@ -687,7 +687,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd, } else if (rcd->v6) { if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr))) return hw->mtu - sizeof(struct rte_ipv6_hdr) - - sizeof(struct tcp_hdr); + sizeof(struct rte_tcp_hdr); ipv6_hdr = (struct rte_ipv6_hdr *)(ptr + hlen); hlen += sizeof(struct rte_ipv6_hdr); @@ -699,11 +699,11 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd, } } - if (unlikely(slen < hlen + sizeof(struct tcp_hdr))) - return hw->mtu - hlen - sizeof(struct tcp_hdr) + + if (unlikely(slen < hlen + sizeof(struct rte_tcp_hdr))) + return hw->mtu - hlen - sizeof(struct rte_tcp_hdr) + sizeof(struct rte_ether_hdr); - tcp_hdr = (struct tcp_hdr *)(ptr + hlen); + tcp_hdr = (struct rte_tcp_hdr *)(ptr + hlen); hlen += (tcp_hdr->data_off & 0xf0) >> 2; if (rxm->udata64 > 1) diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c index e68d0f694b..75a66fd5bc 100644 --- a/examples/flow_classify/flow_classify.c +++ b/examples/flow_classify/flow_classify.c @@ -133,7 +133,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = { .input_index = SRCP_DESTP_INPUT_IPV4, .offset = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, src_port), + offsetof(struct rte_tcp_hdr, src_port), }, { /* rte_flow uses a bit mask for protocol ports */ @@ -143,7 +143,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = { .input_index = SRCP_DESTP_INPUT_IPV4, .offset = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, dst_port), + offsetof(struct rte_tcp_hdr, dst_port), }, }; diff --git a/examples/ip_pipeline/pipeline.c b/examples/ip_pipeline/pipeline.c index 1e44fe37c8..e0a8e9a6bb 100644 --- a/examples/ip_pipeline/pipeline.c +++ b/examples/ip_pipeline/pipeline.c @@ -664,7 +664,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = { .field_index = 3, .input_index = 3, .offset = sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, src_port), + offsetof(struct rte_tcp_hdr, src_port), }, /* Destination Port */ @@ -674,7 +674,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = { .field_index = 4, .input_index = 3, .offset = sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, dst_port), + offsetof(struct rte_tcp_hdr, dst_port), }, }; @@ -761,7 +761,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = { .field_index = 9, .input_index = 9, .offset = sizeof(struct rte_ipv6_hdr) + - offsetof(struct tcp_hdr, src_port), + offsetof(struct rte_tcp_hdr, src_port), }, /* Destination Port */ @@ -771,7 +771,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = { .field_index = 10, .input_index = 9, .offset = sizeof(struct rte_ipv6_hdr) + - offsetof(struct tcp_hdr, dst_port), + offsetof(struct rte_tcp_hdr, dst_port), }, }; diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c index f78aeab532..72d2c53e1f 100644 --- a/examples/l3fwd-power/main.c +++ b/examples/l3fwd-power/main.c @@ -485,7 +485,7 @@ get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid, lookup_struct_t * ipv4_l3fwd_lookup_struct) { struct ipv4_5tuple key; - struct tcp_hdr *tcp; + struct rte_tcp_hdr *tcp; struct udp_hdr *udp; int ret = 0; @@ -495,7 +495,7 @@ 
get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid, switch (ipv4_hdr->next_proto_id) { case IPPROTO_TCP: - tcp = (struct tcp_hdr *)((unsigned char *)ipv4_hdr + + tcp = (struct rte_tcp_hdr *)((unsigned char *)ipv4_hdr + sizeof(struct rte_ipv4_hdr)); key.port_dst = rte_be_to_cpu_16(tcp->dst_port); key.port_src = rte_be_to_cpu_16(tcp->src_port); @@ -524,7 +524,7 @@ get_ipv6_dst_port(struct rte_ipv6_hdr *ipv6_hdr, uint16_t portid, lookup_struct_t *ipv6_l3fwd_lookup_struct) { struct ipv6_5tuple key; - struct tcp_hdr *tcp; + struct rte_tcp_hdr *tcp; struct udp_hdr *udp; int ret = 0; @@ -535,7 +535,7 @@ get_ipv6_dst_port(struct rte_ipv6_hdr *ipv6_hdr, uint16_t portid, switch (ipv6_hdr->proto) { case IPPROTO_TCP: - tcp = (struct tcp_hdr *)((unsigned char *) ipv6_hdr + + tcp = (struct rte_tcp_hdr *)((unsigned char *) ipv6_hdr + sizeof(struct rte_ipv6_hdr)); key.port_dst = rte_be_to_cpu_16(tcp->dst_port); key.port_src = rte_be_to_cpu_16(tcp->src_port); diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c index 5e3809b8d2..d57ac262b6 100644 --- a/examples/l3fwd-vf/main.c +++ b/examples/l3fwd-vf/main.c @@ -366,7 +366,7 @@ get_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid, lookup_struct_t *l3fwd_lookup_struct) { struct ipv4_5tuple key; - struct tcp_hdr *tcp; + struct rte_tcp_hdr *tcp; struct udp_hdr *udp; int ret = 0; @@ -376,7 +376,7 @@ get_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid, switch (ipv4_hdr->next_proto_id) { case IPPROTO_TCP: - tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr + + tcp = (struct rte_tcp_hdr *)((unsigned char *) ipv4_hdr + sizeof(struct rte_ipv4_hdr)); key.port_dst = rte_be_to_cpu_16(tcp->dst_port); key.port_src = rte_be_to_cpu_16(tcp->src_port); diff --git a/examples/tep_termination/vxlan.c b/examples/tep_termination/vxlan.c index 07a53abc31..261332adf9 100644 --- a/examples/tep_termination/vxlan.c +++ b/examples/tep_termination/vxlan.c @@ -76,7 +76,7 @@ process_inner_cksums(struct rte_ether_hdr *eth_hdr, union tunnel_offload_info *i struct rte_ipv4_hdr *ipv4_hdr; struct rte_ipv6_hdr *ipv6_hdr; struct udp_hdr *udp_hdr; - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; struct rte_sctp_hdr *sctp_hdr; uint64_t ol_flags = 0; @@ -112,7 +112,7 @@ process_inner_cksums(struct rte_ether_hdr *eth_hdr, union tunnel_offload_info *i udp_hdr->dgram_cksum = get_psd_sum(l3_hdr, ethertype, ol_flags); } else if (l4_proto == IPPROTO_TCP) { - tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len); + tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len); /* Put PKT_TX_TCP_SEG bit setting before get_psd_sum(), because * it depends on PKT_TX_TCP_SEG to calculate pseudo-header * checksum. 
diff --git a/examples/vhost/main.c b/examples/vhost/main.c index d7ee4530e8..a973cb4b17 100644 --- a/examples/vhost/main.c +++ b/examples/vhost/main.c @@ -879,7 +879,7 @@ static void virtio_tx_offload(struct rte_mbuf *m) { void *l3_hdr; struct rte_ipv4_hdr *ipv4_hdr = NULL; - struct tcp_hdr *tcp_hdr = NULL; + struct rte_tcp_hdr *tcp_hdr = NULL; struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); l3_hdr = (char *)eth_hdr + m->l2_len; @@ -890,7 +890,7 @@ static void virtio_tx_offload(struct rte_mbuf *m) m->ol_flags |= PKT_TX_IP_CKSUM; } - tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len); + tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + m->l3_len); tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags); } diff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h index 23f7cb9b57..cea95b98ab 100644 --- a/lib/librte_ethdev/rte_flow.h +++ b/lib/librte_ethdev/rte_flow.h @@ -704,7 +704,7 @@ static const struct rte_flow_item_udp rte_flow_item_udp_mask = { * Matches a TCP header. */ struct rte_flow_item_tcp { - struct tcp_hdr hdr; /**< TCP header definition. */ + struct rte_tcp_hdr hdr; /**< TCP header definition. */ }; /** Default mask for RTE_FLOW_ITEM_TYPE_TCP. */ diff --git a/lib/librte_gro/gro_tcp4.c b/lib/librte_gro/gro_tcp4.c index 3ed8cda029..73dbb8dd57 100644 --- a/lib/librte_gro/gro_tcp4.c +++ b/lib/librte_gro/gro_tcp4.c @@ -196,7 +196,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt, { struct rte_ether_hdr *eth_hdr; struct rte_ipv4_hdr *ipv4_hdr; - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; uint32_t sent_seq; uint16_t tcp_dl, ip_id, hdr_len, frag_off; uint8_t is_atomic; @@ -209,7 +209,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt, eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len); - tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len); + tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len); hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len; /* diff --git a/lib/librte_gro/gro_tcp4.h b/lib/librte_gro/gro_tcp4.h index e33b650839..63f06bec40 100644 --- a/lib/librte_gro/gro_tcp4.h +++ b/lib/librte_gro/gro_tcp4.h @@ -255,7 +255,7 @@ merge_two_tcp4_packets(struct gro_tcp4_item *item, */ static inline int check_seq_option(struct gro_tcp4_item *item, - struct tcp_hdr *tcph, + struct rte_tcp_hdr *tcph, uint32_t sent_seq, uint16_t ip_id, uint16_t tcp_hl, @@ -265,16 +265,16 @@ check_seq_option(struct gro_tcp4_item *item, { struct rte_mbuf *pkt_orig = item->firstseg; struct rte_ipv4_hdr *iph_orig; - struct tcp_hdr *tcph_orig; + struct rte_tcp_hdr *tcph_orig; uint16_t len, tcp_hl_orig; iph_orig = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt_orig, char *) + l2_offset + pkt_orig->l2_len); - tcph_orig = (struct tcp_hdr *)((char *)iph_orig + pkt_orig->l3_len); + tcph_orig = (struct rte_tcp_hdr *)((char *)iph_orig + pkt_orig->l3_len); tcp_hl_orig = pkt_orig->l4_len; /* Check if TCP option fields equal */ - len = RTE_MAX(tcp_hl, tcp_hl_orig) - sizeof(struct tcp_hdr); + len = RTE_MAX(tcp_hl, tcp_hl_orig) - sizeof(struct rte_tcp_hdr); if ((tcp_hl != tcp_hl_orig) || ((len > 0) && (memcmp(tcph + 1, tcph_orig + 1, len) != 0))) diff --git a/lib/librte_gro/gro_vxlan_tcp4.c b/lib/librte_gro/gro_vxlan_tcp4.c index 80d2d52884..3cfe20f752 100644 --- a/lib/librte_gro/gro_vxlan_tcp4.c +++ b/lib/librte_gro/gro_vxlan_tcp4.c @@ -207,7 +207,7 @@ is_same_vxlan_tcp4_flow(struct vxlan_tcp4_flow_key k1, static inline int check_vxlan_seq_option(struct gro_vxlan_tcp4_item *item, - 
struct tcp_hdr *tcp_hdr, + struct rte_tcp_hdr *tcp_hdr, uint32_t sent_seq, uint16_t outer_ip_id, uint16_t ip_id, @@ -291,7 +291,7 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt, { struct rte_ether_hdr *outer_eth_hdr, *eth_hdr; struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr; - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; struct udp_hdr *udp_hdr; struct rte_vxlan_hdr *vxlan_hdr; uint32_t sent_seq; @@ -315,7 +315,7 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt, eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr + sizeof(struct rte_vxlan_hdr)); ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len); - tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len); + tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len); /* * Don't process the packet which has FIN, SYN, RST, PSH, URG, diff --git a/lib/librte_gso/gso_common.h b/lib/librte_gso/gso_common.h index e5c8d83e77..a350301532 100644 --- a/lib/librte_gso/gso_common.h +++ b/lib/librte_gso/gso_common.h @@ -71,9 +71,9 @@ static inline void update_tcp_header(struct rte_mbuf *pkt, uint16_t l4_offset, uint32_t sent_seq, uint8_t non_tail) { - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; - tcp_hdr = (struct tcp_hdr *)(rte_pktmbuf_mtod(pkt, char *) + + tcp_hdr = (struct rte_tcp_hdr *)(rte_pktmbuf_mtod(pkt, char *) + l4_offset); tcp_hdr->sent_seq = rte_cpu_to_be_32(sent_seq); if (likely(non_tail)) diff --git a/lib/librte_gso/gso_tcp4.c b/lib/librte_gso/gso_tcp4.c index ad0cce6f90..ade172ac73 100644 --- a/lib/librte_gso/gso_tcp4.c +++ b/lib/librte_gso/gso_tcp4.c @@ -10,7 +10,7 @@ update_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta, struct rte_mbuf **segs, uint16_t nb_segs) { struct rte_ipv4_hdr *ipv4_hdr; - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; uint32_t sent_seq; uint16_t id, tail_idx, i; uint16_t l3_offset = pkt->l2_len; @@ -18,7 +18,7 @@ update_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta, ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char*) + l3_offset); - tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len); + tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len); id = rte_be_to_cpu_16(ipv4_hdr->packet_id); sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq); tail_idx = nb_segs - 1; diff --git a/lib/librte_gso/gso_tunnel_tcp4.c b/lib/librte_gso/gso_tunnel_tcp4.c index f5a19bc43b..e0384c26d0 100644 --- a/lib/librte_gso/gso_tunnel_tcp4.c +++ b/lib/librte_gso/gso_tunnel_tcp4.c @@ -10,7 +10,7 @@ update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta, struct rte_mbuf **segs, uint16_t nb_segs) { struct rte_ipv4_hdr *ipv4_hdr; - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; uint32_t sent_seq; uint16_t outer_id, inner_id, tail_idx, i; uint16_t outer_ipv4_offset, inner_ipv4_offset; @@ -32,7 +32,7 @@ update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta, inner_ipv4_offset); inner_id = rte_be_to_cpu_16(ipv4_hdr->packet_id); - tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len); + tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len); sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq); tail_idx = nb_segs - 1; diff --git a/lib/librte_gso/rte_gso.h b/lib/librte_gso/rte_gso.h index ee879968c8..8f65adf1cd 100644 --- a/lib/librte_gso/rte_gso.h +++ b/lib/librte_gso/rte_gso.h @@ -19,7 +19,7 @@ extern "C" { /* Minimum GSO segment size for TCP based packets. 
*/ #define RTE_GSO_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \ - sizeof(struct rte_ipv4_hdr) + sizeof(struct tcp_hdr) + 1) + sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_tcp_hdr) + 1) /* Minimum GSO segment size for UDP based packets. */ #define RTE_GSO_UDP_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \ diff --git a/lib/librte_net/rte_net.c b/lib/librte_net/rte_net.c index 3e9d2d23d3..9712c75ac3 100644 --- a/lib/librte_net/rte_net.c +++ b/lib/librte_net/rte_net.c @@ -341,8 +341,8 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m, hdr_lens->l4_len = sizeof(struct udp_hdr); return pkt_type; } else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) { - const struct tcp_hdr *th; - struct tcp_hdr th_copy; + const struct rte_tcp_hdr *th; + struct rte_tcp_hdr th_copy; th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy); if (unlikely(th == NULL)) @@ -477,8 +477,8 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m, hdr_lens->inner_l4_len = sizeof(struct udp_hdr); } else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_TCP) { - const struct tcp_hdr *th; - struct tcp_hdr th_copy; + const struct rte_tcp_hdr *th; + struct rte_tcp_hdr th_copy; th = rte_pktmbuf_read(m, off, sizeof(*th), &th_copy); if (unlikely(th == NULL)) diff --git a/lib/librte_net/rte_net.h b/lib/librte_net/rte_net.h index 84f8dfe1cc..4c27f4611b 100644 --- a/lib/librte_net/rte_net.h +++ b/lib/librte_net/rte_net.h @@ -114,7 +114,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) { struct rte_ipv4_hdr *ipv4_hdr; struct rte_ipv6_hdr *ipv6_hdr; - struct tcp_hdr *tcp_hdr; + struct rte_tcp_hdr *tcp_hdr; struct udp_hdr *udp_hdr; uint64_t inner_l3_offset = m->l2_len; @@ -149,7 +149,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) (ol_flags & PKT_TX_TCP_SEG)) { if (ol_flags & PKT_TX_IPV4) { /* non-TSO tcp or TSO */ - tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + + tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + m->l3_len); tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); @@ -157,7 +157,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, inner_l3_offset); /* non-TSO tcp or TSO */ - tcp_hdr = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, + tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, inner_l3_offset + m->l3_len); tcp_hdr->cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); diff --git a/lib/librte_net/rte_tcp.h b/lib/librte_net/rte_tcp.h index 91f58987b0..4bcda55656 100644 --- a/lib/librte_net/rte_tcp.h +++ b/lib/librte_net/rte_tcp.h @@ -23,7 +23,7 @@ extern "C" { /** * TCP Header */ -struct tcp_hdr { +struct rte_tcp_hdr { uint16_t src_port; /**< TCP source port. */ uint16_t dst_port; /**< TCP destination port. */ uint32_t sent_seq; /**< TX data sequence number. 
*/ diff --git a/lib/librte_pipeline/rte_table_action.c b/lib/librte_pipeline/rte_table_action.c index c7b04d6f54..15bbdd674b 100644 --- a/lib/librte_pipeline/rte_table_action.c +++ b/lib/librte_pipeline/rte_table_action.c @@ -1293,7 +1293,7 @@ pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip, { if (cfg->source_nat) { if (cfg->proto == 0x6) { - struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1]; + struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1]; uint16_t ip_cksum, tcp_cksum; ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum, @@ -1332,7 +1332,7 @@ pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip, } } else { if (cfg->proto == 0x6) { - struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1]; + struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1]; uint16_t ip_cksum, tcp_cksum; ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum, @@ -1379,7 +1379,7 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip, { if (cfg->source_nat) { if (cfg->proto == 0x6) { - struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1]; + struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1]; uint16_t tcp_cksum; tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum, @@ -1407,7 +1407,7 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip, } } else { if (cfg->proto == 0x6) { - struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1]; + struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1]; uint16_t tcp_cksum; tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum, diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index cbfc1a627f..11e76386c4 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -244,7 +244,7 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) switch (csum_l4) { case PKT_TX_TCP_CKSUM: - net_hdr->csum_offset = (offsetof(struct tcp_hdr, + net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr, cksum)); break; case PKT_TX_UDP_CKSUM: @@ -1014,7 +1014,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m) { uint16_t l4_proto = 0; void *l4_hdr = NULL; - struct tcp_hdr *tcp_hdr = NULL; + struct rte_tcp_hdr *tcp_hdr = NULL; if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE) return; @@ -1023,7 +1023,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m) if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) { if (hdr->csum_start == (m->l2_len + m->l3_len)) { switch (hdr->csum_offset) { - case (offsetof(struct tcp_hdr, cksum)): + case (offsetof(struct rte_tcp_hdr, cksum)): if (l4_proto == IPPROTO_TCP) m->ol_flags |= PKT_TX_TCP_CKSUM; break; diff --git a/test/test/packet_burst_generator.c b/test/test/packet_burst_generator.c index 9aa07e1765..5730f8129a 100644 --- a/test/test/packet_burst_generator.c +++ b/test/test/packet_burst_generator.c @@ -106,14 +106,14 @@ initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port, } uint16_t -initialize_tcp_header(struct tcp_hdr *tcp_hdr, uint16_t src_port, +initialize_tcp_header(struct rte_tcp_hdr *tcp_hdr, uint16_t src_port, uint16_t dst_port, uint16_t pkt_data_len) { uint16_t pkt_len; - pkt_len = (uint16_t) (pkt_data_len + sizeof(struct tcp_hdr)); + pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_tcp_hdr)); - memset(tcp_hdr, 0, sizeof(struct tcp_hdr)); + memset(tcp_hdr, 0, sizeof(struct rte_tcp_hdr)); tcp_hdr->src_port = rte_cpu_to_be_16(src_port); tcp_hdr->dst_port = rte_cpu_to_be_16(dst_port); @@ -389,7 +389,7 @@ nomore_mbuf: break; case IPPROTO_TCP: copy_buf_to_pkt(proto_hdr, - sizeof(struct tcp_hdr), pkt, + sizeof(struct rte_tcp_hdr), pkt, eth_hdr_size + sizeof(struct 
rte_ipv4_hdr)); break; case IPPROTO_SCTP: @@ -411,7 +411,7 @@ nomore_mbuf: break; case IPPROTO_TCP: copy_buf_to_pkt(proto_hdr, - sizeof(struct tcp_hdr), pkt, + sizeof(struct rte_tcp_hdr), pkt, eth_hdr_size + sizeof(struct rte_ipv6_hdr)); break; case IPPROTO_SCTP: diff --git a/test/test/packet_burst_generator.h b/test/test/packet_burst_generator.h index 98185c1623..5cfe0c6008 100644 --- a/test/test/packet_burst_generator.h +++ b/test/test/packet_burst_generator.h @@ -38,7 +38,7 @@ initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port, uint16_t dst_port, uint16_t pkt_data_len); uint16_t -initialize_tcp_header(struct tcp_hdr *tcp_hdr, uint16_t src_port, +initialize_tcp_header(struct rte_tcp_hdr *tcp_hdr, uint16_t src_port, uint16_t dst_port, uint16_t pkt_data_len); uint16_t diff --git a/test/test/test_flow_classify.c b/test/test/test_flow_classify.c index 85228486d1..1a79326b82 100644 --- a/test/test/test_flow_classify.c +++ b/test/test/test_flow_classify.c @@ -74,7 +74,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = { .input_index = SRCP_DESTP_INPUT_IPV4, .offset = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, src_port), + offsetof(struct rte_tcp_hdr, src_port), }, { /* rte_flow uses a bit mask for protocol ports */ @@ -84,7 +84,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = { .input_index = SRCP_DESTP_INPUT_IPV4, .offset = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + - offsetof(struct tcp_hdr, dst_port), + offsetof(struct rte_tcp_hdr, dst_port), }, }; @@ -528,7 +528,7 @@ init_ipv4_tcp_traffic(struct rte_mempool *mp, { struct rte_ether_hdr pkt_eth_hdr; struct rte_ipv4_hdr pkt_ipv4_hdr; - struct tcp_hdr pkt_tcp_hdr; + struct rte_tcp_hdr pkt_tcp_hdr; uint32_t src_addr = IPV4_ADDR(1, 2, 3, 4); uint32_t dst_addr = IPV4_ADDR(5, 6, 7, 8); uint16_t src_port = 16;
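
The change is mechanical: every declaration, cast, sizeof() and offsetof() that referenced struct tcp_hdr now uses struct rte_tcp_hdr, and the field names (src_port, dst_port, sent_seq, data_off, cksum, ...) are unchanged. As a rough sketch of what application code looks like after the rename (illustrative only, not part of the patch; it assumes m->l2_len and m->l3_len were already set by the driver or application, and a plain, non-tunnelled TCP packet):

#include <rte_byteorder.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Hypothetical helper: return the TCP destination port of an mbuf whose
 * l2_len/l3_len are already filled in. The only difference from code
 * written before this patch is the struct rte_tcp_hdr type name. */
static inline uint16_t
example_tcp_dst_port(const struct rte_mbuf *m)
{
	const struct rte_tcp_hdr *th;

	th = rte_pktmbuf_mtod_offset(m, const struct rte_tcp_hdr *,
				     m->l2_len + m->l3_len);
	/* Port fields are big endian on the wire, exactly as before. */
	return rte_be_to_cpu_16(th->dst_port);
}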