X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhinic%2Fhinic_pmd_tx.c;h=669f82389cf2baa65f07b82441c607a68ee11d20;hb=7c0a233eef4f458a35cfc2105208f1772be62c9b;hp=2dd4fe184c866f5a1396e946f6c4dc8c004fff94;hpb=9863627f52b87f71351c01a6040cf3c68cae8a33;p=dpdk.git

diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index 2dd4fe184c..669f82389c 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -7,7 +7,7 @@
 #include 
 #include 
 #include 
-#ifdef __ARM64_NEON__
+#ifdef RTE_ARCH_ARM64
 #include <arm_neon.h>
 #endif
 
@@ -203,7 +203,7 @@
 
 static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb)
 {
-#if defined(__X86_64_SSE__)
+#if defined(RTE_ARCH_X86_64)
 	int i;
 	__m128i *wqe_line = (__m128i *)data;
 	__m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
@@ -217,7 +217,7 @@ static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb)
 		wqe_line[3] = _mm_shuffle_epi8(wqe_line[3], shuf_mask);
 		wqe_line += 4;
 	}
-#elif defined(__ARM64_NEON__)
+#elif defined(RTE_ARCH_ARM64)
 	int i;
 	uint8x16_t *wqe_line = (uint8x16_t *)data;
 	const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
@@ -237,7 +237,7 @@ static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb)
 
 static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge)
 {
-#if defined(__X86_64_SSE__)
+#if defined(RTE_ARCH_X86_64)
 	int i;
 	__m128i *sge_line = (__m128i *)data;
 	__m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
@@ -248,7 +248,7 @@ static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge)
 		*sge_line = _mm_shuffle_epi8(*sge_line, shuf_mask);
 		sge_line++;
 	}
-#elif defined(__ARM64_NEON__)
+#elif defined(RTE_ARCH_ARM64)
 	int i;
 	uint8x16_t *sge_line = (uint8x16_t *)data;
 	const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
@@ -767,7 +767,8 @@ static inline void hinic_get_pld_offset(struct rte_mbuf *m,
 {
 	uint64_t ol_flags = m->ol_flags;
 
-	if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+	if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) ||
+	    ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM))
 		off_info->payload_offset = m->l2_len + m->l3_len;
 	else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG))
 		off_info->payload_offset = m->l2_len + m->l3_len +
@@ -779,26 +780,25 @@ static inline void hinic_analyze_tx_info(struct rte_mbuf *mbuf,
 {
 	struct rte_ether_hdr *eth_hdr;
 	struct rte_vlan_hdr *vlan_hdr;
-	struct rte_ipv4_hdr *ip4h;
-	u16 pkt_type;
-	u8 *hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	u16 eth_type;
 
-	hdr = (u8 *)rte_pktmbuf_mtod(mbuf, u8*);
-	eth_hdr = (struct rte_ether_hdr *)hdr;
-	pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+	eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
+	eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
 
-	if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+	if (eth_type == RTE_ETHER_TYPE_VLAN) {
 		off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
-		vlan_hdr = (struct rte_vlan_hdr *)(hdr + 1);
-		pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+		eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
 	} else {
 		off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
 	}
 
-	if (pkt_type == RTE_ETHER_TYPE_IPV4) {
-		ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
-		off_info->outer_l3_len = rte_ipv4_hdr_len(ip4h);
-	} else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+	if (eth_type == RTE_ETHER_TYPE_IPV4) {
+		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+						   off_info->outer_l2_len);
+		off_info->outer_l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+	} else if (eth_type == RTE_ETHER_TYPE_IPV6) {
 		/* not support ipv6 extension header */
 		off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
 	}