mbuf: add rte prefix to offload flags
diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index 258f2c1..f29086f 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -7,7 +7,7 @@
 #include <rte_sctp.h>
 #include <rte_udp.h>
 #include <rte_ip.h>
-#ifdef __ARM64_NEON__
+#ifdef RTE_ARCH_ARM64
 #include <arm_neon.h>
 #endif
 
@@ -23,7 +23,6 @@
 /* packet header and tx offload info */
 #define ETHER_LEN_NO_VLAN              14
 #define ETHER_LEN_WITH_VLAN            18
-#define HEADER_LEN_OFFSET              2
 #define VXLANLEN                       8
 #define MAX_PLD_OFFSET                 221
 #define MAX_SINGLE_SGE_SIZE            65536
@@ -38,9 +37,6 @@
 #define HINIC_TSO_PKT_MAX_SGE                  127     /* tso max sge 127 */
 #define HINIC_TSO_SEG_NUM_INVALID(num)         ((num) > HINIC_TSO_PKT_MAX_SGE)
 
-#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET       1
-#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET    0
-
 /* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
 #define HINIC_BUF_DESC_SIZE(nr_descs)  (SIZE_8BYTES(((u32)nr_descs) << 4))
 
 
 static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb)
 {
-#if defined(__X86_64_SSE__)
+#if defined(RTE_ARCH_X86_64)
        int i;
        __m128i *wqe_line = (__m128i *)data;
        __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
@@ -221,7 +217,7 @@ static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb)
                wqe_line[3] = _mm_shuffle_epi8(wqe_line[3], shuf_mask);
                wqe_line += 4;
        }
-#elif defined(__ARM64_NEON__)
+#elif defined(RTE_ARCH_ARM64)
        int i;
        uint8x16_t *wqe_line = (uint8x16_t *)data;
        const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
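
Both SIMD branches perform the same transformation: the shuffle masks reverse the bytes inside every 4-byte lane, converting each 32-bit word of the work queue entry from CPU order to big endian, 64 bytes (one WQEBB) per iteration. A scalar sketch of the equivalent logic, assuming only that a WQEBB is 64 bytes as the SSE loop implies:

    #include <rte_byteorder.h>

    /* Scalar equivalent of the SSE/NEON shuffles above: byte-swap each
     * 32-bit word. One WQEBB is 64 bytes, i.e. sixteen 32-bit words.
     */
    static inline void
    hinic_sq_wqe_cpu_to_be32_scalar(void *data, int nr_wqebb)
    {
            uint32_t *word = (uint32_t *)data;
            int i;

            for (i = 0; i < nr_wqebb * 16; i++, word++)
                    *word = rte_cpu_to_be_32(*word);
    }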
@@ -241,7 +237,7 @@ static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb)
 
 static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge)
 {
-#if defined(__X86_64_SSE__)
+#if defined(RTE_ARCH_X86_64)
        int i;
        __m128i *sge_line = (__m128i *)data;
        __m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
@@ -252,7 +248,7 @@ static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge)
                *sge_line = _mm_shuffle_epi8(*sge_line, shuf_mask);
                sge_line++;
        }
-#elif defined(__ARM64_NEON__)
+#elif defined(RTE_ARCH_ARM64)
        int i;
        uint8x16_t *sge_line = (uint8x16_t *)data;
        const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
@@ -313,6 +309,8 @@ static inline struct rte_mbuf *hinic_copy_tx_mbuf(struct hinic_nic_dev *nic_dev,
                mbuf = mbuf->next;
        }
 
+       dst_mbuf->pkt_len = dst_mbuf->data_len;
+
        return dst_mbuf;
 }
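
The added pkt_len assignment fixes a real inconsistency: the copy loop accumulates the segments' bytes into dst_mbuf->data_len, but nothing updated the single-segment copy's pkt_len, leaving the two fields out of sync. A minimal sketch of such a collapse helper (hypothetical names, standard rte_mbuf API, assuming the destination buffer has enough tailroom):

    #include <rte_mbuf.h>
    #include <rte_memcpy.h>

    static struct rte_mbuf *
    copy_to_single_seg(struct rte_mempool *mp, struct rte_mbuf *m)
    {
            struct rte_mbuf *dst = rte_pktmbuf_alloc(mp);
            uint16_t off = 0;

            if (dst == NULL || m->pkt_len > rte_pktmbuf_tailroom(dst)) {
                    rte_pktmbuf_free(dst); /* free(NULL) is a no-op */
                    return NULL;
            }

            for (; m != NULL; m = m->next) {
                    rte_memcpy(rte_pktmbuf_mtod_offset(dst, char *, off),
                               rte_pktmbuf_mtod(m, char *), m->data_len);
                    off += m->data_len;
            }
            dst->data_len = off;
            dst->pkt_len = off;   /* keep pkt_len in sync, as fixed above */
            return dst;
    }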
 
@@ -453,7 +451,7 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf,
                                          *poff_info,
                                          struct hinic_wqe_info *sqe_info)
 {
-       u32 total_len, limit_len, checked_len, left_len;
+       u32 total_len, limit_len, checked_len, left_len, adjust_mss;
        u32 i, first_mss_sges, left_sges;
        struct rte_mbuf *mbuf_head, *mbuf_pre;
 
@@ -463,7 +461,9 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf,
        /* tso sge number validation */
        if (unlikely(left_sges >= HINIC_NONTSO_PKT_MAX_SGE)) {
                checked_len = 0;
-               limit_len = mbuf->tso_segsz + poff_info->payload_offset;
+               adjust_mss = mbuf->tso_segsz >= TX_MSS_MIN ?
+                               mbuf->tso_segsz : TX_MSS_MIN;
+               limit_len = adjust_mss + poff_info->payload_offset;
                first_mss_sges = HINIC_NONTSO_PKT_MAX_SGE;
 
                /* every run of up to 17 mbuf segments must pass one length check */
@@ -477,7 +477,7 @@ static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf,
                                mbuf_pre = mbuf;
                                mbuf = mbuf->next;
                                if (total_len >= limit_len) {
-                                       limit_len = mbuf_head->tso_segsz;
+                                       limit_len = adjust_mss;
                                        break;
                                }
                        }
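
The new adjust_mss clamps the caller-supplied MSS to the driver's floor before deriving the per-window length limit, so an implausibly small tso_segsz can no longer weaken the SGE validation; the clamped value is also reused inside the loop instead of re-reading mbuf_head->tso_segsz. The clamp is equivalent to this sketch (TX_MSS_MIN is defined in the driver's headers; its value is not shown in this patch):

    u32 adjust_mss = RTE_MAX((u32)mbuf->tso_segsz, (u32)TX_MSS_MIN);
    u32 limit_len = adjust_mss + poff_info->payload_offset;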
@@ -592,7 +592,7 @@ hinic_fill_tx_offload_info(struct rte_mbuf *mbuf,
        task->pkt_info2 = 0;
 
        /* Base VLAN */
-       if (unlikely(ol_flags & PKT_TX_VLAN_PKT)) {
+       if (unlikely(ol_flags & RTE_MBUF_F_TX_VLAN_PKT)) {
                vlan_tag = mbuf->vlan_tci;
                hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
                                          vlan_tag >> VLAN_PRIO_SHIFT);
@@ -602,7 +602,7 @@ hinic_fill_tx_offload_info(struct rte_mbuf *mbuf,
        if (unlikely(!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)))
                return;
 
-       if ((ol_flags & PKT_TX_TCP_SEG))
+       if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG))
                /* set tso info for task and qsf */
                hinic_set_tso_info(task, queue_info, mbuf, tx_off_info);
        else /* otherwise only l4 checksum offload is supported */
@@ -667,7 +667,7 @@ static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
 
 static inline struct hinic_sq_wqe *
 hinic_get_sq_wqe(struct hinic_txq *txq, int wqebb_cnt,
-               struct hinic_wqe_info *wqe_info)
+                struct hinic_wqe_info *wqe_info)
 {
        u32 cur_pi, end_pi;
        u16 remain_wqebbs;
@@ -713,22 +713,17 @@ hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
                uint8_t  proto;    /* L4 protocol type. */
                uint16_t len;      /* L4 length. */
        } psd_hdr;
-       uint8_t ihl;
 
        psd_hdr.src_addr = ipv4_hdr->src_addr;
        psd_hdr.dst_addr = ipv4_hdr->dst_addr;
        psd_hdr.zero = 0;
        psd_hdr.proto = ipv4_hdr->next_proto_id;
-       if (ol_flags & PKT_TX_TCP_SEG) {
+       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                psd_hdr.len = 0;
        } else {
-               /* ipv4_hdr->version_ihl is uint8_t big endian, ihl locates
-                * lower 4 bits and unit is 4 bytes
-                */
-               ihl = (ipv4_hdr->version_ihl & 0xF) << 2;
                psd_hdr.len =
                rte_cpu_to_be_16(rte_be_to_cpu_16(ipv4_hdr->total_length) -
-                                ihl);
+                                rte_ipv4_hdr_len(ipv4_hdr));
        }
        return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr));
 }
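
rte_ipv4_hdr_len() from rte_ip.h replaces the open-coded IHL arithmetic deleted above; it reads the 4-bit IHL from the low nibble of version_ihl and scales it to bytes, so headers with IP options are still measured correctly. A small self-check of that equivalence, assuming a minimal 20-byte header:

    #include <rte_debug.h>
    #include <rte_ip.h>

    static void check_ipv4_hdr_len(void)
    {
            /* version 4, IHL 5 => a 20-byte header with no options */
            struct rte_ipv4_hdr hdr = { .version_ihl = RTE_IPV4_VHL_DEF };

            RTE_ASSERT(rte_ipv4_hdr_len(&hdr) == 20);
            RTE_ASSERT(rte_ipv4_hdr_len(&hdr) ==
                       (uint8_t)((hdr.version_ihl & 0x0F) * 4));
    }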
@@ -743,7 +738,7 @@ hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
        } psd_hdr;
 
        psd_hdr.proto = (ipv6_hdr->proto << 24);
-       if (ol_flags & PKT_TX_TCP_SEG)
+       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
                psd_hdr.len = 0;
        else
                psd_hdr.len = ipv6_hdr->payload_len;
@@ -754,76 +749,219 @@ hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
        return __rte_raw_cksum_reduce(sum);
 }
 
-static inline void
-hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info,
-                    int outer_cs_flag)
+static inline void hinic_get_outer_cs_pld_offset(struct rte_mbuf *m,
+                                       struct hinic_tx_offload_info *off_info)
 {
        uint64_t ol_flags = m->ol_flags;
 
-       if (outer_cs_flag == 1) {
-               if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
-                       off_info->payload_offset = m->outer_l2_len +
-                               m->outer_l3_len + m->l2_len + m->l3_len;
-               } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
-                               (ol_flags & PKT_TX_TCP_SEG)) {
-                       off_info->payload_offset = m->outer_l2_len +
-                                       m->outer_l3_len + m->l2_len +
-                                       m->l3_len + m->l4_len;
-               }
-       } else {
-               if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
-                       off_info->payload_offset = m->l2_len + m->l3_len;
-               } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
-                       (ol_flags & PKT_TX_TCP_SEG)) {
-                       off_info->payload_offset = m->l2_len + m->l3_len +
-                                                  m->l4_len;
-               }
-       }
+       if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
+               off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
+                                          m->l2_len + m->l3_len;
+       else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
+               off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
+                                          m->l2_len + m->l3_len + m->l4_len;
 }
 
-static inline void
-hinic_analyze_tx_info(struct rte_mbuf *mbuf,
-                     struct hinic_tx_offload_info *off_info)
+static inline void hinic_get_pld_offset(struct rte_mbuf *m,
+                                       struct hinic_tx_offload_info *off_info)
+{
+       uint64_t ol_flags = m->ol_flags;
+
+       if (((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) ||
+           ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_SCTP_CKSUM))
+               off_info->payload_offset = m->l2_len + m->l3_len;
+       else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
+               off_info->payload_offset = m->l2_len + m->l3_len +
+                                          m->l4_len;
+}
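
One subtlety in these helpers: the L4 checksum request in ol_flags is a two-bit field, not a set of independent bits, which is why UDP is detected with an exact comparison against RTE_MBUF_F_TX_L4_MASK while TCP can be tested with a plain AND. The flag values from rte_mbuf_core.h make this concrete:

    #include <stdbool.h>
    #include <rte_mbuf_core.h>

    /* RTE_MBUF_F_TX_L4_NO_CKSUM = 0ULL << 52
     * RTE_MBUF_F_TX_TCP_CKSUM   = 1ULL << 52
     * RTE_MBUF_F_TX_SCTP_CKSUM  = 2ULL << 52
     * RTE_MBUF_F_TX_UDP_CKSUM   = 3ULL << 52  (both bits set)
     * RTE_MBUF_F_TX_L4_MASK     = 3ULL << 52
     *
     * (ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) is therefore also non-zero for
     * TCP and SCTP requests; only the masked compare isolates UDP.
     */
    static inline bool is_udp_cksum_request(uint64_t ol_flags)
    {
            return (ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
                   RTE_MBUF_F_TX_UDP_CKSUM;
    }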
+
+static inline void hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+                                        struct hinic_tx_offload_info *off_info)
 {
        struct rte_ether_hdr *eth_hdr;
        struct rte_vlan_hdr *vlan_hdr;
-       struct rte_ipv4_hdr *ip4h;
-       u16 pkt_type;
-       u8 *hdr;
+       struct rte_ipv4_hdr *ipv4_hdr;
+       u16 eth_type;
 
-       hdr = (u8 *)rte_pktmbuf_mtod(mbuf, u8*);
-       eth_hdr = (struct rte_ether_hdr *)hdr;
-       pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+       eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
+       eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
 
-       if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+       if (eth_type == RTE_ETHER_TYPE_VLAN) {
                off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
-               vlan_hdr = (struct rte_vlan_hdr *)(hdr + 1);
-               pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+               vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+               eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
        } else {
                off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
        }
 
-       if (pkt_type == RTE_ETHER_TYPE_IPV4) {
-               ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
-               off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
-                                       HEADER_LEN_OFFSET;
-       } else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+       if (eth_type == RTE_ETHER_TYPE_IPV4) {
+               ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+                                                  off_info->outer_l2_len);
+               off_info->outer_l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+       } else if (eth_type == RTE_ETHER_TYPE_IPV6) {
                /* ipv6 extension headers are not supported */
                off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
        }
 }
 
-static inline int
-hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
-                               struct hinic_tx_offload_info *off_info)
+static inline void hinic_analyze_outer_ip_vxlan(struct rte_mbuf *mbuf,
+                                       struct hinic_tx_offload_info *off_info)
+{
+       struct rte_ether_hdr *eth_hdr;
+       struct rte_vlan_hdr *vlan_hdr;
+       struct rte_ipv4_hdr *ipv4_hdr;
+       struct rte_udp_hdr *udp_hdr;
+       u16 eth_type = 0;
+
+       eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
+       eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+       if (eth_type == RTE_ETHER_TYPE_VLAN) {
+               vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+               eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+       }
+
+       if (eth_type == RTE_ETHER_TYPE_IPV4) {
+               ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+                                                  mbuf->outer_l2_len);
+               off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+               ipv4_hdr->hdr_checksum = 0;
+
+               udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
+                                                mbuf->outer_l3_len);
+               udp_hdr->dgram_cksum = 0;
+       } else if (eth_type == RTE_ETHER_TYPE_IPV6) {
+               off_info->outer_l3_type = IPV6_PKT;
+
+               udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *,
+                                                 (mbuf->outer_l2_len +
+                                                  mbuf->outer_l3_len));
+               udp_hdr->dgram_cksum = 0;
+       }
+}
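
hinic_analyze_outer_ip_vxlan() zeroes the outer IPv4 header checksum and the outer UDP checksum so the values computed later start from zero. It relies on DPDK's tunnel-mbuf convention: outer_l2_len/outer_l3_len describe the outer headers, and l2_len spans the outer UDP header, the VXLAN header, and the inner Ethernet header, which is what the inner_l2_len arithmetic further below assumes. A caller-side sketch (hypothetical application code; IPv4 outer and inner, TCP inner; VXLANLEN is the driver's 8-byte VXLAN header constant):

    m->outer_l2_len = sizeof(struct rte_ether_hdr);      /* outer L2 */
    m->outer_l3_len = sizeof(struct rte_ipv4_hdr);       /* outer L3 */
    m->l2_len = sizeof(struct rte_udp_hdr) + VXLANLEN +  /* tunnel hdrs */
                sizeof(struct rte_ether_hdr);            /* + inner L2 */
    m->l3_len = sizeof(struct rte_ipv4_hdr);             /* inner L3 */
    m->l4_len = sizeof(struct rte_tcp_hdr);              /* inner L4 */
    m->ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN |
                   RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
                   RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
                   RTE_MBUF_F_TX_TCP_CKSUM;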
+
+static inline uint8_t hinic_analyze_l3_type(struct rte_mbuf *mbuf)
+{
+       uint8_t l3_type;
+       uint64_t ol_flags = mbuf->ol_flags;
+
+       if (ol_flags & RTE_MBUF_F_TX_IPV4)
+               l3_type = (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ?
+                         IPV4_PKT_WITH_CHKSUM_OFFLOAD :
+                         IPV4_PKT_NO_CHKSUM_OFFLOAD;
+       else if (ol_flags & RTE_MBUF_F_TX_IPV6)
+               l3_type = IPV6_PKT;
+       else
+               l3_type = UNKNOWN_L3TYPE;
+
+       return l3_type;
+}
+
+static inline void hinic_calculate_tcp_checksum(struct rte_mbuf *mbuf,
+                                       struct hinic_tx_offload_info *off_info,
+                                       uint64_t inner_l3_offset)
 {
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_tcp_hdr *tcp_hdr;
+       uint64_t ol_flags = mbuf->ol_flags;
+
+       if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+               ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+                                                  inner_l3_offset);
+
+               if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+                       ipv4_hdr->hdr_checksum = 0;
+
+               tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
+                                                mbuf->l3_len);
+               tcp_hdr->cksum = hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+       } else {
+               ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *,
+                                                  inner_l3_offset);
+               tcp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,
+                                                 (inner_l3_offset +
+                                                  mbuf->l3_len));
+               tcp_hdr->cksum = hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+       }
+
+       off_info->inner_l4_type = TCP_OFFLOAD_ENABLE;
+       off_info->inner_l4_tcp_udp = 1;
+}
+
+static inline void hinic_calculate_udp_checksum(struct rte_mbuf *mbuf,
+                                       struct hinic_tx_offload_info *off_info,
+                                       uint64_t inner_l3_offset)
+{
+       struct rte_ipv4_hdr *ipv4_hdr;
+       struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_udp_hdr *udp_hdr;
-       struct rte_ether_hdr *eth_hdr;
-       struct rte_vlan_hdr *vlan_hdr;
-       u16 eth_type = 0;
+       uint64_t ol_flags = mbuf->ol_flags;
+
+       if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+               ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+                                                  inner_l3_offset);
+
+               if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+                       ipv4_hdr->hdr_checksum = 0;
+
+               udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
+                                                mbuf->l3_len);
+               udp_hdr->dgram_cksum = hinic_ipv4_phdr_cksum(ipv4_hdr,
+                                                            ol_flags);
+       } else {
+               ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *,
+                                                  inner_l3_offset);
+
+               udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *,
+                                                 (inner_l3_offset +
+                                                  mbuf->l3_len));
+               udp_hdr->dgram_cksum = hinic_ipv6_phdr_cksum(ipv6_hdr,
+                                                            ol_flags);
+       }
+
+       off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
+       off_info->inner_l4_tcp_udp = 1;
+}
+
+static inline void
+hinic_calculate_sctp_checksum(struct hinic_tx_offload_info *off_info)
+{
+       off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE;
+       off_info->inner_l4_tcp_udp = 0;
+       off_info->inner_l4_len = sizeof(struct rte_sctp_hdr);
+}
+
+static inline void hinic_calculate_checksum(struct rte_mbuf *mbuf,
+                                       struct hinic_tx_offload_info *off_info,
+                                       uint64_t inner_l3_offset)
+{
+       uint64_t ol_flags = mbuf->ol_flags;
+
+       switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+       case RTE_MBUF_F_TX_UDP_CKSUM:
+               hinic_calculate_udp_checksum(mbuf, off_info, inner_l3_offset);
+               break;
+
+       case RTE_MBUF_F_TX_TCP_CKSUM:
+               hinic_calculate_tcp_checksum(mbuf, off_info, inner_l3_offset);
+               break;
+
+       case RTE_MBUF_F_TX_SCTP_CKSUM:
+               hinic_calculate_sctp_checksum(off_info);
+               break;
+
+       default:
+               if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+                       hinic_calculate_tcp_checksum(mbuf, off_info,
+                                                    inner_l3_offset);
+               break;
+       }
+}
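
hinic_calculate_checksum() dispatches on the two-bit L4 field and only seeds the checksum field with the pseudo-header sum in software; the NIC then computes the full checksum over the payload. From the application side, the plain non-tunnel TCP case is reached with a setup like this sketch (hypothetical caller code):

    m->l2_len = sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);
    m->l4_len = sizeof(struct rte_tcp_hdr);
    m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
                   RTE_MBUF_F_TX_TCP_CKSUM;
    /* inner_l3_offset then equals m->l2_len, and tcp_hdr->cksum is
     * pre-filled via hinic_ipv4_phdr_cksum() before the descriptor is
     * posted */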
+
+static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
+                                       struct hinic_tx_offload_info *off_info)
+{
        uint64_t inner_l3_offset;
        uint64_t ol_flags = m->ol_flags;
 
@@ -832,8 +970,8 @@ hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
                return 0;
 
        /* Support only vxlan offload */
-       if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
-           !(ol_flags & PKT_TX_TUNNEL_VXLAN))
+       if (unlikely((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
+                    !(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN)))
                return -ENOTSUP;
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
@@ -841,170 +979,62 @@ hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
                return -EINVAL;
 #endif
 
-       if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
-               if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
-                   (ol_flags & PKT_TX_OUTER_IPV6) ||
-                   (ol_flags & PKT_TX_TCP_SEG)) {
+       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) {
+               off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+        /* inner_l4_tcp_udp must be set so that the outer udp checksum
+         * is still calculated for vxlan packets that carry no inner
+         * l3 and l4 headers
+         */
+               off_info->inner_l4_tcp_udp = 1;
+
+               if ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
+                   (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) ||
+                   (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
                        inner_l3_offset = m->l2_len + m->outer_l2_len +
-                               m->outer_l3_len;
+                                         m->outer_l3_len;
                        off_info->outer_l2_len = m->outer_l2_len;
                        off_info->outer_l3_len = m->outer_l3_len;
                        /* only vxlan tunneled packets are supported */
                        off_info->inner_l2_len = m->l2_len - VXLANLEN -
-                               sizeof(*udp_hdr);
-                       off_info->inner_l3_len = m->l3_len;
-                       off_info->inner_l4_len = m->l4_len;
+                                                sizeof(struct rte_udp_hdr);
                        off_info->tunnel_length = m->l2_len;
-                       off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
 
-                       hinic_get_pld_offset(m, off_info,
-                                            HINIC_TX_OUTER_CHECKSUM_FLAG_SET);
+                       hinic_analyze_outer_ip_vxlan(m, off_info);
+
+                       hinic_get_outer_cs_pld_offset(m, off_info);
                } else {
                        inner_l3_offset = m->l2_len;
                        hinic_analyze_tx_info(m, off_info);
                        /* only vxlan tunneled packets are supported */
                        off_info->inner_l2_len = m->l2_len - VXLANLEN -
-                               sizeof(*udp_hdr) - off_info->outer_l2_len -
-                               off_info->outer_l3_len;
-                       off_info->inner_l3_len = m->l3_len;
-                       off_info->inner_l4_len = m->l4_len;
+                                                sizeof(struct rte_udp_hdr) -
+                                                off_info->outer_l2_len -
+                                                off_info->outer_l3_len;
                        off_info->tunnel_length = m->l2_len -
-                               off_info->outer_l2_len - off_info->outer_l3_len;
-                       off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+                                                 off_info->outer_l2_len -
+                                                 off_info->outer_l3_len;
+                       off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
 
-                       hinic_get_pld_offset(m, off_info,
-                               HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+                       hinic_get_pld_offset(m, off_info);
                }
        } else {
                inner_l3_offset = m->l2_len;
                off_info->inner_l2_len = m->l2_len;
-               off_info->inner_l3_len = m->l3_len;
-               off_info->inner_l4_len = m->l4_len;
                off_info->tunnel_type = NOT_TUNNEL;
 
-               hinic_get_pld_offset(m, off_info,
-                                    HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+               hinic_get_pld_offset(m, off_info);
        }
 
        /* invalid udp or tcp header */
        if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
                return -EINVAL;
 
-       /* Process outter udp pseudo-header checksum */
-       if ((ol_flags & PKT_TX_TUNNEL_VXLAN) && ((ol_flags & PKT_TX_TCP_SEG) ||
-                       (ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
-                       (ol_flags & PKT_TX_OUTER_IPV6))) {
-
-               /* inner_l4_tcp_udp csum should be setted to calculate outter
-                * udp checksum when vxlan packets without inner l3 and l4
-                */
-               off_info->inner_l4_tcp_udp = 1;
-
-               eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
-               eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
-
-               if (eth_type == RTE_ETHER_TYPE_VLAN) {
-                       vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
-                       eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
-               }
-
-               if (eth_type == RTE_ETHER_TYPE_IPV4) {
-                       ipv4_hdr =
-                       rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-                                               m->outer_l2_len);
-                       off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
-                       ipv4_hdr->hdr_checksum = 0;
-
-                       udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
-                                                       m->outer_l3_len);
-                       udp_hdr->dgram_cksum = 0;
-               } else if (eth_type == RTE_ETHER_TYPE_IPV6) {
-                       off_info->outer_l3_type = IPV6_PKT;
-                       ipv6_hdr =
-                       rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-                                               m->outer_l2_len);
-
-                       udp_hdr =
-                       rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
-                                               (m->outer_l2_len +
-                                               m->outer_l3_len));
-                       udp_hdr->dgram_cksum = 0;
-               }
-       } else if (ol_flags & PKT_TX_OUTER_IPV4) {
-               off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
-               off_info->inner_l4_tcp_udp = 1;
-               off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
-       }
-
-       if (ol_flags & PKT_TX_IPV4)
-               off_info->inner_l3_type = (ol_flags & PKT_TX_IP_CKSUM) ?
-                                       IPV4_PKT_WITH_CHKSUM_OFFLOAD :
-                                       IPV4_PKT_NO_CHKSUM_OFFLOAD;
-       else if (ol_flags & PKT_TX_IPV6)
-               off_info->inner_l3_type = IPV6_PKT;
+       off_info->inner_l3_len = m->l3_len;
+       off_info->inner_l4_len = m->l4_len;
+       off_info->inner_l3_type = hinic_analyze_l3_type(m);
 
        /* Process the pseudo-header checksum */
-       if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
-               if (ol_flags & PKT_TX_IPV4) {
-                       ipv4_hdr =
-                       rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-                                               inner_l3_offset);
-
-                       if (ol_flags & PKT_TX_IP_CKSUM)
-                               ipv4_hdr->hdr_checksum = 0;
-
-                       udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
-                                                               m->l3_len);
-                       udp_hdr->dgram_cksum =
-                               hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
-               } else {
-                       ipv6_hdr =
-                       rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-                                               inner_l3_offset);
-
-                       udp_hdr =
-                       rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
-                                               (inner_l3_offset + m->l3_len));
-                       udp_hdr->dgram_cksum =
-                               hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
-               }
-
-               off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
-               off_info->inner_l4_tcp_udp = 1;
-       } else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
-                       (ol_flags & PKT_TX_TCP_SEG)) {
-               if (ol_flags & PKT_TX_IPV4) {
-                       ipv4_hdr =
-                       rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-                                               inner_l3_offset);
-
-                       if (ol_flags & PKT_TX_IP_CKSUM)
-                               ipv4_hdr->hdr_checksum = 0;
-
-                       /* non-TSO tcp */
-                       tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
-                                                               m->l3_len);
-                       tcp_hdr->cksum =
-                               hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
-               } else {
-                       ipv6_hdr =
-                       rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-                                               inner_l3_offset);
-                       /* non-TSO tcp */
-                       tcp_hdr =
-                       rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
-                                               (inner_l3_offset + m->l3_len));
-                       tcp_hdr->cksum =
-                               hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
-               }
-
-               off_info->inner_l4_type = TCP_OFFLOAD_ENABLE;
-               off_info->inner_l4_tcp_udp = 1;
-       } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) {
-               off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE;
-               off_info->inner_l4_tcp_udp = 0;
-               off_info->inner_l4_len = sizeof(struct rte_sctp_hdr);
-       }
+       hinic_calculate_checksum(m, off_info, inner_l3_offset);
 
        return 0;
 }
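
A non-zero return from hinic_tx_offload_pkt_prepare() means the packet cannot be offloaded as requested (-ENOTSUP for non-VXLAN tunnels, -EINVAL for a malformed header or an oversized payload offset). A hedged sketch of the caller-side pattern (the real burst function's handling is not shown in this patch; the stats field name is hypothetical):

    if (unlikely(hinic_tx_offload_pkt_prepare(m, &off_info) != 0)) {
            rte_pktmbuf_free(m);          /* drop the un-offloadable pkt */
            txq->txq_stats.off_errs++;    /* hypothetical counter */
            continue;
    }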
@@ -1027,7 +1057,7 @@ static inline bool hinic_get_sge_txoff_info(struct rte_mbuf *mbuf_pkt,
        sqe_info->cpy_mbuf_cnt = 0;
 
        /* non tso mbuf */
-       if (likely(!(mbuf_pkt->ol_flags & PKT_TX_TCP_SEG))) {
+       if (likely(!(mbuf_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG))) {
                if (unlikely(mbuf_pkt->pkt_len > MAX_SINGLE_SGE_SIZE)) {
                        /* non-tso packet length must be less than 64KB */
                        return false;
@@ -1217,7 +1247,8 @@ void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev)
                                HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
 
        for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
-               eth_dev->data->tx_queues[q_id] = NULL;
+               if (eth_dev->data->tx_queues != NULL)
+                       eth_dev->data->tx_queues[q_id] = NULL;
 
                if (nic_dev->txqs[q_id] == NULL)
                        continue;