net/hinic: support inner L3 checksum offload
author:    Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>
           Thu, 10 Oct 2019 14:51:58 +0000 (22:51 +0800)
committer: Ferruh Yigit <ferruh.yigit@intel.com>
           Wed, 23 Oct 2019 14:43:08 +0000 (16:43 +0200)
This patch adds support for inner L3 checksum offload for VXLAN
packets and reworks the Rx checksum offload handling.

Signed-off-by: Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>
drivers/net/hinic/hinic_pmd_ethdev.c
drivers/net/hinic/hinic_pmd_ethdev.h
drivers/net/hinic/hinic_pmd_rx.c
drivers/net/hinic/hinic_pmd_rx.h
drivers/net/hinic/hinic_pmd_tx.c
drivers/net/hinic/hinic_pmd_tx.h
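
For context, a minimal application-side sketch (not part of this patch) of how
the new offloads are exercised: Rx checksum validation is requested with
DEV_RX_OFFLOAD_CHECKSUM at configure time, and a VXLAN packet is marked for
inner checksum offload through the standard mbuf fields. Flag and struct names
are the DPDK 19.11-era mbuf/ethdev API; the snippet assumes the packet headers
are already built.

/* Hedged example, not driver code: request Rx checksum offload and
 * flag a VXLAN-encapsulated packet for inner checksum offload.
 */
struct rte_eth_conf port_conf = {
	.rxmode = {
		.offloads = DEV_RX_OFFLOAD_CHECKSUM, /* L3 + L4 Rx csum */
	},
};
/* ... rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf) ... */

struct rte_mbuf *m = pkt; /* hypothetical: a fully built VXLAN packet */
m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
	       PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM | /* outer L3 */
	       PKT_TX_IPV4 | PKT_TX_IP_CKSUM |             /* inner L3 */
	       PKT_TX_TCP_CKSUM;                           /* inner L4 */
m->outer_l2_len = sizeof(struct rte_ether_hdr);            /* 14 */
m->outer_l3_len = sizeof(struct rte_ipv4_hdr);             /* 20 */
/* for tunneled packets, l2_len spans UDP + VXLAN + inner Ethernet */
m->l2_len = sizeof(struct rte_udp_hdr) + 8 + sizeof(struct rte_ether_hdr);
m->l3_len = sizeof(struct rte_ipv4_hdr);                   /* inner IPv4 */
m->l4_len = sizeof(struct rte_tcp_hdr);                    /* inner TCP */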

diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index 48ec467..b2cc0e4 100644
@@ -255,7 +255,7 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);
  * specific event.
  *
  * @param: The address of parameter (struct rte_eth_dev *) registered before.
- **/
+ */
 static void hinic_dev_interrupt_handler(void *param)
 {
        struct rte_eth_dev *dev = param;
@@ -1088,7 +1088,7 @@ static void hinic_rx_queue_release(void *queue)
        nic_dev = rxq->nic_dev;
 
        /* free rxq_pkt mbuf */
-       hinic_free_all_rx_skbs(rxq);
+       hinic_free_all_rx_mbufs(rxq);
 
        /* free rxq_cqe, rxq_info */
        hinic_free_rx_resources(rxq);
@@ -1120,7 +1120,7 @@ static void hinic_tx_queue_release(void *queue)
        nic_dev = txq->nic_dev;
 
        /* free txq_pkt mbuf */
-       hinic_free_all_tx_skbs(txq);
+       hinic_free_all_tx_mbufs(txq);
 
        /* free txq_info */
        hinic_free_tx_resources(txq);
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h
index dd96667..3e3f3b3 100644
@@ -178,6 +178,7 @@ struct hinic_nic_dev {
         * vf: the same as the associated pf
         */
        u32 default_cos;
+       u32 rx_csum_en;
 
        struct hinic_filter_info    filter;
        struct hinic_ntuple_filter_list filter_ntuple_list;
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 08e02ae..a9f3962 100644
@@ -309,7 +309,6 @@ static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq)
 {
        size_t cqe_mem_size;
 
-       /* allocate continuous cqe memory for saving number of memory zone */
        cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
        rxq->cqe_start_vaddr =
                dma_zalloc_coherent(rxq->nic_dev->hwdev,
@@ -421,7 +420,7 @@ void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)
                if (nic_dev->rxqs[q_id] == NULL)
                        continue;
 
-               hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
+               hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
                hinic_free_rx_resources(nic_dev->rxqs[q_id]);
                kfree(nic_dev->rxqs[q_id]);
                nic_dev->rxqs[q_id] = NULL;
@@ -435,11 +434,11 @@ void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev)
        u16 q_id;
 
        for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
-               hinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);
+               hinic_free_all_rx_mbufs(nic_dev->rxqs[q_id]);
 }
 
 static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq,
-                                struct rte_mbuf *head_skb,
+                                struct rte_mbuf *head_mbuf,
                                 u32 remain_pkt_len)
 {
        struct hinic_nic_dev *nic_dev = rxq->nic_dev;
@@ -462,11 +461,11 @@ static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq,
                cur_mbuf->data_len = (u16)pkt_len;
                cur_mbuf->next = NULL;
 
-               head_skb->pkt_len += cur_mbuf->data_len;
-               head_skb->nb_segs++;
+               head_mbuf->pkt_len += cur_mbuf->data_len;
+               head_mbuf->nb_segs++;
 
                if (!rxm)
-                       head_skb->next = cur_mbuf;
+                       head_mbuf->next = cur_mbuf;
                else
                        rxm->next = cur_mbuf;
 
@@ -658,7 +657,6 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
        struct rte_eth_rss_conf rss_conf =
                dev->data->dev_conf.rx_adv_conf.rss_conf;
-       u32 csum_en = 0;
        int err;
 
        if (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {
@@ -678,9 +676,10 @@ int hinic_rx_configure(struct rte_eth_dev *dev)
 
        /* Enable both L3/L4 rx checksum offload */
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)
-               csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
+               nic_dev->rx_csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
 
-       err = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en);
+       err = hinic_set_rx_csum_offload(nic_dev->hwdev,
+                                       HINIC_RX_CSUM_OFFLOAD_EN);
        if (err)
                goto rx_csum_ofl_err;
 
@@ -703,7 +702,7 @@ void hinic_rx_remove_configure(struct rte_eth_dev *dev)
        }
 }
 
-void hinic_free_all_rx_skbs(struct hinic_rxq *rxq)
+void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq)
 {
        struct hinic_nic_dev *nic_dev = rxq->nic_dev;
        struct hinic_rx_info *rx_info;
@@ -781,6 +780,10 @@ static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq)
 {
        uint32_t checksum_err;
        uint64_t flags;
+       struct hinic_nic_dev *nic_dev = rxq->nic_dev;
+
+       if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
+               return PKT_RX_IP_CKSUM_UNKNOWN;
 
        /* in most cases the checksum is ok */
        checksum_err = HINIC_GET_RX_CSUM_ERR(status);
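
The Rx behavior after this change deserves a note: the hardware checksum
engine is now enabled unconditionally via hinic_set_rx_csum_offload(), and the
new rx_csum_en field only gates whether hinic_rx_csum() reports per-packet
result flags; without DEV_RX_OFFLOAD_CHECKSUM it returns
PKT_RX_IP_CKSUM_UNKNOWN early. A hedged application-side sketch of consuming
those flags (standard mbuf flag names; verify_l3l4_csum_sw() is a hypothetical
software fallback):

/* Hedged Rx-loop fragment: interpret the checksum flags that
 * hinic_rx_csum() fills in only when rx_csum_en is enabled;
 * m is a received mbuf.
 */
uint64_t f = m->ol_flags;

if ((f & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD ||
    (f & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD) {
	rte_pktmbuf_free(m); /* hardware flagged a checksum error */
} else if ((f & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_UNKNOWN) {
	verify_l3l4_csum_sw(m); /* hypothetical software fallback */
}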
@@ -999,8 +1002,8 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
                        rxm->data_len = rx_buf_len;
                        rxm->pkt_len = rx_buf_len;
 
-                       /* if jumbo use multi-wqebb update ci,
-                        * recv_jumbo_pkt will also update ci
+                       /* if a jumbo packet is received, the CI update
+                        * is handled by hinic_recv_jumbo_pkt.
                         */
                        HINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt + 1);
                        wqebb_cnt = 0;
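
A side note on the jumbo path touched above: hinic_recv_jumbo_pkt() chains the
extra receive buffers onto the head mbuf, bumping pkt_len and nb_segs as each
segment is appended (see the rename hunk earlier), so the result is a standard
multi-segment mbuf. A hedged sketch of the invariant a consumer can rely on:

/* Hedged sketch: the chain built by hinic_recv_jumbo_pkt() satisfies
 * the usual multi-segment mbuf invariants; head_mbuf is the first
 * segment returned by the Rx burst.
 */
uint32_t total = 0;
uint16_t segs = 0;
struct rte_mbuf *seg;

for (seg = head_mbuf; seg != NULL; seg = seg->next) {
	total += seg->data_len; /* per-segment payload length */
	segs++;
}
RTE_ASSERT(total == head_mbuf->pkt_len);
RTE_ASSERT(segs == head_mbuf->nb_segs);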
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index fe2735b..5cd17ec 100644
@@ -105,7 +105,7 @@ void hinic_free_rx_resources(struct hinic_rxq *rxq);
 
 u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts);
 
-void hinic_free_all_rx_skbs(struct hinic_rxq *rxq);
+void hinic_free_all_rx_mbufs(struct hinic_rxq *rxq);
 
 void hinic_rx_alloc_pkts(struct hinic_rxq *rxq);
 
diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index 0ef7add..bdbb0f4 100644
@@ -20,6 +20,9 @@
 #include "hinic_pmd_tx.h"
 
 /* packet header and tx offload info */
+#define ETHER_LEN_NO_VLAN              14
+#define ETHER_LEN_WITH_VLAN            18
+#define HEADER_LEN_OFFSET              2
 #define VXLANLEN                       8
 #define MAX_PLD_OFFSET                 221
 #define MAX_SINGLE_SGE_SIZE            65536
@@ -34,6 +37,9 @@
 #define HINIC_TSO_PKT_MAX_SGE                  127     /* tso max sge 127 */
 #define HINIC_TSO_SEG_NUM_INVALID(num)         ((num) > HINIC_TSO_PKT_MAX_SGE)
 
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_SET       1
+#define HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET    0
+
 /* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
 #define HINIC_BUF_DESC_SIZE(nr_descs)  (SIZE_8BYTES(((u32)nr_descs) << 4))
 
@@ -476,16 +482,16 @@ static inline void
 hinic_set_l4_csum_info(struct hinic_sq_task *task,
                u32 *queue_info, struct hinic_tx_offload_info *poff_info)
 {
-       u32 tcp_udp_cs, sctp;
+       u32 tcp_udp_cs, sctp = 0;
        u16 l2hdr_len;
 
-       sctp = 0;
        if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE))
                sctp = 1;
 
        tcp_udp_cs = poff_info->inner_l4_tcp_udp;
 
-       if (poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
+       if (poff_info->tunnel_type == TUNNEL_UDP_CSUM ||
+           poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
                l2hdr_len =  poff_info->outer_l2_len;
 
                task->pkt_info2 |=
@@ -665,50 +671,6 @@ hinic_get_sq_wqe(struct hinic_txq *txq, int wqebb_cnt,
        return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi);
 }
 
-static inline int
-hinic_validate_tx_offload(const struct rte_mbuf *m)
-{
-       uint64_t ol_flags = m->ol_flags;
-       uint64_t inner_l3_offset = m->l2_len;
-
-       /* just support vxlan offload */
-       if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
-           !(ol_flags & PKT_TX_TUNNEL_VXLAN))
-               return -ENOTSUP;
-
-       if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-               inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-
-       /* Headers are fragmented */
-       if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
-               return -ENOTSUP;
-
-       /* IP checksum can be counted only for IPv4 packet */
-       if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
-               return -EINVAL;
-
-       /* IP type not set when required */
-       if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
-               if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
-                       return -EINVAL;
-       }
-
-       /* Check requirements for TSO packet */
-       if (ol_flags & PKT_TX_TCP_SEG) {
-               if (m->tso_segsz == 0 ||
-                       ((ol_flags & PKT_TX_IPV4) &&
-                       !(ol_flags & PKT_TX_IP_CKSUM)))
-                       return -EINVAL;
-       }
-
-       /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
-       if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
-               !(ol_flags & PKT_TX_OUTER_IPV4))
-               return -EINVAL;
-
-       return 0;
-}
-
 static inline uint16_t
 hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
 {
@@ -760,6 +722,65 @@ hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
        return __rte_raw_cksum_reduce(sum);
 }
 
+static inline void
+hinic_get_pld_offset(struct rte_mbuf *m, struct hinic_tx_offload_info *off_info,
+                    int outer_cs_flag)
+{
+       uint64_t ol_flags = m->ol_flags;
+
+       if (outer_cs_flag == 1) {
+               if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+                       off_info->payload_offset = m->outer_l2_len +
+                               m->outer_l3_len + m->l2_len + m->l3_len;
+               } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+                               (ol_flags & PKT_TX_TCP_SEG)) {
+                       off_info->payload_offset = m->outer_l2_len +
+                                       m->outer_l3_len + m->l2_len +
+                                       m->l3_len + m->l4_len;
+               }
+       } else {
+               if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+                       off_info->payload_offset = m->l2_len + m->l3_len;
+               } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+                       (ol_flags & PKT_TX_TCP_SEG)) {
+                       off_info->payload_offset = m->l2_len + m->l3_len +
+                                                  m->l4_len;
+               }
+       }
+}
+
+static inline void
+hinic_analyze_tx_info(struct rte_mbuf *mbuf,
+                     struct hinic_tx_offload_info *off_info)
+{
+       struct rte_ether_hdr *eth_hdr;
+       struct rte_vlan_hdr *vlan_hdr;
+       struct rte_ipv4_hdr *ip4h;
+       u16 pkt_type;
+       u8 *hdr;
+
+       hdr = rte_pktmbuf_mtod(mbuf, u8 *);
+       eth_hdr = (struct rte_ether_hdr *)hdr;
+       pkt_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+
+       if (pkt_type == RTE_ETHER_TYPE_VLAN) {
+               off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
+               vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+               pkt_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+       } else {
+               off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
+       }
+
+       if (pkt_type == RTE_ETHER_TYPE_IPV4) {
+               ip4h = (struct rte_ipv4_hdr *)(hdr + off_info->outer_l2_len);
+               off_info->outer_l3_len = (ip4h->version_ihl & 0xf) <<
+                                       HEADER_LEN_OFFSET;
+       } else if (pkt_type == RTE_ETHER_TYPE_IPV6) {
+               /* ipv6 extension headers are not supported */
+               off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
+       }
+}
+
 static inline int
 hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
                                struct hinic_tx_offload_info *off_info)
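
To make the two helpers above concrete, consider a hypothetical VLAN-tagged
VXLAN packet for which the application set no outer offload flags:
hinic_analyze_tx_info() derives the outer header lengths from the frame itself
(ETHER_LEN_WITH_VLAN = 18 bytes; for IPv4 the IHL field, counted in 4-byte
words, is converted to bytes by the << HEADER_LEN_OFFSET shift), and
hinic_get_pld_offset() then locates the payload from the mbuf length fields.
A worked example under those assumptions:

/* Hedged worked example: VLAN-tagged outer IPv4 with no IP options
 * (version_ihl = 0x45), inner TCP checksum requested, and no outer
 * offload flags set (HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET).
 */
uint16_t outer_l2 = ETHER_LEN_WITH_VLAN;               /* 14 + 4 = 18 */
uint16_t outer_l3 = (0x45 & 0xf) << HEADER_LEN_OFFSET; /* 5 << 2 = 20 */

/* In this convention the application's l2_len covers every header up
 * to the inner IP header:
 *   18 (outer L2) + 20 (outer L3) + 8 (UDP) + 8 (VXLAN) + 14 (inner L2)
 *   = 68 bytes,
 * so hinic_get_pld_offset() yields, for the TCP case:
 *   payload_offset = l2_len + l3_len + l4_len = 68 + 20 + 20 = 108,
 * and the driver derives tunnel_length = 68 - 18 - 20 = 30 and
 * inner_l2_len = 68 - 8 - 8 - 18 - 20 = 14.
 */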
@@ -771,42 +792,66 @@ hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
        struct rte_ether_hdr *eth_hdr;
        struct rte_vlan_hdr *vlan_hdr;
        u16 eth_type = 0;
-       uint64_t inner_l3_offset = m->l2_len;
+       uint64_t inner_l3_offset;
        uint64_t ol_flags = m->ol_flags;
 
-       /* Does packet set any of available offloads */
+       /* Check whether the packet sets any available offload flags */
        if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
                return 0;
 
-       if (unlikely(hinic_validate_tx_offload(m)))
+       /* Support only vxlan offload */
+       if ((ol_flags & PKT_TX_TUNNEL_MASK) &&
+           !(ol_flags & PKT_TX_TUNNEL_VXLAN))
+               return -ENOTSUP;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+       if (rte_validate_tx_offload(m) != 0)
                return -EINVAL;
+#endif
 
-       if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
-                       (ol_flags & PKT_TX_OUTER_IPV6) ||
-                       (ol_flags & PKT_TX_TUNNEL_VXLAN)) {
-               inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
-               off_info->outer_l2_len = m->outer_l2_len;
-               off_info->outer_l3_len = m->outer_l3_len;
-               /* just support vxlan tunneling pkt */
-               off_info->inner_l2_len = m->l2_len - VXLANLEN -
-                                               sizeof(struct rte_udp_hdr);
-               off_info->inner_l3_len = m->l3_len;
-               off_info->inner_l4_len = m->l4_len;
-               off_info->tunnel_length = m->l2_len;
-               off_info->payload_offset = m->outer_l2_len +
-                               m->outer_l3_len + m->l2_len + m->l3_len;
-               off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+       if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
+               if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+                   (ol_flags & PKT_TX_OUTER_IPV6)) {
+                       inner_l3_offset = m->l2_len + m->outer_l2_len +
+                               m->outer_l3_len;
+                       off_info->outer_l2_len = m->outer_l2_len;
+                       off_info->outer_l3_len = m->outer_l3_len;
+                       /* just support vxlan tunneling pkt */
+                       off_info->inner_l2_len = m->l2_len - VXLANLEN -
+                               sizeof(*udp_hdr);
+                       off_info->inner_l3_len = m->l3_len;
+                       off_info->inner_l4_len = m->l4_len;
+                       off_info->tunnel_length = m->l2_len;
+                       off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+                       hinic_get_pld_offset(m, off_info,
+                                            HINIC_TX_OUTER_CHECKSUM_FLAG_SET);
+               } else {
+                       inner_l3_offset = m->l2_len;
+                       hinic_analyze_tx_info(m, off_info);
+                       /* just support vxlan tunneling pkt */
+                       off_info->inner_l2_len = m->l2_len - VXLANLEN -
+                               sizeof(*udp_hdr) - off_info->outer_l2_len -
+                               off_info->outer_l3_len;
+                       off_info->inner_l3_len = m->l3_len;
+                       off_info->inner_l4_len = m->l4_len;
+                       off_info->tunnel_length = m->l2_len -
+                               off_info->outer_l2_len - off_info->outer_l3_len;
+                       off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+
+                       hinic_get_pld_offset(m, off_info,
+                               HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+               }
        } else {
+               inner_l3_offset = m->l2_len;
                off_info->inner_l2_len = m->l2_len;
                off_info->inner_l3_len = m->l3_len;
                off_info->inner_l4_len = m->l4_len;
                off_info->tunnel_type = NOT_TUNNEL;
-               off_info->payload_offset = m->l2_len + m->l3_len;
-       }
 
-       if (((ol_flags & PKT_TX_L4_MASK) != PKT_TX_SCTP_CKSUM) &&
-           ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM))
-               off_info->payload_offset += m->l4_len;
+               hinic_get_pld_offset(m, off_info,
+                                    HINIC_TX_OUTER_CHECKSUM_FLAG_NO_SET);
+       }
 
        /* invalid udp or tcp header */
        if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
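
The rework above accepts two input conventions for VXLAN packets, which is the
crux of the inner L3 checksum support. The no-outer-flags case is illustrated
after hinic_analyze_tx_info() above; a hedged worked example of the other
branch, where the application sets PKT_TX_OUTER_IP_CKSUM or PKT_TX_OUTER_IPV6
and fills the outer length fields itself (plain Ethernet, no IP options):

/* Hedged example of the outer-flags convention: Ethernet/IPv4 outer,
 * VXLAN, Ethernet/IPv4/TCP inner; m is the mbuf being prepared.
 */
m->outer_l2_len = 14;   /* outer Ethernet */
m->outer_l3_len = 20;   /* outer IPv4 */
m->l2_len = 8 + 8 + 14; /* UDP + VXLAN + inner Ethernet = 30 */
m->l3_len = 20;         /* inner IPv4 */
m->l4_len = 20;         /* inner TCP */
/* The driver derives inner_l2_len = 30 - 8 - 8 = 14 and
 * tunnel_length = 30, and hinic_get_pld_offset() yields
 * payload_offset = 14 + 20 + 30 + 20 + 20 = 104 for the TCP case.
 */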
@@ -816,7 +861,6 @@ hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
        if ((ol_flags & PKT_TX_TUNNEL_VXLAN) && ((ol_flags & PKT_TX_TCP_SEG) ||
                        (ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
                        (ol_flags & PKT_TX_OUTER_IPV6))) {
-               off_info->tunnel_type = TUNNEL_UDP_CSUM;
 
                /* inner_l4_tcp_udp csum should be set to calculate the outer
                 * udp checksum when vxlan packets have no inner l3 and l4
@@ -840,8 +884,7 @@ hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
 
                        udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
                                                        m->outer_l3_len);
-                       udp_hdr->dgram_cksum =
-                               hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
+                       udp_hdr->dgram_cksum = 0;
                } else if (eth_type == RTE_ETHER_TYPE_IPV6) {
                        off_info->outer_l3_type = IPV6_PKT;
                        ipv6_hdr =
@@ -852,9 +895,12 @@ hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
                        rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
                                                (m->outer_l2_len +
                                                m->outer_l3_len));
-                       udp_hdr->dgram_cksum =
-                               hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
+                       udp_hdr->dgram_cksum = 0;
                }
+       } else if (ol_flags & PKT_TX_OUTER_IPV4) {
+               off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
+               off_info->inner_l4_tcp_udp = 1;
+               off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
        }
 
        if (ol_flags & PKT_TX_IPV4)
@@ -892,7 +938,6 @@ hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
 
                off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
                off_info->inner_l4_tcp_udp = 1;
-               off_info->inner_l4_len = sizeof(struct rte_udp_hdr);
        } else if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
                        (ol_flags & PKT_TX_TCP_SEG)) {
                if (ol_flags & PKT_TX_IPV4) {
@@ -1105,7 +1150,7 @@ u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts)
        return nb_tx;
 }
 
-void hinic_free_all_tx_skbs(struct hinic_txq *txq)
+void hinic_free_all_tx_mbufs(struct hinic_txq *txq)
 {
        u16 ci;
        struct hinic_nic_dev *nic_dev = txq->nic_dev;
@@ -1145,7 +1190,7 @@ void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev)
                        continue;
 
                /* stop the tx queue and free tx mbufs */
-               hinic_free_all_tx_skbs(nic_dev->txqs[q_id]);
+               hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
                hinic_free_tx_resources(nic_dev->txqs[q_id]);
 
                /* free txq */
@@ -1162,7 +1207,7 @@ void hinic_free_all_tx_mbuf(struct rte_eth_dev *eth_dev)
 
        for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
                /* stop the tx queue and free tx mbufs */
-               hinic_free_all_tx_skbs(nic_dev->txqs[q_id]);
+               hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
 }
 
 int hinic_setup_tx_resources(struct hinic_txq *txq)
diff --git a/drivers/net/hinic/hinic_pmd_tx.h b/drivers/net/hinic/hinic_pmd_tx.h
index 8a3df27..a1ca580 100644
@@ -131,7 +131,7 @@ void hinic_free_tx_resources(struct hinic_txq *txq);
 
 u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts);
 
-void hinic_free_all_tx_skbs(struct hinic_txq *txq);
+void hinic_free_all_tx_mbufs(struct hinic_txq *txq);
 
 void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);