diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 0c965b1..57017b6 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -315,7 +315,7 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
                       HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
 }
 
-static void
+void
 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 {
        uint16_t nb_rx_q = hw->data->nb_rx_queues;
@@ -543,6 +543,26 @@ hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
        hns3_write_dev(hw, addr, value);
 }
 
+/*
+ * Enable or disable the interrupt of all Rx queues when in interrupt Rx mode.
+ * This function is called before enabling queue Rx/Tx (in the normal start or
+ * reset recovery paths) to restore the hardware Rx queue interrupt enable
+ * bits, which are cleared by an FLR.
+ */
+void
+hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+       uint16_t nb_rx_q = hw->data->nb_rx_queues;
+       int i;
+
+       if (dev->data->dev_conf.intr_conf.rxq == 0)
+               return;
+
+       for (i = 0; i < nb_rx_q; i++)
+               hns3_queue_intr_enable(hw, i, en);
+}
+
 int
 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -742,6 +762,10 @@ hns3_start_tx_queues(struct hns3_adapter *hns)
        hns3_init_tx_ring_tc(hns);
 }
 
+/*
+ * Start all queues.
+ * Note: this only initializes and sets up the queues; it does not enable
+ * queue Rx/Tx. The caller is expected to enable them afterwards, e.g. by
+ * calling hns3_enable_all_queues().
+ */
 int
 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
 {
@@ -763,7 +787,6 @@ hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
        }
 
        hns3_start_tx_queues(hns);
-       hns3_enable_all_queues(hw, true);
 
        return 0;
 }
@@ -1232,9 +1255,9 @@ rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
 
        static const uint32_t l2table[HNS3_L2TBL_NUM] = {
                RTE_PTYPE_L2_ETHER,
-               RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L2_ETHER_QINQ,
-               0
+               RTE_PTYPE_L2_ETHER_VLAN,
+               RTE_PTYPE_L2_ETHER_VLAN
        };
 
        static const uint32_t l3table[HNS3_L3TBL_NUM] = {
@@ -1427,6 +1450,58 @@ hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
        }
 }
 
+static inline void
+hns3_rxd_to_vlan_tci(struct rte_eth_dev *dev, struct rte_mbuf *mb,
+                    uint32_t l234_info, const struct hns3_desc *rxd)
+{
+#define HNS3_STRP_STATUS_NUM           0x4
+
+#define HNS3_NO_STRP_VLAN_VLD          0x0
+#define HNS3_INNER_STRP_VLAN_VLD       0x1
+#define HNS3_OUTER_STRP_VLAN_VLD       0x2
+       struct hns3_adapter *hns = dev->data->dev_private;
+       struct hns3_hw *hw = &hns->hw;
+       uint32_t strip_status;
+       uint32_t report_mode;
+
+       /*
+        * Due to a hardware limitation, the VLAN tag is always inserted into
+        * the Rx descriptor when the tag is stripped from the packet. The
+        * driver therefore has to decide which tag to report to the mbuf,
+        * based on the PVID configuration and the VLAN strip status.
+        */
+       static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
+               {
+                       HNS3_NO_STRP_VLAN_VLD,
+                       HNS3_OUTER_STRP_VLAN_VLD,
+                       HNS3_INNER_STRP_VLAN_VLD,
+                       HNS3_OUTER_STRP_VLAN_VLD
+               },
+               {
+                       HNS3_NO_STRP_VLAN_VLD,
+                       HNS3_NO_STRP_VLAN_VLD,
+                       HNS3_NO_STRP_VLAN_VLD,
+                       HNS3_INNER_STRP_VLAN_VLD
+               }
+       };
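+       /*
+        * Look up the report mode: the first index is the PVID state of the
+        * port, the second is the strip status from the Rx descriptor.
+        */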
+       strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
+                                     HNS3_RXD_STRP_TAGP_S);
+       report_mode = report_type[hw->port_base_vlan_cfg.state][strip_status];
+       switch (report_mode) {
+       case HNS3_NO_STRP_VLAN_VLD:
+               mb->vlan_tci = 0;
+               return;
+       case HNS3_INNER_STRP_VLAN_VLD:
+               mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+               mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
+               return;
+       case HNS3_OUTER_STRP_VLAN_VLD:
+               mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+               mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
+               return;
+       }
+}
+
 uint16_t
 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
@@ -1602,10 +1677,8 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        hns3_rx_set_cksum_flag(first_seg,
                                               first_seg->packet_type,
                                               cksum_err);
+               hns3_rxd_to_vlan_tci(dev, first_seg, l234_info, &rxd);
 
-               first_seg->vlan_tci = rte_le_to_cpu_16(rxd.rx.vlan_tag);
-               first_seg->vlan_tci_outer =
-                       rte_le_to_cpu_16(rxd.rx.ot_vlan_tag);
                rx_pkts[nb_rx++] = first_seg;
                first_seg = NULL;
                continue;
@@ -1685,6 +1758,12 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        txq->configured = true;
        txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
                                idx * HNS3_TQP_REG_SIZE);
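+       /* Clear the software counters that track abnormal Tx path events. */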
+       txq->over_length_pkt_cnt = 0;
+       txq->exceed_limit_bd_pkt_cnt = 0;
+       txq->exceed_limit_bd_reassem_fail = 0;
+       txq->unsupported_tunnel_pkt_cnt = 0;
+       txq->queue_full_cnt = 0;
+       txq->pkt_padding_fail_cnt = 0;
        rte_spinlock_lock(&hw->lock);
        dev->data->tx_queues[idx] = txq;
        rte_spinlock_unlock(&hw->lock);
@@ -1771,6 +1850,12 @@ hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
        return 0;
 }
 
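+/*
+ * A packet is handled as TSO only when it carries a non-zero MSS and the
+ * PKT_TX_TCP_SEG offload flag.
+ */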
+static inline bool
+hns3_pkt_is_tso(struct rte_mbuf *m)
+{
+       return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
+}
+
 static void
 hns3_set_tso(struct hns3_desc *desc,
             uint64_t ol_flags, struct rte_mbuf *rxm)
@@ -1779,7 +1864,7 @@ hns3_set_tso(struct hns3_desc *desc,
        uint32_t tmp;
        uint8_t l2_len = rxm->l2_len;
 
-       if (!(ol_flags & PKT_TX_TCP_SEG))
+       if (!hns3_pkt_is_tso(rxm))
                return;
 
        if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
@@ -2228,12 +2313,6 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
        }
 }
 
-static inline bool
-hns3_pkt_is_tso(struct rte_mbuf *m)
-{
-       return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
-}
-
 static int
 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
 {
@@ -2284,12 +2363,6 @@ hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
        for (i = 0; i < nb_pkts; i++) {
                m = tx_pkts[i];
 
-               /* check the size of packet */
-               if (m->pkt_len < RTE_ETHER_MIN_LEN) {
-                       rte_errno = EINVAL;
-                       return i;
-               }
-
                if (hns3_pkt_is_tso(m) &&
                    (hns3_pkt_need_linearized(m, m->nb_segs) ||
                     hns3_check_tso_pkt_valid(m))) {
@@ -2324,8 +2397,10 @@ hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
        if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
                (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
                if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
-                                               hdr_lens))
+                                               hdr_lens)) {
+                       txq->unsupported_tunnel_pkt_cnt++;
                        return -EINVAL;
+               }
        }
        /* Enable checksum offloading */
        if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
@@ -2348,13 +2423,18 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
         * If the packet length is greater than the HNS3_MAX_FRAME_LEN that
         * the driver supports, the packet will be ignored.
         */
-       if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
+       if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
+               txq->over_length_pkt_cnt++;
                return -EINVAL;
+       }
 
        if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
+               txq->exceed_limit_bd_pkt_cnt++;
                ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
-               if (ret)
+               if (ret) {
+                       txq->exceed_limit_bd_reassem_fail++;
                        return ret;
+               }
                *m_seg = new_pkt;
        }
 
@@ -2392,6 +2472,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                nb_buf = tx_pkt->nb_segs;
 
                if (nb_buf > txq->tx_bd_ready) {
+                       txq->queue_full_cnt++;
                        if (nb_tx == 0)
                                return 0;
 
@@ -2409,8 +2490,10 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        add_len = HNS3_MIN_PKT_SIZE -
                                         rte_pktmbuf_pkt_len(tx_pkt);
                        appended = rte_pktmbuf_append(tx_pkt, add_len);
-                       if (appended == NULL)
+                       if (appended == NULL) {
+                               txq->pkt_padding_fail_cnt++;
                                break;
+                       }
 
                        memset(appended, 0, add_len);
                }