diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index aaccf7e..57017b6 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -315,7 +315,7 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
                       HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
 }
 
-static void
+void
 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 {
        uint16_t nb_rx_q = hw->data->nb_rx_queues;
@@ -499,16 +499,70 @@ hns3_reset_all_queues(struct hns3_adapter *hns)
 }
 
 void
-hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+                      uint8_t gl_idx, uint16_t gl_value)
+{
+       uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
+                            HNS3_TQP_INTR_GL1_REG,
+                            HNS3_TQP_INTR_GL2_REG};
+       uint32_t addr, value;
+
+       if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
+               return;
+
+       addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+       value = HNS3_GL_USEC_TO_REG(gl_value);
+
+       hns3_write_dev(hw, addr, value);
+}
+
+void
+hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
+{
+       uint32_t addr, value;
+
+       if (rl_value > HNS3_TQP_INTR_RL_MAX)
+               return;
+
+       addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+       value = HNS3_RL_USEC_TO_REG(rl_value);
+       if (value > 0)
+               value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
+
+       hns3_write_dev(hw, addr, value);
+}
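
The two setters above expose the per-queue interrupt coalescing knobs. A minimal usage sketch follows; the wrapper function, the GL0 = Rx / GL1 = Tx interpretation and the concrete values are assumptions for illustration, not part of this patch.

    /* Hypothetical coalescing setup for one queue (values are examples only). */
    static void
    example_queue_coalesce(struct hns3_hw *hw, uint16_t queue_id)
    {
            /* GL0/GL1 are assumed to hold the Rx/Tx gap-limiting times in us. */
            hns3_set_queue_intr_gl(hw, queue_id, 0, 20);
            hns3_set_queue_intr_gl(hw, queue_id, 1, 20);

            /* 0 disables rate limiting; a non-zero value also gets
             * HNS3_TQP_INTR_RL_ENABLE_MASK set inside the setter. */
            hns3_set_queue_intr_rl(hw, queue_id, 0);
    }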
+
+static void
+hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
 {
        uint32_t addr, value;
 
-       addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
+       addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
        value = en ? 1 : 0;
 
        hns3_write_dev(hw, addr, value);
 }
 
+/*
+ * Enable all Rx queue interrupts when the port is in interrupt Rx mode.
+ * This function is called before Rx/Tx is enabled on the queues (in both
+ * the normal start and the reset recovery paths) to restore the hardware
+ * Rx queue interrupt enables, which are cleared by an FLR.
+ */
+void
+hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+       uint16_t nb_rx_q = hw->data->nb_rx_queues;
+       int i;
+
+       if (dev->data->dev_conf.intr_conf.rxq == 0)
+               return;
+
+       for (i = 0; i < nb_rx_q; i++)
+               hns3_queue_intr_enable(hw, i, en);
+}
+
 int
 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -519,8 +573,7 @@ hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        if (dev->data->dev_conf.intr_conf.rxq == 0)
                return -ENOTSUP;
 
-       /* enable the vectors */
-       hns3_tqp_intr_enable(hw, queue_id, true);
+       hns3_queue_intr_enable(hw, queue_id, true);
 
        return rte_intr_ack(intr_handle);
 }
@@ -533,8 +586,7 @@ hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        if (dev->data->dev_conf.intr_conf.rxq == 0)
                return -ENOTSUP;
 
-       /* disable the vectors */
-       hns3_tqp_intr_enable(hw, queue_id, false);
+       hns3_queue_intr_enable(hw, queue_id, false);
 
        return 0;
 }
@@ -710,6 +762,10 @@ hns3_start_tx_queues(struct hns3_adapter *hns)
        hns3_init_tx_ring_tc(hns);
 }
 
+/*
+ * Start all queues.
+ * Note: this only initializes and sets up the queues; it does not enable Rx/Tx.
+ */
 int
 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
 {
@@ -731,7 +787,6 @@ hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
        }
 
        hns3_start_tx_queues(hns);
-       hns3_enable_all_queues(hw, true);
 
        return 0;
 }
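
Since hns3_enable_all_queues() is no longer called at the end of hns3_start_queues(), switching Rx/Tx on becomes the caller's final step. A minimal sketch of the expected ordering; the wrapper function and its error handling are hypothetical, only the three hns3_* calls come from this diff.

    /* Hypothetical start/reset-recovery path (error handling trimmed). */
    static int
    example_start(struct hns3_adapter *hns, bool reset_queue)
    {
            struct hns3_hw *hw = &hns->hw;
            int ret;

            ret = hns3_start_queues(hns, reset_queue); /* init and set up only */
            if (ret)
                    return ret;

            /* Restore the per-queue Rx interrupt enables cleared by an FLR. */
            hns3_dev_all_rx_queue_intr_enable(hw, true);

            /* Only now switch Rx/Tx on for every queue. */
            hns3_enable_all_queues(hw, true);

            return 0;
    }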
@@ -940,7 +995,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
        if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
                /* first time configuration */
-
                uint32_t size;
                size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
                hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
@@ -951,7 +1005,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
                }
        } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
                /* re-configure */
-
                rxq = hw->fkq_data.rx_queues;
                for (i = nb_queues; i < old_nb_queues; i++)
                        hns3_dev_rx_queue_release(rxq[i]);
@@ -989,7 +1042,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
        if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
                /* first time configuration */
-
                uint32_t size;
                size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
                hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
@@ -1000,7 +1052,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
                }
        } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
                /* re-configure */
-
                txq = hw->fkq_data.tx_queues;
                for (i = nb_queues; i < old_nb_queues; i++)
                        hns3_dev_tx_queue_release(txq[i]);
@@ -1204,9 +1255,9 @@ rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)
 
        static const uint32_t l2table[HNS3_L2TBL_NUM] = {
                RTE_PTYPE_L2_ETHER,
-               RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L2_ETHER_QINQ,
-               0
+               RTE_PTYPE_L2_ETHER_VLAN,
+               RTE_PTYPE_L2_ETHER_VLAN
        };
 
        static const uint32_t l3table[HNS3_L3TBL_NUM] = {
@@ -1399,6 +1450,58 @@ hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,
        }
 }
 
+static inline void
+hns3_rxd_to_vlan_tci(struct rte_eth_dev *dev, struct rte_mbuf *mb,
+                    uint32_t l234_info, const struct hns3_desc *rxd)
+{
+#define HNS3_STRP_STATUS_NUM           0x4
+
+#define HNS3_NO_STRP_VLAN_VLD          0x0
+#define HNS3_INNER_STRP_VLAN_VLD       0x1
+#define HNS3_OUTER_STRP_VLAN_VLD       0x2
+       struct hns3_adapter *hns = dev->data->dev_private;
+       struct hns3_hw *hw = &hns->hw;
+       uint32_t strip_status;
+       uint32_t report_mode;
+
+       /*
+        * Because of a hardware limitation, the VLAN tag is always written
+        * into the Rx descriptor when it is stripped from the packet, so the
+        * driver has to decide which tag to report to the mbuf based on the
+        * PVID configuration and the VLAN stripping status.
+        */
+       static const uint32_t report_type[][HNS3_STRP_STATUS_NUM] = {
+               {
+                       HNS3_NO_STRP_VLAN_VLD,
+                       HNS3_OUTER_STRP_VLAN_VLD,
+                       HNS3_INNER_STRP_VLAN_VLD,
+                       HNS3_OUTER_STRP_VLAN_VLD
+               },
+               {
+                       HNS3_NO_STRP_VLAN_VLD,
+                       HNS3_NO_STRP_VLAN_VLD,
+                       HNS3_NO_STRP_VLAN_VLD,
+                       HNS3_INNER_STRP_VLAN_VLD
+               }
+       };
+       strip_status = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,
+                                     HNS3_RXD_STRP_TAGP_S);
+       report_mode = report_type[hw->port_base_vlan_cfg.state][strip_status];
+       switch (report_mode) {
+       case HNS3_NO_STRP_VLAN_VLD:
+               mb->vlan_tci = 0;
+               return;
+       case HNS3_INNER_STRP_VLAN_VLD:
+               mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+               mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
+               return;
+       case HNS3_OUTER_STRP_VLAN_VLD:
+               mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+               mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
+               return;
+       }
+}
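
A short worked decode of the report_type table may help. The meaning given to the strip_status values below (1 = outer tag stripped, 2 = inner tag stripped, 3 = both stripped) is inferred from the table itself and is an assumption, not something this diff states explicitly.

    /*
     * PVID off (state 0), strip_status 1 -> HNS3_OUTER_STRP_VLAN_VLD:
     *         mb->vlan_tci = rxd->rx.ot_vlan_tag (stripped outer tag reported).
     * PVID on  (state 1), strip_status 1 -> HNS3_NO_STRP_VLAN_VLD:
     *         the stripped outer tag is the PVID, so nothing is reported.
     * PVID on  (state 1), strip_status 3 -> HNS3_INNER_STRP_VLAN_VLD:
     *         mb->vlan_tci = rxd->rx.vlan_tag (inner tag reported).
     */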
+
 uint16_t
 hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
@@ -1554,7 +1657,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                first_seg->pkt_len = pkt_len;
                first_seg->port = rxq->port_id;
                first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
-               first_seg->ol_flags |= PKT_RX_RSS_HASH;
+               first_seg->ol_flags = PKT_RX_RSS_HASH;
                if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
                        first_seg->hash.fdir.hi =
                                rte_le_to_cpu_32(rxd.rx.fd_id);
@@ -1571,12 +1674,11 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                                                                  ol_info);
 
                if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
-                       hns3_rx_set_cksum_flag(rxm, first_seg->packet_type,
+                       hns3_rx_set_cksum_flag(first_seg,
+                                              first_seg->packet_type,
                                               cksum_err);
+               hns3_rxd_to_vlan_tci(dev, first_seg, l234_info, &rxd);
 
-               first_seg->vlan_tci = rte_le_to_cpu_16(rxd.rx.vlan_tag);
-               first_seg->vlan_tci_outer =
-                       rte_le_to_cpu_16(rxd.rx.ot_vlan_tag);
                rx_pkts[nb_rx++] = first_seg;
                first_seg = NULL;
                continue;
@@ -1656,6 +1758,12 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        txq->configured = true;
        txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
                                idx * HNS3_TQP_REG_SIZE);
+       txq->over_length_pkt_cnt = 0;
+       txq->exceed_limit_bd_pkt_cnt = 0;
+       txq->exceed_limit_bd_reassem_fail = 0;
+       txq->unsupported_tunnel_pkt_cnt = 0;
+       txq->queue_full_cnt = 0;
+       txq->pkt_padding_fail_cnt = 0;
        rte_spinlock_lock(&hw->lock);
        dev->data->tx_queues[idx] = txq;
        rte_spinlock_unlock(&hw->lock);
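
The six counters initialized above are per-queue software statistics for Tx error and fallback cases. They are presumably exported through the driver's statistics path elsewhere; the helper below is purely hypothetical and only shows how such a soft counter could be aggregated across queues.

    /* Hypothetical helper: sum one Tx soft counter over all queues. */
    static uint64_t
    example_sum_queue_full(struct rte_eth_dev *dev)
    {
            uint64_t total = 0;
            uint16_t i;

            for (i = 0; i < dev->data->nb_tx_queues; i++) {
                    struct hns3_tx_queue *txq = dev->data->tx_queues[i];

                    if (txq != NULL)
                            total += txq->queue_full_cnt;
            }

            return total;
    }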
@@ -1742,6 +1850,12 @@ hns3_tso_proc_tunnel(struct hns3_desc *desc, uint64_t ol_flags,
        return 0;
 }
 
+static inline bool
+hns3_pkt_is_tso(struct rte_mbuf *m)
+{
+       return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
+}
+
 static void
 hns3_set_tso(struct hns3_desc *desc,
             uint64_t ol_flags, struct rte_mbuf *rxm)
@@ -1750,7 +1864,7 @@ hns3_set_tso(struct hns3_desc *desc,
        uint32_t tmp;
        uint8_t l2_len = rxm->l2_len;
 
-       if (!(ol_flags & PKT_TX_TCP_SEG))
+       if (!hns3_pkt_is_tso(rxm))
                return;
 
        if (hns3_tso_proc_tunnel(desc, ol_flags, rxm, &l2_len))
@@ -2199,12 +2313,6 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
        }
 }
 
-static inline bool
-hns3_pkt_is_tso(struct rte_mbuf *m)
-{
-       return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
-}
-
 static int
 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
 {
@@ -2255,12 +2363,6 @@ hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
        for (i = 0; i < nb_pkts; i++) {
                m = tx_pkts[i];
 
-               /* check the size of packet */
-               if (m->pkt_len < RTE_ETHER_MIN_LEN) {
-                       rte_errno = EINVAL;
-                       return i;
-               }
-
                if (hns3_pkt_is_tso(m) &&
                    (hns3_pkt_need_linearized(m, m->nb_segs) ||
                     hns3_check_tso_pkt_valid(m))) {
@@ -2295,8 +2397,10 @@ hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
        if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
                (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
                if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
-                                               hdr_lens))
+                                               hdr_lens)) {
+                       txq->unsupported_tunnel_pkt_cnt++;
                        return -EINVAL;
+               }
        }
        /* Enable checksum offloading */
        if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
@@ -2319,13 +2423,18 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
         * If packet length is greater than HNS3_MAX_FRAME_LEN
         * driver support, the packet will be ignored.
         */
-       if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
+       if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
+               txq->over_length_pkt_cnt++;
                return -EINVAL;
+       }
 
        if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
+               txq->exceed_limit_bd_pkt_cnt++;
                ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
-               if (ret)
+               if (ret) {
+                       txq->exceed_limit_bd_reassem_fail++;
                        return ret;
+               }
                *m_seg = new_pkt;
        }
 
@@ -2363,6 +2472,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                nb_buf = tx_pkt->nb_segs;
 
                if (nb_buf > txq->tx_bd_ready) {
+                       txq->queue_full_cnt++;
                        if (nb_tx == 0)
                                return 0;
 
@@ -2380,8 +2490,10 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        add_len = HNS3_MIN_PKT_SIZE -
                                         rte_pktmbuf_pkt_len(tx_pkt);
                        appended = rte_pktmbuf_append(tx_pkt, add_len);
-                       if (appended == NULL)
+                       if (appended == NULL) {
+                               txq->pkt_padding_fail_cnt++;
                                break;
+                       }
 
                        memset(appended, 0, add_len);
                }