diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index aaccf7e..8b3ced1 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -315,7 +315,7 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)
                       HNS3_CFG_DESC_NUM(txq->nb_tx_desc));
 }
 
-static void
+void
 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 {
        uint16_t nb_rx_q = hw->data->nb_rx_queues;
@@ -499,16 +499,70 @@ hns3_reset_all_queues(struct hns3_adapter *hns)
 }
 
 void
-hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
+                      uint8_t gl_idx, uint16_t gl_value)
+{
+       uint32_t offset[] = {HNS3_TQP_INTR_GL0_REG,
+                            HNS3_TQP_INTR_GL1_REG,
+                            HNS3_TQP_INTR_GL2_REG};
+       uint32_t addr, value;
+
+       if (gl_idx >= RTE_DIM(offset) || gl_value > HNS3_TQP_INTR_GL_MAX)
+               return;
+
+       addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
+       value = HNS3_GL_USEC_TO_REG(gl_value);
+
+       hns3_write_dev(hw, addr, value);
+}
+
+void
+hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
+{
+       uint32_t addr, value;
+
+       if (rl_value > HNS3_TQP_INTR_RL_MAX)
+               return;
+
+       addr = HNS3_TQP_INTR_RL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+       value = HNS3_RL_USEC_TO_REG(rl_value);
+       if (value > 0)
+               value |= HNS3_TQP_INTR_RL_ENABLE_MASK;
+
+       hns3_write_dev(hw, addr, value);
+}
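
The two coalescing setters above only program the per-queue GL/RL registers and silently return when a value is out of range. A minimal usage sketch under stated assumptions follows: the wrapper name, the choice of GL indices (0 for Rx, 1 for Tx) and the microsecond values are illustrative, not taken from this patch.

/* Sketch: configure interrupt coalescing for one queue. */
static void
example_set_queue_coalescing(struct hns3_hw *hw, uint16_t queue_id)
{
	/* Gap-limit (GL) values in microseconds; assumed GL0 = Rx, GL1 = Tx. */
	hns3_set_queue_intr_gl(hw, queue_id, 0, 20);
	hns3_set_queue_intr_gl(hw, queue_id, 1, 20);
	/* A rate limit of 0 leaves the RL enable bit clear. */
	hns3_set_queue_intr_rl(hw, queue_id, 0);
}
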
+
+static void
+hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
 {
        uint32_t addr, value;
 
-       addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
+       addr = HNS3_TQP_INTR_CTRL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
        value = en ? 1 : 0;
 
        hns3_write_dev(hw, addr, value);
 }
 
+/*
+ * Enable all Rx queue interrupts when in interrupt Rx mode.
+ * This API is called before enabling queue Rx & Tx (in the normal start and
+ * reset recovery paths) and restores the hardware Rx queue interrupt enable
+ * state, which is cleared by an FLR.
+ */
+void
+hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
+       uint16_t nb_rx_q = hw->data->nb_rx_queues;
+       int i;
+
+       if (dev->data->dev_conf.intr_conf.rxq == 0)
+               return;
+
+       for (i = 0; i < nb_rx_q; i++)
+               hns3_queue_intr_enable(hw, i, en);
+}
+
 int
 hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -519,8 +573,7 @@ hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        if (dev->data->dev_conf.intr_conf.rxq == 0)
                return -ENOTSUP;
 
-       /* enable the vectors */
-       hns3_tqp_intr_enable(hw, queue_id, true);
+       hns3_queue_intr_enable(hw, queue_id, true);
 
        return rte_intr_ack(intr_handle);
 }
@@ -533,8 +586,7 @@ hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        if (dev->data->dev_conf.intr_conf.rxq == 0)
                return -ENOTSUP;
 
-       /* disable the vectors */
-       hns3_tqp_intr_enable(hw, queue_id, false);
+       hns3_queue_intr_enable(hw, queue_id, false);
 
        return 0;
 }
@@ -710,6 +762,10 @@ hns3_start_tx_queues(struct hns3_adapter *hns)
        hns3_init_tx_ring_tc(hns);
 }
 
+/*
+ * Start all queues.
+ * Note: only init and set up the queues; do not enable queue Rx & Tx here.
+ */
 int
 hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
 {
@@ -731,7 +787,6 @@ hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)
        }
 
        hns3_start_tx_queues(hns);
-       hns3_enable_all_queues(hw, true);
 
        return 0;
 }
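
Because hns3_enable_all_queues() is no longer called from hns3_start_queues() (and is now exported by dropping its static qualifier in the first hunk), the caller becomes responsible for enabling Rx/Tx after queue setup and for restoring the Rx queue interrupts that an FLR clears. A hedged sketch of the expected ordering follows; the wrapper function and its error handling are assumptions, and only the three hns3_* calls come from this file.

/* Sketch: start-up / reset-recovery ordering after this change. */
static int
example_start_datapath(struct hns3_adapter *hns, bool reset_queue)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	/* Init and set up the rings; Rx/Tx are no longer enabled here. */
	ret = hns3_start_queues(hns, reset_queue);
	if (ret)
		return ret;

	/* Re-arm the Rx queue interrupts that an FLR may have cleared. */
	hns3_dev_all_rx_queue_intr_enable(hw, true);

	/* Only now let the hardware start receiving and transmitting. */
	hns3_enable_all_queues(hw, true);

	return 0;
}
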
@@ -940,7 +995,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
        if (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {
                /* first time configuration */
-
                uint32_t size;
                size = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;
                hw->fkq_data.rx_queues = rte_zmalloc("fake_rx_queues", size,
@@ -951,7 +1005,6 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
                }
        } else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {
                /* re-configure */
-
                rxq = hw->fkq_data.rx_queues;
                for (i = nb_queues; i < old_nb_queues; i++)
                        hns3_dev_rx_queue_release(rxq[i]);
@@ -989,7 +1042,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 
        if (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {
                /* first time configuration */
-
                uint32_t size;
                size = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;
                hw->fkq_data.tx_queues = rte_zmalloc("fake_tx_queues", size,
@@ -1000,7 +1052,6 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
                }
        } else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {
                /* re-configure */
-
                txq = hw->fkq_data.tx_queues;
                for (i = nb_queues; i < old_nb_queues; i++)
                        hns3_dev_tx_queue_release(txq[i]);
@@ -1554,7 +1605,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                first_seg->pkt_len = pkt_len;
                first_seg->port = rxq->port_id;
                first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
-               first_seg->ol_flags |= PKT_RX_RSS_HASH;
+               first_seg->ol_flags = PKT_RX_RSS_HASH;
                if (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {
                        first_seg->hash.fdir.hi =
                                rte_le_to_cpu_32(rxd.rx.fd_id);
@@ -1571,7 +1622,8 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                                                                  ol_info);
 
                if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
-                       hns3_rx_set_cksum_flag(rxm, first_seg->packet_type,
+                       hns3_rx_set_cksum_flag(first_seg,
+                                              first_seg->packet_type,
                                               cksum_err);
 
                first_seg->vlan_tci = rte_le_to_cpu_16(rxd.rx.vlan_tag);
@@ -1656,6 +1708,12 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        txq->configured = true;
        txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
                                idx * HNS3_TQP_REG_SIZE);
+       txq->over_length_pkt_cnt = 0;
+       txq->exceed_limit_bd_pkt_cnt = 0;
+       txq->exceed_limit_bd_reassem_fail = 0;
+       txq->unsupported_tunnel_pkt_cnt = 0;
+       txq->queue_full_cnt = 0;
+       txq->pkt_padding_fail_cnt = 0;
        rte_spinlock_lock(&hw->lock);
        dev->data->tx_queues[idx] = txq;
        rte_spinlock_unlock(&hw->lock);
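
hns3_tx_queue_setup() now zeroes six per-queue software counters that the Tx path below increments on the corresponding drop or fallback events. A sketch of how they might be aggregated is shown here; the function is hypothetical, only the field names come from this patch, and their exact integer types are assumed.

/* Sketch: sum the new per-queue Tx software counters into one total. */
static uint64_t
example_sum_tx_drop_counters(struct rte_eth_dev *dev)
{
	uint64_t total = 0;
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct hns3_tx_queue *txq = dev->data->tx_queues[i];

		if (txq == NULL)
			continue;

		total += txq->over_length_pkt_cnt +
			 txq->exceed_limit_bd_pkt_cnt +
			 txq->exceed_limit_bd_reassem_fail +
			 txq->unsupported_tunnel_pkt_cnt +
			 txq->queue_full_cnt +
			 txq->pkt_padding_fail_cnt;
	}

	return total;
}
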
@@ -2295,8 +2353,10 @@ hns3_parse_cksum(struct hns3_tx_queue *txq, uint16_t tx_desc_id,
        if (m->ol_flags & PKT_TX_TUNNEL_MASK) {
                (void)rte_net_get_ptype(m, hdr_lens, RTE_PTYPE_ALL_MASK);
                if (hns3_parse_tunneling_params(txq, tx_desc_id, m->ol_flags,
-                                               hdr_lens))
+                                               hdr_lens)) {
+                       txq->unsupported_tunnel_pkt_cnt++;
                        return -EINVAL;
+               }
        }
        /* Enable checksum offloading */
        if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK)
@@ -2319,13 +2379,18 @@ hns3_check_non_tso_pkt(uint16_t nb_buf, struct rte_mbuf **m_seg,
         * If the packet length is greater than HNS3_MAX_FRAME_LEN, the
         * maximum the driver supports, the packet will be ignored.
         */
-       if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN))
+       if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) > HNS3_MAX_FRAME_LEN)) {
+               txq->over_length_pkt_cnt++;
                return -EINVAL;
+       }
 
        if (unlikely(nb_buf > HNS3_MAX_NON_TSO_BD_PER_PKT)) {
+               txq->exceed_limit_bd_pkt_cnt++;
                ret = hns3_reassemble_tx_pkts(txq, tx_pkt, &new_pkt);
-               if (ret)
+               if (ret) {
+                       txq->exceed_limit_bd_reassem_fail++;
                        return ret;
+               }
                *m_seg = new_pkt;
        }
 
@@ -2363,6 +2428,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                nb_buf = tx_pkt->nb_segs;
 
                if (nb_buf > txq->tx_bd_ready) {
+                       txq->queue_full_cnt++;
                        if (nb_tx == 0)
                                return 0;
 
@@ -2380,8 +2446,10 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        add_len = HNS3_MIN_PKT_SIZE -
                                         rte_pktmbuf_pkt_len(tx_pkt);
                        appended = rte_pktmbuf_append(tx_pkt, add_len);
-                       if (appended == NULL)
+                       if (appended == NULL) {
+                               txq->pkt_padding_fail_cnt++;
                                break;
+                       }
 
                        memset(appended, 0, add_len);
                }