diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index bd3ccf6..88d3bab 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -256,7 +256,7 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
        for (i = 0; i < rxq->nb_rx_desc; i++) {
                mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
                if (unlikely(mbuf == NULL)) {
-                       hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
+                       hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
                                 i);
                        hns3_rx_queue_release_mbufs(rxq);
                        return -ENOMEM;
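
A note on this and the later %d/%u hunks: the queue and descriptor indexes printed here are unsigned in the driver, so %u is the matching conversion specifier. A trivial sketch of the idea (the variable type below is an assumption for illustration, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t idx = 3;	/* queue/descriptor indexes are unsigned */

	/* %u states the unsigned intent explicitly; for sub-int types %d also
	 * happens to work after integer promotion, but it no longer matches
	 * once the counter is a full unsigned int. */
	printf("Failed to allocate RXD[%u] for rx queue!\n", idx);
	return 0;
}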
@@ -353,6 +353,19 @@ hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw)
        }
 }
 
+static void
+hns3_stop_unused_queue(void *tqp_base, enum hns3_ring_type queue_type)
+{
+       uint32_t reg_offset;
+       uint32_t reg;
+
+       reg_offset = queue_type == HNS3_RING_TYPE_TX ?
+                                  HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
+       reg = hns3_read_reg(tqp_base, reg_offset);
+       reg &= ~BIT(HNS3_RING_EN_B);
+       hns3_write_reg(tqp_base, reg_offset, reg);
+}
+
 void
 hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 {
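
The new helper above performs a read-modify-write that clears the per-ring enable bit. For contrast, the complementary operation would set the same bit; a minimal sketch reusing the register names from the hunk (this enable helper itself is illustrative and not part of the patch):

static void
hns3_start_queue_sketch(void *tqp_base, enum hns3_ring_type queue_type)
{
	uint32_t reg_offset;
	uint32_t reg;

	reg_offset = queue_type == HNS3_RING_TYPE_TX ?
				   HNS3_RING_TX_EN_REG : HNS3_RING_RX_EN_REG;
	reg = hns3_read_reg(tqp_base, reg_offset);
	reg |= BIT(HNS3_RING_EN_B);	/* set the enable bit instead of clearing it */
	hns3_write_reg(tqp_base, reg_offset, reg);
}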
@@ -368,16 +381,22 @@ hns3_enable_all_queues(struct hns3_hw *hw, bool en)
                if (hns3_dev_indep_txrx_supported(hw)) {
                        rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
                        txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
+
+                       tqp_base = (void *)((char *)hw->io_base +
+                                       hns3_get_tqp_reg_offset(i));
                        /*
-                        * After initialization, rxq and txq won't be NULL at
-                        * the same time.
+                        * If the queue struct is not initialized, the
+                        * related HW ring has not been initialized yet.
+                        * Such queues must be disabled before enabling the
+                        * TQPs to avoid a HW exception, since all queues
+                        * are enabled by default.
                         */
-                       if (rxq != NULL)
-                               tqp_base = rxq->io_base;
-                       else if (txq != NULL)
-                               tqp_base = txq->io_base;
-                       else
-                               return;
+                       if (rxq == NULL)
+                               hns3_stop_unused_queue(tqp_base,
+                                                       HNS3_RING_TYPE_RX);
+                       if (txq == NULL)
+                               hns3_stop_unused_queue(tqp_base,
+                                                       HNS3_RING_TYPE_TX);
                } else {
                        rxq = i < nb_rx_q ? hw->data->rx_queues[i] :
                              hw->fkq_data.rx_queues[i - nb_rx_q];
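
The rewritten block derives tqp_base from the device BAR (hw->io_base) plus a per-queue register offset, so the enable registers stay reachable even when the corresponding rxq/txq struct is NULL. hns3_get_tqp_reg_offset() itself is only referenced, not shown, in this diff; a simplified sketch of what such a helper typically computes, with all SKETCH_* names and values being illustrative placeholders rather than constants from the hns3 register layout:

#define SKETCH_TQP_REG_BASE	0x10000u	/* placeholder base of the TQP window */
#define SKETCH_TQP_REG_SIZE	0x200u		/* placeholder per-queue stride */

static uint32_t
tqp_reg_offset_sketch(uint16_t queue_id)
{
	/* Each TQP owns a fixed-size block of registers at a common base, so a
	 * queue's registers can be located from the BAR without touching any
	 * software queue structure. */
	return SKETCH_TQP_REG_BASE + (uint32_t)queue_id * SKETCH_TQP_REG_SIZE;
}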
@@ -502,6 +521,26 @@ start_rxqs_fail:
        return -EINVAL;
 }
 
+void
+hns3_restore_tqp_enable_state(struct hns3_hw *hw)
+{
+       struct hns3_rx_queue *rxq;
+       struct hns3_tx_queue *txq;
+       uint16_t i;
+
+       for (i = 0; i < hw->data->nb_rx_queues; i++) {
+               rxq = hw->data->rx_queues[i];
+               if (rxq != NULL)
+                       hns3_enable_rxq(rxq, rxq->enabled);
+       }
+
+       for (i = 0; i < hw->data->nb_tx_queues; i++) {
+               txq = hw->data->tx_queues[i];
+               if (txq != NULL)
+                       hns3_enable_txq(txq, txq->enabled);
+       }
+}
+
 void
 hns3_stop_all_txqs(struct rte_eth_dev *dev)
 {
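
hns3_restore_tqp_enable_state() above re-applies the enabled/disabled state recorded in rxq->enabled and txq->enabled, which matters after an event that returns the hardware to its default (all queues enabled) state. A minimal usage sketch; the surrounding recovery function and its other steps are assumptions, only the restore call comes from this patch:

static int
hns3_recover_queues_sketch(struct hns3_hw *hw)
{
	/* ... re-initialize rings, interrupts, RSS, etc. (omitted) ... */

	/* Re-apply the per-queue enable state the application had configured
	 * before the reset. */
	hns3_restore_tqp_enable_state(hw);

	return 0;
}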
@@ -1192,7 +1231,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
        rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
                                 RTE_CACHE_LINE_SIZE, q_info->socket_id);
        if (rxq == NULL) {
-               hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+               hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
                         q_info->idx);
                return NULL;
        }
@@ -1211,7 +1250,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
                                         rx_desc, HNS3_RING_BASE_ALIGN,
                                         q_info->socket_id);
        if (rx_mz == NULL) {
-               hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+               hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
                         q_info->idx);
                hns3_rx_queue_release(rxq);
                return NULL;
@@ -1220,7 +1259,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
        rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
        rxq->rx_ring_phys_addr = rx_mz->iova;
 
-       hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+       hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
                 rxq->rx_ring_phys_addr);
 
        return rxq;
@@ -1248,7 +1287,7 @@ hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
        q_info.ring_name = "rx_fake_ring";
        rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
        if (rxq == NULL) {
-               hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+               hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
                return -ENOMEM;
        }
 
@@ -1285,7 +1324,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
        txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
                                 RTE_CACHE_LINE_SIZE, q_info->socket_id);
        if (txq == NULL) {
-               hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+               hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
                         q_info->idx);
                return NULL;
        }
@@ -1298,7 +1337,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
                                         tx_desc, HNS3_RING_BASE_ALIGN,
                                         q_info->socket_id);
        if (tx_mz == NULL) {
-               hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+               hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
                         q_info->idx);
                hns3_tx_queue_release(txq);
                return NULL;
@@ -1307,7 +1346,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
        txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
        txq->tx_ring_phys_addr = tx_mz->iova;
 
-       hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+       hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
                 txq->tx_ring_phys_addr);
 
        /* Clear tx bd */
@@ -1342,7 +1381,7 @@ hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
        q_info.ring_name = "tx_fake_ring";
        txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
        if (txq == NULL) {
-               hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+               hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
                return -ENOMEM;
        }
 
@@ -1564,7 +1603,6 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)
 
        vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
                        RTE_PKTMBUF_HEADROOM);
-
        if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE)
                return -EINVAL;
 
@@ -2475,8 +2513,8 @@ hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
        if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
            rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
            free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
-               hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
-                        "(%d) of tx descriptors for port=%d queue=%d check "
+               hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
+                        "(%u) of tx descriptors for port=%u queue=%u check "
                         "fail!",
                         rs_thresh, free_thresh, nb_desc, hw->data->port_id,
                         idx);
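
For reference, the check above rejects threshold combinations that do not divide the ring evenly or leave too little slack below nb_desc. A hedged example of a configuration that passes it, using ethdev API calls that exist independently of this patch; the concrete numbers are illustrative, not defaults taken from the driver:

#include <rte_ethdev.h>

static int
txq_setup_example(uint16_t port_id, uint16_t queue_id, unsigned int socket_id)
{
	struct rte_eth_txconf tx_conf = {
		.tx_rs_thresh = 32,
		.tx_free_thresh = 32,
	};

	/* 1024 % 32 == 0, 32 + 32 <= 1024, and both thresholds stay well below
	 * 1024 - HNS3_TX_RS_FREE_THRESH_GAP, so hns3_tx_queue_conf_check()
	 * should accept this configuration. */
	return rte_eth_tx_queue_setup(port_id, queue_id, 1024, socket_id, &tx_conf);
}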