diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index c0f7981..308d0a6 100644
@@ -536,7 +536,10 @@ hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
                return;
 
        addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
-       value = HNS3_GL_USEC_TO_REG(gl_value);
+       if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
+               value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
+       else
+               value = HNS3_GL_USEC_TO_REG(gl_value);
 
        hns3_write_dev(hw, addr, value);
 }
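
For context, the unit handling above in a standalone form. This is a minimal sketch, assuming the GL register counts time in 2us steps unless the 1us unit flag is set; only the macro names come from the driver, the flag bit and the conversion are assumed values for illustration:

/* Sketch only: the constants below are assumptions, not the driver's
 * real register definitions. */
#include <stdint.h>

#define HNS3_TQP_INTR_GL_UNIT_1US	(1u << 31)	/* assumed unit-select bit */
#define HNS3_GL_USEC_TO_REG(gl_time)	((gl_time) >> 1)	/* assumed 2us step */

static inline uint32_t
hns3_gl_to_reg(uint32_t gl_usec, int unit_is_1us)
{
	/* With a 1us unit the time is written verbatim plus the unit flag;
	 * otherwise it is converted to the coarser 2us register step. */
	if (unit_is_1us)
		return gl_usec | HNS3_TQP_INTR_GL_UNIT_1US;
	return HNS3_GL_USEC_TO_REG(gl_usec);
}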
@@ -557,6 +560,21 @@ hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
        hns3_write_dev(hw, addr, value);
 }
 
+void
+hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
+{
+       uint32_t addr;
+
+       if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
+               return;
+
+       addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+       hns3_write_dev(hw, addr, ql_value);
+
+       addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+       hns3_write_dev(hw, addr, ql_value);
+}
+
 static void
 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
 {
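
A hypothetical caller for the new QL helper, sketching how it could sit next to the existing GL/RL writes when initializing per-queue coalescing. The loop, the helper name, and the gl_idx argument convention are assumptions; since hns3_set_queue_intr_ql() returns early on hardware without QL-based coalescing, calling it unconditionally is safe:

/* Hypothetical sketch: not the driver's actual init path. */
static void
hns3_queue_intr_coalesce_init(struct hns3_hw *hw, uint16_t nb_queues,
			      uint16_t gl_value, uint16_t rl_value,
			      uint16_t ql_value)
{
	uint16_t q;

	for (q = 0; q < nb_queues; q++) {
		/* 0/1 assumed to index the Rx/Tx GL registers in offset[]. */
		hns3_set_queue_intr_gl(hw, q, 0, gl_value);
		hns3_set_queue_intr_gl(hw, q, 1, gl_value);
		hns3_set_queue_intr_rl(hw, q, rl_value);
		hns3_set_queue_intr_ql(hw, q, ql_value); /* no-op w/o QL mode */
	}
}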
@@ -1251,6 +1269,12 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                return -EINVAL;
        }
 
+       if (conf->rx_drop_en == 0)
+               hns3_warn(hw, "if there are no available Rx descriptors, "
+                         "incoming packets are always dropped. Input parameter "
+                         "conf->rx_drop_en(%u) is ineffective.",
+                         conf->rx_drop_en);
+
        if (dev->data->rx_queues[idx]) {
                hns3_rx_queue_release(dev->data->rx_queues[idx]);
                dev->data->rx_queues[idx] = NULL;
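
On the application side, the warning fires when rte_eth_rxconf.rx_drop_en is left at 0; since the hardware drops on descriptor shortage regardless, a setup call that matches the real behaviour pins it to 1. A minimal sketch (the port ID, descriptor count, and helper name are placeholders):

#include <rte_ethdev.h>

static int
setup_rx_queue(uint16_t port_id, uint16_t queue_id, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rx_conf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rx_conf = dev_info.default_rxconf;
	rx_conf.rx_drop_en = 1;	/* matches hns3 hardware behaviour */

	return rte_eth_rx_queue_setup(port_id, queue_id, 1024,
				      rte_eth_dev_socket_id(port_id),
				      &rx_conf, mp);
}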
@@ -1891,6 +1915,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
        txq->configured = true;
        txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
                                idx * HNS3_TQP_REG_SIZE);
+       txq->min_tx_pkt_len = hw->min_tx_pkt_len;
        txq->over_length_pkt_cnt = 0;
        txq->exceed_limit_bd_pkt_cnt = 0;
        txq->exceed_limit_bd_reassem_fail = 0;
@@ -2719,14 +2744,16 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                }
 
                /*
-                * If packet length is less than minimum packet size, driver
-                * need to pad it.
+                * If the packet length is less than the minimum packet length
+                * supported by hardware in the Tx direction, the driver needs
+                * to pad it to avoid errors.
                 */
-               if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) {
+               if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
+                                               txq->min_tx_pkt_len)) {
                        uint16_t add_len;
                        char *appended;
 
-                       add_len = HNS3_MIN_PKT_SIZE -
+                       add_len = txq->min_tx_pkt_len -
                                         rte_pktmbuf_pkt_len(tx_pkt);
                        appended = rte_pktmbuf_append(tx_pkt, add_len);
                        if (appended == NULL) {
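
The padding path above in isolation: a sketch of a helper that zero-pads an mbuf up to a minimum length, assuming the final segment has enough tailroom. The helper name is hypothetical; rte_pktmbuf_append() returns NULL when tailroom is short:

#include <errno.h>
#include <string.h>
#include <rte_mbuf.h>

static inline int
pad_pkt_to_min_len(struct rte_mbuf *m, uint16_t min_len)
{
	uint16_t pkt_len = rte_pktmbuf_pkt_len(m);
	char *tail;

	if (pkt_len >= min_len)
		return 0;

	tail = rte_pktmbuf_append(m, min_len - pkt_len);
	if (tail == NULL)
		return -ENOMEM;	/* not enough tailroom in the last segment */

	/* Zero the appended bytes so no stale buffer data leaks onto the wire. */
	memset(tail, 0, min_len - pkt_len);
	return 0;
}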
@@ -2814,3 +2841,34 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
                eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
        }
 }
+
+void
+hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                 struct rte_eth_rxq_info *qinfo)
+{
+       struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mb_pool;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+       qinfo->scattered_rx = dev->data->scattered_rx;
+
+       /*
+        * On the hns3 network engine, if there are no available Rx buffer
+        * descriptors, incoming packets are always dropped by hardware.
+        */
+       qinfo->conf.rx_drop_en = 1;
+       qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+       qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+       qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                 struct rte_eth_txq_info *qinfo)
+{
+       struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+       qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+       qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
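
These two callbacks back the generic ethdev queries rte_eth_rx_queue_info_get() and rte_eth_tx_queue_info_get(); a short application-side usage sketch (port and queue IDs are placeholders):

#include <stdio.h>
#include <rte_ethdev.h>

static void
dump_queue_info(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_info;
	struct rte_eth_txq_info tx_info;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_info) == 0)
		printf("rxq %u: %u desc, rx_drop_en=%u\n", queue_id,
		       rx_info.nb_desc, rx_info.conf.rx_drop_en);

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_info) == 0)
		printf("txq %u: %u desc\n", queue_id, tx_info.nb_desc);
}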