diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d054e41..985ccc9 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -16,6 +16,7 @@
 #include <rte_interrupts.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
+#include <rte_alarm.h>
 #include <rte_atomic.h>
 #include <rte_eal.h>
 #include <rte_ether.h>
@@ -705,9 +706,9 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                         */
                        vf->msix_base = IAVF_MISC_VEC_ID;
 
-                       /* set ITR to max */
+                       /* set ITR to default */
                        interval = iavf_calc_itr_interval(
-                                       IAVF_QUEUE_ITR_INTERVAL_MAX);
+                                       IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
                        IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
                                       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                                       (IAVF_ITR_INDEX_DEFAULT <<
@@ -868,7 +869,8 @@ iavf_dev_start(struct rte_eth_dev *dev)
        }
        /* re-enable intr again, because efd assign may change */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
-               rte_intr_disable(intr_handle);
+               if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+                       rte_intr_disable(intr_handle);
                rte_intr_enable(intr_handle);
        }
 
@@ -902,6 +904,10 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
+       if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
+           dev->data->dev_conf.intr_conf.rxq != 0)
+               rte_intr_disable(intr_handle);
+
        if (adapter->stopped == 1)
                return 0;
 
@@ -1487,23 +1493,13 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
        if (ret)
                PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
                            RTE_ETHER_ADDR_PRT_FMT,
-                           old_addr->addr_bytes[0],
-                           old_addr->addr_bytes[1],
-                           old_addr->addr_bytes[2],
-                           old_addr->addr_bytes[3],
-                           old_addr->addr_bytes[4],
-                           old_addr->addr_bytes[5]);
+                               RTE_ETHER_ADDR_BYTES(old_addr));
 
        ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
        if (ret)
                PMD_DRV_LOG(ERR, "Fail to add new MAC:"
                            RTE_ETHER_ADDR_PRT_FMT,
-                           mac_addr->addr_bytes[0],
-                           mac_addr->addr_bytes[1],
-                           mac_addr->addr_bytes[2],
-                           mac_addr->addr_bytes[3],
-                           mac_addr->addr_bytes[4],
-                           mac_addr->addr_bytes[5]);
+                               RTE_ETHER_ADDR_BYTES(mac_addr));
 
        if (ret)
                return -EIO;
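
The hunk above replaces the hand-written addr_bytes[] argument lists with the
rte_ether.h helpers: RTE_ETHER_ADDR_PRT_FMT supplies the
"%02X:%02X:%02X:%02X:%02X:%02X" format string and RTE_ETHER_ADDR_BYTES()
expands to the six byte arguments. A minimal usage sketch, not part of the
patch (print_mac() is an illustrative name):

#include <stdio.h>
#include <rte_ether.h>

/* Print an Ethernet address with the same helpers as the log calls above. */
static void
print_mac(const struct rte_ether_addr *addr)
{
	printf("MAC: " RTE_ETHER_ADDR_PRT_FMT "\n",
	       RTE_ETHER_ADDR_BYTES(addr));
}
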
@@ -1660,6 +1656,7 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        uint16_t msix_intr;
 
        msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
@@ -1680,7 +1677,8 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 
        IAVF_WRITE_FLUSH(hw);
 
-       rte_intr_ack(&pci_dev->intr_handle);
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+               rte_intr_ack(&pci_dev->intr_handle);
 
        return 0;
 }
@@ -2186,6 +2184,30 @@ err:
        return -1;
 }
 
+static void
+iavf_uninit_vf(struct rte_eth_dev *dev)
+{
+       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+       iavf_shutdown_adminq(hw);
+
+       rte_free(vf->vf_res);
+       vf->vsi_res = NULL;
+       vf->vf_res = NULL;
+
+       rte_free(vf->aq_resp);
+       vf->aq_resp = NULL;
+
+       rte_free(vf->qos_cap);
+       vf->qos_cap = NULL;
+
+       rte_free(vf->rss_lut);
+       vf->rss_lut = NULL;
+       rte_free(vf->rss_key);
+       vf->rss_key = NULL;
+}
+
 /* Enable default admin queue interrupt setting */
 static inline void
 iavf_enable_irq0(struct iavf_hw *hw)
@@ -2225,6 +2247,29 @@ iavf_dev_interrupt_handler(void *param)
        iavf_enable_irq0(hw);
 }
 
+void
+iavf_dev_alarm_handler(void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t icr0;
+
+       iavf_disable_irq0(hw);
+
+       /* read out interrupt causes */
+       icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);
+
+       if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
+               PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
+               iavf_handle_virtchnl_msg(dev);
+       }
+
+       iavf_enable_irq0(hw);
+
+       rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
+                         iavf_dev_alarm_handler, dev);
+}
+
 static int
 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
                      const struct rte_flow_ops **ops)
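
For VFs whose host does not offer VIRTCHNL_VF_OFFLOAD_WB_ON_ITR, the patch
stops relying on the PCI interrupt for the admin queue and services it from a
periodic EAL alarm instead: iavf_dev_alarm_handler() above re-arms itself with
rte_eal_alarm_set() on every invocation, and the close path cancels it with
rte_eal_alarm_cancel(). A minimal, self-contained sketch of that self-rearming
alarm pattern (poll_adminq, start_polling, stop_polling and POLL_INTERVAL_US
are illustrative placeholders, not driver symbols; the driver uses
IAVF_ALARM_INTERVAL as its period):

#include <rte_alarm.h>
#include <rte_common.h>

#define POLL_INTERVAL_US 50000	/* assumed polling period, in microseconds */

static void
poll_adminq(void *arg)
{
	/* ... read the interrupt cause register, handle virtchnl messages ... */
	RTE_SET_USED(arg);

	/* EAL alarms fire once, so the handler must re-arm itself */
	rte_eal_alarm_set(POLL_INTERVAL_US, poll_adminq, arg);
}

static void
start_polling(void *arg)
{
	rte_eal_alarm_set(POLL_INTERVAL_US, poll_adminq, arg);
}

static void
stop_polling(void *arg)
{
	/* cancels every pending alarm for this callback/argument pair */
	rte_eal_alarm_cancel(poll_adminq, arg);
}
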
@@ -2261,6 +2306,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        int ret = 0;
 
@@ -2314,7 +2360,8 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
                             " store MAC addresses",
                             RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto init_vf_err;
        }
        /* If the MAC address is not configured by host,
         * generate a random one.
@@ -2325,13 +2372,18 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);
 
-       /* register callback func to eal lib */
-       rte_intr_callback_register(&pci_dev->intr_handle,
-                                  iavf_dev_interrupt_handler,
-                                  (void *)eth_dev);
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+               /* register callback func to eal lib */
+               rte_intr_callback_register(&pci_dev->intr_handle,
+                                          iavf_dev_interrupt_handler,
+                                          (void *)eth_dev);
 
-       /* enable uio intr after callback register */
-       rte_intr_enable(&pci_dev->intr_handle);
+               /* enable uio intr after callback register */
+               rte_intr_enable(&pci_dev->intr_handle);
+       } else {
+               rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
+                                 iavf_dev_alarm_handler, eth_dev);
+       }
 
        /* configure and enable device interrupt */
        iavf_enable_irq0(hw);
@@ -2339,12 +2391,21 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
        ret = iavf_flow_init(adapter);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to initialize flow");
-               return ret;
+               goto flow_init_err;
        }
 
        iavf_default_rss_disable(adapter);
 
        return 0;
+
+flow_init_err:
+       rte_free(eth_dev->data->mac_addrs);
+       eth_dev->data->mac_addrs = NULL;
+
+init_vf_err:
+       iavf_uninit_vf(eth_dev);
+
+       return ret;
 }
 
 static int
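
The error path added to iavf_dev_init() above unwinds in reverse order of
setup: a flow-init failure first frees the MAC address table and then falls
through to init_vf_err, which releases the VF resources via the new
iavf_uninit_vf(); an earlier allocation failure jumps straight to init_vf_err.
A generic sketch of this goto-ladder idiom (every helper below is a
hypothetical stand-in, not a driver function):

#include <stdlib.h>

static void *res_a, *res_b;

static int alloc_a(void) { res_a = malloc(16); return res_a ? 0 : -1; }
static int alloc_b(void) { res_b = malloc(16); return res_b ? 0 : -1; }
static int setup_c(void) { return 0; }
static void free_a(void) { free(res_a); res_a = NULL; }
static void free_b(void) { free(res_b); res_b = NULL; }

static int
init_example(void)
{
	int ret;

	ret = alloc_a();	/* e.g. query VF resources */
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = alloc_b();	/* e.g. MAC address table */
	if (ret)
		goto alloc_b_err;

	ret = setup_c();	/* e.g. flow initialization */
	if (ret)
		goto setup_c_err;

	return 0;

setup_c_err:
	free_b();		/* each label falls through into the one below */
alloc_b_err:
	free_a();
	return ret;
}
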
@@ -2375,12 +2436,16 @@ iavf_dev_close(struct rte_eth_dev *dev)
                iavf_config_promisc(adapter, false, false);
 
        iavf_shutdown_adminq(hw);
-       /* disable uio intr before callback unregister */
-       rte_intr_disable(intr_handle);
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+               /* disable uio intr before callback unregister */
+               rte_intr_disable(intr_handle);
 
-       /* unregister callback func from eal lib */
-       rte_intr_callback_unregister(intr_handle,
-                                    iavf_dev_interrupt_handler, dev);
+               /* unregister callback func from eal lib */
+               rte_intr_callback_unregister(intr_handle,
+                                            iavf_dev_interrupt_handler, dev);
+       } else {
+               rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
+       }
        iavf_disable_irq0(hw);
 
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)