drivers/net: check process type in close operation
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 562401a..60ee5e0 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -21,8 +21,6 @@
 #include "vnic_enet.h"
 #include "enic.h"
 
-int enic_pmd_logtype;
-
 /*
  * The set of PCI devices this driver supports
  */
@@ -68,14 +66,11 @@ static const struct vic_speed_capa {
 
 #define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
 #define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
+#define ENIC_DEVARG_GENEVE_OPT "geneve-opt"
 #define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
+#define ENIC_DEVARG_REPRESENTOR "representor"
 
-RTE_INIT(enicpmd_init_log)
-{
-       enic_pmd_logtype = rte_log_register("pmd.net.enic");
-       if (enic_pmd_logtype >= 0)
-               rte_log_set_level(enic_pmd_logtype, RTE_LOG_INFO);
-}
+RTE_LOG_REGISTER(enic_pmd_logtype, pmd.net.enic, INFO);
 
 static int
 enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
@@ -128,15 +123,25 @@ enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_op filter_op,
                     void *arg)
 {
+       struct enic *enic = pmd_priv(dev);
        int ret = 0;
 
        ENICPMD_FUNC_TRACE();
 
+       /*
+        * Currently, when Geneve with options offload is enabled, host
+        * cannot insert match-action rules.
+        */
+       if (enic->geneve_opt_enabled)
+               return -ENOTSUP;
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
-               *(const void **)arg = &enic_flow_ops;
+               if (enic->flow_filter_mode == FILTER_FLOWMAN)
+                       *(const void **)arg = &enic_fm_flow_ops;
+               else
+                       *(const void **)arg = &enic_flow_ops;
                break;
        case RTE_ETH_FILTER_FDIR:
                ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
@@ -363,18 +368,6 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
                        enic->ig_vlan_strip_en = 0;
        }
 
-       if ((mask & ETH_VLAN_FILTER_MASK) &&
-           (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
-               dev_warning(enic,
-                       "Configuration of VLAN filter is not supported\n");
-       }
-
-       if ((mask & ETH_VLAN_EXTEND_MASK) &&
-           (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
-               dev_warning(enic,
-                       "Configuration of extended VLAN is not supported\n");
-       }
-
        return enic_set_vlan_strip(enic);
 }
 
@@ -394,6 +387,10 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
                return ret;
        }
 
+       if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+               eth_dev->data->dev_conf.rxmode.offloads |=
+                       DEV_RX_OFFLOAD_RSS_HASH;
+
        enic->mc_count = 0;
        enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
                                  DEV_RX_OFFLOAD_CHECKSUM);
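
The configure hunk above implicitly turns on DEV_RX_OFFLOAD_RSS_HASH whenever RSS multi-queue mode is requested, so the PMD reports a per-packet hash. On the application side the hash lands in the mbuf; a minimal consumer sketch, using the pre-21.11 mbuf flag names, might look like this:

    #include <rte_mbuf.h>

    /* Illustrative only: return the RSS hash reported for a received packet,
     * or 0 if the PMD did not provide one for this mbuf. */
    static uint32_t get_rss_hash(const struct rte_mbuf *m)
    {
            if (m->ol_flags & PKT_RX_RSS_HASH)
                    return m->hash.rss;
            return 0;
    }
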
@@ -449,12 +446,17 @@ static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
 /*
  * Stop device.
  */
-static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
+static int enicpmd_dev_close(struct rte_eth_dev *eth_dev)
 {
        struct enic *enic = pmd_priv(eth_dev);
 
        ENICPMD_FUNC_TRACE();
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
        enic_remove(enic);
+
+       return 0;
 }
 
 static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
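
The process-type check above is the point of this patch: rte_eth_dev_close() can now be called from a secondary process without tearing down adapter state the primary still owns, since only the primary runs enic_remove(). A minimal application-side sketch (port setup assumed elsewhere):

    #include <rte_ethdev.h>

    /* Illustrative only: the same shutdown path works in both process types;
     * in a secondary process the enic close op returns 0 without releasing
     * shared adapter resources. */
    static void shutdown_port(uint16_t port_id)
    {
            rte_eth_dev_stop(port_id);
            rte_eth_dev_close(port_id);
    }
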
@@ -496,6 +498,12 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
        /* 1300 and later models are at least 40G */
        if (id >= 0x0100)
                return ETH_LINK_SPEED_40G;
+       /* VFs have subsystem id 0, check device id */
+       if (id == 0) {
+               /* Newer VF implies at least 40G model */
+               if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
+                       return ETH_LINK_SPEED_40G;
+       }
        return ETH_LINK_SPEED_10G;
 }
 
@@ -919,7 +927,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
 
        ENICPMD_FUNC_TRACE();
        sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
-       data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
+       data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic);
        rq_sop = &enic->rq[sop_queue_idx];
        rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
        qinfo->mp = rq_sop->mp;
@@ -955,6 +963,49 @@ static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
        /* tx_thresh, and all the other fields are not applicable for enic */
 }
 
+static int enicpmd_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
+                                        __rte_unused uint16_t queue_id,
+                                        struct rte_eth_burst_mode *mode)
+{
+       eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+       struct enic *enic = pmd_priv(dev);
+       const char *info_str = NULL;
+       int ret = -EINVAL;
+
+       ENICPMD_FUNC_TRACE();
+       if (enic->use_noscatter_vec_rx_handler)
+               info_str = "Vector AVX2 No Scatter";
+       else if (pkt_burst == enic_noscatter_recv_pkts)
+               info_str = "Scalar No Scatter";
+       else if (pkt_burst == enic_recv_pkts)
+               info_str = "Scalar";
+       if (info_str) {
+               strlcpy(mode->info, info_str, sizeof(mode->info));
+               ret = 0;
+       }
+       return ret;
+}
+
+static int enicpmd_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
+                                        __rte_unused uint16_t queue_id,
+                                        struct rte_eth_burst_mode *mode)
+{
+       eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+       const char *info_str = NULL;
+       int ret = -EINVAL;
+
+       ENICPMD_FUNC_TRACE();
+       if (pkt_burst == enic_simple_xmit_pkts)
+               info_str = "Scalar Simplified";
+       else if (pkt_burst == enic_xmit_pkts)
+               info_str = "Scalar";
+       if (info_str) {
+               strlcpy(mode->info, info_str, sizeof(mode->info));
+               ret = 0;
+       }
+       return ret;
+}
+
 static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
                                            uint16_t rx_queue_id)
 {
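
The two callbacks added above back the generic rte_eth_rx_burst_mode_get()/rte_eth_tx_burst_mode_get() API, so applications can see which datapath the PMD selected. A small, hypothetical query for queue 0:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Illustrative only: print the Rx/Tx burst mode strings the PMD reports. */
    static void print_burst_modes(uint16_t port_id)
    {
            struct rte_eth_burst_mode mode;

            if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
                    printf("port %u Rx burst mode: %s\n", port_id, mode.info);
            if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
                    printf("port %u Tx burst mode: %s\n", port_id, mode.info);
    }
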
@@ -1045,7 +1096,7 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
                             tnl->udp_port);
                return -EINVAL;
        }
-       return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
+       return update_vxlan_port(enic, RTE_VXLAN_DEFAULT_PORT);
 }
 
 static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
@@ -1096,14 +1147,14 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .tx_queue_stop        = enicpmd_dev_tx_queue_stop,
        .rx_queue_setup       = enicpmd_dev_rx_queue_setup,
        .rx_queue_release     = enicpmd_dev_rx_queue_release,
-       .rx_queue_count       = enicpmd_dev_rx_queue_count,
-       .rx_descriptor_done   = NULL,
        .tx_queue_setup       = enicpmd_dev_tx_queue_setup,
        .tx_queue_release     = enicpmd_dev_tx_queue_release,
        .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
        .rxq_info_get         = enicpmd_dev_rxq_info_get,
        .txq_info_get         = enicpmd_dev_txq_info_get,
+       .rx_burst_mode_get    = enicpmd_dev_rx_burst_mode_get,
+       .tx_burst_mode_get    = enicpmd_dev_tx_burst_mode_get,
        .dev_led_on           = NULL,
        .dev_led_off          = NULL,
        .flow_ctrl_get        = NULL,
@@ -1144,6 +1195,8 @@ static int enic_parse_zero_one(const char *key,
                enic->disable_overlay = b;
        if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
                enic->enable_avx2_rx = b;
+       if (strcmp(key, ENIC_DEVARG_GENEVE_OPT) == 0)
+               enic->geneve_opt_request = b;
        return 0;
 }
 
@@ -1185,7 +1238,9 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
        static const char *const valid_keys[] = {
                ENIC_DEVARG_DISABLE_OVERLAY,
                ENIC_DEVARG_ENABLE_AVX2_RX,
+               ENIC_DEVARG_GENEVE_OPT,
                ENIC_DEVARG_IG_VLAN_REWRITE,
+               ENIC_DEVARG_REPRESENTOR,
                NULL};
        struct enic *enic = pmd_priv(dev);
        struct rte_kvargs *kvlist;
@@ -1194,6 +1249,7 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
 
        enic->disable_overlay = false;
        enic->enable_avx2_rx = false;
+       enic->geneve_opt_request = false;
        enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
        if (!dev->device->devargs)
                return 0;
@@ -1204,6 +1260,8 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
                               enic_parse_zero_one, enic) < 0 ||
            rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
                               enic_parse_zero_one, enic) < 0 ||
+           rte_kvargs_process(kvlist, ENIC_DEVARG_GENEVE_OPT,
+                              enic_parse_zero_one, enic) < 0 ||
            rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
                               enic_parse_ig_vlan_rewrite, enic) < 0) {
                rte_kvargs_free(kvlist);
@@ -1213,10 +1271,9 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
        return 0;
 }
 
-/* Initialize the driver
- * It returns 0 on success.
- */
-static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
+/* Initialize the driver for PF */
+static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
+                            void *init_params __rte_unused)
 {
        struct rte_pci_device *pdev;
        struct rte_pci_addr *addr;
@@ -1224,8 +1281,8 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
        int err;
 
        ENICPMD_FUNC_TRACE();
-
        eth_dev->dev_ops = &enicpmd_eth_dev_ops;
+       eth_dev->rx_queue_count = enicpmd_dev_rx_queue_count;
        eth_dev->rx_pkt_burst = &enic_recv_pkts;
        eth_dev->tx_pkt_burst = &enic_xmit_pkts;
        eth_dev->tx_pkt_prepare = &enic_prep_pkts;
@@ -1238,8 +1295,6 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
        enic->port_id = eth_dev->data->port_id;
        enic->rte_dev = eth_dev;
        enic->dev_data = eth_dev->data;
-       /* Let rte_eth_dev_close() release the port resources */
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
 
        pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pdev);
@@ -1252,19 +1307,108 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
        err = enic_check_devargs(eth_dev);
        if (err)
                return err;
-       return enic_probe(enic);
+       err = enic_probe(enic);
+       if (!err && enic->fm) {
+               err = enic_fm_allocate_switch_domain(enic);
+               if (err)
+                       ENICPMD_LOG(ERR, "failed to allocate switch domain id");
+       }
+       return err;
+}
+
+static int eth_enic_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+       struct enic *enic = pmd_priv(eth_dev);
+       int err;
+
+       ENICPMD_FUNC_TRACE();
+       eth_dev->device = NULL;
+       eth_dev->intr_handle = NULL;
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+       err = rte_eth_switch_domain_free(enic->switch_domain_id);
+       if (err)
+               ENICPMD_LOG(WARNING, "failed to free switch domain: %d", err);
+       return 0;
 }
 
 static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
 {
-       return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
-               eth_enicpmd_dev_init);
+       char name[RTE_ETH_NAME_MAX_LEN];
+       struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+       struct rte_eth_dev *pf_ethdev;
+       struct enic *pf_enic;
+       int i, retval;
+
+       ENICPMD_FUNC_TRACE();
+       if (pci_dev->device.devargs) {
+               retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+                               &eth_da);
+               if (retval)
+                       return retval;
+       }
+       retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+               sizeof(struct enic),
+               eth_dev_pci_specific_init, pci_dev,
+               eth_enic_dev_init, NULL);
+       if (retval || eth_da.nb_representor_ports < 1)
+               return retval;
+
+       /* Probe VF representor */
+       pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+       if (pf_ethdev == NULL)
+               return -ENODEV;
+       /* Representors require flowman */
+       pf_enic = pmd_priv(pf_ethdev);
+       if (pf_enic->fm == NULL) {
+               ENICPMD_LOG(ERR, "VF representors require flowman");
+               return -ENOTSUP;
+       }
+       /*
+        * For now representors imply switchdev, as firmware does not support
+        * legacy mode SR-IOV
+        */
+       pf_enic->switchdev_mode = 1;
+       /* Calculate max VF ID before initializing representor */
+       pf_enic->max_vf_id = 0;
+       for (i = 0; i < eth_da.nb_representor_ports; i++) {
+               pf_enic->max_vf_id = RTE_MAX(pf_enic->max_vf_id,
+                                            eth_da.representor_ports[i]);
+       }
+       for (i = 0; i < eth_da.nb_representor_ports; i++) {
+               struct enic_vf_representor representor;
+
+               representor.vf_id = eth_da.representor_ports[i];
+               representor.switch_domain_id =
+                       pmd_priv(pf_ethdev)->switch_domain_id;
+               representor.pf = pmd_priv(pf_ethdev);
+               snprintf(name, sizeof(name), "net_%s_representor_%d",
+                       pci_dev->device.name, eth_da.representor_ports[i]);
+               retval = rte_eth_dev_create(&pci_dev->device, name,
+                       sizeof(struct enic_vf_representor), NULL, NULL,
+                       enic_vf_representor_init, &representor);
+               if (retval) {
+                       ENICPMD_LOG(ERR, "failed to create enic vf representor %s",
+                                   name);
+                       return retval;
+               }
+       }
+       return 0;
 }
 
 static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
 {
-       return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+       struct rte_eth_dev *ethdev;
+
+       ENICPMD_FUNC_TRACE();
+       ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+       if (!ethdev)
+               return -ENODEV;
+       if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+               return rte_eth_dev_destroy(ethdev, enic_vf_representor_uninit);
+       else
+               return rte_eth_dev_destroy(ethdev, eth_enic_dev_uninit);
 }
 
 static struct rte_pci_driver rte_enic_pmd = {
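
Representor ports created by the probe loop above are named "net_<pci-name>_representor_<vf-id>". An application that needs a specific representor can resolve it by name; a sketch, assuming the PF sits at the placeholder address 0000:af:00.0:

    #include <rte_ethdev.h>

    /* Illustrative only: look up the ethdev port id of VF 0's representor. */
    static int find_vf0_representor(uint16_t *port_id)
    {
            return rte_eth_dev_get_port_by_name(
                    "net_0000:af:00.0_representor_0", port_id);
    }
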
@@ -1274,10 +1418,16 @@ static struct rte_pci_driver rte_enic_pmd = {
        .remove = eth_enic_pci_remove,
 };
 
+int dev_is_enic(struct rte_eth_dev *dev)
+{
+       return dev->device->driver == &rte_enic_pmd.driver;
+}
+
 RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_enic,
        ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
        ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
+       ENIC_DEVARG_GENEVE_OPT "=0|1 "
        ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
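
For reference, the devargs registered above are passed per device on the EAL command line. A hypothetical invocation with a recent DPDK testpmd, enabling Geneve options offload and creating representors for VFs 0-1 (PCI address and VF list are placeholders):

    dpdk-testpmd -a 0000:af:00.0,geneve-opt=1,representor=[0-1] -- -i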