diff --git a/drivers/net/sfc/sfc_switch.c b/drivers/net/sfc/sfc_switch.c
index 395fc40..a28e861 100644
--- a/drivers/net/sfc/sfc_switch.c
+++ b/drivers/net/sfc/sfc_switch.c
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
  * Copyright(c) 2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * This mapping comprises a port type to ensure that RTE switch port ID
  * of a represented entity and that of its representor are different in
  * the case when the entity gets plugged into DPDK and not into a guest.
+ *
+ * Entry data also comprises RTE ethdev's own MPORT. This value
+ * coincides with the entity MPORT in the case of independent ports.
+ * In the case of representors, this ID is not a selector and refers
+ * to an allocatable object (that is, it's likely to change on RTE
+ * ethdev replug). Flow API backend must use this value rather
+ * than entity_mport to support flow rule action PORT_ID.
  */
 struct sfc_mae_switch_port {
        TAILQ_ENTRY(sfc_mae_switch_port)        switch_domain_ports;
 
+       /** RTE ethdev MPORT */
+       efx_mport_sel_t                         ethdev_mport;
+       /** RTE ethdev port ID */
+       uint16_t                                ethdev_port_id;
+
        /** Entity (PCIe function) MPORT selector */
        efx_mport_sel_t                         entity_mport;
        /** Port type (independent/representor) */
        enum sfc_mae_switch_port_type           type;
        /** RTE switch port ID */
        uint16_t                                id;
+
+       union sfc_mae_switch_port_data          data;
 };
 
 TAILQ_HEAD(sfc_mae_switch_ports, sfc_mae_switch_port);
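[Editor's note] The union sfc_mae_switch_port_data added to struct sfc_mae_switch_port above is declared in sfc_switch.h and is not part of this hunk. Judging only by the accesses later in this file (port->data.indep.mae_admin, assigned B_FALSE), a minimal hypothetical sketch of its shape could be:

/* Hypothetical sketch only; the authoritative definition is in sfc_switch.h. */
struct sfc_mae_switch_port_indep {
	/* Whether this independent ethdev holds MAE administrative privilege */
	boolean_t				mae_admin;
};

union sfc_mae_switch_port_data {
	struct sfc_mae_switch_port_indep	indep;
};
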
@@ -75,6 +89,12 @@ struct sfc_mae_switch_domain {
        struct sfc_mae_switch_ports             ports;
        /** RTE switch domain ID allocated for a group of devices */
        uint16_t                                id;
+       /** DPDK controller -> EFX interface mapping */
+       efx_pcie_interface_t                    *controllers;
+       /** Number of DPDK controllers and EFX interfaces */
+       size_t                                  nb_controllers;
+       /** MAE admin port */
+       struct sfc_mae_switch_port              *mae_admin_port;
 };
 
 TAILQ_HEAD(sfc_mae_switch_domains, sfc_mae_switch_domain);
@@ -133,6 +153,34 @@ sfc_mae_find_switch_domain_by_id(uint16_t switch_domain_id)
        return NULL;
 }
 
+int
+sfc_mae_switch_ports_iterate(uint16_t switch_domain_id,
+                            sfc_mae_switch_port_iterator_cb *cb,
+                            void *data)
+{
+       struct sfc_mae_switch_domain *domain;
+       struct sfc_mae_switch_port *port;
+
+       if (cb == NULL)
+               return EINVAL;
+
+       rte_spinlock_lock(&sfc_mae_switch.lock);
+
+       domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
+       if (domain == NULL) {
+               rte_spinlock_unlock(&sfc_mae_switch.lock);
+               return EINVAL;
+       }
+
+       TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
+               cb(port->type, &port->ethdev_mport, port->ethdev_port_id,
+                  &port->entity_mport, port->id, &port->data, data);
+       }
+
+       rte_spinlock_unlock(&sfc_mae_switch.lock);
+       return 0;
+}
+
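[Editor's note] For reference, a callback compatible with the call made in sfc_mae_switch_ports_iterate() might look like the sketch below. It assumes that sfc_mae_switch_port_iterator_cb (declared in sfc_switch.h) takes exactly the seven arguments passed at the call site above; the function name and the representor-counting logic are illustrative only.

/* Illustrative callback: count representor ports in the switch domain */
static void
count_representors_cb(enum sfc_mae_switch_port_type type,
		      efx_mport_sel_t *ethdev_mportp,
		      uint16_t ethdev_port_id,
		      efx_mport_sel_t *entity_mportp,
		      uint16_t switch_port_id,
		      union sfc_mae_switch_port_data *datap,
		      void *user_datap)
{
	unsigned int *nb_representors = user_datap;

	RTE_SET_USED(ethdev_mportp);
	RTE_SET_USED(ethdev_port_id);
	RTE_SET_USED(entity_mportp);
	RTE_SET_USED(switch_port_id);
	RTE_SET_USED(datap);

	if (type == SFC_MAE_SWITCH_PORT_REPRESENTOR)
		++(*nb_representors);
}

/*
 * Usage (illustrative):
 *	unsigned int nb = 0;
 *	sfc_mae_switch_ports_iterate(switch_domain_id,
 *				     count_representors_cb, &nb);
 */
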
 /* This function expects to be called only when the lock is held */
 static struct sfc_mae_switch_domain *
 sfc_mae_find_switch_domain_by_hw_switch_id(const struct sfc_hw_switch_id *id)
@@ -202,12 +250,130 @@ fail_domain_alloc:
 
 fail_mem_alloc:
        sfc_hw_switch_id_fini(sa, hw_switch_id);
-       rte_spinlock_unlock(&sfc_mae_switch.lock);
 
 fail_hw_switch_id_init:
+       rte_spinlock_unlock(&sfc_mae_switch.lock);
        return rc;
 }
 
+int
+sfc_mae_switch_domain_controllers(uint16_t switch_domain_id,
+                                 const efx_pcie_interface_t **controllers,
+                                 size_t *nb_controllers)
+{
+       struct sfc_mae_switch_domain *domain;
+
+       if (controllers == NULL || nb_controllers == NULL)
+               return EINVAL;
+
+       rte_spinlock_lock(&sfc_mae_switch.lock);
+
+       domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
+       if (domain == NULL) {
+               rte_spinlock_unlock(&sfc_mae_switch.lock);
+               return EINVAL;
+       }
+
+       *controllers = domain->controllers;
+       *nb_controllers = domain->nb_controllers;
+
+       rte_spinlock_unlock(&sfc_mae_switch.lock);
+       return 0;
+}
+
+int
+sfc_mae_switch_domain_map_controllers(uint16_t switch_domain_id,
+                                     efx_pcie_interface_t *controllers,
+                                     size_t nb_controllers)
+{
+       struct sfc_mae_switch_domain *domain;
+
+       rte_spinlock_lock(&sfc_mae_switch.lock);
+
+       domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
+       if (domain == NULL) {
+               rte_spinlock_unlock(&sfc_mae_switch.lock);
+               return EINVAL;
+       }
+
+       /* Controller mapping may be set only once */
+       if (domain->controllers != NULL) {
+               rte_spinlock_unlock(&sfc_mae_switch.lock);
+               return EINVAL;
+       }
+
+       domain->controllers = controllers;
+       domain->nb_controllers = nb_controllers;
+
+       rte_spinlock_unlock(&sfc_mae_switch.lock);
+       return 0;
+}
+
+int
+sfc_mae_switch_controller_from_mapping(const efx_pcie_interface_t *controllers,
+                                      size_t nb_controllers,
+                                      efx_pcie_interface_t intf,
+                                      int *controller)
+{
+       size_t i;
+
+       if (controllers == NULL)
+               return ENOENT;
+
+       for (i = 0; i < nb_controllers; i++) {
+               if (controllers[i] == intf) {
+                       *controller = i;
+                       return 0;
+               }
+       }
+
+       return ENOENT;
+}
+
+int
+sfc_mae_switch_domain_get_controller(uint16_t switch_domain_id,
+                                    efx_pcie_interface_t intf,
+                                    int *controller)
+{
+       const efx_pcie_interface_t *controllers;
+       size_t nb_controllers;
+       int rc;
+
+       rc = sfc_mae_switch_domain_controllers(switch_domain_id, &controllers,
+                                              &nb_controllers);
+       if (rc != 0)
+               return rc;
+
+       return sfc_mae_switch_controller_from_mapping(controllers,
+                                                     nb_controllers,
+                                                     intf,
+                                                     controller);
+}
+
+int sfc_mae_switch_domain_get_intf(uint16_t switch_domain_id,
+                                  int controller,
+                                  efx_pcie_interface_t *intf)
+{
+       const efx_pcie_interface_t *controllers;
+       size_t nb_controllers;
+       int rc;
+
+       rc = sfc_mae_switch_domain_controllers(switch_domain_id, &controllers,
+                                              &nb_controllers);
+       if (rc != 0)
+               return rc;
+
+       if (controllers == NULL)
+               return ENOENT;
+
+       if ((size_t)controller >= nb_controllers)
+               return EINVAL;
+
+       *intf = controllers[controller];
+
+       return 0;
+}
+
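[Editor's note] Taken together, the functions above register a per-domain controller mapping once and then translate between DPDK controller indices and EFX PCIe interfaces in both directions. The sketch below is illustrative only: EFX_PCIE_INTERFACE_CALLER is used as an assumed example efx_pcie_interface_t value, and the static array reflects the fact that sfc_mae_switch_domain_map_controllers() stores the caller's pointer rather than copying the table, so it must remain valid for the lifetime of the domain.

static int
example_controller_mapping(uint16_t switch_domain_id)
{
	/* Stored by pointer inside the switch domain, hence static storage */
	static efx_pcie_interface_t mapping[] = { EFX_PCIE_INTERFACE_CALLER };
	efx_pcie_interface_t intf;
	int controller;
	int rc;

	/* The mapping may be set only once per switch domain */
	rc = sfc_mae_switch_domain_map_controllers(switch_domain_id, mapping,
						   RTE_DIM(mapping));
	if (rc != 0)
		return rc;

	/* EFX PCIe interface -> DPDK controller index */
	rc = sfc_mae_switch_domain_get_controller(switch_domain_id,
						  EFX_PCIE_INTERFACE_CALLER,
						  &controller);
	if (rc != 0)
		return rc;

	/* ... and back from the controller index to the interface */
	return sfc_mae_switch_domain_get_intf(switch_domain_id, controller,
					      &intf);
}
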
 /* This function expects to be called only when the lock is held */
 static struct sfc_mae_switch_port *
 sfc_mae_find_switch_port_by_entity(const struct sfc_mae_switch_domain *domain,
@@ -227,6 +393,30 @@ sfc_mae_find_switch_port_by_entity(const struct sfc_mae_switch_domain *domain,
        return NULL;
 }
 
+/* This function expects to be called only when the lock is held */
+static int
+sfc_mae_find_switch_port_id_by_entity(uint16_t switch_domain_id,
+                                     const efx_mport_sel_t *entity_mportp,
+                                     enum sfc_mae_switch_port_type type,
+                                     uint16_t *switch_port_id)
+{
+       struct sfc_mae_switch_domain *domain;
+       struct sfc_mae_switch_port *port;
+
+       SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
+
+       domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
+       if (domain == NULL)
+               return EINVAL;
+
+       port = sfc_mae_find_switch_port_by_entity(domain, entity_mportp, type);
+       if (port == NULL)
+               return ENOENT;
+
+       *switch_port_id = port->id;
+       return 0;
+}
+
 int
 sfc_mae_assign_switch_port(uint16_t switch_domain_id,
                           const struct sfc_mae_switch_port_request *req,
@@ -263,6 +453,25 @@ sfc_mae_assign_switch_port(uint16_t switch_domain_id,
        TAILQ_INSERT_TAIL(&domain->ports, port, switch_domain_ports);
 
 done:
+       port->ethdev_mport = *req->ethdev_mportp;
+       port->ethdev_port_id = req->ethdev_port_id;
+
+       memcpy(&port->data, &req->port_data,
+              sizeof(port->data));
+
+       switch (req->type) {
+       case SFC_MAE_SWITCH_PORT_INDEPENDENT:
+               if (port->data.indep.mae_admin) {
+                       SFC_ASSERT(domain->mae_admin_port == NULL);
+                       domain->mae_admin_port = port;
+               }
+               break;
+       case SFC_MAE_SWITCH_PORT_REPRESENTOR:
+               break;
+       default:
+               SFC_ASSERT(B_FALSE);
+       }
+
        *switch_port_id = port->id;
 
        rte_spinlock_unlock(&sfc_mae_switch.lock);
@@ -274,3 +483,119 @@ fail_find_switch_domain_by_id:
        rte_spinlock_unlock(&sfc_mae_switch.lock);
        return rc;
 }
+
+int
+sfc_mae_clear_switch_port(uint16_t switch_domain_id,
+                         uint16_t switch_port_id)
+{
+       struct sfc_mae_switch_domain *domain;
+
+       rte_spinlock_lock(&sfc_mae_switch.lock);
+
+       domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
+       if (domain == NULL) {
+               rte_spinlock_unlock(&sfc_mae_switch.lock);
+               return EINVAL;
+       }
+
+       if (domain->mae_admin_port != NULL &&
+           domain->mae_admin_port->id == switch_port_id) {
+               domain->mae_admin_port->data.indep.mae_admin = B_FALSE;
+               domain->mae_admin_port = NULL;
+       }
+
+       rte_spinlock_unlock(&sfc_mae_switch.lock);
+       return 0;
+}
+
+/* This function expects to be called only when the lock is held */
+static int
+sfc_mae_find_switch_port_by_ethdev(uint16_t switch_domain_id,
+                                  uint16_t ethdev_port_id,
+                                  efx_mport_sel_t *mport_sel)
+{
+       struct sfc_mae_switch_domain *domain;
+       struct sfc_mae_switch_port *port;
+
+       SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
+
+       if (ethdev_port_id == RTE_MAX_ETHPORTS)
+               return EINVAL;
+
+       domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
+       if (domain == NULL)
+               return EINVAL;
+
+       TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
+               if (port->ethdev_port_id == ethdev_port_id) {
+                       *mport_sel = port->ethdev_mport;
+                       return 0;
+               }
+       }
+
+       return ENOENT;
+}
+
+int
+sfc_mae_switch_port_by_ethdev(uint16_t switch_domain_id,
+                             uint16_t ethdev_port_id,
+                             efx_mport_sel_t *mport_sel)
+{
+       int rc;
+
+       rte_spinlock_lock(&sfc_mae_switch.lock);
+       rc = sfc_mae_find_switch_port_by_ethdev(switch_domain_id,
+                                               ethdev_port_id, mport_sel);
+       rte_spinlock_unlock(&sfc_mae_switch.lock);
+
+       return rc;
+}
+
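[Editor's note] As the comment on struct sfc_mae_switch_port says, the ethdev MPORT (not entity_mport) is what a flow backend should use for flow rule action PORT_ID. A minimal sketch of such a lookup is shown below; efx_mae_action_set_populate_deliver() is assumed here to be the libefx helper that adds a DELIVER action for the resolved MPORT.

static int
example_deliver_to_ethdev(efx_mae_actions_t *spec,
			  uint16_t switch_domain_id,
			  uint16_t ethdev_port_id)
{
	efx_mport_sel_t mport;
	int rc;

	/* Resolve the ethdev's own MPORT (see the struct comment above) */
	rc = sfc_mae_switch_port_by_ethdev(switch_domain_id, ethdev_port_id,
					   &mport);
	if (rc != 0)
		return rc;

	return efx_mae_action_set_populate_deliver(spec, &mport);
}
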
+int
+sfc_mae_switch_port_id_by_entity(uint16_t switch_domain_id,
+                                const efx_mport_sel_t *entity_mportp,
+                                enum sfc_mae_switch_port_type type,
+                                uint16_t *switch_port_id)
+{
+       int rc;
+
+       rte_spinlock_lock(&sfc_mae_switch.lock);
+       rc = sfc_mae_find_switch_port_id_by_entity(switch_domain_id,
+                                                  entity_mportp, type,
+                                                  switch_port_id);
+       rte_spinlock_unlock(&sfc_mae_switch.lock);
+
+       return rc;
+}
+
+static int
+sfc_mae_get_switch_domain_admin_locked(uint16_t switch_domain_id,
+                                      uint16_t *port_id)
+{
+       struct sfc_mae_switch_domain *domain;
+
+       SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
+
+       domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
+       if (domain == NULL)
+               return EINVAL;
+
+       if (domain->mae_admin_port != NULL) {
+               *port_id = domain->mae_admin_port->ethdev_port_id;
+               return 0;
+       }
+
+       return ENOENT;
+}
+
+int
+sfc_mae_get_switch_domain_admin(uint16_t switch_domain_id,
+                               uint16_t *port_id)
+{
+       int rc;
+
+       rte_spinlock_lock(&sfc_mae_switch.lock);
+       rc = sfc_mae_get_switch_domain_admin_locked(switch_domain_id, port_id);
+       rte_spinlock_unlock(&sfc_mae_switch.lock);
+       return rc;
+}
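
[Editor's note] Finally, a caller such as a port representor proxy could locate the ethdev that holds MAE administrative privilege in its switch domain. The sketch below only illustrates the error convention used by the getters in this file (0, ENOENT, EINVAL); the function name is hypothetical.

static int
example_find_mae_admin_ethdev(uint16_t switch_domain_id,
			      uint16_t *admin_ethdev_port_id)
{
	int rc;

	rc = sfc_mae_get_switch_domain_admin(switch_domain_id,
					     admin_ethdev_port_id);
	switch (rc) {
	case 0:
		/* *admin_ethdev_port_id identifies the MAE admin ethdev */
		return 0;
	case ENOENT:
		/*
		 * No ethdev in this switch domain currently holds MAE
		 * administrative privilege; it may have been cleared by
		 * sfc_mae_clear_switch_port() on detach.
		 */
		return ENOENT;
	default:
		/* EINVAL: unknown switch domain ID */
		return rc;
	}
}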