/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
12 #include <rte_common.h>
13 #include <rte_spinlock.h>
19 #include "sfc_switch.h"
22 * Switch port registry entry.
24 * Drivers aware of RTE switch domains also have to maintain RTE switch
25 * port IDs for RTE ethdev instances they operate. These IDs are supposed
26 * to stand for physical interconnect entities, in example, PCIe functions.
28 * In terms of MAE, a physical interconnect entity can be referred to using
29 * an MPORT selector, that is, a 32-bit value. RTE switch port IDs, in turn,
30 * are 16-bit values, so indirect mapping has to be maintained:
32 * +--------------------+ +---------------------------------------+
33 * | RTE switch port ID | ------ | MAE switch port entry |
34 * +--------------------+ | --------------------- |
36 * | Entity (PCIe function) MPORT selector |
38 * | Port type (independent/representor) |
39 * +---------------------------------------+
41 * This mapping comprises a port type to ensure that RTE switch port ID
42 * of a represented entity and that of its representor are different in
43 * the case when the entity gets plugged into DPDK and not into a guest.
45 struct sfc_mae_switch_port {
46 TAILQ_ENTRY(sfc_mae_switch_port) switch_domain_ports;
48 /** Entity (PCIe function) MPORT selector */
49 efx_mport_sel_t entity_mport;
50 /** Port type (independent/representor) */
51 enum sfc_mae_switch_port_type type;
52 /** RTE switch port ID */
56 TAILQ_HEAD(sfc_mae_switch_ports, sfc_mae_switch_port);
59 * Switch domain registry entry.
61 * Even if an RTE ethdev instance gets unplugged, the corresponding
62 * entry in the switch port registry will not be removed because the
63 * entity (PCIe function) MPORT is static and cannot change. If this
64 * RTE ethdev gets plugged back, the entry will be reused, and
65 * RTE switch port ID will be the same.
67 struct sfc_mae_switch_domain {
68 TAILQ_ENTRY(sfc_mae_switch_domain) entries;
71 struct sfc_hw_switch_id *hw_switch_id;
72 /** The number of ports in the switch port registry */
73 unsigned int nb_ports;
74 /** Switch port registry */
75 struct sfc_mae_switch_ports ports;
76 /** RTE switch domain ID allocated for a group of devices */
80 TAILQ_HEAD(sfc_mae_switch_domains, sfc_mae_switch_domain);
83 * MAE representation of RTE switch infrastructure.
85 * It is possible that an RTE flow API client tries to insert a rule
86 * referencing an RTE ethdev deployed on top of a different physical
87 * device (it may belong to the same vendor or not). This particular
88 * driver/engine cannot support this and has to turn down such rules.
90 * Technically, it's HW switch identifier which, if queried for each
91 * RTE ethdev instance, indicates relationship between the instances.
92 * In the meantime, RTE flow API clients also need to somehow figure
93 * out relationship between RTE ethdev instances in advance.
95 * The concept of RTE switch domains resolves this issue. The driver
96 * maintains a static list of switch domains which is easy to browse,
97 * and each RTE ethdev fills RTE switch parameters in device
98 * information structure which is made available to clients.
100 * Even if all RTE ethdev instances belonging to a switch domain get
101 * unplugged, the corresponding entry in the switch domain registry
102 * will not be removed because the corresponding HW switch exists
103 * regardless of its ports being plugged to DPDK or kept aside.
104 * If a port gets plugged back to DPDK, the corresponding
105 * RTE ethdev will indicate the same RTE switch domain ID.
107 struct sfc_mae_switch {
108 /** A lock to protect the whole structure */
110 /** Switch domain registry */
111 struct sfc_mae_switch_domains domains;
114 static struct sfc_mae_switch sfc_mae_switch = {
115 .lock = RTE_SPINLOCK_INITIALIZER,
116 .domains = TAILQ_HEAD_INITIALIZER(sfc_mae_switch.domains),
120 /* This function expects to be called only when the lock is held */
121 static struct sfc_mae_switch_domain *
122 sfc_mae_find_switch_domain_by_id(uint16_t switch_domain_id)
124 struct sfc_mae_switch_domain *domain;
126 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
128 TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
129 if (domain->id == switch_domain_id)
136 /* This function expects to be called only when the lock is held */
137 static struct sfc_mae_switch_domain *
138 sfc_mae_find_switch_domain_by_hw_switch_id(const struct sfc_hw_switch_id *id)
140 struct sfc_mae_switch_domain *domain;
142 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
144 TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
145 if (sfc_hw_switch_ids_equal(domain->hw_switch_id, id))
153 sfc_mae_assign_switch_domain(struct sfc_adapter *sa,
154 uint16_t *switch_domain_id)
156 struct sfc_hw_switch_id *hw_switch_id;
157 struct sfc_mae_switch_domain *domain;
160 rte_spinlock_lock(&sfc_mae_switch.lock);
162 rc = sfc_hw_switch_id_init(sa, &hw_switch_id);
164 goto fail_hw_switch_id_init;
166 domain = sfc_mae_find_switch_domain_by_hw_switch_id(hw_switch_id);
167 if (domain != NULL) {
168 sfc_hw_switch_id_fini(sa, hw_switch_id);
172 domain = rte_zmalloc("sfc_mae_switch_domain", sizeof(*domain), 0);
173 if (domain == NULL) {
179 * This code belongs to driver init path, that is, negation is
180 * done at the end of the path by sfc_eth_dev_init(). RTE APIs
181 * negate error codes, so drop negation here.
183 rc = -rte_eth_switch_domain_alloc(&domain->id);
185 goto fail_domain_alloc;
187 domain->hw_switch_id = hw_switch_id;
189 TAILQ_INIT(&domain->ports);
191 TAILQ_INSERT_TAIL(&sfc_mae_switch.domains, domain, entries);
194 *switch_domain_id = domain->id;
196 rte_spinlock_unlock(&sfc_mae_switch.lock);
204 sfc_hw_switch_id_fini(sa, hw_switch_id);
205 rte_spinlock_unlock(&sfc_mae_switch.lock);
207 fail_hw_switch_id_init:
211 /* This function expects to be called only when the lock is held */
212 static struct sfc_mae_switch_port *
213 sfc_mae_find_switch_port_by_entity(const struct sfc_mae_switch_domain *domain,
214 const efx_mport_sel_t *entity_mportp,
215 enum sfc_mae_switch_port_type type)
217 struct sfc_mae_switch_port *port;
219 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
221 TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
222 if (port->entity_mport.sel == entity_mportp->sel &&
231 sfc_mae_assign_switch_port(uint16_t switch_domain_id,
232 const struct sfc_mae_switch_port_request *req,
233 uint16_t *switch_port_id)
235 struct sfc_mae_switch_domain *domain;
236 struct sfc_mae_switch_port *port;
239 rte_spinlock_lock(&sfc_mae_switch.lock);
241 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
242 if (domain == NULL) {
244 goto fail_find_switch_domain_by_id;
247 port = sfc_mae_find_switch_port_by_entity(domain, req->entity_mportp,
252 port = rte_zmalloc("sfc_mae_switch_port", sizeof(*port), 0);
258 port->entity_mport.sel = req->entity_mportp->sel;
259 port->type = req->type;
261 port->id = (domain->nb_ports++);
263 TAILQ_INSERT_TAIL(&domain->ports, port, switch_domain_ports);
266 *switch_port_id = port->id;
268 rte_spinlock_unlock(&sfc_mae_switch.lock);
273 fail_find_switch_domain_by_id:
274 rte_spinlock_unlock(&sfc_mae_switch.lock);