/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
#include <stdbool.h>

#include <rte_common.h>
#include <rte_spinlock.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"
/**
 * Switch port registry entry.
 *
 * Drivers aware of RTE switch domains also have to maintain RTE switch
 * port IDs for RTE ethdev instances they operate. These IDs are supposed
 * to stand for physical interconnect entities, for example, PCIe functions.
 *
 * In terms of MAE, a physical interconnect entity can be referred to using
 * an MPORT selector, that is, a 32-bit value. RTE switch port IDs, in turn,
 * are 16-bit values, so indirect mapping has to be maintained:
 *
 * +--------------------+          +---------------------------------------+
 * | RTE switch port ID |  ------  |         MAE switch port entry         |
 * +--------------------+          |         ---------------------         |
 *                                 |                                       |
 *                                 | Entity (PCIe function) MPORT selector |
 *                                 |                   +                   |
 *                                 |  Port type (independent/representor)  |
 *                                 +---------------------------------------+
 *
 * This mapping comprises a port type to ensure that RTE switch port ID
 * of a represented entity and that of its representor are different in
 * the case when the entity gets plugged into DPDK and not into a guest.
 *
 * Entry data also comprises RTE ethdev's own MPORT. This value
 * coincides with the entity MPORT in the case of independent ports.
 * In the case of representors, this ID is not a selector and refers
 * to an allocatable object (that is, it's likely to change on RTE
 * ethdev replug). Flow API backend must use this value rather
 * than entity_mport to support flow rule action PORT_ID.
 */
52 struct sfc_mae_switch_port {
53 TAILQ_ENTRY(sfc_mae_switch_port) switch_domain_ports;
55 /** RTE ethdev MPORT */
56 efx_mport_sel_t ethdev_mport;
57 /** RTE ethdev port ID */
58 uint16_t ethdev_port_id;
60 /** Entity (PCIe function) MPORT selector */
61 efx_mport_sel_t entity_mport;
62 /** Port type (independent/representor) */
63 enum sfc_mae_switch_port_type type;
64 /** RTE switch port ID */
68 TAILQ_HEAD(sfc_mae_switch_ports, sfc_mae_switch_port);
/**
 * Switch domain registry entry.
 *
 * Even if an RTE ethdev instance gets unplugged, the corresponding
 * entry in the switch port registry will not be removed because the
 * entity (PCIe function) MPORT is static and cannot change. If this
 * RTE ethdev gets plugged back, the entry will be reused, and
 * RTE switch port ID will be the same.
 */
79 struct sfc_mae_switch_domain {
80 TAILQ_ENTRY(sfc_mae_switch_domain) entries;
83 struct sfc_hw_switch_id *hw_switch_id;
84 /** The number of ports in the switch port registry */
85 unsigned int nb_ports;
86 /** Switch port registry */
87 struct sfc_mae_switch_ports ports;
88 /** RTE switch domain ID allocated for a group of devices */
90 /** DPDK controller -> EFX interface mapping */
91 efx_pcie_interface_t *controllers;
92 /** Number of DPDK controllers and EFX interfaces */
93 size_t nb_controllers;
96 TAILQ_HEAD(sfc_mae_switch_domains, sfc_mae_switch_domain);
/**
 * MAE representation of RTE switch infrastructure.
 *
 * It is possible that an RTE flow API client tries to insert a rule
 * referencing an RTE ethdev deployed on top of a different physical
 * device (it may belong to the same vendor or not). This particular
 * driver/engine cannot support this and has to turn down such rules.
 *
 * Technically, it's the HW switch identifier which, if queried for each
 * RTE ethdev instance, indicates relationship between the instances.
 * In the meantime, RTE flow API clients also need to somehow figure
 * out relationship between RTE ethdev instances in advance.
 *
 * The concept of RTE switch domains resolves this issue. The driver
 * maintains a static list of switch domains which is easy to browse,
 * and each RTE ethdev fills RTE switch parameters in device
 * information structure which is made available to clients.
 *
 * Even if all RTE ethdev instances belonging to a switch domain get
 * unplugged, the corresponding entry in the switch domain registry
 * will not be removed because the corresponding HW switch exists
 * regardless of its ports being plugged to DPDK or kept aside.
 * If a port gets plugged back to DPDK, the corresponding
 * RTE ethdev will indicate the same RTE switch domain ID.
 */
123 struct sfc_mae_switch {
124 /** A lock to protect the whole structure */
126 /** Switch domain registry */
127 struct sfc_mae_switch_domains domains;
130 static struct sfc_mae_switch sfc_mae_switch = {
131 .lock = RTE_SPINLOCK_INITIALIZER,
132 .domains = TAILQ_HEAD_INITIALIZER(sfc_mae_switch.domains),
136 /* This function expects to be called only when the lock is held */
137 static struct sfc_mae_switch_domain *
138 sfc_mae_find_switch_domain_by_id(uint16_t switch_domain_id)
140 struct sfc_mae_switch_domain *domain;
142 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
144 TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
145 if (domain->id == switch_domain_id)
152 /* This function expects to be called only when the lock is held */
153 static struct sfc_mae_switch_domain *
154 sfc_mae_find_switch_domain_by_hw_switch_id(const struct sfc_hw_switch_id *id)
156 struct sfc_mae_switch_domain *domain;
158 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
160 TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
161 if (sfc_hw_switch_ids_equal(domain->hw_switch_id, id))
169 sfc_mae_assign_switch_domain(struct sfc_adapter *sa,
170 uint16_t *switch_domain_id)
172 struct sfc_hw_switch_id *hw_switch_id;
173 struct sfc_mae_switch_domain *domain;
176 rte_spinlock_lock(&sfc_mae_switch.lock);
178 rc = sfc_hw_switch_id_init(sa, &hw_switch_id);
180 goto fail_hw_switch_id_init;
182 domain = sfc_mae_find_switch_domain_by_hw_switch_id(hw_switch_id);
183 if (domain != NULL) {
184 sfc_hw_switch_id_fini(sa, hw_switch_id);
188 domain = rte_zmalloc("sfc_mae_switch_domain", sizeof(*domain), 0);
189 if (domain == NULL) {
195 * This code belongs to driver init path, that is, negation is
196 * done at the end of the path by sfc_eth_dev_init(). RTE APIs
197 * negate error codes, so drop negation here.
199 rc = -rte_eth_switch_domain_alloc(&domain->id);
201 goto fail_domain_alloc;
203 domain->hw_switch_id = hw_switch_id;
205 TAILQ_INIT(&domain->ports);
207 TAILQ_INSERT_TAIL(&sfc_mae_switch.domains, domain, entries);
210 *switch_domain_id = domain->id;
212 rte_spinlock_unlock(&sfc_mae_switch.lock);
220 sfc_hw_switch_id_fini(sa, hw_switch_id);
222 fail_hw_switch_id_init:
223 rte_spinlock_unlock(&sfc_mae_switch.lock);
228 sfc_mae_switch_domain_controllers(uint16_t switch_domain_id,
229 const efx_pcie_interface_t **controllers,
230 size_t *nb_controllers)
232 struct sfc_mae_switch_domain *domain;
234 if (controllers == NULL || nb_controllers == NULL)
237 rte_spinlock_lock(&sfc_mae_switch.lock);
239 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
240 if (domain == NULL) {
241 rte_spinlock_unlock(&sfc_mae_switch.lock);
245 *controllers = domain->controllers;
246 *nb_controllers = domain->nb_controllers;
248 rte_spinlock_unlock(&sfc_mae_switch.lock);
253 sfc_mae_switch_domain_map_controllers(uint16_t switch_domain_id,
254 efx_pcie_interface_t *controllers,
255 size_t nb_controllers)
257 struct sfc_mae_switch_domain *domain;
259 rte_spinlock_lock(&sfc_mae_switch.lock);
261 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
262 if (domain == NULL) {
263 rte_spinlock_unlock(&sfc_mae_switch.lock);
267 /* Controller mapping may be set only once */
268 if (domain->controllers != NULL) {
269 rte_spinlock_unlock(&sfc_mae_switch.lock);
273 domain->controllers = controllers;
274 domain->nb_controllers = nb_controllers;
276 rte_spinlock_unlock(&sfc_mae_switch.lock);
280 /* This function expects to be called only when the lock is held */
281 static struct sfc_mae_switch_port *
282 sfc_mae_find_switch_port_by_entity(const struct sfc_mae_switch_domain *domain,
283 const efx_mport_sel_t *entity_mportp,
284 enum sfc_mae_switch_port_type type)
286 struct sfc_mae_switch_port *port;
288 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
290 TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
291 if (port->entity_mport.sel == entity_mportp->sel &&
300 sfc_mae_assign_switch_port(uint16_t switch_domain_id,
301 const struct sfc_mae_switch_port_request *req,
302 uint16_t *switch_port_id)
304 struct sfc_mae_switch_domain *domain;
305 struct sfc_mae_switch_port *port;
308 rte_spinlock_lock(&sfc_mae_switch.lock);
310 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
311 if (domain == NULL) {
313 goto fail_find_switch_domain_by_id;
316 port = sfc_mae_find_switch_port_by_entity(domain, req->entity_mportp,
321 port = rte_zmalloc("sfc_mae_switch_port", sizeof(*port), 0);
327 port->entity_mport.sel = req->entity_mportp->sel;
328 port->type = req->type;
330 port->id = (domain->nb_ports++);
332 TAILQ_INSERT_TAIL(&domain->ports, port, switch_domain_ports);
335 port->ethdev_mport = *req->ethdev_mportp;
336 port->ethdev_port_id = req->ethdev_port_id;
338 *switch_port_id = port->id;
340 rte_spinlock_unlock(&sfc_mae_switch.lock);
345 fail_find_switch_domain_by_id:
346 rte_spinlock_unlock(&sfc_mae_switch.lock);
350 /* This function expects to be called only when the lock is held */
352 sfc_mae_find_switch_port_by_ethdev(uint16_t switch_domain_id,
353 uint16_t ethdev_port_id,
354 efx_mport_sel_t *mport_sel)
356 struct sfc_mae_switch_domain *domain;
357 struct sfc_mae_switch_port *port;
359 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
361 if (ethdev_port_id == RTE_MAX_ETHPORTS)
364 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
368 TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
369 if (port->ethdev_port_id == ethdev_port_id) {
370 *mport_sel = port->ethdev_mport;
379 sfc_mae_switch_port_by_ethdev(uint16_t switch_domain_id,
380 uint16_t ethdev_port_id,
381 efx_mport_sel_t *mport_sel)
385 rte_spinlock_lock(&sfc_mae_switch.lock);
386 rc = sfc_mae_find_switch_port_by_ethdev(switch_domain_id,
387 ethdev_port_id, mport_sel);
388 rte_spinlock_unlock(&sfc_mae_switch.lock);