/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
#include <stdbool.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"
/**
 * Switch port registry entry.
 *
 * Drivers aware of RTE switch domains also have to maintain RTE switch
 * port IDs for RTE ethdev instances they operate. These IDs are supposed
 * to stand for physical interconnect entities, in example, PCIe functions.
 *
 * In terms of MAE, a physical interconnect entity can be referred to using
 * an MPORT selector, that is, a 32-bit value. RTE switch port IDs, in turn,
 * are 16-bit values, so indirect mapping has to be maintained:
 *
 * +--------------------+          +---------------------------------------+
 * | RTE switch port ID | -------- | MAE switch port entry                 |
 * +--------------------+          | ---------------------                 |
 *                                 |                                       |
 *                                 | Entity (PCIe function) MPORT selector |
 *                                 |                   +                   |
 *                                 | Port type (independent/representor)   |
 *                                 +---------------------------------------+
 *
 * This mapping comprises a port type to ensure that RTE switch port ID
 * of a represented entity and that of its representor are different in
 * the case when the entity gets plugged into DPDK and not into a guest.
 *
 * Entry data also comprises RTE ethdev's own MPORT. This value
 * coincides with the entity MPORT in the case of independent ports.
 * In the case of representors, this ID is not a selector and refers
 * to an allocatable object (that is, it's likely to change on RTE
 * ethdev replug). Flow API backend must use this value rather
 * than entity_mport to support flow rule action PORT_ID.
 */
52 struct sfc_mae_switch_port {
53 TAILQ_ENTRY(sfc_mae_switch_port) switch_domain_ports;
55 /** RTE ethdev MPORT */
56 efx_mport_sel_t ethdev_mport;
57 /** RTE ethdev port ID */
58 uint16_t ethdev_port_id;
60 /** Entity (PCIe function) MPORT selector */
61 efx_mport_sel_t entity_mport;
62 /** Port type (independent/representor) */
63 enum sfc_mae_switch_port_type type;
64 /** RTE switch port ID */
67 union sfc_mae_switch_port_data data;
70 TAILQ_HEAD(sfc_mae_switch_ports, sfc_mae_switch_port);
/**
 * Switch domain registry entry.
 *
 * Even if an RTE ethdev instance gets unplugged, the corresponding
 * entry in the switch port registry will not be removed because the
 * entity (PCIe function) MPORT is static and cannot change. If this
 * RTE ethdev gets plugged back, the entry will be reused, and
 * RTE switch port ID will be the same.
 */
81 struct sfc_mae_switch_domain {
82 TAILQ_ENTRY(sfc_mae_switch_domain) entries;
85 struct sfc_hw_switch_id *hw_switch_id;
86 /** The number of ports in the switch port registry */
87 unsigned int nb_ports;
88 /** Switch port registry */
89 struct sfc_mae_switch_ports ports;
90 /** RTE switch domain ID allocated for a group of devices */
92 /** DPDK controller -> EFX interface mapping */
93 efx_pcie_interface_t *controllers;
94 /** Number of DPDK controllers and EFX interfaces */
95 size_t nb_controllers;
97 struct sfc_mae_switch_port *mae_admin_port;
100 TAILQ_HEAD(sfc_mae_switch_domains, sfc_mae_switch_domain);
/**
 * MAE representation of RTE switch infrastructure.
 *
 * It is possible that an RTE flow API client tries to insert a rule
 * referencing an RTE ethdev deployed on top of a different physical
 * device (it may belong to the same vendor or not). This particular
 * driver/engine cannot support this and has to turn down such rules.
 *
 * Technically, it's HW switch identifier which, if queried for each
 * RTE ethdev instance, indicates relationship between the instances.
 * In the meantime, RTE flow API clients also need to somehow figure
 * out relationship between RTE ethdev instances in advance.
 *
 * The concept of RTE switch domains resolves this issue. The driver
 * maintains a static list of switch domains which is easy to browse,
 * and each RTE ethdev fills RTE switch parameters in device
 * information structure which is made available to clients.
 *
 * Even if all RTE ethdev instances belonging to a switch domain get
 * unplugged, the corresponding entry in the switch domain registry
 * will not be removed because the corresponding HW switch exists
 * regardless of its ports being plugged to DPDK or kept aside.
 * If a port gets plugged back to DPDK, the corresponding
 * RTE ethdev will indicate the same RTE switch domain ID.
 */
127 struct sfc_mae_switch {
128 /** A lock to protect the whole structure */
130 /** Switch domain registry */
131 struct sfc_mae_switch_domains domains;
134 static struct sfc_mae_switch sfc_mae_switch = {
135 .lock = RTE_SPINLOCK_INITIALIZER,
136 .domains = TAILQ_HEAD_INITIALIZER(sfc_mae_switch.domains),
140 /* This function expects to be called only when the lock is held */
141 static struct sfc_mae_switch_domain *
142 sfc_mae_find_switch_domain_by_id(uint16_t switch_domain_id)
144 struct sfc_mae_switch_domain *domain;
146 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
148 TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
149 if (domain->id == switch_domain_id)
157 sfc_mae_switch_ports_iterate(uint16_t switch_domain_id,
158 sfc_mae_switch_port_iterator_cb *cb,
161 struct sfc_mae_switch_domain *domain;
162 struct sfc_mae_switch_port *port;
167 rte_spinlock_lock(&sfc_mae_switch.lock);
169 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
170 if (domain == NULL) {
171 rte_spinlock_unlock(&sfc_mae_switch.lock);
175 TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
176 cb(port->type, &port->ethdev_mport, port->ethdev_port_id,
177 &port->entity_mport, port->id, &port->data, data);
180 rte_spinlock_unlock(&sfc_mae_switch.lock);
184 /* This function expects to be called only when the lock is held */
185 static struct sfc_mae_switch_domain *
186 sfc_mae_find_switch_domain_by_hw_switch_id(const struct sfc_hw_switch_id *id)
188 struct sfc_mae_switch_domain *domain;
190 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
192 TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
193 if (sfc_hw_switch_ids_equal(domain->hw_switch_id, id))
201 sfc_mae_assign_switch_domain(struct sfc_adapter *sa,
202 uint16_t *switch_domain_id)
204 struct sfc_hw_switch_id *hw_switch_id;
205 struct sfc_mae_switch_domain *domain;
208 rte_spinlock_lock(&sfc_mae_switch.lock);
210 rc = sfc_hw_switch_id_init(sa, &hw_switch_id);
212 goto fail_hw_switch_id_init;
214 domain = sfc_mae_find_switch_domain_by_hw_switch_id(hw_switch_id);
215 if (domain != NULL) {
216 sfc_hw_switch_id_fini(sa, hw_switch_id);
220 domain = rte_zmalloc("sfc_mae_switch_domain", sizeof(*domain), 0);
221 if (domain == NULL) {
227 * This code belongs to driver init path, that is, negation is
228 * done at the end of the path by sfc_eth_dev_init(). RTE APIs
229 * negate error codes, so drop negation here.
231 rc = -rte_eth_switch_domain_alloc(&domain->id);
233 goto fail_domain_alloc;
235 domain->hw_switch_id = hw_switch_id;
237 TAILQ_INIT(&domain->ports);
239 TAILQ_INSERT_TAIL(&sfc_mae_switch.domains, domain, entries);
242 *switch_domain_id = domain->id;
244 rte_spinlock_unlock(&sfc_mae_switch.lock);
252 sfc_hw_switch_id_fini(sa, hw_switch_id);
254 fail_hw_switch_id_init:
255 rte_spinlock_unlock(&sfc_mae_switch.lock);
260 sfc_mae_switch_domain_controllers(uint16_t switch_domain_id,
261 const efx_pcie_interface_t **controllers,
262 size_t *nb_controllers)
264 struct sfc_mae_switch_domain *domain;
266 if (controllers == NULL || nb_controllers == NULL)
269 rte_spinlock_lock(&sfc_mae_switch.lock);
271 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
272 if (domain == NULL) {
273 rte_spinlock_unlock(&sfc_mae_switch.lock);
277 *controllers = domain->controllers;
278 *nb_controllers = domain->nb_controllers;
280 rte_spinlock_unlock(&sfc_mae_switch.lock);
285 sfc_mae_switch_domain_map_controllers(uint16_t switch_domain_id,
286 efx_pcie_interface_t *controllers,
287 size_t nb_controllers)
289 struct sfc_mae_switch_domain *domain;
291 rte_spinlock_lock(&sfc_mae_switch.lock);
293 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
294 if (domain == NULL) {
295 rte_spinlock_unlock(&sfc_mae_switch.lock);
299 /* Controller mapping may be set only once */
300 if (domain->controllers != NULL) {
301 rte_spinlock_unlock(&sfc_mae_switch.lock);
305 domain->controllers = controllers;
306 domain->nb_controllers = nb_controllers;
308 rte_spinlock_unlock(&sfc_mae_switch.lock);
313 sfc_mae_switch_controller_from_mapping(const efx_pcie_interface_t *controllers,
314 size_t nb_controllers,
315 efx_pcie_interface_t intf,
320 if (controllers == NULL)
323 for (i = 0; i < nb_controllers; i++) {
324 if (controllers[i] == intf) {
334 sfc_mae_switch_domain_get_controller(uint16_t switch_domain_id,
335 efx_pcie_interface_t intf,
338 const efx_pcie_interface_t *controllers;
339 size_t nb_controllers;
342 rc = sfc_mae_switch_domain_controllers(switch_domain_id, &controllers,
347 return sfc_mae_switch_controller_from_mapping(controllers,
353 int sfc_mae_switch_domain_get_intf(uint16_t switch_domain_id,
355 efx_pcie_interface_t *intf)
357 const efx_pcie_interface_t *controllers;
358 size_t nb_controllers;
361 rc = sfc_mae_switch_domain_controllers(switch_domain_id, &controllers,
366 if (controllers == NULL)
369 if ((size_t)controller > nb_controllers)
372 *intf = controllers[controller];
377 /* This function expects to be called only when the lock is held */
378 static struct sfc_mae_switch_port *
379 sfc_mae_find_switch_port_by_entity(const struct sfc_mae_switch_domain *domain,
380 const efx_mport_sel_t *entity_mportp,
381 enum sfc_mae_switch_port_type type)
383 struct sfc_mae_switch_port *port;
385 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
387 TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
388 if (port->entity_mport.sel == entity_mportp->sel &&
396 /* This function expects to be called only when the lock is held */
398 sfc_mae_find_switch_port_id_by_entity(uint16_t switch_domain_id,
399 const efx_mport_sel_t *entity_mportp,
400 enum sfc_mae_switch_port_type type,
401 uint16_t *switch_port_id)
403 struct sfc_mae_switch_domain *domain;
404 struct sfc_mae_switch_port *port;
406 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
408 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
412 port = sfc_mae_find_switch_port_by_entity(domain, entity_mportp, type);
416 *switch_port_id = port->id;
421 sfc_mae_assign_switch_port(uint16_t switch_domain_id,
422 const struct sfc_mae_switch_port_request *req,
423 uint16_t *switch_port_id)
425 struct sfc_mae_switch_domain *domain;
426 struct sfc_mae_switch_port *port;
429 rte_spinlock_lock(&sfc_mae_switch.lock);
431 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
432 if (domain == NULL) {
434 goto fail_find_switch_domain_by_id;
437 port = sfc_mae_find_switch_port_by_entity(domain, req->entity_mportp,
442 port = rte_zmalloc("sfc_mae_switch_port", sizeof(*port), 0);
448 port->entity_mport.sel = req->entity_mportp->sel;
449 port->type = req->type;
451 port->id = (domain->nb_ports++);
453 TAILQ_INSERT_TAIL(&domain->ports, port, switch_domain_ports);
456 port->ethdev_mport = *req->ethdev_mportp;
457 port->ethdev_port_id = req->ethdev_port_id;
459 memcpy(&port->data, &req->port_data,
463 case SFC_MAE_SWITCH_PORT_INDEPENDENT:
464 if (port->data.indep.mae_admin) {
465 SFC_ASSERT(domain->mae_admin_port == NULL);
466 domain->mae_admin_port = port;
469 case SFC_MAE_SWITCH_PORT_REPRESENTOR:
475 *switch_port_id = port->id;
477 rte_spinlock_unlock(&sfc_mae_switch.lock);
482 fail_find_switch_domain_by_id:
483 rte_spinlock_unlock(&sfc_mae_switch.lock);
488 sfc_mae_clear_switch_port(uint16_t switch_domain_id,
489 uint16_t switch_port_id)
491 struct sfc_mae_switch_domain *domain;
493 rte_spinlock_lock(&sfc_mae_switch.lock);
495 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
496 if (domain == NULL) {
497 rte_spinlock_unlock(&sfc_mae_switch.lock);
501 if (domain->mae_admin_port != NULL &&
502 domain->mae_admin_port->id == switch_port_id) {
503 domain->mae_admin_port->data.indep.mae_admin = B_FALSE;
504 domain->mae_admin_port = NULL;
507 rte_spinlock_unlock(&sfc_mae_switch.lock);
511 /* This function expects to be called only when the lock is held */
513 sfc_mae_find_switch_port_by_ethdev(uint16_t switch_domain_id,
514 uint16_t ethdev_port_id,
515 struct sfc_mae_switch_port **switch_port)
517 struct sfc_mae_switch_domain *domain;
518 struct sfc_mae_switch_port *port;
520 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
522 if (ethdev_port_id == RTE_MAX_ETHPORTS)
525 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
529 TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
530 if (port->ethdev_port_id == ethdev_port_id) {
540 sfc_mae_switch_get_ethdev_mport(uint16_t switch_domain_id,
541 uint16_t ethdev_port_id,
542 efx_mport_sel_t *mport_sel)
544 struct sfc_mae_switch_port *port;
547 rte_spinlock_lock(&sfc_mae_switch.lock);
548 rc = sfc_mae_find_switch_port_by_ethdev(switch_domain_id,
549 ethdev_port_id, &port);
553 if (port->type != SFC_MAE_SWITCH_PORT_INDEPENDENT) {
555 * The ethdev is a "VF representor". It does not own
556 * a dedicated m-port suitable for use in flow rules.
562 *mport_sel = port->ethdev_mport;
565 rte_spinlock_unlock(&sfc_mae_switch.lock);
571 sfc_mae_switch_get_entity_mport(uint16_t switch_domain_id,
572 uint16_t ethdev_port_id,
573 efx_mport_sel_t *mport_sel)
575 static struct sfc_mae_switch_port *port;
578 rte_spinlock_lock(&sfc_mae_switch.lock);
579 rc = sfc_mae_find_switch_port_by_ethdev(switch_domain_id,
580 ethdev_port_id, &port);
584 if (port->type == SFC_MAE_SWITCH_PORT_INDEPENDENT &&
585 !port->data.indep.mae_admin) {
586 /* See sfc_mae_assign_entity_mport() */
591 *mport_sel = port->entity_mport;
594 rte_spinlock_unlock(&sfc_mae_switch.lock);
600 sfc_mae_switch_port_id_by_entity(uint16_t switch_domain_id,
601 const efx_mport_sel_t *entity_mportp,
602 enum sfc_mae_switch_port_type type,
603 uint16_t *switch_port_id)
607 rte_spinlock_lock(&sfc_mae_switch.lock);
608 rc = sfc_mae_find_switch_port_id_by_entity(switch_domain_id,
611 rte_spinlock_unlock(&sfc_mae_switch.lock);
617 sfc_mae_get_switch_domain_admin_locked(uint16_t switch_domain_id,
620 struct sfc_mae_switch_domain *domain;
622 SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
624 domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
628 if (domain->mae_admin_port != NULL) {
629 *port_id = domain->mae_admin_port->ethdev_port_id;
637 sfc_mae_get_switch_domain_admin(uint16_t switch_domain_id,
642 rte_spinlock_lock(&sfc_mae_switch.lock);
643 rc = sfc_mae_get_switch_domain_admin_locked(switch_domain_id, port_id);
644 rte_spinlock_unlock(&sfc_mae_switch.lock);