net/sfc: support new representor parameter syntax
[dpdk.git] / drivers / net / sfc / sfc_switch.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_common.h>
#include <rte_spinlock.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_switch.h"

/**
 * Switch port registry entry.
 *
 * Drivers aware of RTE switch domains also have to maintain RTE switch
 * port IDs for the RTE ethdev instances they operate. These IDs are
 * supposed to stand for physical interconnect entities, for example,
 * PCIe functions.
 *
 * In terms of MAE, a physical interconnect entity can be referred to
 * using an MPORT selector, that is, a 32-bit value. RTE switch port IDs,
 * in turn, are 16-bit values, so an indirect mapping has to be maintained:
 *
 * +--------------------+          +---------------------------------------+
 * | RTE switch port ID |  ------  |         MAE switch port entry         |
 * +--------------------+          |         ---------------------         |
 *                                 |                                       |
 *                                 | Entity (PCIe function) MPORT selector |
 *                                 |                   +                   |
 *                                 |  Port type (independent/representor)  |
 *                                 +---------------------------------------+
 *
 * The mapping includes a port type to ensure that the RTE switch port ID
 * of a represented entity and that of its representor are different in
 * the case when the entity is plugged into DPDK and not into a guest.
 *
 * Entry data also comprises the RTE ethdev's own MPORT. This value
 * coincides with the entity MPORT in the case of independent ports.
 * In the case of representors, this ID is not a selector and refers
 * to an allocatable object (that is, it is likely to change on RTE
 * ethdev replug). The flow API backend must use this value rather
 * than entity_mport to support the flow rule action PORT_ID.
 */
struct sfc_mae_switch_port {
        TAILQ_ENTRY(sfc_mae_switch_port)        switch_domain_ports;

        /** RTE ethdev MPORT */
        efx_mport_sel_t                         ethdev_mport;
        /** RTE ethdev port ID */
        uint16_t                                ethdev_port_id;

        /** Entity (PCIe function) MPORT selector */
        efx_mport_sel_t                         entity_mport;
        /** Port type (independent/representor) */
        enum sfc_mae_switch_port_type           type;
        /** RTE switch port ID */
        uint16_t                                id;

        union sfc_mae_switch_port_data          data;
};

TAILQ_HEAD(sfc_mae_switch_ports, sfc_mae_switch_port);
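
/*
 * An illustrative sketch (not driver code) of how the registry above keeps
 * a represented entity and its representor apart: the two requests below
 * share the entity MPORT selector but differ in port type, so
 * sfc_mae_assign_switch_port() (defined further down) hands out two
 * distinct RTE switch port IDs. All variable names here are hypothetical.
 *
 *        struct sfc_mae_switch_port_request req;
 *        uint16_t pf_switch_port_id;
 *        uint16_t repr_switch_port_id;
 *        int rc;
 *
 *        memset(&req, 0, sizeof(req));
 *        req.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
 *        req.entity_mportp = &pf_entity_mport;
 *        req.ethdev_mportp = &pf_entity_mport;   (same as the entity MPORT)
 *        req.ethdev_port_id = pf_ethdev_port_id;
 *        rc = sfc_mae_assign_switch_port(domain_id, &req, &pf_switch_port_id);
 *
 *        memset(&req, 0, sizeof(req));
 *        req.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
 *        req.entity_mportp = &pf_entity_mport;   (the represented entity)
 *        req.ethdev_mportp = &repr_ethdev_mport; (representor's own MPORT)
 *        req.ethdev_port_id = repr_ethdev_port_id;
 *        (representor-specific data would be filled into req.port_data here)
 *        rc = sfc_mae_assign_switch_port(domain_id, &req,
 *                                        &repr_switch_port_id);
 *
 * On success, pf_switch_port_id != repr_switch_port_id.
 */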

/**
 * Switch domain registry entry.
 *
 * Even if an RTE ethdev instance gets unplugged, the corresponding
 * entry in the switch port registry will not be removed because the
 * entity (PCIe function) MPORT is static and cannot change. If this
 * RTE ethdev gets plugged back, the entry will be reused, and the
 * RTE switch port ID will be the same.
 */
struct sfc_mae_switch_domain {
        TAILQ_ENTRY(sfc_mae_switch_domain)      entries;

        /** HW switch ID */
        struct sfc_hw_switch_id                 *hw_switch_id;
        /** The number of ports in the switch port registry */
        unsigned int                            nb_ports;
        /** Switch port registry */
        struct sfc_mae_switch_ports             ports;
        /** RTE switch domain ID allocated for a group of devices */
        uint16_t                                id;
        /** DPDK controller -> EFX interface mapping */
        efx_pcie_interface_t                    *controllers;
        /** Number of DPDK controllers and EFX interfaces */
        size_t                                  nb_controllers;
};

TAILQ_HEAD(sfc_mae_switch_domains, sfc_mae_switch_domain);

/**
 * MAE representation of RTE switch infrastructure.
 *
 * It is possible that an RTE flow API client tries to insert a rule
 * referencing an RTE ethdev deployed on top of a different physical
 * device (it may or may not belong to the same vendor). This particular
 * driver/engine cannot support this and has to turn down such rules.
 *
 * Technically, it is the HW switch identifier which, if queried for
 * each RTE ethdev instance, indicates the relationship between the
 * instances. In the meantime, RTE flow API clients also need a way to
 * figure out the relationship between RTE ethdev instances in advance.
 *
 * The concept of RTE switch domains resolves this issue. The driver
 * maintains a static list of switch domains which is easy to browse,
 * and each RTE ethdev fills in the RTE switch parameters in the device
 * information structure which is made available to clients.
 *
 * Even if all RTE ethdev instances belonging to a switch domain get
 * unplugged, the corresponding entry in the switch domain registry
 * will not be removed because the corresponding HW switch exists
 * regardless of whether its ports are plugged to DPDK or kept aside.
 * If a port gets plugged back to DPDK, the corresponding
 * RTE ethdev will indicate the same RTE switch domain ID.
 */
struct sfc_mae_switch {
        /** A lock to protect the whole structure */
        rte_spinlock_t                  lock;
        /** Switch domain registry */
        struct sfc_mae_switch_domains   domains;
};

static struct sfc_mae_switch sfc_mae_switch = {
        .lock = RTE_SPINLOCK_INITIALIZER,
        .domains = TAILQ_HEAD_INITIALIZER(sfc_mae_switch.domains),
};
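
/*
 * A minimal application-side sketch (typical usage assumed, not code from
 * this driver): an RTE flow API client can check whether two ethdevs sit
 * behind the same HW switch by comparing the RTE switch domain IDs reported
 * in their device information. The port IDs below are hypothetical.
 *
 *        struct rte_eth_dev_info info_a;
 *        struct rte_eth_dev_info info_b;
 *        bool same_switch = false;
 *
 *        if (rte_eth_dev_info_get(port_a, &info_a) == 0 &&
 *            rte_eth_dev_info_get(port_b, &info_b) == 0)
 *                same_switch = (info_a.switch_info.domain_id ==
 *                               info_b.switch_info.domain_id);
 *
 * Only if same_switch holds may a flow rule on one of the ports refer to
 * the other one (for example, via the PORT_ID action).
 */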


/* This function expects to be called only when the lock is held */
static struct sfc_mae_switch_domain *
sfc_mae_find_switch_domain_by_id(uint16_t switch_domain_id)
{
        struct sfc_mae_switch_domain *domain;

        SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

        TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
                if (domain->id == switch_domain_id)
                        return domain;
        }

        return NULL;
}

/* This function expects to be called only when the lock is held */
static struct sfc_mae_switch_domain *
sfc_mae_find_switch_domain_by_hw_switch_id(const struct sfc_hw_switch_id *id)
{
        struct sfc_mae_switch_domain *domain;

        SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

        TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
                if (sfc_hw_switch_ids_equal(domain->hw_switch_id, id))
                        return domain;
        }

        return NULL;
}

int
sfc_mae_assign_switch_domain(struct sfc_adapter *sa,
                             uint16_t *switch_domain_id)
{
        struct sfc_hw_switch_id *hw_switch_id;
        struct sfc_mae_switch_domain *domain;
        int rc;

        rte_spinlock_lock(&sfc_mae_switch.lock);

        rc = sfc_hw_switch_id_init(sa, &hw_switch_id);
        if (rc != 0)
                goto fail_hw_switch_id_init;

        domain = sfc_mae_find_switch_domain_by_hw_switch_id(hw_switch_id);
        if (domain != NULL) {
                sfc_hw_switch_id_fini(sa, hw_switch_id);
                goto done;
        }

        domain = rte_zmalloc("sfc_mae_switch_domain", sizeof(*domain), 0);
        if (domain == NULL) {
                rc = ENOMEM;
                goto fail_mem_alloc;
        }

        /*
         * This code belongs to driver init path, that is, negation is
         * done at the end of the path by sfc_eth_dev_init(). RTE APIs
         * negate error codes, so drop negation here.
         */
        rc = -rte_eth_switch_domain_alloc(&domain->id);
        if (rc != 0)
                goto fail_domain_alloc;

        domain->hw_switch_id = hw_switch_id;

        TAILQ_INIT(&domain->ports);

        TAILQ_INSERT_TAIL(&sfc_mae_switch.domains, domain, entries);

done:
        *switch_domain_id = domain->id;

        rte_spinlock_unlock(&sfc_mae_switch.lock);

        return 0;

fail_domain_alloc:
        rte_free(domain);

fail_mem_alloc:
        sfc_hw_switch_id_fini(sa, hw_switch_id);

fail_hw_switch_id_init:
        rte_spinlock_unlock(&sfc_mae_switch.lock);
        return rc;
}
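
/*
 * The sign convention used in sfc_mae_assign_switch_domain() above,
 * illustrated with a hypothetical failure: if rte_eth_switch_domain_alloc()
 * returns, say, -ENOSPC, the negation stores the positive ENOSPC in rc,
 * matching the positive errno convention used throughout this driver;
 * sfc_eth_dev_init() negates the value once more, so the ethdev layer
 * ultimately observes -ENOSPC.
 */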

int
sfc_mae_switch_domain_controllers(uint16_t switch_domain_id,
                                  const efx_pcie_interface_t **controllers,
                                  size_t *nb_controllers)
{
        struct sfc_mae_switch_domain *domain;

        if (controllers == NULL || nb_controllers == NULL)
                return EINVAL;

        rte_spinlock_lock(&sfc_mae_switch.lock);

        domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
        if (domain == NULL) {
                rte_spinlock_unlock(&sfc_mae_switch.lock);
                return EINVAL;
        }

        *controllers = domain->controllers;
        *nb_controllers = domain->nb_controllers;

        rte_spinlock_unlock(&sfc_mae_switch.lock);
        return 0;
}

int
sfc_mae_switch_domain_map_controllers(uint16_t switch_domain_id,
                                      efx_pcie_interface_t *controllers,
                                      size_t nb_controllers)
{
        struct sfc_mae_switch_domain *domain;

        rte_spinlock_lock(&sfc_mae_switch.lock);

        domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
        if (domain == NULL) {
                rte_spinlock_unlock(&sfc_mae_switch.lock);
                return EINVAL;
        }

        /* Controller mapping may be set only once */
        if (domain->controllers != NULL) {
                rte_spinlock_unlock(&sfc_mae_switch.lock);
                return EINVAL;
        }

        domain->controllers = controllers;
        domain->nb_controllers = nb_controllers;

        rte_spinlock_unlock(&sfc_mae_switch.lock);
        return 0;
}

int
sfc_mae_switch_domain_get_controller(uint16_t switch_domain_id,
                                     efx_pcie_interface_t intf,
                                     int *controller)
{
        const efx_pcie_interface_t *controllers;
        size_t nb_controllers;
        size_t i;
        int rc;

        rc = sfc_mae_switch_domain_controllers(switch_domain_id, &controllers,
                                               &nb_controllers);
        if (rc != 0)
                return rc;

        if (controllers == NULL)
                return ENOENT;

        for (i = 0; i < nb_controllers; i++) {
                if (controllers[i] == intf) {
                        *controller = i;
                        return 0;
                }
        }

        return ENOENT;
}

int
sfc_mae_switch_domain_get_intf(uint16_t switch_domain_id,
                               int controller,
                               efx_pcie_interface_t *intf)
{
        const efx_pcie_interface_t *controllers;
        size_t nb_controllers;
        int rc;

        rc = sfc_mae_switch_domain_controllers(switch_domain_id, &controllers,
                                               &nb_controllers);
        if (rc != 0)
                return rc;

        if (controllers == NULL)
                return ENOENT;

        /* The controller index must lie within the mapping bounds */
        if (controller < 0 || (size_t)controller >= nb_controllers)
                return EINVAL;

        *intf = controllers[controller];

        return 0;
}
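
/*
 * An illustrative round trip through the controller mapping (not driver
 * code; domain_id is hypothetical). Once a DPDK controller -> EFX PCIe
 * interface array has been installed with
 * sfc_mae_switch_domain_map_controllers(), the two lookups above act as
 * inverses of each other, assuming each interface occurs at most once in
 * the mapping:
 *
 *        efx_pcie_interface_t intf;
 *        int controller;
 *
 *        if (sfc_mae_switch_domain_get_intf(domain_id, 0, &intf) == 0 &&
 *            sfc_mae_switch_domain_get_controller(domain_id, intf,
 *                                                 &controller) == 0)
 *                SFC_ASSERT(controller == 0);
 */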

/* This function expects to be called only when the lock is held */
static struct sfc_mae_switch_port *
sfc_mae_find_switch_port_by_entity(const struct sfc_mae_switch_domain *domain,
                                   const efx_mport_sel_t *entity_mportp,
                                   enum sfc_mae_switch_port_type type)
{
        struct sfc_mae_switch_port *port;

        SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

        TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
                if (port->entity_mport.sel == entity_mportp->sel &&
                    port->type == type)
                        return port;
        }

        return NULL;
}

int
sfc_mae_assign_switch_port(uint16_t switch_domain_id,
                           const struct sfc_mae_switch_port_request *req,
                           uint16_t *switch_port_id)
{
        struct sfc_mae_switch_domain *domain;
        struct sfc_mae_switch_port *port;
        int rc;

        rte_spinlock_lock(&sfc_mae_switch.lock);

        domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
        if (domain == NULL) {
                rc = EINVAL;
                goto fail_find_switch_domain_by_id;
        }

        port = sfc_mae_find_switch_port_by_entity(domain, req->entity_mportp,
                                                  req->type);
        if (port != NULL)
                goto done;

        port = rte_zmalloc("sfc_mae_switch_port", sizeof(*port), 0);
        if (port == NULL) {
                rc = ENOMEM;
                goto fail_mem_alloc;
        }

        port->entity_mport.sel = req->entity_mportp->sel;
        port->type = req->type;

        port->id = (domain->nb_ports++);

        TAILQ_INSERT_TAIL(&domain->ports, port, switch_domain_ports);

done:
        port->ethdev_mport = *req->ethdev_mportp;
        port->ethdev_port_id = req->ethdev_port_id;

        switch (req->type) {
        case SFC_MAE_SWITCH_PORT_INDEPENDENT:
                /* No data */
                break;
        case SFC_MAE_SWITCH_PORT_REPRESENTOR:
                memcpy(&port->data.repr, &req->port_data,
                       sizeof(port->data.repr));
                break;
        default:
                SFC_ASSERT(B_FALSE);
        }

        *switch_port_id = port->id;

        rte_spinlock_unlock(&sfc_mae_switch.lock);

        return 0;

fail_mem_alloc:
fail_find_switch_domain_by_id:
        rte_spinlock_unlock(&sfc_mae_switch.lock);
        return rc;
}

/* This function expects to be called only when the lock is held */
static int
sfc_mae_find_switch_port_by_ethdev(uint16_t switch_domain_id,
                                   uint16_t ethdev_port_id,
                                   efx_mport_sel_t *mport_sel)
{
        struct sfc_mae_switch_domain *domain;
        struct sfc_mae_switch_port *port;

        SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));

        if (ethdev_port_id == RTE_MAX_ETHPORTS)
                return EINVAL;

        domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
        if (domain == NULL)
                return EINVAL;

        TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
                if (port->ethdev_port_id == ethdev_port_id) {
                        *mport_sel = port->ethdev_mport;
                        return 0;
                }
        }

        return ENOENT;
}

int
sfc_mae_switch_port_by_ethdev(uint16_t switch_domain_id,
                              uint16_t ethdev_port_id,
                              efx_mport_sel_t *mport_sel)
{
        int rc;

        rte_spinlock_lock(&sfc_mae_switch.lock);
        rc = sfc_mae_find_switch_port_by_ethdev(switch_domain_id,
                                                ethdev_port_id, mport_sel);
        rte_spinlock_unlock(&sfc_mae_switch.lock);

        return rc;
}
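
/*
 * A hedged sketch of how the flow API backend is expected to use the lookup
 * above (local variable names and the surrounding flow-parsing context are
 * hypothetical): a flow rule action PORT_ID carries an ethdev port ID, and
 * the MPORT programmed into MAE must be the ethdev MPORT recorded in the
 * registry, not the entity MPORT (see the note at the top of this file).
 *
 *        const struct rte_flow_action_port_id *conf = action->conf;
 *        efx_mport_sel_t mport;
 *        int rc;
 *
 *        rc = sfc_mae_switch_port_by_ethdev(switch_domain_id, conf->id,
 *                                           &mport);
 *        if (rc != 0)
 *                return rc;   (positive errno, per driver convention)
 *
 * The resulting mport is then used to build the MAE delivery action for
 * the rule.
 */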