/* drivers/net/sfc/sfc_switch.c */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9
10 #include <stdbool.h>
11
12 #include <rte_common.h>
13 #include <rte_spinlock.h>
14
15 #include "efx.h"
16
17 #include "sfc.h"
18 #include "sfc_log.h"
19 #include "sfc_switch.h"
20
/**
 * Switch port registry entry.
 *
 * Drivers aware of RTE switch domains also have to maintain RTE switch
 * port IDs for RTE ethdev instances they operate. These IDs are supposed
 * to stand for physical interconnect entities, for example, PCIe functions.
 *
 * In terms of MAE, a physical interconnect entity can be referred to using
 * an MPORT selector, that is, a 32-bit value. RTE switch port IDs, in turn,
 * are 16-bit values, so indirect mapping has to be maintained:
 *
 * +--------------------+          +---------------------------------------+
 * | RTE switch port ID |  ------  |         MAE switch port entry         |
 * +--------------------+          |         ---------------------         |
 *                                 |                                       |
 *                                 | Entity (PCIe function) MPORT selector |
 *                                 |                   +                   |
 *                                 |  Port type (independent/representor)  |
 *                                 +---------------------------------------+
 *
 * This mapping comprises a port type to ensure that RTE switch port ID
 * of a represented entity and that of its representor are different in
 * the case when the entity gets plugged into DPDK and not into a guest.
 *
 * Entry data also comprises RTE ethdev's own MPORT. This value
 * coincides with the entity MPORT in the case of independent ports.
 * In the case of representors, this ID is not a selector and refers
 * to an allocatable object (that is, it's likely to change on RTE
 * ethdev replug). Flow API backend must use this value rather
 * than entity_mport to support flow rule action PORT_ID.
 */
struct sfc_mae_switch_port {
	/** Link in the owning switch domain's list of ports */
	TAILQ_ENTRY(sfc_mae_switch_port)	switch_domain_ports;

	/** RTE ethdev MPORT */
	efx_mport_sel_t				ethdev_mport;
	/** RTE ethdev port ID */
	uint16_t				ethdev_port_id;

	/** Entity (PCIe function) MPORT selector */
	efx_mport_sel_t				entity_mport;
	/** Port type (independent/representor) */
	enum sfc_mae_switch_port_type		type;
	/** RTE switch port ID */
	uint16_t				id;

	/** Type-specific data (for example, representor parameters) */
	union sfc_mae_switch_port_data		data;
};

/* Head type for a switch domain's list of switch ports */
TAILQ_HEAD(sfc_mae_switch_ports, sfc_mae_switch_port);
71
/**
 * Switch domain registry entry.
 *
 * Even if an RTE ethdev instance gets unplugged, the corresponding
 * entry in the switch port registry will not be removed because the
 * entity (PCIe function) MPORT is static and cannot change. If this
 * RTE ethdev gets plugged back, the entry will be reused, and
 * RTE switch port ID will be the same.
 */
struct sfc_mae_switch_domain {
	/** Link in the global list of switch domains */
	TAILQ_ENTRY(sfc_mae_switch_domain)	entries;

	/** HW switch ID */
	struct sfc_hw_switch_id			*hw_switch_id;
	/** The number of ports in the switch port registry */
	unsigned int				nb_ports;
	/** Switch port registry */
	struct sfc_mae_switch_ports		ports;
	/** RTE switch domain ID allocated for a group of devices */
	uint16_t				id;
	/** DPDK controller -> EFX interface mapping */
	efx_pcie_interface_t			*controllers;
	/** Number of DPDK controllers and EFX interfaces */
	size_t					nb_controllers;
};

/* Head type for the global list of switch domains */
TAILQ_HEAD(sfc_mae_switch_domains, sfc_mae_switch_domain);
99
/**
 * MAE representation of RTE switch infrastructure.
 *
 * It is possible that an RTE flow API client tries to insert a rule
 * referencing an RTE ethdev deployed on top of a different physical
 * device (it may belong to the same vendor or not). This particular
 * driver/engine cannot support this and has to turn down such rules.
 *
 * Technically, it's HW switch identifier which, if queried for each
 * RTE ethdev instance, indicates relationship between the instances.
 * In the meantime, RTE flow API clients also need to somehow figure
 * out relationship between RTE ethdev instances in advance.
 *
 * The concept of RTE switch domains resolves this issue. The driver
 * maintains a static list of switch domains which is easy to browse,
 * and each RTE ethdev fills RTE switch parameters in device
 * information structure which is made available to clients.
 *
 * Even if all RTE ethdev instances belonging to a switch domain get
 * unplugged, the corresponding entry in the switch domain registry
 * will not be removed because the corresponding HW switch exists
 * regardless of its ports being plugged to DPDK or kept aside.
 * If a port gets plugged back to DPDK, the corresponding
 * RTE ethdev will indicate the same RTE switch domain ID.
 */
struct sfc_mae_switch {
	/** A lock to protect the whole structure */
	rte_spinlock_t			lock;
	/** Switch domain registry */
	struct sfc_mae_switch_domains	domains;
};

/* The only (process-global) instance of the switch registry */
static struct sfc_mae_switch sfc_mae_switch = {
	.lock = RTE_SPINLOCK_INITIALIZER,
	.domains = TAILQ_HEAD_INITIALIZER(sfc_mae_switch.domains),
};
136
137
138 /* This function expects to be called only when the lock is held */
139 static struct sfc_mae_switch_domain *
140 sfc_mae_find_switch_domain_by_id(uint16_t switch_domain_id)
141 {
142         struct sfc_mae_switch_domain *domain;
143
144         SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
145
146         TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
147                 if (domain->id == switch_domain_id)
148                         return domain;
149         }
150
151         return NULL;
152 }
153
154 int
155 sfc_mae_switch_ports_iterate(uint16_t switch_domain_id,
156                              sfc_mae_switch_port_iterator_cb *cb,
157                              void *data)
158 {
159         struct sfc_mae_switch_domain *domain;
160         struct sfc_mae_switch_port *port;
161
162         if (cb == NULL)
163                 return EINVAL;
164
165         rte_spinlock_lock(&sfc_mae_switch.lock);
166
167         domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
168         if (domain == NULL) {
169                 rte_spinlock_unlock(&sfc_mae_switch.lock);
170                 return EINVAL;
171         }
172
173         TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
174                 cb(port->type, &port->ethdev_mport, port->ethdev_port_id,
175                    &port->entity_mport, port->id, &port->data, data);
176         }
177
178         rte_spinlock_unlock(&sfc_mae_switch.lock);
179         return 0;
180 }
181
182 /* This function expects to be called only when the lock is held */
183 static struct sfc_mae_switch_domain *
184 sfc_mae_find_switch_domain_by_hw_switch_id(const struct sfc_hw_switch_id *id)
185 {
186         struct sfc_mae_switch_domain *domain;
187
188         SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
189
190         TAILQ_FOREACH(domain, &sfc_mae_switch.domains, entries) {
191                 if (sfc_hw_switch_ids_equal(domain->hw_switch_id, id))
192                         return domain;
193         }
194
195         return NULL;
196 }
197
/*
 * Find the switch domain the adapter belongs to (matched by HW switch
 * ID) or register a new one. In both cases the RTE switch domain ID is
 * returned through switch_domain_id.
 *
 * Returns 0 on success or a positive errno value on failure.
 */
int
sfc_mae_assign_switch_domain(struct sfc_adapter *sa,
			     uint16_t *switch_domain_id)
{
	struct sfc_hw_switch_id *hw_switch_id;
	struct sfc_mae_switch_domain *domain;
	int rc;

	rte_spinlock_lock(&sfc_mae_switch.lock);

	rc = sfc_hw_switch_id_init(sa, &hw_switch_id);
	if (rc != 0)
		goto fail_hw_switch_id_init;

	/* Reuse an existing entry for the same HW switch, if any. */
	domain = sfc_mae_find_switch_domain_by_hw_switch_id(hw_switch_id);
	if (domain != NULL) {
		/* The found domain holds its own ID copy; drop this one. */
		sfc_hw_switch_id_fini(sa, hw_switch_id);
		goto done;
	}

	domain = rte_zmalloc("sfc_mae_switch_domain", sizeof(*domain), 0);
	if (domain == NULL) {
		rc = ENOMEM;
		goto fail_mem_alloc;
	}

	/*
	 * This code belongs to driver init path, that is, negation is
	 * done at the end of the path by sfc_eth_dev_init(). RTE APIs
	 * negate error codes, so drop negation here.
	 */
	rc = -rte_eth_switch_domain_alloc(&domain->id);
	if (rc != 0)
		goto fail_domain_alloc;

	/* Ownership of hw_switch_id passes to the new domain entry. */
	domain->hw_switch_id = hw_switch_id;

	TAILQ_INIT(&domain->ports);

	TAILQ_INSERT_TAIL(&sfc_mae_switch.domains, domain, entries);

done:
	*switch_domain_id = domain->id;

	rte_spinlock_unlock(&sfc_mae_switch.lock);

	return 0;

fail_domain_alloc:
	rte_free(domain);

fail_mem_alloc:
	sfc_hw_switch_id_fini(sa, hw_switch_id);

fail_hw_switch_id_init:
	rte_spinlock_unlock(&sfc_mae_switch.lock);
	return rc;
}
256
257 int
258 sfc_mae_switch_domain_controllers(uint16_t switch_domain_id,
259                                   const efx_pcie_interface_t **controllers,
260                                   size_t *nb_controllers)
261 {
262         struct sfc_mae_switch_domain *domain;
263
264         if (controllers == NULL || nb_controllers == NULL)
265                 return EINVAL;
266
267         rte_spinlock_lock(&sfc_mae_switch.lock);
268
269         domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
270         if (domain == NULL) {
271                 rte_spinlock_unlock(&sfc_mae_switch.lock);
272                 return EINVAL;
273         }
274
275         *controllers = domain->controllers;
276         *nb_controllers = domain->nb_controllers;
277
278         rte_spinlock_unlock(&sfc_mae_switch.lock);
279         return 0;
280 }
281
282 int
283 sfc_mae_switch_domain_map_controllers(uint16_t switch_domain_id,
284                                       efx_pcie_interface_t *controllers,
285                                       size_t nb_controllers)
286 {
287         struct sfc_mae_switch_domain *domain;
288
289         rte_spinlock_lock(&sfc_mae_switch.lock);
290
291         domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
292         if (domain == NULL) {
293                 rte_spinlock_unlock(&sfc_mae_switch.lock);
294                 return EINVAL;
295         }
296
297         /* Controller mapping may be set only once */
298         if (domain->controllers != NULL) {
299                 rte_spinlock_unlock(&sfc_mae_switch.lock);
300                 return EINVAL;
301         }
302
303         domain->controllers = controllers;
304         domain->nb_controllers = nb_controllers;
305
306         rte_spinlock_unlock(&sfc_mae_switch.lock);
307         return 0;
308 }
309
310 int
311 sfc_mae_switch_controller_from_mapping(const efx_pcie_interface_t *controllers,
312                                        size_t nb_controllers,
313                                        efx_pcie_interface_t intf,
314                                        int *controller)
315 {
316         size_t i;
317
318         if (controllers == NULL)
319                 return ENOENT;
320
321         for (i = 0; i < nb_controllers; i++) {
322                 if (controllers[i] == intf) {
323                         *controller = i;
324                         return 0;
325                 }
326         }
327
328         return ENOENT;
329 }
330
331 int
332 sfc_mae_switch_domain_get_controller(uint16_t switch_domain_id,
333                                      efx_pcie_interface_t intf,
334                                      int *controller)
335 {
336         const efx_pcie_interface_t *controllers;
337         size_t nb_controllers;
338         int rc;
339
340         rc = sfc_mae_switch_domain_controllers(switch_domain_id, &controllers,
341                                                &nb_controllers);
342         if (rc != 0)
343                 return rc;
344
345         return sfc_mae_switch_controller_from_mapping(controllers,
346                                                       nb_controllers,
347                                                       intf,
348                                                       controller);
349 }
350
351 int sfc_mae_switch_domain_get_intf(uint16_t switch_domain_id,
352                                    int controller,
353                                    efx_pcie_interface_t *intf)
354 {
355         const efx_pcie_interface_t *controllers;
356         size_t nb_controllers;
357         int rc;
358
359         rc = sfc_mae_switch_domain_controllers(switch_domain_id, &controllers,
360                                                &nb_controllers);
361         if (rc != 0)
362                 return rc;
363
364         if (controllers == NULL)
365                 return ENOENT;
366
367         if ((size_t)controller > nb_controllers)
368                 return EINVAL;
369
370         *intf = controllers[controller];
371
372         return 0;
373 }
374
375 /* This function expects to be called only when the lock is held */
376 static struct sfc_mae_switch_port *
377 sfc_mae_find_switch_port_by_entity(const struct sfc_mae_switch_domain *domain,
378                                    const efx_mport_sel_t *entity_mportp,
379                                    enum sfc_mae_switch_port_type type)
380 {
381         struct sfc_mae_switch_port *port;
382
383         SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
384
385         TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
386                 if (port->entity_mport.sel == entity_mportp->sel &&
387                     port->type == type)
388                         return port;
389         }
390
391         return NULL;
392 }
393
394 /* This function expects to be called only when the lock is held */
395 static int
396 sfc_mae_find_switch_port_id_by_entity(uint16_t switch_domain_id,
397                                       const efx_mport_sel_t *entity_mportp,
398                                       enum sfc_mae_switch_port_type type,
399                                       uint16_t *switch_port_id)
400 {
401         struct sfc_mae_switch_domain *domain;
402         struct sfc_mae_switch_port *port;
403
404         SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
405
406         domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
407         if (domain == NULL)
408                 return EINVAL;
409
410         port = sfc_mae_find_switch_port_by_entity(domain, entity_mportp, type);
411         if (port == NULL)
412                 return ENOENT;
413
414         *switch_port_id = port->id;
415         return 0;
416 }
417
/*
 * Find an existing switch port entry by entity MPORT and type, or
 * register a new one. The ethdev MPORT, ethdev port ID and the
 * type-specific data are (re-)assigned in either case, which supports
 * RTE ethdev replug: the entry survives unplug and is refreshed here.
 *
 * Returns 0 on success or a positive errno value on failure.
 */
int
sfc_mae_assign_switch_port(uint16_t switch_domain_id,
			   const struct sfc_mae_switch_port_request *req,
			   uint16_t *switch_port_id)
{
	struct sfc_mae_switch_domain *domain;
	struct sfc_mae_switch_port *port;
	int rc;

	rte_spinlock_lock(&sfc_mae_switch.lock);

	domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
	if (domain == NULL) {
		rc = EINVAL;
		goto fail_find_switch_domain_by_id;
	}

	/* Reuse an entry left behind by a previously unplugged ethdev. */
	port = sfc_mae_find_switch_port_by_entity(domain, req->entity_mportp,
						  req->type);
	if (port != NULL)
		goto done;

	port = rte_zmalloc("sfc_mae_switch_port", sizeof(*port), 0);
	if (port == NULL) {
		rc = ENOMEM;
		goto fail_mem_alloc;
	}

	port->entity_mport.sel = req->entity_mportp->sel;
	port->type = req->type;

	/* RTE switch port IDs are allocated sequentially per domain. */
	port->id = (domain->nb_ports++);

	TAILQ_INSERT_TAIL(&domain->ports, port, switch_domain_ports);

done:
	/* Refresh the fields which may change across ethdev replug. */
	port->ethdev_mport = *req->ethdev_mportp;
	port->ethdev_port_id = req->ethdev_port_id;

	switch (req->type) {
	case SFC_MAE_SWITCH_PORT_INDEPENDENT:
		/* No data */
		break;
	case SFC_MAE_SWITCH_PORT_REPRESENTOR:
		memcpy(&port->data.repr, &req->port_data,
		       sizeof(port->data.repr));
		break;
	default:
		SFC_ASSERT(B_FALSE);
	}

	*switch_port_id = port->id;

	rte_spinlock_unlock(&sfc_mae_switch.lock);

	return 0;

fail_mem_alloc:
fail_find_switch_domain_by_id:
	rte_spinlock_unlock(&sfc_mae_switch.lock);
	return rc;
}
480
481 /* This function expects to be called only when the lock is held */
482 static int
483 sfc_mae_find_switch_port_by_ethdev(uint16_t switch_domain_id,
484                                    uint16_t ethdev_port_id,
485                                    efx_mport_sel_t *mport_sel)
486 {
487         struct sfc_mae_switch_domain *domain;
488         struct sfc_mae_switch_port *port;
489
490         SFC_ASSERT(rte_spinlock_is_locked(&sfc_mae_switch.lock));
491
492         if (ethdev_port_id == RTE_MAX_ETHPORTS)
493                 return EINVAL;
494
495         domain = sfc_mae_find_switch_domain_by_id(switch_domain_id);
496         if (domain == NULL)
497                 return EINVAL;
498
499         TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) {
500                 if (port->ethdev_port_id == ethdev_port_id) {
501                         *mport_sel = port->ethdev_mport;
502                         return 0;
503                 }
504         }
505
506         return ENOENT;
507 }
508
509 int
510 sfc_mae_switch_port_by_ethdev(uint16_t switch_domain_id,
511                               uint16_t ethdev_port_id,
512                               efx_mport_sel_t *mport_sel)
513 {
514         int rc;
515
516         rte_spinlock_lock(&sfc_mae_switch.lock);
517         rc = sfc_mae_find_switch_port_by_ethdev(switch_domain_id,
518                                                 ethdev_port_id, mport_sel);
519         rte_spinlock_unlock(&sfc_mae_switch.lock);
520
521         return rc;
522 }
523
524 int
525 sfc_mae_switch_port_id_by_entity(uint16_t switch_domain_id,
526                                  const efx_mport_sel_t *entity_mportp,
527                                  enum sfc_mae_switch_port_type type,
528                                  uint16_t *switch_port_id)
529 {
530         int rc;
531
532         rte_spinlock_lock(&sfc_mae_switch.lock);
533         rc = sfc_mae_find_switch_port_id_by_entity(switch_domain_id,
534                                                    entity_mportp, type,
535                                                    switch_port_id);
536         rte_spinlock_unlock(&sfc_mae_switch.lock);
537
538         return rc;
539 }