net/sfc: implement port representor link update
[dpdk.git] / drivers / net / sfc / sfc_repr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9
10 #include <stdint.h>
11
12 #include <rte_mbuf.h>
13 #include <rte_ethdev.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16
17 #include "efx.h"
18
19 #include "sfc_log.h"
20 #include "sfc_debug.h"
21 #include "sfc_repr.h"
22 #include "sfc_ethdev_state.h"
23 #include "sfc_repr_proxy_api.h"
24 #include "sfc_switch.h"
25 #include "sfc_dp_tx.h"
26
/** Multi-process shared representor private data */
struct sfc_repr_shared {
	uint16_t                pf_port_id;		/**< ethdev port ID of the backing PF */
	uint16_t                repr_id;		/**< representor ID on that PF */
	uint16_t                switch_domain_id;	/**< MAE switch domain ID */
	uint16_t                switch_port_id;		/**< MAE switch port ID */
};
34
/** Representor Rx queue (ring shared with the representor proxy) */
struct sfc_repr_rxq {
	/* Datapath members */
	struct rte_ring                 *ring;	/* SP/SC mbuf ring, see sfc_repr_ring_create() */
};
39
/** Representor Tx queue (ring shared with the representor proxy) */
struct sfc_repr_txq {
	/* Datapath members */
	struct rte_ring                 *ring;	/* SP/SC mbuf ring, see sfc_repr_ring_create() */
	efx_mport_id_t                  egress_mport;	/* filled in by sfc_repr_proxy_add_txq() */
};
45
/** Primary process representor private data */
struct sfc_repr {
	/**
	 * PMD setup and configuration is not thread safe. Since it is not
	 * performance sensitive, it is better to guarantee thread-safety
	 * and add device level lock. Adapter control operations which
	 * change its state should acquire the lock.
	 */
	rte_spinlock_t                  lock;
	/* Ethdev state; transitions are protected by the lock above */
	enum sfc_ethdev_state           state;
};
57
/*
 * Log an error for representor @sr. The local @_sr evaluates @sr exactly
 * once and type-checks it; the message goes to the generic sfc log.
 */
#define sfcr_err(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(ERR, __VA_ARGS__);			\
	} while (0)
65
/* Log a warning for representor @sr (same single-evaluation shape as sfcr_err) */
#define sfcr_warn(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(WARNING, __VA_ARGS__);			\
	} while (0)
73
/*
 * Log an info message for representor @sr, prefixed with the calling
 * function's name (spliced in via __func__ and RTE_FMT helpers).
 */
#define sfcr_info(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(INFO,					\
				RTE_FMT("%s() "				\
				RTE_FMT_HEAD(__VA_ARGS__ ,),		\
				__func__,				\
				RTE_FMT_TAIL(__VA_ARGS__ ,)));		\
	} while (0)
85
86 static inline struct sfc_repr_shared *
87 sfc_repr_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
88 {
89         struct sfc_repr_shared *srs = eth_dev->data->dev_private;
90
91         return srs;
92 }
93
94 static inline struct sfc_repr *
95 sfc_repr_by_eth_dev(struct rte_eth_dev *eth_dev)
96 {
97         struct sfc_repr *sr = eth_dev->process_private;
98
99         return sr;
100 }
101
102 /*
103  * Add wrapper functions to acquire/release lock to be able to remove or
104  * change the lock in one place.
105  */
106
/* Initialize the per-device control-path lock */
static inline void
sfc_repr_lock_init(struct sfc_repr *sr)
{
	rte_spinlock_init(&sr->lock);
}
112
#if defined(RTE_LIBRTE_SFC_EFX_DEBUG) || defined(RTE_ENABLE_ASSERT)

/* Debug-only helper used by SFC_ASSERT() lock-held checks */
static inline int
sfc_repr_lock_is_locked(struct sfc_repr *sr)
{
	return rte_spinlock_is_locked(&sr->lock);
}

#endif
122
/* Acquire the per-device control-path lock */
static inline void
sfc_repr_lock(struct sfc_repr *sr)
{
	rte_spinlock_lock(&sr->lock);
}
128
/* Release the per-device control-path lock */
static inline void
sfc_repr_unlock(struct sfc_repr *sr)
{
	rte_spinlock_unlock(&sr->lock);
}
134
/* Destroy the lock (spinlocks need no teardown) */
static inline void
sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
{
	/* Just for symmetry of the API */
}
140
141 static void
142 sfc_repr_rx_queue_stop(void *queue)
143 {
144         struct sfc_repr_rxq *rxq = queue;
145
146         if (rxq == NULL)
147                 return;
148
149         rte_ring_reset(rxq->ring);
150 }
151
152 static void
153 sfc_repr_tx_queue_stop(void *queue)
154 {
155         struct sfc_repr_txq *txq = queue;
156
157         if (txq == NULL)
158                 return;
159
160         rte_ring_reset(txq->ring);
161 }
162
/*
 * Start the representor: ask the representor proxy to start forwarding
 * traffic for it. Caller must hold the device lock. Returns 0 on success
 * or a negative errno.
 */
static int
sfc_repr_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_CONFIGURED:
		break;
	case SFC_ETHDEV_STARTED:
		/* Idempotent: starting a started device is a no-op */
		sfcr_info(sr, "already started");
		return 0;
	default:
		ret = -EINVAL;
		goto fail_bad_state;
	}

	sr->state = SFC_ETHDEV_STARTING;

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_start_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		/* Proxy API returns a positive errno; negate for ethdev */
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_start;
	}

	sr->state = SFC_ETHDEV_STARTED;

	sfcr_info(sr, "done");

	return 0;

fail_start:
	/* Roll the state back so a later start attempt may succeed */
	sr->state = SFC_ETHDEV_CONFIGURED;

fail_bad_state:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}
208
/* dev_start ethdev op: serialize sfc_repr_start() under the device lock */
static int
sfc_repr_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	int rc;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);
	rc = sfc_repr_start(dev);
	sfc_repr_unlock(sr);

	if (rc != 0) {
		sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-rc));
		return rc;
	}

	sfcr_info(sr, "done");

	return 0;
}
232
/*
 * Stop the representor: ask the proxy to stop forwarding, then reset all
 * Rx/Tx rings. Caller must hold the device lock. Returns 0 on success or
 * a negative errno.
 */
static int
sfc_repr_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	unsigned int i;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_STARTED:
		break;
	case SFC_ETHDEV_CONFIGURED:
		/* Idempotent: stopping a stopped device is a no-op */
		sfcr_info(sr, "already stopped");
		return 0;
	default:
		sfcr_err(sr, "stop in unexpected state %u", sr->state);
		SFC_ASSERT(B_FALSE);
		ret = -EINVAL;
		goto fail_bad_state;
	}

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_stop_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		/* Proxy API returns a positive errno; negate for ethdev */
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_stop;
	}

	/* Proxy no longer touches the rings, so it is safe to reset them */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		sfc_repr_rx_queue_stop(dev->data->rx_queues[i]);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		sfc_repr_tx_queue_stop(dev->data->tx_queues[i]);

	sr->state = SFC_ETHDEV_CONFIGURED;
	sfcr_info(sr, "done");

	return 0;

fail_bad_state:
fail_stop:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));

	return ret;
}
283
/* dev_stop ethdev op: serialize sfc_repr_stop() under the device lock */
static int
sfc_repr_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	int rc;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);

	rc = sfc_repr_stop(dev);
	if (rc != 0) {
		sfcr_err(sr, "%s() failed to stop representor", __func__);
		sfc_repr_unlock(sr);
		sfcr_err(sr, "%s() failed %s", __func__, rte_strerror(-rc));
		return rc;
	}

	sfc_repr_unlock(sr);

	sfcr_info(sr, "done");

	return 0;
}
313
/*
 * Validate the requested ethdev configuration against representor
 * capabilities. Logs an error for every unsupported option (so the user
 * sees all problems at once) and returns 0 or -EINVAL.
 */
static int
sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
		    const struct rte_eth_conf *conf)
{
	const struct rte_eth_rss_conf *rss_conf;
	int ret = 0;

	sfcr_info(sr, "entry");

	if (conf->link_speeds != 0) {
		sfcr_err(sr, "specific link speeds not supported");
		ret = -EINVAL;
	}

	switch (conf->rxmode.mq_mode) {
	case ETH_MQ_RX_RSS:
		/* RSS mode is tolerated only in its trivial single-queue form */
		if (nb_rx_queues != 1) {
			sfcr_err(sr, "Rx RSS is not supported with %u queues",
				 nb_rx_queues);
			ret = -EINVAL;
			break;
		}

		/* Any non-default RSS parameter is rejected as well */
		rss_conf = &conf->rx_adv_conf.rss_conf;
		if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
		    rss_conf->rss_hf != 0) {
			sfcr_err(sr, "Rx RSS configuration is not supported");
			ret = -EINVAL;
		}
		break;
	case ETH_MQ_RX_NONE:
		break;
	default:
		sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
		ret = -EINVAL;
		break;
	}

	if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
		sfcr_err(sr, "Tx mode MQ modes not supported");
		ret = -EINVAL;
	}

	if (conf->lpbk_mode != 0) {
		sfcr_err(sr, "loopback not supported");
		ret = -EINVAL;
	}

	if (conf->dcb_capability_en != 0) {
		sfcr_err(sr, "priority-based flow control not supported");
		ret = -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfcr_err(sr, "Flow Director not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.lsc != 0) {
		sfcr_err(sr, "link status change interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		sfcr_err(sr, "receive queue interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rmv != 0) {
		sfcr_err(sr, "remove interrupt not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done %d", ret);

	return ret;
}
391
392
393 static int
394 sfc_repr_configure(struct sfc_repr *sr, uint16_t nb_rx_queues,
395                    const struct rte_eth_conf *conf)
396 {
397         int ret;
398
399         sfcr_info(sr, "entry");
400
401         SFC_ASSERT(sfc_repr_lock_is_locked(sr));
402
403         ret = sfc_repr_check_conf(sr, nb_rx_queues, conf);
404         if (ret != 0)
405                 goto fail_check_conf;
406
407         sr->state = SFC_ETHDEV_CONFIGURED;
408
409         sfcr_info(sr, "done");
410
411         return 0;
412
413 fail_check_conf:
414         sfcr_info(sr, "failed %s", rte_strerror(-ret));
415         return ret;
416 }
417
418 static int
419 sfc_repr_dev_configure(struct rte_eth_dev *dev)
420 {
421         struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
422         struct rte_eth_dev_data *dev_data = dev->data;
423         int ret;
424
425         sfcr_info(sr, "entry n_rxq=%u n_txq=%u",
426                   dev_data->nb_rx_queues, dev_data->nb_tx_queues);
427
428         sfc_repr_lock(sr);
429         switch (sr->state) {
430         case SFC_ETHDEV_CONFIGURED:
431                 /* FALLTHROUGH */
432         case SFC_ETHDEV_INITIALIZED:
433                 ret = sfc_repr_configure(sr, dev_data->nb_rx_queues,
434                                          &dev_data->dev_conf);
435                 break;
436         default:
437                 sfcr_err(sr, "unexpected adapter state %u to configure",
438                          sr->state);
439                 ret = -EINVAL;
440                 break;
441         }
442         sfc_repr_unlock(sr);
443
444         sfcr_info(sr, "done %s", rte_strerror(-ret));
445
446         return ret;
447 }
448
449 static int
450 sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
451                        struct rte_eth_dev_info *dev_info)
452 {
453         struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
454
455         dev_info->device = dev->device;
456
457         dev_info->max_rx_queues = SFC_REPR_RXQ_MAX;
458         dev_info->max_tx_queues = SFC_REPR_TXQ_MAX;
459         dev_info->default_rxconf.rx_drop_en = 1;
460         dev_info->switch_info.domain_id = srs->switch_domain_id;
461         dev_info->switch_info.port_id = srs->switch_port_id;
462
463         return 0;
464 }
465
466 static int
467 sfc_repr_dev_link_update(struct rte_eth_dev *dev,
468                          __rte_unused int wait_to_complete)
469 {
470         struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
471         struct rte_eth_link link;
472
473         if (sr->state != SFC_ETHDEV_STARTED) {
474                 sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
475         } else {
476                 memset(&link, 0, sizeof(link));
477                 link.link_status = ETH_LINK_UP;
478                 link.link_speed = ETH_SPEED_NUM_UNKNOWN;
479         }
480
481         return rte_eth_linkstatus_set(dev, &link);
482 }
483
484 static int
485 sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
486                      const char *type_name, uint16_t qid, uint16_t nb_desc,
487                      unsigned int socket_id, struct rte_ring **ring)
488 {
489         char ring_name[RTE_RING_NAMESIZE];
490         int ret;
491
492         ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
493                        pf_port_id, repr_id, type_name, qid);
494         if (ret >= (int)sizeof(ring_name))
495                 return -ENAMETOOLONG;
496
497         /*
498          * Single producer/consumer rings are used since the API for Tx/Rx
499          * packet burst for representors are guaranteed to be called from
500          * a single thread, and the user of the other end (representor proxy)
501          * is also single-threaded.
502          */
503         *ring = rte_ring_create(ring_name, nb_desc, socket_id,
504                                RING_F_SP_ENQ | RING_F_SC_DEQ);
505         if (*ring == NULL)
506                 return -rte_errno;
507
508         return 0;
509 }
510
/*
 * Validate RxQ configuration. Unsupported-but-harmless options only
 * produce warnings; deferred start is a hard error. Returns 0 or -EINVAL.
 */
static int
sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
			const struct rte_eth_rxconf *rx_conf)
{
	int ret = 0;

	sfcr_info(sr, "entry");

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfcr_warn(sr,
			"RxQ prefetch/host/writeback thresholds are not supported");
	}

	if (rx_conf->rx_free_thresh != 0)
		sfcr_warn(sr, "RxQ free threshold is not supported");

	if (rx_conf->rx_drop_en == 0)
		sfcr_warn(sr, "RxQ drop disable is not supported");

	if (rx_conf->rx_deferred_start) {
		sfcr_err(sr, "Deferred start is not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done: %s", rte_strerror(-ret));

	return ret;
}
541
542 static int
543 sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
544                         uint16_t nb_rx_desc, unsigned int socket_id,
545                         __rte_unused const struct rte_eth_rxconf *rx_conf,
546                         struct rte_mempool *mb_pool)
547 {
548         struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
549         struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
550         struct sfc_repr_rxq *rxq;
551         int ret;
552
553         sfcr_info(sr, "entry");
554
555         ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
556         if (ret != 0)
557                 goto fail_check_conf;
558
559         ret = -ENOMEM;
560         rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
561                                  RTE_CACHE_LINE_SIZE, socket_id);
562         if (rxq == NULL) {
563                 sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
564                 goto fail_rxq_alloc;
565         }
566
567         ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
568                                    "rx", rx_queue_id, nb_rx_desc,
569                                    socket_id, &rxq->ring);
570         if (ret != 0) {
571                 sfcr_err(sr, "%s() failed to create ring", __func__);
572                 goto fail_ring_create;
573         }
574
575         ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
576                                      rx_queue_id, rxq->ring, mb_pool);
577         if (ret != 0) {
578                 SFC_ASSERT(ret > 0);
579                 ret = -ret;
580                 sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
581                 goto fail_proxy_add_rxq;
582         }
583
584         dev->data->rx_queues[rx_queue_id] = rxq;
585
586         sfcr_info(sr, "done");
587
588         return 0;
589
590 fail_proxy_add_rxq:
591         rte_ring_free(rxq->ring);
592
593 fail_ring_create:
594         rte_free(rxq);
595
596 fail_rxq_alloc:
597 fail_check_conf:
598         sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
599         return ret;
600 }
601
602 static void
603 sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
604 {
605         struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
606         struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];
607
608         sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
609         rte_ring_free(rxq->ring);
610         rte_free(rxq);
611 }
612
/*
 * Validate TxQ configuration. Unsupported-but-harmless options only
 * produce warnings; deferred start is a hard error. Returns 0 or -EINVAL.
 */
static int
sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
			const struct rte_eth_txconf *tx_conf)
{
	int ret = 0;

	sfcr_info(sr, "entry");

	if (tx_conf->tx_rs_thresh != 0)
		sfcr_warn(sr, "RS bit in transmit descriptor is not supported");

	if (tx_conf->tx_free_thresh != 0)
		sfcr_warn(sr, "TxQ free threshold is not supported");

	if (tx_conf->tx_thresh.pthresh != 0 ||
	    tx_conf->tx_thresh.hthresh != 0 ||
	    tx_conf->tx_thresh.wthresh != 0) {
		sfcr_warn(sr,
			"prefetch/host/writeback thresholds are not supported");
	}

	if (tx_conf->tx_deferred_start) {
		sfcr_err(sr, "Deferred start is not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done: %s", rte_strerror(-ret));

	return ret;
}
643
644 static int
645 sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
646                         uint16_t nb_tx_desc, unsigned int socket_id,
647                         const struct rte_eth_txconf *tx_conf)
648 {
649         struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
650         struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
651         struct sfc_repr_txq *txq;
652         int ret;
653
654         sfcr_info(sr, "entry");
655
656         ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
657         if (ret != 0)
658                 goto fail_check_conf;
659
660         ret = -ENOMEM;
661         txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
662                                  RTE_CACHE_LINE_SIZE, socket_id);
663         if (txq == NULL)
664                 goto fail_txq_alloc;
665
666         ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
667                                    "tx", tx_queue_id, nb_tx_desc,
668                                    socket_id, &txq->ring);
669         if (ret != 0)
670                 goto fail_ring_create;
671
672         ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
673                                      tx_queue_id, txq->ring,
674                                      &txq->egress_mport);
675         if (ret != 0)
676                 goto fail_proxy_add_txq;
677
678         dev->data->tx_queues[tx_queue_id] = txq;
679
680         sfcr_info(sr, "done");
681
682         return 0;
683
684 fail_proxy_add_txq:
685         rte_ring_free(txq->ring);
686
687 fail_ring_create:
688         rte_free(txq);
689
690 fail_txq_alloc:
691 fail_check_conf:
692         sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
693         return ret;
694 }
695
696 static void
697 sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
698 {
699         struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
700         struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];
701
702         sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
703         rte_ring_free(txq->ring);
704         rte_free(txq);
705 }
706
/*
 * Move a CONFIGURED representor back to the INITIALIZED state.
 * Caller must hold the device lock.
 */
static void
sfc_repr_close(struct sfc_repr *sr)
{
	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
	sr->state = SFC_ETHDEV_CLOSING;

	/* Put representor close actions here */

	sr->state = SFC_ETHDEV_INITIALIZED;
}
719
720 static int
721 sfc_repr_dev_close(struct rte_eth_dev *dev)
722 {
723         struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
724         struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
725         unsigned int i;
726
727         sfcr_info(sr, "entry");
728
729         sfc_repr_lock(sr);
730         switch (sr->state) {
731         case SFC_ETHDEV_STARTED:
732                 sfc_repr_stop(dev);
733                 SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
734                 /* FALLTHROUGH */
735         case SFC_ETHDEV_CONFIGURED:
736                 sfc_repr_close(sr);
737                 SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
738                 /* FALLTHROUGH */
739         case SFC_ETHDEV_INITIALIZED:
740                 break;
741         default:
742                 sfcr_err(sr, "unexpected adapter state %u on close", sr->state);
743                 break;
744         }
745
746         for (i = 0; i < dev->data->nb_rx_queues; i++) {
747                 sfc_repr_rx_queue_release(dev, i);
748                 dev->data->rx_queues[i] = NULL;
749         }
750
751         for (i = 0; i < dev->data->nb_tx_queues; i++) {
752                 sfc_repr_tx_queue_release(dev, i);
753                 dev->data->tx_queues[i] = NULL;
754         }
755
756         /*
757          * Cleanup all resources.
758          * Rollback primary process sfc_repr_eth_dev_init() below.
759          */
760
761         (void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);
762
763         dev->dev_ops = NULL;
764
765         sfc_repr_unlock(sr);
766         sfc_repr_lock_fini(sr);
767
768         sfcr_info(sr, "done");
769
770         free(sr);
771
772         return 0;
773 }
774
/* Representor ethdev operations (datapath burst callbacks are separate) */
static const struct eth_dev_ops sfc_repr_dev_ops = {
	.dev_configure			= sfc_repr_dev_configure,
	.dev_start			= sfc_repr_dev_start,
	.dev_stop			= sfc_repr_dev_stop,
	.dev_close			= sfc_repr_dev_close,
	.dev_infos_get			= sfc_repr_dev_infos_get,
	.link_update			= sfc_repr_dev_link_update,
	.rx_queue_setup			= sfc_repr_rx_queue_setup,
	.rx_queue_release		= sfc_repr_rx_queue_release,
	.tx_queue_setup			= sfc_repr_tx_queue_setup,
	.tx_queue_release		= sfc_repr_tx_queue_release,
};
787
788
/* Parameters passed from sfc_repr_create() to sfc_repr_eth_dev_init() */
struct sfc_repr_init_data {
	uint16_t                pf_port_id;		/* ethdev port ID of the parent PF */
	uint16_t                repr_id;		/* representor ID */
	uint16_t                switch_domain_id;	/* MAE switch domain ID */
	efx_mport_sel_t         mport_sel;		/* entity m-port selector */
};
795
/*
 * Assign an MAE switch port, converting the positive errno returned by
 * sfc_mae_assign_switch_port() to the negative convention used here.
 */
static int
sfc_repr_assign_mae_switch_port(uint16_t switch_domain_id,
				const struct sfc_mae_switch_port_request *req,
				uint16_t *switch_port_id)
{
	int rc = sfc_mae_assign_switch_port(switch_domain_id, req,
					    switch_port_id);

	SFC_ASSERT(rc >= 0);
	return -rc;
}
808
/*
 * Ethdev init callback for a representor (runs in the primary process):
 * assigns an MAE switch port, registers the port with the representor
 * proxy, allocates per-process private data and fills in ethdev fields.
 * On failure everything acquired so far is rolled back in reverse order.
 */
static int
sfc_repr_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
{
	const struct sfc_repr_init_data *repr_data = init_params;
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_mae_switch_port_request switch_port_request;
	efx_mport_sel_t ethdev_mport_sel;
	struct sfc_repr *sr;
	int ret;

	/*
	 * Currently there is no mport we can use for representor's
	 * ethdev. Use an invalid one for now. This way representors
	 * can be instantiated.
	 */
	efx_mae_mport_invalid(&ethdev_mport_sel);

	memset(&switch_port_request, 0, sizeof(switch_port_request));
	switch_port_request.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
	switch_port_request.ethdev_mportp = &ethdev_mport_sel;
	switch_port_request.entity_mportp = &repr_data->mport_sel;
	switch_port_request.ethdev_port_id = dev->data->port_id;

	ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
					      &switch_port_request,
					      &srs->switch_port_id);
	if (ret != 0) {
		SFC_GENERIC_LOG(ERR,
			"%s() failed to assign MAE switch port (domain id %u)",
			__func__, repr_data->switch_domain_id);
		goto fail_mae_assign_switch_port;
	}

	ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
				      repr_data->repr_id,
				      dev->data->port_id,
				      &repr_data->mport_sel);
	if (ret != 0) {
		SFC_GENERIC_LOG(ERR, "%s() failed to add repr proxy port",
				__func__);
		/* Proxy API returns a positive errno; negate for ethdev */
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_create_port;
	}

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sr = calloc(1, sizeof(*sr));
	if (sr == NULL) {
		ret = -ENOMEM;
		goto fail_alloc_sr;
	}

	sfc_repr_lock_init(sr);
	sfc_repr_lock(sr);

	dev->process_private = sr;

	/* Publish identifiers to the multi-process shared area */
	srs->pf_port_id = repr_data->pf_port_id;
	srs->repr_id = repr_data->repr_id;
	srs->switch_domain_id = repr_data->switch_domain_id;

	dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	dev->data->representor_id = srs->repr_id;
	dev->data->backer_port_id = srs->pf_port_id;

	/* Representors carry a single (zeroed) MAC address slot */
	dev->data->mac_addrs = rte_zmalloc("sfcr", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		ret = -ENOMEM;
		goto fail_mac_addrs;
	}

	dev->dev_ops = &sfc_repr_dev_ops;

	sr->state = SFC_ETHDEV_INITIALIZED;
	sfc_repr_unlock(sr);

	return 0;

fail_mac_addrs:
	sfc_repr_unlock(sr);
	free(sr);

fail_alloc_sr:
	(void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
				      repr_data->repr_id);

fail_create_port:
fail_mae_assign_switch_port:
	SFC_GENERIC_LOG(ERR, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}
903
904 int
905 sfc_repr_create(struct rte_eth_dev *parent, uint16_t representor_id,
906                 uint16_t switch_domain_id, const efx_mport_sel_t *mport_sel)
907 {
908         struct sfc_repr_init_data repr_data;
909         char name[RTE_ETH_NAME_MAX_LEN];
910         int ret;
911
912         if (snprintf(name, sizeof(name), "net_%s_representor_%u",
913                      parent->device->name, representor_id) >=
914                         (int)sizeof(name)) {
915                 SFC_GENERIC_LOG(ERR, "%s() failed name too long", __func__);
916                 return -ENAMETOOLONG;
917         }
918
919         memset(&repr_data, 0, sizeof(repr_data));
920         repr_data.pf_port_id = parent->data->port_id;
921         repr_data.repr_id = representor_id;
922         repr_data.switch_domain_id = switch_domain_id;
923         repr_data.mport_sel = *mport_sel;
924
925         ret = rte_eth_dev_create(parent->device, name,
926                                   sizeof(struct sfc_repr_shared),
927                                   NULL, NULL,
928                                   sfc_repr_eth_dev_init, &repr_data);
929         if (ret != 0)
930                 SFC_GENERIC_LOG(ERR, "%s() failed to create device", __func__);
931
932         SFC_GENERIC_LOG(INFO, "%s() done: %s", __func__, rte_strerror(-ret));
933
934         return ret;
935 }