net/sfc: implement representor Rx
[dpdk.git] / drivers / net / sfc / sfc_repr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2021 Xilinx, Inc.
4  * Copyright(c) 2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9
10 #include <stdint.h>
11
12 #include <rte_mbuf.h>
13 #include <rte_ethdev.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16
17 #include "efx.h"
18
19 #include "sfc_log.h"
20 #include "sfc_debug.h"
21 #include "sfc_repr.h"
22 #include "sfc_ethdev_state.h"
23 #include "sfc_repr_proxy_api.h"
24 #include "sfc_switch.h"
25 #include "sfc_dp_tx.h"
26
/** Multi-process shared representor private data */
struct sfc_repr_shared {
	uint16_t		pf_port_id;	/* ethdev port ID of the backing PF */
	uint16_t		repr_id;	/* representor ID within the PF */
	uint16_t		switch_domain_id;	/* MAE switch domain ID */
	uint16_t		switch_port_id;	/* port ID within the switch domain */
};
34
/** Representor Rx queue: packets arrive via a ring fed by the proxy */
struct sfc_repr_rxq {
	/* Datapath members */
	struct rte_ring			*ring;	/* SP/SC ring filled by representor proxy */
};
39
/** Representor Tx queue: packets leave via a ring drained by the proxy */
struct sfc_repr_txq {
	/* Datapath members */
	struct rte_ring			*ring;	/* SP/SC ring drained by representor proxy */
	efx_mport_id_t			egress_mport;	/* m-port stamped on each Tx mbuf */
};
45
/** Primary process representor private data */
struct sfc_repr {
	/**
	 * PMD setup and configuration is not thread safe. Since it is not
	 * performance sensitive, it is better to guarantee thread-safety
	 * and add device level lock. Adapter control operations which
	 * change its state should acquire the lock.
	 */
	rte_spinlock_t			lock;
	/* Device state machine; transitions happen under the lock */
	enum sfc_ethdev_state		state;
};
57
/* Log an error for a representor; _sr is evaluated once and kept for
 * future per-device logging (currently unused, hence the (void) cast).
 */
#define sfcr_err(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(ERR, __VA_ARGS__);			\
	} while (0)
65
/* Log a warning for a representor; see sfcr_err() for the _sr rationale */
#define sfcr_warn(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(WARNING, __VA_ARGS__);			\
	} while (0)
73
/* Log an info message for a representor, prefixed with the caller's
 * function name (spliced in via RTE_FMT_HEAD/RTE_FMT_TAIL).
 */
#define sfcr_info(sr, ...) \
	do {								\
		const struct sfc_repr *_sr = (sr);			\
									\
		(void)_sr;						\
		SFC_GENERIC_LOG(INFO,					\
				RTE_FMT("%s() "				\
				RTE_FMT_HEAD(__VA_ARGS__ ,),		\
				__func__,				\
				RTE_FMT_TAIL(__VA_ARGS__ ,)));		\
	} while (0)
85
86 static inline struct sfc_repr_shared *
87 sfc_repr_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
88 {
89         struct sfc_repr_shared *srs = eth_dev->data->dev_private;
90
91         return srs;
92 }
93
94 static inline struct sfc_repr *
95 sfc_repr_by_eth_dev(struct rte_eth_dev *eth_dev)
96 {
97         struct sfc_repr *sr = eth_dev->process_private;
98
99         return sr;
100 }
101
/*
 * Add wrapper functions to acquire/release lock to be able to remove or
 * change the lock in one place.
 */

/* Initialize the per-device lock; call once at ethdev init */
static inline void
sfc_repr_lock_init(struct sfc_repr *sr)
{
	rte_spinlock_init(&sr->lock);
}
112
/* Only compiled in for SFC_ASSERT() checks in debug builds */
#if defined(RTE_LIBRTE_SFC_EFX_DEBUG) || defined(RTE_ENABLE_ASSERT)

/* Return non-zero if the per-device lock is currently held */
static inline int
sfc_repr_lock_is_locked(struct sfc_repr *sr)
{
	return rte_spinlock_is_locked(&sr->lock);
}

#endif
122
/* Acquire the per-device lock guarding control operations */
static inline void
sfc_repr_lock(struct sfc_repr *sr)
{
	rte_spinlock_lock(&sr->lock);
}
128
/* Release the per-device lock guarding control operations */
static inline void
sfc_repr_unlock(struct sfc_repr *sr)
{
	rte_spinlock_unlock(&sr->lock);
}
134
/* Destroy the per-device lock (no-op for spinlocks) */
static inline void
sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
{
	/* Just for symmetry of the API */
}
140
141 static void
142 sfc_repr_rx_queue_stop(void *queue)
143 {
144         struct sfc_repr_rxq *rxq = queue;
145
146         if (rxq == NULL)
147                 return;
148
149         rte_ring_reset(rxq->ring);
150 }
151
152 static void
153 sfc_repr_tx_queue_stop(void *queue)
154 {
155         struct sfc_repr_txq *txq = queue;
156
157         if (txq == NULL)
158                 return;
159
160         rte_ring_reset(txq->ring);
161 }
162
163 static uint16_t
164 sfc_repr_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
165 {
166         struct sfc_repr_rxq *rxq = rx_queue;
167         void **objs = (void *)&rx_pkts[0];
168
169         /* mbufs port is already filled correctly by representors proxy */
170         return rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);
171 }
172
/* Tx burst: stamp each mbuf with this representor's egress m-port and
 * enqueue to the proxy ring. Returns the number of packets enqueued.
 */
static uint16_t
sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_repr_txq *txq = tx_queue;
	unsigned int n_tx;
	void **objs;
	uint16_t i;

	/*
	 * mbuf is likely cache-hot. Set flag and egress m-port here instead of
	 * doing that in representors proxy. Also, it should help to avoid
	 * cache bounce. Moreover, potentially, it allows to use one
	 * multi-producer single-consumer ring for all representors.
	 *
	 * The only potential problem is doing so many times if enqueue
	 * fails and sender retries.
	 */
	for (i = 0; i < nb_pkts; ++i) {
		struct rte_mbuf *m = tx_pkts[i];

		/* Mark the mbuf so the datapath overrides its egress m-port */
		m->ol_flags |= sfc_dp_mport_override;
		*RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset,
				   efx_mport_id_t *) = txq->egress_mport;
	}

	objs = (void *)&tx_pkts[0];
	n_tx = rte_ring_sp_enqueue_burst(txq->ring, objs, nb_pkts, NULL);

	/*
	 * Remove m-port override flag from packets that were not enqueued
	 * Setting the flag only for enqueued packets after the burst is
	 * not possible since the ownership of enqueued packets is
	 * transferred to representor proxy.
	 */
	for (i = n_tx; i < nb_pkts; ++i) {
		struct rte_mbuf *m = tx_pkts[i];

		m->ol_flags &= ~sfc_dp_mport_override;
	}

	return n_tx;
}
215
/* Transition the representor to STARTED via the proxy.
 * Must be called with the device lock held; idempotent if already started.
 * Returns 0 on success or a negative errno.
 */
static int
sfc_repr_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_CONFIGURED:
		break;
	case SFC_ETHDEV_STARTED:
		sfcr_info(sr, "already started");
		return 0;
	default:
		ret = -EINVAL;
		goto fail_bad_state;
	}

	sr->state = SFC_ETHDEV_STARTING;

	srs = sfc_repr_shared_by_eth_dev(dev);
	/* Proxy API reports errors as positive errno values */
	ret = sfc_repr_proxy_start_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_start;
	}

	sr->state = SFC_ETHDEV_STARTED;

	sfcr_info(sr, "done");

	return 0;

fail_start:
	/* Roll back to the state the device was in before STARTING */
	sr->state = SFC_ETHDEV_CONFIGURED;

fail_bad_state:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}
261
/* ethdev dev_start callback: start the representor under the device lock */
static int
sfc_repr_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	int ret;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);
	ret = sfc_repr_start(dev);
	sfc_repr_unlock(sr);

	if (ret != 0) {
		sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
		return ret;
	}

	sfcr_info(sr, "done");

	return 0;
}
285
/* Transition the representor back to CONFIGURED: stop it in the proxy and
 * then flush all queue rings. Must be called with the device lock held;
 * idempotent if already stopped. Returns 0 or a negative errno.
 */
static int
sfc_repr_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	unsigned int i;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_STARTED:
		break;
	case SFC_ETHDEV_CONFIGURED:
		sfcr_info(sr, "already stopped");
		return 0;
	default:
		sfcr_err(sr, "stop in unexpected state %u", sr->state);
		SFC_ASSERT(B_FALSE);
		ret = -EINVAL;
		goto fail_bad_state;
	}

	srs = sfc_repr_shared_by_eth_dev(dev);
	/* Proxy API reports errors as positive errno values */
	ret = sfc_repr_proxy_stop_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_stop;
	}

	/* Rings may only be reset after the proxy stops using them */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		sfc_repr_rx_queue_stop(dev->data->rx_queues[i]);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		sfc_repr_tx_queue_stop(dev->data->tx_queues[i]);

	sr->state = SFC_ETHDEV_CONFIGURED;
	sfcr_info(sr, "done");

	return 0;

fail_bad_state:
fail_stop:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));

	return ret;
}
336
/* ethdev dev_stop callback: stop the representor under the device lock */
static int
sfc_repr_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	int ret;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);

	ret = sfc_repr_stop(dev);
	if (ret != 0)
		sfcr_err(sr, "%s() failed to stop representor", __func__);

	sfc_repr_unlock(sr);

	if (ret != 0) {
		sfcr_err(sr, "%s() failed %s", __func__, rte_strerror(-ret));
		return ret;
	}

	sfcr_info(sr, "done");

	return 0;
}
366
/* Validate the requested ethdev configuration against representor
 * capabilities. All checks run so every unsupported option gets logged;
 * returns 0 if acceptable, -EINVAL if any check failed.
 */
static int
sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
		    const struct rte_eth_conf *conf)
{
	const struct rte_eth_rss_conf *rss_conf;
	int ret = 0;

	sfcr_info(sr, "entry");

	if (conf->link_speeds != 0) {
		sfcr_err(sr, "specific link speeds not supported");
		ret = -EINVAL;
	}

	switch (conf->rxmode.mq_mode) {
	case ETH_MQ_RX_RSS:
		/* RSS is only tolerated as a no-op on a single queue */
		if (nb_rx_queues != 1) {
			sfcr_err(sr, "Rx RSS is not supported with %u queues",
				 nb_rx_queues);
			ret = -EINVAL;
			break;
		}

		rss_conf = &conf->rx_adv_conf.rss_conf;
		if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
		    rss_conf->rss_hf != 0) {
			sfcr_err(sr, "Rx RSS configuration is not supported");
			ret = -EINVAL;
		}
		break;
	case ETH_MQ_RX_NONE:
		break;
	default:
		sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
		ret = -EINVAL;
		break;
	}

	if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
		sfcr_err(sr, "Tx mode MQ modes not supported");
		ret = -EINVAL;
	}

	if (conf->lpbk_mode != 0) {
		sfcr_err(sr, "loopback not supported");
		ret = -EINVAL;
	}

	if (conf->dcb_capability_en != 0) {
		sfcr_err(sr, "priority-based flow control not supported");
		ret = -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfcr_err(sr, "Flow Director not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.lsc != 0) {
		sfcr_err(sr, "link status change interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		sfcr_err(sr, "receive queue interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rmv != 0) {
		sfcr_err(sr, "remove interrupt not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done %d", ret);

	return ret;
}
444
445
446 static int
447 sfc_repr_configure(struct sfc_repr *sr, uint16_t nb_rx_queues,
448                    const struct rte_eth_conf *conf)
449 {
450         int ret;
451
452         sfcr_info(sr, "entry");
453
454         SFC_ASSERT(sfc_repr_lock_is_locked(sr));
455
456         ret = sfc_repr_check_conf(sr, nb_rx_queues, conf);
457         if (ret != 0)
458                 goto fail_check_conf;
459
460         sr->state = SFC_ETHDEV_CONFIGURED;
461
462         sfcr_info(sr, "done");
463
464         return 0;
465
466 fail_check_conf:
467         sfcr_info(sr, "failed %s", rte_strerror(-ret));
468         return ret;
469 }
470
/* ethdev dev_configure callback: apply the configuration under the
 * device lock. Configuration is allowed from INITIALIZED (first time)
 * or CONFIGURED (reconfiguration) states only.
 */
static int
sfc_repr_dev_configure(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct rte_eth_dev_data *dev_data = dev->data;
	int ret;

	sfcr_info(sr, "entry n_rxq=%u n_txq=%u",
		  dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_repr_lock(sr);
	switch (sr->state) {
	case SFC_ETHDEV_CONFIGURED:
		/* FALLTHROUGH */
	case SFC_ETHDEV_INITIALIZED:
		ret = sfc_repr_configure(sr, dev_data->nb_rx_queues,
					 &dev_data->dev_conf);
		break;
	default:
		sfcr_err(sr, "unexpected adapter state %u to configure",
			 sr->state);
		ret = -EINVAL;
		break;
	}
	sfc_repr_unlock(sr);

	sfcr_info(sr, "done %s", rte_strerror(-ret));

	return ret;
}
501
502 static int
503 sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
504                        struct rte_eth_dev_info *dev_info)
505 {
506         struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
507
508         dev_info->device = dev->device;
509
510         dev_info->max_rx_queues = SFC_REPR_RXQ_MAX;
511         dev_info->max_tx_queues = SFC_REPR_TXQ_MAX;
512         dev_info->default_rxconf.rx_drop_en = 1;
513         dev_info->switch_info.domain_id = srs->switch_domain_id;
514         dev_info->switch_info.port_id = srs->switch_port_id;
515
516         return 0;
517 }
518
519 static int
520 sfc_repr_dev_link_update(struct rte_eth_dev *dev,
521                          __rte_unused int wait_to_complete)
522 {
523         struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
524         struct rte_eth_link link;
525
526         if (sr->state != SFC_ETHDEV_STARTED) {
527                 sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
528         } else {
529                 memset(&link, 0, sizeof(link));
530                 link.link_status = ETH_LINK_UP;
531                 link.link_speed = ETH_SPEED_NUM_UNKNOWN;
532         }
533
534         return rte_eth_linkstatus_set(dev, &link);
535 }
536
537 static int
538 sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
539                      const char *type_name, uint16_t qid, uint16_t nb_desc,
540                      unsigned int socket_id, struct rte_ring **ring)
541 {
542         char ring_name[RTE_RING_NAMESIZE];
543         int ret;
544
545         ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
546                        pf_port_id, repr_id, type_name, qid);
547         if (ret >= (int)sizeof(ring_name))
548                 return -ENAMETOOLONG;
549
550         /*
551          * Single producer/consumer rings are used since the API for Tx/Rx
552          * packet burst for representors are guaranteed to be called from
553          * a single thread, and the user of the other end (representor proxy)
554          * is also single-threaded.
555          */
556         *ring = rte_ring_create(ring_name, nb_desc, socket_id,
557                                RING_F_SP_ENQ | RING_F_SC_DEQ);
558         if (*ring == NULL)
559                 return -rte_errno;
560
561         return 0;
562 }
563
564 static int
565 sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
566                         const struct rte_eth_rxconf *rx_conf)
567 {
568         int ret = 0;
569
570         sfcr_info(sr, "entry");
571
572         if (rx_conf->rx_thresh.pthresh != 0 ||
573             rx_conf->rx_thresh.hthresh != 0 ||
574             rx_conf->rx_thresh.wthresh != 0) {
575                 sfcr_warn(sr,
576                         "RxQ prefetch/host/writeback thresholds are not supported");
577         }
578
579         if (rx_conf->rx_free_thresh != 0)
580                 sfcr_warn(sr, "RxQ free threshold is not supported");
581
582         if (rx_conf->rx_drop_en == 0)
583                 sfcr_warn(sr, "RxQ drop disable is not supported");
584
585         if (rx_conf->rx_deferred_start) {
586                 sfcr_err(sr, "Deferred start is not supported");
587                 ret = -EINVAL;
588         }
589
590         sfcr_info(sr, "done: %s", rte_strerror(-ret));
591
592         return ret;
593 }
594
595 static int
596 sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
597                         uint16_t nb_rx_desc, unsigned int socket_id,
598                         __rte_unused const struct rte_eth_rxconf *rx_conf,
599                         struct rte_mempool *mb_pool)
600 {
601         struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
602         struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
603         struct sfc_repr_rxq *rxq;
604         int ret;
605
606         sfcr_info(sr, "entry");
607
608         ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
609         if (ret != 0)
610                 goto fail_check_conf;
611
612         ret = -ENOMEM;
613         rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
614                                  RTE_CACHE_LINE_SIZE, socket_id);
615         if (rxq == NULL) {
616                 sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
617                 goto fail_rxq_alloc;
618         }
619
620         ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
621                                    "rx", rx_queue_id, nb_rx_desc,
622                                    socket_id, &rxq->ring);
623         if (ret != 0) {
624                 sfcr_err(sr, "%s() failed to create ring", __func__);
625                 goto fail_ring_create;
626         }
627
628         ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
629                                      rx_queue_id, rxq->ring, mb_pool);
630         if (ret != 0) {
631                 SFC_ASSERT(ret > 0);
632                 ret = -ret;
633                 sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
634                 goto fail_proxy_add_rxq;
635         }
636
637         dev->data->rx_queues[rx_queue_id] = rxq;
638
639         sfcr_info(sr, "done");
640
641         return 0;
642
643 fail_proxy_add_rxq:
644         rte_ring_free(rxq->ring);
645
646 fail_ring_create:
647         rte_free(rxq);
648
649 fail_rxq_alloc:
650 fail_check_conf:
651         sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
652         return ret;
653 }
654
/* ethdev rx_queue_release callback: detach the queue from the proxy
 * before freeing the ring it references, then free the queue itself.
 */
static void
sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];

	sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
	rte_ring_free(rxq->ring);
	rte_free(rxq);
}
665
666 static int
667 sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
668                         const struct rte_eth_txconf *tx_conf)
669 {
670         int ret = 0;
671
672         sfcr_info(sr, "entry");
673
674         if (tx_conf->tx_rs_thresh != 0)
675                 sfcr_warn(sr, "RS bit in transmit descriptor is not supported");
676
677         if (tx_conf->tx_free_thresh != 0)
678                 sfcr_warn(sr, "TxQ free threshold is not supported");
679
680         if (tx_conf->tx_thresh.pthresh != 0 ||
681             tx_conf->tx_thresh.hthresh != 0 ||
682             tx_conf->tx_thresh.wthresh != 0) {
683                 sfcr_warn(sr,
684                         "prefetch/host/writeback thresholds are not supported");
685         }
686
687         if (tx_conf->tx_deferred_start) {
688                 sfcr_err(sr, "Deferred start is not supported");
689                 ret = -EINVAL;
690         }
691
692         sfcr_info(sr, "done: %s", rte_strerror(-ret));
693
694         return ret;
695 }
696
697 static int
698 sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
699                         uint16_t nb_tx_desc, unsigned int socket_id,
700                         const struct rte_eth_txconf *tx_conf)
701 {
702         struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
703         struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
704         struct sfc_repr_txq *txq;
705         int ret;
706
707         sfcr_info(sr, "entry");
708
709         ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
710         if (ret != 0)
711                 goto fail_check_conf;
712
713         ret = -ENOMEM;
714         txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
715                                  RTE_CACHE_LINE_SIZE, socket_id);
716         if (txq == NULL)
717                 goto fail_txq_alloc;
718
719         ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
720                                    "tx", tx_queue_id, nb_tx_desc,
721                                    socket_id, &txq->ring);
722         if (ret != 0)
723                 goto fail_ring_create;
724
725         ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
726                                      tx_queue_id, txq->ring,
727                                      &txq->egress_mport);
728         if (ret != 0)
729                 goto fail_proxy_add_txq;
730
731         dev->data->tx_queues[tx_queue_id] = txq;
732
733         sfcr_info(sr, "done");
734
735         return 0;
736
737 fail_proxy_add_txq:
738         rte_ring_free(txq->ring);
739
740 fail_ring_create:
741         rte_free(txq);
742
743 fail_txq_alloc:
744 fail_check_conf:
745         sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
746         return ret;
747 }
748
/* ethdev tx_queue_release callback: detach the queue from the proxy
 * before freeing the ring it references, then free the queue itself.
 */
static void
sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];

	sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
	rte_ring_free(txq->ring);
	rte_free(txq);
}
759
/* Transition the representor from CONFIGURED back to INITIALIZED.
 * Must be called with the device lock held.
 */
static void
sfc_repr_close(struct sfc_repr *sr)
{
	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
	sr->state = SFC_ETHDEV_CLOSING;

	/* Put representor close actions here */

	sr->state = SFC_ETHDEV_INITIALIZED;
}
772
/* ethdev dev_close callback: stop and close the representor as needed,
 * release all queues, detach from the proxy and free per-process state.
 * Rolls back everything done in sfc_repr_eth_dev_init().
 */
static int
sfc_repr_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	unsigned int i;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);
	/* Walk the state machine down to INITIALIZED, whatever the start */
	switch (sr->state) {
	case SFC_ETHDEV_STARTED:
		sfc_repr_stop(dev);
		SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ETHDEV_CONFIGURED:
		sfc_repr_close(sr);
		SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ETHDEV_INITIALIZED:
		break;
	default:
		sfcr_err(sr, "unexpected adapter state %u on close", sr->state);
		break;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		sfc_repr_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		sfc_repr_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}

	/*
	 * Cleanup all resources.
	 * Rollback primary process sfc_repr_eth_dev_init() below.
	 */

	(void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);

	/* Detach datapath and control callbacks before freeing state */
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	dev->dev_ops = NULL;

	sfc_repr_unlock(sr);
	sfc_repr_lock_fini(sr);

	sfcr_info(sr, "done");

	/* sr was allocated with calloc() in sfc_repr_eth_dev_init() */
	free(sr);

	return 0;
}
829
/** ethdev operations supported by SFC port representors */
static const struct eth_dev_ops sfc_repr_dev_ops = {
	.dev_configure			= sfc_repr_dev_configure,
	.dev_start			= sfc_repr_dev_start,
	.dev_stop			= sfc_repr_dev_stop,
	.dev_close			= sfc_repr_dev_close,
	.dev_infos_get			= sfc_repr_dev_infos_get,
	.link_update			= sfc_repr_dev_link_update,
	.rx_queue_setup			= sfc_repr_rx_queue_setup,
	.rx_queue_release		= sfc_repr_rx_queue_release,
	.tx_queue_setup			= sfc_repr_tx_queue_setup,
	.tx_queue_release		= sfc_repr_tx_queue_release,
};
842
843
/** Parameters passed from sfc_repr_create() to sfc_repr_eth_dev_init() */
struct sfc_repr_init_data {
	uint16_t		pf_port_id;	/* ethdev port ID of the backing PF */
	uint16_t		repr_id;	/* representor ID within the PF */
	uint16_t		switch_domain_id;	/* MAE switch domain ID */
	efx_mport_sel_t		mport_sel;	/* entity m-port selector */
};
850
/* Assign a MAE switch port for the representor.
 * The switch API reports errors as positive errno values; convert the
 * result to the negative errno convention used by ethdev entry points.
 */
static int
sfc_repr_assign_mae_switch_port(uint16_t switch_domain_id,
				const struct sfc_mae_switch_port_request *req,
				uint16_t *switch_port_id)
{
	int rc = sfc_mae_assign_switch_port(switch_domain_id, req,
					    switch_port_id);

	SFC_ASSERT(rc >= 0);

	return -rc;
}
863
/* rte_eth_dev_create() callback: initialize one representor ethdev.
 * Assigns a MAE switch port, registers the port with the representor
 * proxy, allocates per-process state and installs the ops/burst callbacks.
 * Rolled back by sfc_repr_dev_close(). Returns 0 or a negative errno.
 */
static int
sfc_repr_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
{
	const struct sfc_repr_init_data *repr_data = init_params;
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_mae_switch_port_request switch_port_request;
	efx_mport_sel_t ethdev_mport_sel;
	struct sfc_repr *sr;
	int ret;

	/*
	 * Currently there is no mport we can use for representor's
	 * ethdev. Use an invalid one for now. This way representors
	 * can be instantiated.
	 */
	efx_mae_mport_invalid(&ethdev_mport_sel);

	memset(&switch_port_request, 0, sizeof(switch_port_request));
	switch_port_request.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
	switch_port_request.ethdev_mportp = &ethdev_mport_sel;
	switch_port_request.entity_mportp = &repr_data->mport_sel;
	switch_port_request.ethdev_port_id = dev->data->port_id;

	ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
					      &switch_port_request,
					      &srs->switch_port_id);
	if (ret != 0) {
		SFC_GENERIC_LOG(ERR,
			"%s() failed to assign MAE switch port (domain id %u)",
			__func__, repr_data->switch_domain_id);
		goto fail_mae_assign_switch_port;
	}

	/* Proxy API reports errors as positive errno values */
	ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
				      repr_data->repr_id,
				      dev->data->port_id,
				      &repr_data->mport_sel);
	if (ret != 0) {
		SFC_GENERIC_LOG(ERR, "%s() failed to add repr proxy port",
				__func__);
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_create_port;
	}

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sr = calloc(1, sizeof(*sr));
	if (sr == NULL) {
		ret = -ENOMEM;
		goto fail_alloc_sr;
	}

	/* Hold the lock while the device is only partially initialized */
	sfc_repr_lock_init(sr);
	sfc_repr_lock(sr);

	dev->process_private = sr;

	srs->pf_port_id = repr_data->pf_port_id;
	srs->repr_id = repr_data->repr_id;
	srs->switch_domain_id = repr_data->switch_domain_id;

	dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	dev->data->representor_id = srs->repr_id;
	dev->data->backer_port_id = srs->pf_port_id;

	/* All-zeros MAC address; representors have no MAC of their own here */
	dev->data->mac_addrs = rte_zmalloc("sfcr", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		ret = -ENOMEM;
		goto fail_mac_addrs;
	}

	dev->rx_pkt_burst = sfc_repr_rx_burst;
	dev->tx_pkt_burst = sfc_repr_tx_burst;
	dev->dev_ops = &sfc_repr_dev_ops;

	sr->state = SFC_ETHDEV_INITIALIZED;
	sfc_repr_unlock(sr);

	return 0;

fail_mac_addrs:
	sfc_repr_unlock(sr);
	free(sr);

fail_alloc_sr:
	(void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
				      repr_data->repr_id);

fail_create_port:
fail_mae_assign_switch_port:
	SFC_GENERIC_LOG(ERR, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}
960
961 int
962 sfc_repr_create(struct rte_eth_dev *parent, uint16_t representor_id,
963                 uint16_t switch_domain_id, const efx_mport_sel_t *mport_sel)
964 {
965         struct sfc_repr_init_data repr_data;
966         char name[RTE_ETH_NAME_MAX_LEN];
967         int ret;
968         struct rte_eth_dev *dev;
969
970         if (snprintf(name, sizeof(name), "net_%s_representor_%u",
971                      parent->device->name, representor_id) >=
972                         (int)sizeof(name)) {
973                 SFC_GENERIC_LOG(ERR, "%s() failed name too long", __func__);
974                 return -ENAMETOOLONG;
975         }
976
977         dev = rte_eth_dev_allocated(name);
978         if (dev == NULL) {
979                 memset(&repr_data, 0, sizeof(repr_data));
980                 repr_data.pf_port_id = parent->data->port_id;
981                 repr_data.repr_id = representor_id;
982                 repr_data.switch_domain_id = switch_domain_id;
983                 repr_data.mport_sel = *mport_sel;
984
985                 ret = rte_eth_dev_create(parent->device, name,
986                                          sizeof(struct sfc_repr_shared),
987                                          NULL, NULL,
988                                          sfc_repr_eth_dev_init, &repr_data);
989                 if (ret != 0) {
990                         SFC_GENERIC_LOG(ERR, "%s() failed to create device",
991                                         __func__);
992                         return ret;
993                 }
994         }
995
996         return 0;
997 }