drivers/net/sfc/sfc_repr.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>

#include "efx.h"

#include "sfc_log.h"
#include "sfc_debug.h"
#include "sfc_repr.h"
#include "sfc_ethdev_state.h"
#include "sfc_repr_proxy_api.h"
#include "sfc_switch.h"

/** Multi-process shared representor private data */
struct sfc_repr_shared {
        uint16_t                pf_port_id;
        uint16_t                repr_id;
        uint16_t                switch_domain_id;
        uint16_t                switch_port_id;
};

struct sfc_repr_rxq {
        /* Datapath members */
        struct rte_ring                 *ring;
};

struct sfc_repr_txq {
        /* Datapath members */
        struct rte_ring                 *ring;
        efx_mport_id_t                  egress_mport;
};

/** Primary process representor private data */
struct sfc_repr {
        /**
         * PMD setup and configuration is not thread safe. Since it is not
         * performance sensitive, it is better to guarantee thread safety
         * and add a device-level lock. Adapter control operations which
         * change its state should acquire the lock.
         */
        rte_spinlock_t                  lock;
        enum sfc_ethdev_state           state;
};
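
/*
 * Note on memory placement: struct sfc_repr_shared lives in
 * eth_dev->data->dev_private (rte_malloc()-backed shared memory visible
 * to secondary processes), while struct sfc_repr is per-process state
 * kept in eth_dev->process_private and allocated from the regular heap
 * (see sfc_repr_eth_dev_init() below).
 */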

#define sfcr_err(sr, ...) \
        do {                                                            \
                const struct sfc_repr *_sr = (sr);                      \
                                                                        \
                (void)_sr;                                              \
                SFC_GENERIC_LOG(ERR, __VA_ARGS__);                      \
        } while (0)

#define sfcr_warn(sr, ...) \
        do {                                                            \
                const struct sfc_repr *_sr = (sr);                      \
                                                                        \
                (void)_sr;                                              \
                SFC_GENERIC_LOG(WARNING, __VA_ARGS__);                  \
        } while (0)

#define sfcr_info(sr, ...) \
        do {                                                            \
                const struct sfc_repr *_sr = (sr);                      \
                                                                        \
                (void)_sr;                                              \
                SFC_GENERIC_LOG(INFO,                                   \
                                RTE_FMT("%s() "                         \
                                RTE_FMT_HEAD(__VA_ARGS__ ,),            \
                                __func__,                               \
                                RTE_FMT_TAIL(__VA_ARGS__ ,)));          \
        } while (0)
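
/*
 * In each macro above, the _sr temporary type-checks the argument as a
 * struct sfc_repr pointer and the (void) cast silences the unused
 * variable warning (per-device log levels are not used yet). sfcr_info()
 * also prefixes the message with the calling function's name.
 */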

static inline struct sfc_repr_shared *
sfc_repr_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
{
        struct sfc_repr_shared *srs = eth_dev->data->dev_private;

        return srs;
}

static inline struct sfc_repr *
sfc_repr_by_eth_dev(struct rte_eth_dev *eth_dev)
{
        struct sfc_repr *sr = eth_dev->process_private;

        return sr;
}

/*
 * Wrapper functions to acquire/release the lock make it possible to
 * remove or change the locking scheme in one place.
 */

static inline void
sfc_repr_lock_init(struct sfc_repr *sr)
{
        rte_spinlock_init(&sr->lock);
}

#if defined(RTE_LIBRTE_SFC_EFX_DEBUG) || defined(RTE_ENABLE_ASSERT)

static inline int
sfc_repr_lock_is_locked(struct sfc_repr *sr)
{
        return rte_spinlock_is_locked(&sr->lock);
}

#endif

static inline void
sfc_repr_lock(struct sfc_repr *sr)
{
        rte_spinlock_lock(&sr->lock);
}

static inline void
sfc_repr_unlock(struct sfc_repr *sr)
{
        rte_spinlock_unlock(&sr->lock);
}

static inline void
sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
{
        /* Just for symmetry of the API */
}

static int
sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
                    const struct rte_eth_conf *conf)
{
        const struct rte_eth_rss_conf *rss_conf;
        int ret = 0;

        sfcr_info(sr, "entry");

        if (conf->link_speeds != 0) {
                sfcr_err(sr, "specific link speeds not supported");
                ret = -EINVAL;
        }

        switch (conf->rxmode.mq_mode) {
        case ETH_MQ_RX_RSS:
                if (nb_rx_queues != 1) {
                        sfcr_err(sr, "Rx RSS is not supported with %u queues",
                                 nb_rx_queues);
                        ret = -EINVAL;
                        break;
                }

                rss_conf = &conf->rx_adv_conf.rss_conf;
                if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
                    rss_conf->rss_hf != 0) {
                        sfcr_err(sr, "Rx RSS configuration is not supported");
                        ret = -EINVAL;
                }
                break;
        case ETH_MQ_RX_NONE:
                break;
        default:
                sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
                ret = -EINVAL;
                break;
        }

        if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
                sfcr_err(sr, "Tx mode MQ modes not supported");
                ret = -EINVAL;
        }

        if (conf->lpbk_mode != 0) {
                sfcr_err(sr, "loopback not supported");
                ret = -EINVAL;
        }

        if (conf->dcb_capability_en != 0) {
                sfcr_err(sr, "priority-based flow control not supported");
                ret = -EINVAL;
        }

        if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
                sfcr_err(sr, "Flow Director not supported");
                ret = -EINVAL;
        }

        if (conf->intr_conf.lsc != 0) {
                sfcr_err(sr, "link status change interrupt not supported");
                ret = -EINVAL;
        }

        if (conf->intr_conf.rxq != 0) {
                sfcr_err(sr, "receive queue interrupt not supported");
                ret = -EINVAL;
        }

        if (conf->intr_conf.rmv != 0) {
                sfcr_err(sr, "remove interrupt not supported");
                ret = -EINVAL;
        }

        sfcr_info(sr, "done %d", ret);

        return ret;
}

static int
sfc_repr_configure(struct sfc_repr *sr, uint16_t nb_rx_queues,
                   const struct rte_eth_conf *conf)
{
        int ret;

        sfcr_info(sr, "entry");

        SFC_ASSERT(sfc_repr_lock_is_locked(sr));

        ret = sfc_repr_check_conf(sr, nb_rx_queues, conf);
        if (ret != 0)
                goto fail_check_conf;

        sr->state = SFC_ETHDEV_CONFIGURED;

        sfcr_info(sr, "done");

        return 0;

fail_check_conf:
        sfcr_info(sr, "failed %s", rte_strerror(-ret));
        return ret;
}

static int
sfc_repr_dev_configure(struct rte_eth_dev *dev)
{
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct rte_eth_dev_data *dev_data = dev->data;
        int ret;

        sfcr_info(sr, "entry n_rxq=%u n_txq=%u",
                  dev_data->nb_rx_queues, dev_data->nb_tx_queues);

        sfc_repr_lock(sr);
        switch (sr->state) {
        case SFC_ETHDEV_CONFIGURED:
                /* FALLTHROUGH */
        case SFC_ETHDEV_INITIALIZED:
                ret = sfc_repr_configure(sr, dev_data->nb_rx_queues,
                                         &dev_data->dev_conf);
                break;
        default:
                sfcr_err(sr, "unexpected adapter state %u to configure",
                         sr->state);
                ret = -EINVAL;
                break;
        }
        sfc_repr_unlock(sr);

        sfcr_info(sr, "done %s", rte_strerror(-ret));

        return ret;
}

static int
sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
                       struct rte_eth_dev_info *dev_info)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);

        dev_info->device = dev->device;

        dev_info->max_rx_queues = SFC_REPR_RXQ_MAX;
        dev_info->max_tx_queues = SFC_REPR_TXQ_MAX;
        dev_info->default_rxconf.rx_drop_en = 1;
        dev_info->switch_info.domain_id = srs->switch_domain_id;
        dev_info->switch_info.port_id = srs->switch_port_id;

        return 0;
}
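
/*
 * The switch_info fields reported by sfc_repr_dev_infos_get() identify
 * this representor's port within its switch domain, letting applications
 * relate the ethdev to the underlying switch topology.
 */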

static int
sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
                     const char *type_name, uint16_t qid, uint16_t nb_desc,
                     unsigned int socket_id, struct rte_ring **ring)
{
        char ring_name[RTE_RING_NAMESIZE];
        int ret;

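        /*
         * Ring names must be unique process-wide; for example, PF ethdev
         * port 1, representor 3, Rx queue 0 yields "sfc_1_repr_3_rxq0".
         */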
        ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
                       pf_port_id, repr_id, type_name, qid);
        if (ret >= (int)sizeof(ring_name))
                return -ENAMETOOLONG;

        /*
         * Single producer/consumer rings are used since the Tx/Rx packet
         * burst API for representors is guaranteed to be called from a
         * single thread, and the user of the other end (the representor
         * proxy) is also single-threaded.
         */
        *ring = rte_ring_create(ring_name, nb_desc, socket_id,
                                RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (*ring == NULL)
                return -rte_errno;

        return 0;
}

static int
sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
                        const struct rte_eth_rxconf *rx_conf)
{
        int ret = 0;

        sfcr_info(sr, "entry");

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfcr_warn(sr,
                        "RxQ prefetch/host/writeback thresholds are not supported");
        }

        if (rx_conf->rx_free_thresh != 0)
                sfcr_warn(sr, "RxQ free threshold is not supported");

        if (rx_conf->rx_drop_en == 0)
                sfcr_warn(sr, "RxQ drop disable is not supported");

        if (rx_conf->rx_deferred_start) {
                sfcr_err(sr, "Deferred start is not supported");
                ret = -EINVAL;
        }

        sfcr_info(sr, "done: %s", rte_strerror(-ret));

        return ret;
}

static int
sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                        uint16_t nb_rx_desc, unsigned int socket_id,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mb_pool)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct sfc_repr_rxq *rxq;
        int ret;

        sfcr_info(sr, "entry");

        ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
        if (ret != 0)
                goto fail_check_conf;

        ret = -ENOMEM;
        rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL) {
                sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
                goto fail_rxq_alloc;
        }

        ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
                                   "rx", rx_queue_id, nb_rx_desc,
                                   socket_id, &rxq->ring);
        if (ret != 0) {
                sfcr_err(sr, "%s() failed to create ring", __func__);
                goto fail_ring_create;
        }

        ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
                                     rx_queue_id, rxq->ring, mb_pool);
        if (ret != 0) {
                SFC_ASSERT(ret > 0);
                ret = -ret;
                sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
                goto fail_proxy_add_rxq;
        }

        dev->data->rx_queues[rx_queue_id] = rxq;

        sfcr_info(sr, "done");

        return 0;

fail_proxy_add_rxq:
        rte_ring_free(rxq->ring);

fail_ring_create:
        rte_free(rxq);

fail_rxq_alloc:
fail_check_conf:
        sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
        return ret;
}

static void
sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];

        sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
        rte_ring_free(rxq->ring);
        rte_free(rxq);
}

static int
sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
                        const struct rte_eth_txconf *tx_conf)
{
        int ret = 0;

        sfcr_info(sr, "entry");

        if (tx_conf->tx_rs_thresh != 0)
                sfcr_warn(sr, "RS bit in transmit descriptor is not supported");

        if (tx_conf->tx_free_thresh != 0)
                sfcr_warn(sr, "TxQ free threshold is not supported");

        if (tx_conf->tx_thresh.pthresh != 0 ||
            tx_conf->tx_thresh.hthresh != 0 ||
            tx_conf->tx_thresh.wthresh != 0) {
                sfcr_warn(sr,
                        "prefetch/host/writeback thresholds are not supported");
        }

        if (tx_conf->tx_deferred_start) {
                sfcr_err(sr, "Deferred start is not supported");
                ret = -EINVAL;
        }

        sfcr_info(sr, "done: %s", rte_strerror(-ret));

        return ret;
}

static int
sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                        uint16_t nb_tx_desc, unsigned int socket_id,
                        const struct rte_eth_txconf *tx_conf)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct sfc_repr_txq *txq;
        int ret;

        sfcr_info(sr, "entry");

        ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
        if (ret != 0)
                goto fail_check_conf;

        ret = -ENOMEM;
        txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
                goto fail_txq_alloc;

        ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
                                   "tx", tx_queue_id, nb_tx_desc,
                                   socket_id, &txq->ring);
        if (ret != 0)
                goto fail_ring_create;

        ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
                                     tx_queue_id, txq->ring,
                                     &txq->egress_mport);
        if (ret != 0)
                goto fail_proxy_add_txq;

        dev->data->tx_queues[tx_queue_id] = txq;

        sfcr_info(sr, "done");

        return 0;

fail_proxy_add_txq:
        rte_ring_free(txq->ring);

fail_ring_create:
        rte_free(txq);

fail_txq_alloc:
fail_check_conf:
        sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
        return ret;
}

static void
sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];

        sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
        rte_ring_free(txq->ring);
        rte_free(txq);
}

static void
sfc_repr_close(struct sfc_repr *sr)
{
        SFC_ASSERT(sfc_repr_lock_is_locked(sr));

        SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
        sr->state = SFC_ETHDEV_CLOSING;

        /* Put representor close actions here */

        sr->state = SFC_ETHDEV_INITIALIZED;
}

static int
sfc_repr_dev_close(struct rte_eth_dev *dev)
{
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        unsigned int i;

        sfcr_info(sr, "entry");

        sfc_repr_lock(sr);
        switch (sr->state) {
        case SFC_ETHDEV_CONFIGURED:
                sfc_repr_close(sr);
                SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
                /* FALLTHROUGH */
        case SFC_ETHDEV_INITIALIZED:
                break;
        default:
                sfcr_err(sr, "unexpected adapter state %u on close", sr->state);
                break;
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                sfc_repr_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                sfc_repr_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }

        /*
         * Clean up all resources.
         * The code below rolls back the primary process
         * sfc_repr_eth_dev_init().
         */

        (void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);

        dev->dev_ops = NULL;

        sfc_repr_unlock(sr);
        sfc_repr_lock_fini(sr);

        sfcr_info(sr, "done");

        free(sr);

        return 0;
}

static const struct eth_dev_ops sfc_repr_dev_ops = {
        .dev_configure                  = sfc_repr_dev_configure,
        .dev_close                      = sfc_repr_dev_close,
        .dev_infos_get                  = sfc_repr_dev_infos_get,
        .rx_queue_setup                 = sfc_repr_rx_queue_setup,
        .rx_queue_release               = sfc_repr_rx_queue_release,
        .tx_queue_setup                 = sfc_repr_tx_queue_setup,
        .tx_queue_release               = sfc_repr_tx_queue_release,
};
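
/*
 * Only control-path operations are implemented here; packet transfer
 * happens over the per-queue rings registered with the representor
 * proxy in the queue setup callbacks above.
 */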

struct sfc_repr_init_data {
        uint16_t                pf_port_id;
        uint16_t                repr_id;
        uint16_t                switch_domain_id;
        efx_mport_sel_t         mport_sel;
};

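/*
 * sfc_mae_assign_switch_port() reports failure as a positive errno;
 * convert it to the negative errno convention used by ethdev callbacks.
 */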
static int
sfc_repr_assign_mae_switch_port(uint16_t switch_domain_id,
                                const struct sfc_mae_switch_port_request *req,
                                uint16_t *switch_port_id)
{
        int rc;

        rc = sfc_mae_assign_switch_port(switch_domain_id, req, switch_port_id);

        SFC_ASSERT(rc >= 0);
        return -rc;
}

static int
sfc_repr_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
{
        const struct sfc_repr_init_data *repr_data = init_params;
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_mae_switch_port_request switch_port_request;
        efx_mport_sel_t ethdev_mport_sel;
        struct sfc_repr *sr;
        int ret;

        /*
         * Currently there is no mport we can use for the representor's
         * ethdev. Use an invalid one for now so that representors can
         * still be instantiated.
         */
        efx_mae_mport_invalid(&ethdev_mport_sel);

        memset(&switch_port_request, 0, sizeof(switch_port_request));
        switch_port_request.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
        switch_port_request.ethdev_mportp = &ethdev_mport_sel;
        switch_port_request.entity_mportp = &repr_data->mport_sel;
        switch_port_request.ethdev_port_id = dev->data->port_id;

        ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
                                              &switch_port_request,
                                              &srs->switch_port_id);
        if (ret != 0) {
                SFC_GENERIC_LOG(ERR,
                        "%s() failed to assign MAE switch port (domain id %u)",
                        __func__, repr_data->switch_domain_id);
                goto fail_mae_assign_switch_port;
        }

        ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
                                      repr_data->repr_id,
                                      dev->data->port_id,
                                      &repr_data->mport_sel);
        if (ret != 0) {
                SFC_GENERIC_LOG(ERR, "%s() failed to add repr proxy port",
                                __func__);
                SFC_ASSERT(ret > 0);
                ret = -ret;
                goto fail_create_port;
        }

        /*
         * Allocate process private data from the heap, since it must not
         * be located in shared memory allocated using the rte_malloc() API.
         */
        sr = calloc(1, sizeof(*sr));
        if (sr == NULL) {
                ret = -ENOMEM;
                goto fail_alloc_sr;
        }

        sfc_repr_lock_init(sr);
        sfc_repr_lock(sr);

        dev->process_private = sr;

        srs->pf_port_id = repr_data->pf_port_id;
        srs->repr_id = repr_data->repr_id;
        srs->switch_domain_id = repr_data->switch_domain_id;

        dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
        dev->data->representor_id = srs->repr_id;
        dev->data->backer_port_id = srs->pf_port_id;

        dev->data->mac_addrs = rte_zmalloc("sfcr", RTE_ETHER_ADDR_LEN, 0);
        if (dev->data->mac_addrs == NULL) {
                ret = -ENOMEM;
                goto fail_mac_addrs;
        }

        dev->dev_ops = &sfc_repr_dev_ops;

        sr->state = SFC_ETHDEV_INITIALIZED;
        sfc_repr_unlock(sr);

        return 0;

fail_mac_addrs:
        sfc_repr_unlock(sr);
        free(sr);

fail_alloc_sr:
        (void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
                                      repr_data->repr_id);

fail_create_port:
fail_mae_assign_switch_port:
        SFC_GENERIC_LOG(ERR, "%s() failed: %s", __func__, rte_strerror(-ret));
        return ret;
}

int
sfc_repr_create(struct rte_eth_dev *parent, uint16_t representor_id,
                uint16_t switch_domain_id, const efx_mport_sel_t *mport_sel)
{
        struct sfc_repr_init_data repr_data;
        char name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (snprintf(name, sizeof(name), "net_%s_representor_%u",
                     parent->device->name, representor_id) >=
                        (int)sizeof(name)) {
                SFC_GENERIC_LOG(ERR, "%s() failed: name too long", __func__);
                return -ENAMETOOLONG;
        }

        memset(&repr_data, 0, sizeof(repr_data));
        repr_data.pf_port_id = parent->data->port_id;
        repr_data.repr_id = representor_id;
        repr_data.switch_domain_id = switch_domain_id;
        repr_data.mport_sel = *mport_sel;

        ret = rte_eth_dev_create(parent->device, name,
                                 sizeof(struct sfc_repr_shared),
                                 NULL, NULL,
                                 sfc_repr_eth_dev_init, &repr_data);
        if (ret != 0)
                SFC_GENERIC_LOG(ERR, "%s() failed to create device", __func__);

        SFC_GENERIC_LOG(INFO, "%s() done: %s", __func__, rte_strerror(-ret));

        return ret;
}
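
/*
 * Usage sketch (illustrative only, not part of the driver): a parent PF
 * ethdev would call sfc_repr_create() once per representor during probe,
 * roughly as below. Variable names here are assumptions for illustration.
 *
 *        efx_mport_sel_t mport_sel;
 *        int rc;
 *
 *        // ... select the m-port of the entity to be represented ...
 *        rc = sfc_repr_create(parent_ethdev, representor_id,
 *                             switch_domain_id, &mport_sel);
 *        if (rc != 0)
 *                handle_error(-rc);        // rc is a negative errno value
 */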