/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>

#include "efx.h"

#include "sfc_log.h"
#include "sfc_debug.h"
#include "sfc_repr.h"
#include "sfc_ethdev_state.h"
#include "sfc_repr_proxy_api.h"
#include "sfc_switch.h"
#include "sfc_dp_tx.h"

/** Multi-process shared representor private data */
struct sfc_repr_shared {
	uint16_t		pf_port_id;
	uint16_t		repr_id;
	uint16_t		switch_domain_id;
	uint16_t		switch_port_id;
};

struct sfc_repr_rxq {
	/* Datapath members */
	struct rte_ring		*ring;
};

struct sfc_repr_txq {
	/* Datapath members */
	struct rte_ring		*ring;
	efx_mport_id_t		egress_mport;
};

/** Primary process representor private data */
struct sfc_repr {
	/**
	 * PMD setup and configuration is not thread safe. Since it is not
	 * performance sensitive, it is better to guarantee thread safety
	 * and add a device-level lock. Adapter control operations which
	 * change its state should acquire the lock.
	 */
	rte_spinlock_t		lock;
	enum sfc_ethdev_state	state;
};

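/*
 * Logging helpers. The representor argument is accepted but unused for
 * now (see the (void)_sr below); it keeps call sites uniform so that
 * per-port logging could be introduced later without touching callers.
 */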
#define sfcr_err(sr, ...) \
	do {							\
		const struct sfc_repr *_sr = (sr);		\
								\
		(void)_sr;					\
		SFC_GENERIC_LOG(ERR, __VA_ARGS__);		\
	} while (0)

#define sfcr_warn(sr, ...) \
	do {							\
		const struct sfc_repr *_sr = (sr);		\
								\
		(void)_sr;					\
		SFC_GENERIC_LOG(WARNING, __VA_ARGS__);		\
	} while (0)

#define sfcr_info(sr, ...) \
	do {							\
		const struct sfc_repr *_sr = (sr);		\
								\
		(void)_sr;					\
		SFC_GENERIC_LOG(INFO,				\
				RTE_FMT("%s() "			\
				RTE_FMT_HEAD(__VA_ARGS__ ,),	\
				__func__,			\
				RTE_FMT_TAIL(__VA_ARGS__ ,)));	\
	} while (0)

static inline struct sfc_repr_shared *
sfc_repr_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
{
	struct sfc_repr_shared *srs = eth_dev->data->dev_private;

	return srs;
}

static inline struct sfc_repr *
sfc_repr_by_eth_dev(struct rte_eth_dev *eth_dev)
{
	struct sfc_repr *sr = eth_dev->process_private;

	return sr;
}

/*
 * Add wrapper functions to acquire/release the lock to be able to
 * remove or change the lock in one place.
 */
static inline void
sfc_repr_lock_init(struct sfc_repr *sr)
{
	rte_spinlock_init(&sr->lock);
}

#if defined(RTE_LIBRTE_SFC_EFX_DEBUG) || defined(RTE_ENABLE_ASSERT)

static inline int
sfc_repr_lock_is_locked(struct sfc_repr *sr)
{
	return rte_spinlock_is_locked(&sr->lock);
}

#endif

static inline void
sfc_repr_lock(struct sfc_repr *sr)
{
	rte_spinlock_lock(&sr->lock);
}

static inline void
sfc_repr_unlock(struct sfc_repr *sr)
{
	rte_spinlock_unlock(&sr->lock);
}

static inline void
sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
{
	/* Just for symmetry of the API */
}

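/*
 * Queue stop helpers. A representor queue owns no hardware state: it is
 * backed by an rte_ring shared with the representor proxy, so stopping
 * it only requires resetting that ring.
 */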
static void
sfc_repr_rx_queue_stop(void *queue)
{
	struct sfc_repr_rxq *rxq = queue;

	if (rxq == NULL)
		return;

	rte_ring_reset(rxq->ring);
}

static void
sfc_repr_tx_queue_stop(void *queue)
{
	struct sfc_repr_txq *txq = queue;

	if (txq == NULL)
		return;

	rte_ring_reset(txq->ring);
}

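/*
 * Datapath. The representor ethdev never touches the adapter directly:
 * Rx/Tx bursts only move mbufs through the per-queue rings, while the
 * representor proxy at the other end of each ring performs the actual
 * transfers on the parent port.
 */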
static uint16_t
sfc_repr_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_repr_rxq *rxq = rx_queue;
	void **objs = (void *)&rx_pkts[0];

	/* The mbuf port is already filled in correctly by the representor proxy */
	return rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);
}

static uint16_t
sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_repr_txq *txq = tx_queue;
	unsigned int n_tx;
	void **objs;
	uint16_t i;

	/*
	 * mbuf is likely cache-hot. Set the flag and egress m-port here
	 * instead of doing that in the representor proxy. It should also
	 * help to avoid a cache bounce. Moreover, it potentially allows
	 * using one multi-producer single-consumer ring for all
	 * representors.
	 *
	 * The only potential problem is doing this work many times if the
	 * enqueue fails and the sender retries.
	 */
	for (i = 0; i < nb_pkts; ++i) {
		struct rte_mbuf *m = tx_pkts[i];

		m->ol_flags |= sfc_dp_mport_override;
		*RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset,
				   efx_mport_id_t *) = txq->egress_mport;
	}

	objs = (void *)&tx_pkts[0];
	n_tx = rte_ring_sp_enqueue_burst(txq->ring, objs, nb_pkts, NULL);

	/*
	 * Remove the m-port override flag from packets that were not
	 * enqueued. Setting the flag only for enqueued packets after the
	 * burst is not possible since the ownership of enqueued packets is
	 * transferred to the representor proxy.
	 */
	for (i = n_tx; i < nb_pkts; ++i) {
		struct rte_mbuf *m = tx_pkts[i];

		m->ol_flags &= ~sfc_dp_mport_override;
	}

	return n_tx;
}

static int
sfc_repr_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_CONFIGURED:
		break;
	case SFC_ETHDEV_STARTED:
		sfcr_info(sr, "already started");
		return 0;
	default:
		ret = -EINVAL;
		goto fail_bad_state;
	}

	sr->state = SFC_ETHDEV_STARTING;

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_start_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_start;
	}

	sr->state = SFC_ETHDEV_STARTED;

	sfcr_info(sr, "done");

	return 0;

fail_start:
	sr->state = SFC_ETHDEV_CONFIGURED;

fail_bad_state:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

static int
sfc_repr_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	int ret;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);
	ret = sfc_repr_start(dev);
	sfc_repr_unlock(sr);

	if (ret != 0)
		goto fail_start;

	sfcr_info(sr, "done");

	return 0;

fail_start:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

static int
sfc_repr_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;
	unsigned int i;
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	switch (sr->state) {
	case SFC_ETHDEV_STARTED:
		break;
	case SFC_ETHDEV_CONFIGURED:
		sfcr_info(sr, "already stopped");
		return 0;
	default:
		sfcr_err(sr, "stop in unexpected state %u", sr->state);
		SFC_ASSERT(B_FALSE);
		ret = -EINVAL;
		goto fail_bad_state;
	}

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_stop_repr(srs->pf_port_id, srs->repr_id);
	if (ret != 0) {
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_stop;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		sfc_repr_rx_queue_stop(dev->data->rx_queues[i]);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		sfc_repr_tx_queue_stop(dev->data->tx_queues[i]);

	sr->state = SFC_ETHDEV_CONFIGURED;
	sfcr_info(sr, "done");

	return 0;

fail_bad_state:
fail_stop:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

static int
sfc_repr_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	int ret;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);

	ret = sfc_repr_stop(dev);
	if (ret != 0) {
		sfcr_err(sr, "%s() failed to stop representor", __func__);
		goto fail_stop;
	}

	sfc_repr_unlock(sr);

	sfcr_info(sr, "done");

	return 0;

fail_stop:
	sfc_repr_unlock(sr);

	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));

	return ret;
}

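/*
 * Validate the requested configuration. Representors accept only the
 * default settings; every check below logs its own error and validation
 * continues, so a single call reports all problems at once.
 */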
static int
sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
		    const struct rte_eth_conf *conf)
{
	const struct rte_eth_rss_conf *rss_conf;
	int ret = 0;

	sfcr_info(sr, "entry");

	if (conf->link_speeds != 0) {
		sfcr_err(sr, "specific link speeds not supported");
		ret = -EINVAL;
	}

	switch (conf->rxmode.mq_mode) {
	case ETH_MQ_RX_RSS:
		if (nb_rx_queues != 1) {
			sfcr_err(sr, "Rx RSS is not supported with %u queues",
				 nb_rx_queues);
			ret = -EINVAL;
			break;
		}

		rss_conf = &conf->rx_adv_conf.rss_conf;
		if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
		    rss_conf->rss_hf != 0) {
			sfcr_err(sr, "Rx RSS configuration is not supported");
			ret = -EINVAL;
		}
		break;
	case ETH_MQ_RX_NONE:
		break;
	default:
		sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
		ret = -EINVAL;
		break;
	}

	if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
		sfcr_err(sr, "Tx mode MQ modes not supported");
		ret = -EINVAL;
	}

	if (conf->lpbk_mode != 0) {
		sfcr_err(sr, "loopback not supported");
		ret = -EINVAL;
	}

	if (conf->dcb_capability_en != 0) {
		sfcr_err(sr, "priority-based flow control not supported");
		ret = -EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfcr_err(sr, "Flow Director not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.lsc != 0) {
		sfcr_err(sr, "link status change interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		sfcr_err(sr, "receive queue interrupt not supported");
		ret = -EINVAL;
	}

	if (conf->intr_conf.rmv != 0) {
		sfcr_err(sr, "remove interrupt not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done %d", ret);

	return ret;
}

static int
sfc_repr_configure(struct sfc_repr *sr, uint16_t nb_rx_queues,
		   const struct rte_eth_conf *conf)
{
	int ret;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	ret = sfc_repr_check_conf(sr, nb_rx_queues, conf);
	if (ret != 0)
		goto fail_check_conf;

	sr->state = SFC_ETHDEV_CONFIGURED;

	sfcr_info(sr, "done");

	return 0;

fail_check_conf:
	sfcr_info(sr, "failed: %s", rte_strerror(-ret));
	return ret;
}

static int
sfc_repr_dev_configure(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct rte_eth_dev_data *dev_data = dev->data;
	int ret;

	sfcr_info(sr, "entry n_rxq=%u n_txq=%u",
		  dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_repr_lock(sr);
	switch (sr->state) {
	case SFC_ETHDEV_CONFIGURED:
		/* FALLTHROUGH */
	case SFC_ETHDEV_INITIALIZED:
		ret = sfc_repr_configure(sr, dev_data->nb_rx_queues,
					 &dev_data->dev_conf);
		break;
	default:
		sfcr_err(sr, "unexpected adapter state %u to configure",
			 sr->state);
		ret = -EINVAL;
		break;
	}
	sfc_repr_unlock(sr);

	sfcr_info(sr, "done %s", rte_strerror(-ret));

	return ret;
}

static int
sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);

	dev_info->device = dev->device;

	dev_info->max_rx_queues = SFC_REPR_RXQ_MAX;
	dev_info->max_tx_queues = SFC_REPR_TXQ_MAX;
	dev_info->default_rxconf.rx_drop_en = 1;
	dev_info->switch_info.domain_id = srs->switch_domain_id;
	dev_info->switch_info.port_id = srs->switch_port_id;

	return 0;
}

static int
sfc_repr_dev_link_update(struct rte_eth_dev *dev,
			 __rte_unused int wait_to_complete)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct rte_eth_link link;

	if (sr->state != SFC_ETHDEV_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
	} else {
		memset(&link, 0, sizeof(link));
		link.link_status = ETH_LINK_UP;
		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

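/*
 * Create the ring that connects a representor queue to the representor
 * proxy. The ring name encodes the PF port, representor ID, direction
 * and queue ID, which keeps it unique process-wide.
 */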
static int
sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
		     const char *type_name, uint16_t qid, uint16_t nb_desc,
		     unsigned int socket_id, struct rte_ring **ring)
{
	char ring_name[RTE_RING_NAMESIZE];
	int ret;

	ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
		       pf_port_id, repr_id, type_name, qid);
	if (ret >= (int)sizeof(ring_name))
		return -ENAMETOOLONG;

	/*
	 * Single producer/consumer rings are used since the Tx/Rx packet
	 * burst API for representors is guaranteed to be called from a
	 * single thread, and the user of the other end (the representor
	 * proxy) is single-threaded too.
	 */
	*ring = rte_ring_create(ring_name, nb_desc, socket_id,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (*ring == NULL)
		return -ENOMEM;

	return 0;
}

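/*
 * Queue configuration checks below treat unsupported thresholds as
 * harmless (warning only); deferred start is the only hard error.
 */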
static int
sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
			const struct rte_eth_rxconf *rx_conf)
{
	int ret = 0;

	sfcr_info(sr, "entry");

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfcr_warn(sr,
			"RxQ prefetch/host/writeback thresholds are not supported");
	}

	if (rx_conf->rx_free_thresh != 0)
		sfcr_warn(sr, "RxQ free threshold is not supported");

	if (rx_conf->rx_drop_en == 0)
		sfcr_warn(sr, "RxQ drop disable is not supported");

	if (rx_conf->rx_deferred_start) {
		sfcr_err(sr, "Deferred start is not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done: %s", rte_strerror(-ret));

	return ret;
}

static int
sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			uint16_t nb_rx_desc, unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mb_pool)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_rxq *rxq;
	int ret;

	sfcr_info(sr, "entry");

	ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
	if (ret != 0)
		goto fail_check_conf;

	ret = -ENOMEM;
	rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
		goto fail_rxq_alloc;
	}

	ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
				   "rx", rx_queue_id, nb_rx_desc,
				   socket_id, &rxq->ring);
	if (ret != 0) {
		sfcr_err(sr, "%s() failed to create ring", __func__);
		goto fail_ring_create;
	}

	ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
				     rx_queue_id, rxq->ring, mb_pool);
	if (ret != 0) {
		SFC_ASSERT(ret > 0);
		ret = -ret;
		sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
		goto fail_proxy_add_rxq;
	}

	dev->data->rx_queues[rx_queue_id] = rxq;

	sfcr_info(sr, "done");

	return 0;

fail_proxy_add_rxq:
	rte_ring_free(rxq->ring);

fail_ring_create:
	rte_free(rxq);

fail_rxq_alloc:
fail_check_conf:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

static void
sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];

	sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
	rte_ring_free(rxq->ring);
	rte_free(rxq);
}

static int
sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
			const struct rte_eth_txconf *tx_conf)
{
	int ret = 0;

	sfcr_info(sr, "entry");

	if (tx_conf->tx_rs_thresh != 0)
		sfcr_warn(sr, "RS bit in transmit descriptor is not supported");

	if (tx_conf->tx_free_thresh != 0)
		sfcr_warn(sr, "TxQ free threshold is not supported");

	if (tx_conf->tx_thresh.pthresh != 0 ||
	    tx_conf->tx_thresh.hthresh != 0 ||
	    tx_conf->tx_thresh.wthresh != 0) {
		sfcr_warn(sr,
			"prefetch/host/writeback thresholds are not supported");
	}

	if (tx_conf->tx_deferred_start) {
		sfcr_err(sr, "Deferred start is not supported");
		ret = -EINVAL;
	}

	sfcr_info(sr, "done: %s", rte_strerror(-ret));

	return ret;
}

static int
sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			uint16_t nb_tx_desc, unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_txq *txq;
	int ret;

	sfcr_info(sr, "entry");

	ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
	if (ret != 0)
		goto fail_check_conf;

	ret = -ENOMEM;
	txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
				   "tx", tx_queue_id, nb_tx_desc,
				   socket_id, &txq->ring);
	if (ret != 0)
		goto fail_ring_create;

	ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
				     tx_queue_id, txq->ring,
				     &txq->egress_mport);
	if (ret != 0)
		goto fail_proxy_add_txq;

	dev->data->tx_queues[tx_queue_id] = txq;

	sfcr_info(sr, "done");

	return 0;

fail_proxy_add_txq:
	rte_ring_free(txq->ring);

fail_ring_create:
	rte_free(txq);

fail_txq_alloc:
fail_check_conf:
	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

static void
sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];

	sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
	rte_ring_free(txq->ring);
	rte_free(txq);
}

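/*
 * Close path. sfc_repr_close() only advances the state machine for now;
 * the real resource cleanup happens in sfc_repr_dev_close().
 */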
static void
sfc_repr_close(struct sfc_repr *sr)
{
	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
	sr->state = SFC_ETHDEV_CLOSING;

	/* Put representor close actions here */

	sr->state = SFC_ETHDEV_INITIALIZED;
}

static int
sfc_repr_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	unsigned int i;

	sfcr_info(sr, "entry");

	sfc_repr_lock(sr);
	switch (sr->state) {
	case SFC_ETHDEV_STARTED:
		sfc_repr_stop(dev);
		SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ETHDEV_CONFIGURED:
		sfc_repr_close(sr);
		SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ETHDEV_INITIALIZED:
		break;
	default:
		sfcr_err(sr, "unexpected adapter state %u on close", sr->state);
		break;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		sfc_repr_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		sfc_repr_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}

	/*
	 * Cleanup all resources.
	 * Rollback primary process sfc_repr_eth_dev_init() below.
	 */
	(void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);

	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	dev->dev_ops = NULL;

	sfc_repr_unlock(sr);
	sfc_repr_lock_fini(sr);

	sfcr_info(sr, "done");

	free(sr);

	return 0;
}

static const struct eth_dev_ops sfc_repr_dev_ops = {
	.dev_configure			= sfc_repr_dev_configure,
	.dev_start			= sfc_repr_dev_start,
	.dev_stop			= sfc_repr_dev_stop,
	.dev_close			= sfc_repr_dev_close,
	.dev_infos_get			= sfc_repr_dev_infos_get,
	.link_update			= sfc_repr_dev_link_update,
	.rx_queue_setup			= sfc_repr_rx_queue_setup,
	.rx_queue_release		= sfc_repr_rx_queue_release,
	.tx_queue_setup			= sfc_repr_tx_queue_setup,
	.tx_queue_release		= sfc_repr_tx_queue_release,
};

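/** Parameters carried from sfc_repr_create() into sfc_repr_eth_dev_init() */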
struct sfc_repr_init_data {
	uint16_t		pf_port_id;
	uint16_t		repr_id;
	uint16_t		switch_domain_id;
	efx_mport_sel_t		mport_sel;
};

static int
sfc_repr_assign_mae_switch_port(uint16_t switch_domain_id,
				const struct sfc_mae_switch_port_request *req,
				uint16_t *switch_port_id)
{
	int rc;

	rc = sfc_mae_assign_switch_port(switch_domain_id, req, switch_port_id);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

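/*
 * Primary process ethdev init callback, invoked via rte_eth_dev_create():
 * assigns an MAE switch port, registers the representor with the proxy
 * and allocates process-private state (from the libc heap, since it must
 * not live in rte_malloc() shared memory).
 */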
static int
sfc_repr_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
{
	const struct sfc_repr_init_data *repr_data = init_params;
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_mae_switch_port_request switch_port_request;
	efx_mport_sel_t ethdev_mport_sel;
	struct sfc_repr *sr;
	int ret;

	/*
	 * Currently there is no mport we can use for the representor's
	 * ethdev. Use an invalid one for now. This way representors
	 * can be instantiated.
	 */
	efx_mae_mport_invalid(&ethdev_mport_sel);

	memset(&switch_port_request, 0, sizeof(switch_port_request));
	switch_port_request.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
	switch_port_request.ethdev_mportp = &ethdev_mport_sel;
	switch_port_request.entity_mportp = &repr_data->mport_sel;
	switch_port_request.ethdev_port_id = dev->data->port_id;

	ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
					      &switch_port_request,
					      &srs->switch_port_id);
	if (ret != 0) {
		SFC_GENERIC_LOG(ERR,
			"%s() failed to assign MAE switch port (domain id %u)",
			__func__, repr_data->switch_domain_id);
		goto fail_mae_assign_switch_port;
	}

	ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
				      repr_data->repr_id,
				      dev->data->port_id,
				      &repr_data->mport_sel);
	if (ret != 0) {
		SFC_GENERIC_LOG(ERR, "%s() failed to add repr proxy port",
				__func__);
		SFC_ASSERT(ret > 0);
		ret = -ret;
		goto fail_create_port;
	}

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sr = calloc(1, sizeof(*sr));
	if (sr == NULL) {
		ret = -ENOMEM;
		goto fail_alloc_sr;
	}

	sfc_repr_lock_init(sr);
	sfc_repr_lock(sr);

	dev->process_private = sr;

	srs->pf_port_id = repr_data->pf_port_id;
	srs->repr_id = repr_data->repr_id;
	srs->switch_domain_id = repr_data->switch_domain_id;

	dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	dev->data->representor_id = srs->repr_id;
	dev->data->backer_port_id = srs->pf_port_id;

	dev->data->mac_addrs = rte_zmalloc("sfcr", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		ret = -ENOMEM;
		goto fail_mac_addrs;
	}

	dev->rx_pkt_burst = sfc_repr_rx_burst;
	dev->tx_pkt_burst = sfc_repr_tx_burst;
	dev->dev_ops = &sfc_repr_dev_ops;

	sr->state = SFC_ETHDEV_INITIALIZED;
	sfc_repr_unlock(sr);

	return 0;

fail_mac_addrs:
	sfc_repr_unlock(sr);
	free(sr);

fail_alloc_sr:
	(void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
				      repr_data->repr_id);

fail_create_port:
fail_mae_assign_switch_port:
	SFC_GENERIC_LOG(ERR, "%s() failed: %s", __func__, rte_strerror(-ret));
	return ret;
}

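/*
 * Create a representor ethdev on top of the given parent (PF) device.
 * If an ethdev with the derived name already exists, the call succeeds
 * without creating a new one.
 */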
int
sfc_repr_create(struct rte_eth_dev *parent, uint16_t representor_id,
		uint16_t switch_domain_id, const efx_mport_sel_t *mport_sel)
{
	struct sfc_repr_init_data repr_data;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;
	struct rte_eth_dev *dev;

	if (snprintf(name, sizeof(name), "net_%s_representor_%u",
		     parent->device->name, representor_id) >=
			(int)sizeof(name)) {
		SFC_GENERIC_LOG(ERR, "%s() failed: name too long", __func__);
		return -ENAMETOOLONG;
	}

	dev = rte_eth_dev_allocated(name);
	if (dev == NULL) {
		memset(&repr_data, 0, sizeof(repr_data));
		repr_data.pf_port_id = parent->data->port_id;
		repr_data.repr_id = representor_id;
		repr_data.switch_domain_id = switch_domain_id;
		repr_data.mport_sel = *mport_sel;

		ret = rte_eth_dev_create(parent->device, name,
					 sizeof(struct sfc_repr_shared),
					 NULL, NULL,
					 sfc_repr_eth_dev_init, &repr_data);
		if (ret != 0) {
			SFC_GENERIC_LOG(ERR, "%s() failed to create device",
					__func__);
			return ret;
		}
	}

	return 0;
}