1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
13 #include <rte_ethdev.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
20 #include "sfc_debug.h"
22 #include "sfc_ethdev_state.h"
23 #include "sfc_repr_proxy_api.h"
24 #include "sfc_switch.h"
25 #include "sfc_dp_tx.h"
/** Multi-process shared representor private data */
struct sfc_repr_shared {
	/* MAE switch domain ID this representor belongs to */
	uint16_t switch_domain_id;
	/* Port ID within the MAE switch domain (also used as repr_id below) */
	uint16_t switch_port_id;
/** Per-queue Rx/Tx statistics updated on the datapath */
struct sfc_repr_queue_stats {
	/* Packet and byte counters packed into one union for combined updates */
	union sfc_pkts_bytes packets_bytes;
	/* Datapath members */
	struct rte_ring *ring;	/* Rx ring; producer is the representor proxy */
	struct sfc_repr_queue_stats stats;	/* Rx packet/byte counters */
	/* Datapath members */
	struct rte_ring *ring;	/* Tx ring; consumer is the representor proxy */
	efx_mport_id_t egress_mport;	/* m-port stamped into each Tx mbuf dynfield */
	struct sfc_repr_queue_stats stats;	/* Tx packet/byte counters */
/** Primary process representor private data */
	/*
	 * PMD setup and configuration is not thread safe. Since it is not
	 * performance sensitive, it is better to guarantee thread-safety
	 * and add device level lock. Adapter control operations which
	 * change its state should acquire the lock.
	 */
	/* Ethdev lifecycle state; transitions happen under the lock above */
	enum sfc_ethdev_state state;
/*
 * Log an ERR-level message in the context of representor 'sr'.
 * NOTE(review): _sr is not referenced in the visible expansion; presumably
 * consumed by log-prefix arguments on lines not shown here — confirm.
 */
#define sfcr_err(sr, ...) \
	const struct sfc_repr *_sr = (sr); \
	SFC_GENERIC_LOG(ERR, __VA_ARGS__); \
/* Log a WARNING-level message in the context of representor 'sr' */
#define sfcr_warn(sr, ...) \
	const struct sfc_repr *_sr = (sr); \
	SFC_GENERIC_LOG(WARNING, __VA_ARGS__); \
/*
 * Log an INFO-level message in the context of representor 'sr'.
 * RTE_FMT_HEAD/RTE_FMT_TAIL split the variadic args into format string
 * and arguments so a prefix can be prepended to the format.
 */
#define sfcr_info(sr, ...) \
	const struct sfc_repr *_sr = (sr); \
	SFC_GENERIC_LOG(INFO, \
		RTE_FMT_HEAD(__VA_ARGS__ ,), \
		RTE_FMT_TAIL(__VA_ARGS__ ,))); \
92 static inline struct sfc_repr_shared *
93 sfc_repr_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
95 struct sfc_repr_shared *srs = eth_dev->data->dev_private;
100 static inline struct sfc_repr *
101 sfc_repr_by_eth_dev(struct rte_eth_dev *eth_dev)
103 struct sfc_repr *sr = eth_dev->process_private;
109 * Add wrapper functions to acquire/release lock to be able to remove or
110 * change the lock in one place.
114 sfc_repr_lock_init(struct sfc_repr *sr)
116 rte_spinlock_init(&sr->lock);
#if defined(RTE_LIBRTE_SFC_EFX_DEBUG) || defined(RTE_ENABLE_ASSERT)
/* Debug builds only: used by SFC_ASSERT() to verify the lock is held */
sfc_repr_lock_is_locked(struct sfc_repr *sr)
	return rte_spinlock_is_locked(&sr->lock);
130 sfc_repr_lock(struct sfc_repr *sr)
132 rte_spinlock_lock(&sr->lock);
136 sfc_repr_unlock(struct sfc_repr *sr)
138 rte_spinlock_unlock(&sr->lock);
142 sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
144 /* Just for symmetry of the API */
/* Stop an Rx queue: empty its ring of pending mbufs */
sfc_repr_rx_queue_stop(void *queue)
	struct sfc_repr_rxq *rxq = queue;

	/* NOTE(review): rte_ring_reset() only empties the ring; assumes mbufs
	 * still on it are released elsewhere (proxy side) — confirm. */
	rte_ring_reset(rxq->ring);
/* Stop a Tx queue: empty its ring of pending mbufs */
sfc_repr_tx_queue_stop(void *queue)
	struct sfc_repr_txq *txq = queue;

	/* NOTE(review): rte_ring_reset() only empties the ring; assumes mbufs
	 * still on it are released elsewhere (proxy side) — confirm. */
	rte_ring_reset(txq->ring);
/*
 * Rx burst callback: dequeue up to nb_pkts mbufs that the representor
 * proxy placed on this queue's ring and account Rx statistics.
 */
sfc_repr_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	struct sfc_repr_rxq *rxq = rx_queue;
	void **objs = (void *)&rx_pkts[0];

	/* mbufs port is already filled correctly by representors proxy */
	n_rx = rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);

	unsigned int n_bytes = 0;

	/* Sum byte lengths of the dequeued packets */
	n_bytes += rx_pkts[i]->pkt_len;
	} while (++i < n_rx);

	/* Update packet and byte counters together */
	sfc_pkts_bytes_add(&rxq->stats.packets_bytes, n_rx, n_bytes);
/*
 * Tx burst callback: stamp each mbuf with the m-port override dynfield,
 * enqueue onto the proxy's ring, and roll back the flag/byte accounting
 * for packets that did not fit.
 */
sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	struct sfc_repr_txq *txq = tx_queue;
	unsigned int n_bytes = 0;

	/*
	 * mbuf is likely cache-hot. Set flag and egress m-port here instead of
	 * doing that in representors proxy. Also, it should help to avoid
	 * cache bounce. Moreover, potentially, it allows to use one
	 * multi-producer single-consumer ring for all representors.
	 *
	 * The only potential problem is doing so many times if enqueue
	 * fails and sender retries.
	 */
	for (i = 0; i < nb_pkts; ++i) {
		struct rte_mbuf *m = tx_pkts[i];

		/* Direct the packet to this representor's egress m-port */
		m->ol_flags |= sfc_dp_mport_override;
		*RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset,
				   efx_mport_id_t *) = txq->egress_mport;
		n_bytes += tx_pkts[i]->pkt_len;

	objs = (void *)&tx_pkts[0];
	n_tx = rte_ring_sp_enqueue_burst(txq->ring, objs, nb_pkts, NULL);

	/*
	 * Remove m-port override flag from packets that were not enqueued
	 * Setting the flag only for enqueued packets after the burst is
	 * not possible since the ownership of enqueued packets is
	 * transferred to representor proxy. The same logic applies to
	 * counting the enqueued packets' bytes.
	 */
	for (i = n_tx; i < nb_pkts; ++i) {
		struct rte_mbuf *m = tx_pkts[i];

		m->ol_flags &= ~sfc_dp_mport_override;
		n_bytes -= m->pkt_len;

	/* Account only the packets actually handed to the proxy */
	sfc_pkts_bytes_add(&txq->stats.packets_bytes, n_tx, n_bytes);
/*
 * Start the representor: ask the representor proxy to begin forwarding
 * for this port and advance the state machine CONFIGURED -> STARTED.
 * Caller must hold the representor lock.
 */
sfc_repr_start(struct rte_eth_dev *dev)
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	case SFC_ETHDEV_CONFIGURED:
	case SFC_ETHDEV_STARTED:
		/* Starting an already-started port is not an error */
		sfcr_info(sr, "already started");

	sr->state = SFC_ETHDEV_STARTING;

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_start_repr(srs->pf_port_id, srs->repr_id);

	sr->state = SFC_ETHDEV_STARTED;

	sfcr_info(sr, "done");

	/* Error path: roll the state back to CONFIGURED */
	sr->state = SFC_ETHDEV_CONFIGURED;

	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
/* ethdev .dev_start callback: run sfc_repr_start() under the lock */
sfc_repr_dev_start(struct rte_eth_dev *dev)
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);

	sfcr_info(sr, "entry");

	ret = sfc_repr_start(dev);

	sfcr_info(sr, "done");

	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
/*
 * Stop the representor: tell the proxy to stop forwarding, drain the
 * per-queue rings and go back to CONFIGURED.
 * Caller must hold the representor lock.
 */
sfc_repr_stop(struct rte_eth_dev *dev)
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs;

	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	case SFC_ETHDEV_STARTED:
	case SFC_ETHDEV_CONFIGURED:
		/* Stopping a port that is not started is not an error */
		sfcr_info(sr, "already stopped");

	sfcr_err(sr, "stop in unexpected state %u", sr->state);

	srs = sfc_repr_shared_by_eth_dev(dev);
	ret = sfc_repr_proxy_stop_repr(srs->pf_port_id, srs->repr_id);

	/* Drop anything still sitting on the datapath rings */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		sfc_repr_rx_queue_stop(dev->data->rx_queues[i]);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		sfc_repr_tx_queue_stop(dev->data->tx_queues[i]);

	sr->state = SFC_ETHDEV_CONFIGURED;
	sfcr_info(sr, "done");

	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
/* ethdev .dev_stop callback: run sfc_repr_stop() under the lock */
sfc_repr_dev_stop(struct rte_eth_dev *dev)
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);

	sfcr_info(sr, "entry");

	ret = sfc_repr_stop(dev);
		sfcr_err(sr, "%s() failed to stop representor", __func__);

	sfcr_info(sr, "done");

	/* NOTE(review): message lacks the ':' used by sibling functions */
	sfcr_err(sr, "%s() failed %s", __func__, rte_strerror(-ret));
/*
 * Validate a device configuration against the representor's minimal
 * feature set: no fixed link speeds, no real RSS, no Tx MQ, no loopback,
 * no DCB/PFC, no Flow Director, no interrupts.
 */
sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
		    const struct rte_eth_conf *conf)
	const struct rte_eth_rss_conf *rss_conf;

	sfcr_info(sr, "entry");

	if (conf->link_speeds != 0) {
		sfcr_err(sr, "specific link speeds not supported");

	switch (conf->rxmode.mq_mode) {
	case RTE_ETH_MQ_RX_RSS:
		/* RSS mode is tolerated only when it is effectively a no-op */
		if (nb_rx_queues != 1) {
			sfcr_err(sr, "Rx RSS is not supported with %u queues",

		rss_conf = &conf->rx_adv_conf.rss_conf;
		if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
		    rss_conf->rss_hf != 0) {
			sfcr_err(sr, "Rx RSS configuration is not supported");

	case RTE_ETH_MQ_RX_NONE:

		sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");

	if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
		sfcr_err(sr, "Tx mode MQ modes not supported");

	if (conf->lpbk_mode != 0) {
		sfcr_err(sr, "loopback not supported");

	if (conf->dcb_capability_en != 0) {
		sfcr_err(sr, "priority-based flow control not supported");

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfcr_err(sr, "Flow Director not supported");

	if (conf->intr_conf.lsc != 0) {
		sfcr_err(sr, "link status change interrupt not supported");

	if (conf->intr_conf.rxq != 0) {
		sfcr_err(sr, "receive queue interrupt not supported");

	if (conf->intr_conf.rmv != 0) {
		sfcr_err(sr, "remove interrupt not supported");

	sfcr_info(sr, "done %d", ret);
/*
 * Apply a validated configuration and move the state machine to
 * CONFIGURED. Caller must hold the representor lock.
 */
sfc_repr_configure(struct sfc_repr *sr, uint16_t nb_rx_queues,
		   const struct rte_eth_conf *conf)
	sfcr_info(sr, "entry");

	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	ret = sfc_repr_check_conf(sr, nb_rx_queues, conf);
		goto fail_check_conf;

	sr->state = SFC_ETHDEV_CONFIGURED;

	sfcr_info(sr, "done");

	sfcr_info(sr, "failed %s", rte_strerror(-ret));
/*
 * ethdev .dev_configure callback: configure (or reconfigure) under the
 * representor lock; valid only from INITIALIZED or CONFIGURED state.
 */
sfc_repr_dev_configure(struct rte_eth_dev *dev)
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct rte_eth_dev_data *dev_data = dev->data;

	sfcr_info(sr, "entry n_rxq=%u n_txq=%u",
		  dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	case SFC_ETHDEV_CONFIGURED:
	case SFC_ETHDEV_INITIALIZED:
		ret = sfc_repr_configure(sr, dev_data->nb_rx_queues,
					 &dev_data->dev_conf);

		sfcr_err(sr, "unexpected adapter state %u to configure",

	sfcr_info(sr, "done %s", rte_strerror(-ret));
/*
 * ethdev .dev_infos_get callback: report queue limits, Rx drop default
 * and the MAE switch domain/port the representor belongs to.
 */
sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info)
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);

	dev_info->device = dev->device;

	dev_info->max_rx_queues = SFC_REPR_RXQ_MAX;
	dev_info->max_tx_queues = SFC_REPR_TXQ_MAX;
	dev_info->default_rxconf.rx_drop_en = 1;
	dev_info->switch_info.domain_id = srs->switch_domain_id;
	dev_info->switch_info.port_id = srs->switch_port_id;
/*
 * ethdev .link_update callback. A representor has no physical link:
 * report UNKNOWN until started, then a pseudo-link that is always UP
 * with unknown speed.
 */
sfc_repr_dev_link_update(struct rte_eth_dev *dev,
			 __rte_unused int wait_to_complete)
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct rte_eth_link link;

	if (sr->state != SFC_ETHDEV_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);

		memset(&link, 0, sizeof(link));
		link.link_status = RTE_ETH_LINK_UP;
		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;

	return rte_eth_linkstatus_set(dev, &link);
/*
 * Create the single-producer/single-consumer ring shared between a
 * representor queue and the representor proxy. The ring name encodes
 * PF port, representor ID, direction ("rx"/"tx") and queue ID.
 */
sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
		     const char *type_name, uint16_t qid, uint16_t nb_desc,
		     unsigned int socket_id, struct rte_ring **ring)
	char ring_name[RTE_RING_NAMESIZE];

	ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
		       pf_port_id, repr_id, type_name, qid);
	/* NOTE(review): a negative snprintf() return (encoding error) is not
	 * handled here, only truncation — confirm this is acceptable. */
	if (ret >= (int)sizeof(ring_name))
		return -ENAMETOOLONG;

	/*
	 * Single producer/consumer rings are used since the API for Tx/Rx
	 * packet burst for representors are guaranteed to be called from
	 * a single thread, and the user of the other end (representor proxy)
	 * is also single-threaded.
	 */
	*ring = rte_ring_create(ring_name, nb_desc, socket_id,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
/*
 * Validate Rx queue configuration: unsupported thresholds produce
 * warnings only; deferred start is a hard error.
 */
sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
			const struct rte_eth_rxconf *rx_conf)
	sfcr_info(sr, "entry");

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		"RxQ prefetch/host/writeback thresholds are not supported");

	if (rx_conf->rx_free_thresh != 0)
		sfcr_warn(sr, "RxQ free threshold is not supported");

	if (rx_conf->rx_drop_en == 0)
		sfcr_warn(sr, "RxQ drop disable is not supported");

	if (rx_conf->rx_deferred_start) {
		sfcr_err(sr, "Deferred start is not supported");

	sfcr_info(sr, "done: %s", rte_strerror(-ret));
622 sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
623 uint16_t nb_rx_desc, unsigned int socket_id,
624 __rte_unused const struct rte_eth_rxconf *rx_conf,
625 struct rte_mempool *mb_pool)
627 struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
628 struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
629 struct sfc_repr_rxq *rxq;
632 sfcr_info(sr, "entry");
634 ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
636 goto fail_check_conf;
639 rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
640 RTE_CACHE_LINE_SIZE, socket_id);
642 sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
646 ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
647 "rx", rx_queue_id, nb_rx_desc,
648 socket_id, &rxq->ring);
650 sfcr_err(sr, "%s() failed to create ring", __func__);
651 goto fail_ring_create;
654 ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
655 rx_queue_id, rxq->ring, mb_pool);
659 sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
660 goto fail_proxy_add_rxq;
663 dev->data->rx_queues[rx_queue_id] = rxq;
665 sfcr_info(sr, "done");
670 rte_ring_free(rxq->ring);
677 sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
/* ethdev .rx_queue_release callback: unregister from proxy, free ring */
sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];

	sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
	rte_ring_free(rxq->ring);
/*
 * Validate Tx queue configuration: unsupported thresholds produce
 * warnings only; deferred start is a hard error.
 */
sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
			const struct rte_eth_txconf *tx_conf)
	sfcr_info(sr, "entry");

	if (tx_conf->tx_rs_thresh != 0)
		sfcr_warn(sr, "RS bit in transmit descriptor is not supported");

	if (tx_conf->tx_free_thresh != 0)
		sfcr_warn(sr, "TxQ free threshold is not supported");

	if (tx_conf->tx_thresh.pthresh != 0 ||
	    tx_conf->tx_thresh.hthresh != 0 ||
	    tx_conf->tx_thresh.wthresh != 0) {
		"prefetch/host/writeback thresholds are not supported");

	if (tx_conf->tx_deferred_start) {
		sfcr_err(sr, "Deferred start is not supported");

	sfcr_info(sr, "done: %s", rte_strerror(-ret));
/*
 * ethdev .tx_queue_setup callback: validate configuration, allocate the
 * queue, create the ring and register the queue with the representor
 * proxy.
 */
sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			uint16_t nb_tx_desc, unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_txq *txq;

	sfcr_info(sr, "entry");

	ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
		goto fail_check_conf;

	txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);

	ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
				   "tx", tx_queue_id, nb_tx_desc,
				   socket_id, &txq->ring);
		goto fail_ring_create;

	ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
				     tx_queue_id, txq->ring,
		goto fail_proxy_add_txq;

	dev->data->tx_queues[tx_queue_id] = txq;

	sfcr_info(sr, "done");

	/* Error path: release resources in reverse order of acquisition */
	rte_ring_free(txq->ring);

	sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
/* ethdev .tx_queue_release callback: unregister from proxy, free ring */
sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
	struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];

	sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
	rte_ring_free(txq->ring);
/*
 * Close a CONFIGURED representor: CONFIGURED -> CLOSING -> INITIALIZED.
 * Caller must hold the representor lock.
 */
sfc_repr_close(struct sfc_repr *sr)
	SFC_ASSERT(sfc_repr_lock_is_locked(sr));

	SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
	sr->state = SFC_ETHDEV_CLOSING;

	/* Put representor close actions here */

	sr->state = SFC_ETHDEV_INITIALIZED;
/*
 * ethdev .dev_close callback: walk the state machine down
 * (STARTED -> stop, CONFIGURED -> close), release all queues and
 * roll back everything done by sfc_repr_eth_dev_init().
 */
sfc_repr_dev_close(struct rte_eth_dev *dev)
	struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
	struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);

	sfcr_info(sr, "entry");

	/* NOTE(review): cases appear to fall through after each teardown
	 * step (the intervening calls are presumably stop/close) — confirm */
	case SFC_ETHDEV_STARTED:
		SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
	case SFC_ETHDEV_CONFIGURED:
		SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
	case SFC_ETHDEV_INITIALIZED:
		sfcr_err(sr, "unexpected adapter state %u on close", sr->state);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		sfc_repr_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		sfc_repr_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;

	/*
	 * Cleanup all resources.
	 * Rollback primary process sfc_repr_eth_dev_init() below.
	 */
	(void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);

	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	sfc_repr_lock_fini(sr);

	sfcr_info(sr, "done");
/*
 * ethdev .stats_get callback: aggregate per-queue packet/byte counters
 * into the device-level rte_eth_stats.
 */
sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	union sfc_pkts_bytes queue_stats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct sfc_repr_rxq *rxq = dev->data->rx_queues[i];

		/* Snapshot both counters of the queue at once */
		sfc_pkts_bytes_get(&rxq->stats.packets_bytes,

		stats->ipackets += queue_stats.pkts;
		stats->ibytes += queue_stats.bytes;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct sfc_repr_txq *txq = dev->data->tx_queues[i];

		sfc_pkts_bytes_get(&txq->stats.packets_bytes,

		stats->opackets += queue_stats.pkts;
		stats->obytes += queue_stats.bytes;
/* Representor ethdev operations (minimal control-path feature set) */
static const struct eth_dev_ops sfc_repr_dev_ops = {
	.dev_configure = sfc_repr_dev_configure,
	.dev_start = sfc_repr_dev_start,
	.dev_stop = sfc_repr_dev_stop,
	.dev_close = sfc_repr_dev_close,
	.dev_infos_get = sfc_repr_dev_infos_get,
	.link_update = sfc_repr_dev_link_update,
	.stats_get = sfc_repr_stats_get,
	.rx_queue_setup = sfc_repr_rx_queue_setup,
	.rx_queue_release = sfc_repr_rx_queue_release,
	.tx_queue_setup = sfc_repr_tx_queue_setup,
	.tx_queue_release = sfc_repr_tx_queue_release,
/* Parameters handed to sfc_repr_eth_dev_init() at device-create time */
struct sfc_repr_init_data {
	uint16_t switch_domain_id;	/* MAE switch domain of the parent PF */
	efx_mport_sel_t mport_sel;	/* m-port selector of the represented entity */
	efx_pcie_interface_t intf;	/* PCIe interface of the represented entity */
/* Assign an MAE switch port for the representor within the given domain */
sfc_repr_assign_mae_switch_port(uint16_t switch_domain_id,
				const struct sfc_mae_switch_port_request *req,
				uint16_t *switch_port_id)
	rc = sfc_mae_assign_switch_port(switch_domain_id, req, switch_port_id);
923 sfc_repr_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
925 const struct sfc_repr_init_data *repr_data = init_params;
926 struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
927 struct sfc_mae_switch_port_request switch_port_request;
928 efx_mport_sel_t ethdev_mport_sel;
933 * Currently there is no mport we can use for representor's
934 * ethdev. Use an invalid one for now. This way representors
935 * can be instantiated.
937 efx_mae_mport_invalid(ðdev_mport_sel);
939 memset(&switch_port_request, 0, sizeof(switch_port_request));
940 switch_port_request.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
941 switch_port_request.ethdev_mportp = ðdev_mport_sel;
942 switch_port_request.entity_mportp = &repr_data->mport_sel;
943 switch_port_request.ethdev_port_id = dev->data->port_id;
944 switch_port_request.port_data.repr.intf = repr_data->intf;
945 switch_port_request.port_data.repr.pf = repr_data->pf;
946 switch_port_request.port_data.repr.vf = repr_data->vf;
948 ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
949 &switch_port_request,
950 &srs->switch_port_id);
953 "%s() failed to assign MAE switch port (domain id %u)",
954 __func__, repr_data->switch_domain_id);
955 goto fail_mae_assign_switch_port;
958 ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
961 &repr_data->mport_sel);
963 SFC_GENERIC_LOG(ERR, "%s() failed to add repr proxy port",
967 goto fail_create_port;
971 * Allocate process private data from heap, since it should not
972 * be located in shared memory allocated using rte_malloc() API.
974 sr = calloc(1, sizeof(*sr));
980 sfc_repr_lock_init(sr);
983 dev->process_private = sr;
985 srs->pf_port_id = repr_data->pf_port_id;
986 srs->repr_id = srs->switch_port_id;
987 srs->switch_domain_id = repr_data->switch_domain_id;
989 dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
990 dev->data->representor_id = srs->repr_id;
991 dev->data->backer_port_id = srs->pf_port_id;
993 dev->data->mac_addrs = rte_zmalloc("sfcr", RTE_ETHER_ADDR_LEN, 0);
994 if (dev->data->mac_addrs == NULL) {
999 dev->rx_pkt_burst = sfc_repr_rx_burst;
1000 dev->tx_pkt_burst = sfc_repr_tx_burst;
1001 dev->dev_ops = &sfc_repr_dev_ops;
1003 sr->state = SFC_ETHDEV_INITIALIZED;
1004 sfc_repr_unlock(sr);
1009 sfc_repr_unlock(sr);
1013 (void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
1014 srs->switch_port_id);
1017 fail_mae_assign_switch_port:
1018 SFC_GENERIC_LOG(ERR, "%s() failed: %s", __func__, rte_strerror(-ret));
1023 sfc_repr_create(struct rte_eth_dev *parent,
1024 struct sfc_repr_entity_info *entity,
1025 uint16_t switch_domain_id,
1026 const efx_mport_sel_t *mport_sel)
1028 struct sfc_repr_init_data repr_data;
1029 char name[RTE_ETH_NAME_MAX_LEN];
1033 struct rte_eth_dev *dev;
1036 rc = sfc_mae_switch_domain_get_controller(switch_domain_id,
1037 entity->intf, &controller);
1039 SFC_GENERIC_LOG(ERR, "%s() failed to get DPDK controller for %d",
1040 __func__, entity->intf);
1044 switch (entity->type) {
1045 case RTE_ETH_REPRESENTOR_VF:
1046 ret = snprintf(name, sizeof(name), "net_%s_representor_c%upf%uvf%u",
1047 parent->device->name, controller, entity->pf,
1050 case RTE_ETH_REPRESENTOR_PF:
1051 ret = snprintf(name, sizeof(name), "net_%s_representor_c%upf%u",
1052 parent->device->name, controller, entity->pf);
1058 if (ret >= (int)sizeof(name)) {
1059 SFC_GENERIC_LOG(ERR, "%s() failed name too long", __func__);
1060 return -ENAMETOOLONG;
1063 dev = rte_eth_dev_allocated(name);
1065 memset(&repr_data, 0, sizeof(repr_data));
1066 repr_data.pf_port_id = parent->data->port_id;
1067 repr_data.switch_domain_id = switch_domain_id;
1068 repr_data.mport_sel = *mport_sel;
1069 repr_data.intf = entity->intf;
1070 repr_data.pf = entity->pf;
1071 repr_data.vf = entity->vf;
1073 ret = rte_eth_dev_create(parent->device, name,
1074 sizeof(struct sfc_repr_shared),
1076 sfc_repr_eth_dev_init, &repr_data);
1078 SFC_GENERIC_LOG(ERR, "%s() failed to create device",