1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
12 #include <rte_ethdev.h>
13 #include <rte_malloc.h>
14 #include <ethdev_driver.h>
19 #include "sfc_debug.h"
21 #include "sfc_ethdev_state.h"
22 #include "sfc_repr_proxy_api.h"
23 #include "sfc_switch.h"
25 /** Multi-process shared representor private data */
26 struct sfc_repr_shared {
/* MAE switch domain/port identifiers reported via dev_infos_get() */
29 uint16_t switch_domain_id;
30 uint16_t switch_port_id;
/*
 * NOTE(review): the two "Datapath members" fragments below appear to be
 * the interiors of two separate queue structures (Rx and Tx queue) whose
 * headers are not visible in this chunk -- confirm against the full file.
 */
34 /* Datapath members */
35 struct rte_ring *ring;
39 /* Datapath members */
40 struct rte_ring *ring;
/* Tx-side egress m-port; presumably consumed by the repr proxy -- verify */
41 efx_mport_id_t egress_mport;
44 /** Primary process representor private data */
/*
 * NOTE(review): the struct header and the lock member itself are not
 * visible in this chunk; the comment below motivates a device-level lock
 * (taken via the sfc_repr_lock*() wrappers).
 */
47 * PMD setup and configuration is not thread safe. Since it is not
48 * performance sensitive, it is better to guarantee thread-safety
49 * and add device level lock. Adapter control operations which
50 * change its state should acquire the lock.
/* Current ethdev state; transitions should be done under the device lock */
53 enum sfc_ethdev_state state;
/*
 * Log an error message in the context of representor 'sr'.
 * The argument is captured once into _sr to avoid multiple evaluation.
 */
56 #define sfcr_err(sr, ...) \
58 const struct sfc_repr *_sr = (sr); \
61 SFC_GENERIC_LOG(ERR, __VA_ARGS__); \
/*
 * Log a warning message in the context of representor 'sr'.
 * Mirrors sfcr_err() but at WARNING level.
 */
64 #define sfcr_warn(sr, ...) \
66 const struct sfc_repr *_sr = (sr); \
69 SFC_GENERIC_LOG(WARNING, __VA_ARGS__); \
/*
 * Log an informational message in the context of representor 'sr'.
 * Uses RTE_FMT_HEAD/RTE_FMT_TAIL to splice the caller's format string
 * into a larger format (the prefix part is elided in this view).
 */
72 #define sfcr_info(sr, ...) \
74 const struct sfc_repr *_sr = (sr); \
77 SFC_GENERIC_LOG(INFO, \
79 RTE_FMT_HEAD(__VA_ARGS__ ,), \
81 RTE_FMT_TAIL(__VA_ARGS__ ,))); \
/*
 * Get the multi-process shared representor data for an ethdev.
 * Shared data lives in dev->data->dev_private (rte_malloc'd shared memory).
 */
84 static inline struct sfc_repr_shared *
85 sfc_repr_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
87 struct sfc_repr_shared *srs = eth_dev->data->dev_private;
/*
 * Get the per-process representor data for an ethdev.
 * Process-private data lives in dev->process_private (heap-allocated,
 * see calloc() in sfc_repr_eth_dev_init()).
 */
92 static inline struct sfc_repr *
93 sfc_repr_by_eth_dev(struct rte_eth_dev *eth_dev)
95 struct sfc_repr *sr = eth_dev->process_private;
101 * Add wrapper functions to acquire/release lock to be able to remove or
102 * change the lock in one place.
/* Initialize the representor's device-level spinlock. */
106 sfc_repr_lock_init(struct sfc_repr *sr)
108 rte_spinlock_init(&sr->lock);
/* Compiled only for debug/assert builds: used by SFC_ASSERT() checks. */
111 #if defined(RTE_LIBRTE_SFC_EFX_DEBUG) || defined(RTE_ENABLE_ASSERT)
114 sfc_repr_lock_is_locked(struct sfc_repr *sr)
116 return rte_spinlock_is_locked(&sr->lock);
/* Acquire the representor device-level lock. */
122 sfc_repr_lock(struct sfc_repr *sr)
124 rte_spinlock_lock(&sr->lock);
/* Release the representor device-level lock. */
128 sfc_repr_unlock(struct sfc_repr *sr)
130 rte_spinlock_unlock(&sr->lock);
/* No-op: spinlocks need no teardown; kept so init/fini calls pair up. */
134 sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
136 /* Just for symmetry of the API */
/*
 * Validate a requested device configuration against representor limits.
 * Representors reject: specific link speeds, non-RSS Rx MQ modes, any Tx
 * MQ mode, loopback, DCB, Flow Director and all interrupt types. Rx RSS
 * is tolerated only with a single queue and an empty RSS configuration.
 * Returns 0 on success, negative errno otherwise (the ret assignments and
 * returns are elided in this view -- confirm in the full file).
 */
140 sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
141 const struct rte_eth_conf *conf)
143 const struct rte_eth_rss_conf *rss_conf;
146 sfcr_info(sr, "entry");
148 if (conf->link_speeds != 0) {
149 sfcr_err(sr, "specific link speeds not supported");
153 switch (conf->rxmode.mq_mode) {
/* Presumably the ETH_MQ_RX_RSS case label is elided here -- verify */
155 if (nb_rx_queues != 1) {
156 sfcr_err(sr, "Rx RSS is not supported with %u queues",
/* RSS mode is accepted only if the RSS configuration is all-empty */
162 rss_conf = &conf->rx_adv_conf.rss_conf;
163 if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
164 rss_conf->rss_hf != 0) {
165 sfcr_err(sr, "Rx RSS configuration is not supported");
172 sfcr_err(sr, "Rx mode MQ modes other than RSS not supported");
177 if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
178 sfcr_err(sr, "Tx mode MQ modes not supported");
182 if (conf->lpbk_mode != 0) {
183 sfcr_err(sr, "loopback not supported");
187 if (conf->dcb_capability_en != 0) {
188 sfcr_err(sr, "priority-based flow control not supported");
192 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
193 sfcr_err(sr, "Flow Director not supported");
197 if (conf->intr_conf.lsc != 0) {
198 sfcr_err(sr, "link status change interrupt not supported");
202 if (conf->intr_conf.rxq != 0) {
203 sfcr_err(sr, "receive queue interrupt not supported");
207 if (conf->intr_conf.rmv != 0) {
208 sfcr_err(sr, "remove interrupt not supported");
212 sfcr_info(sr, "done %d", ret);
/*
 * Apply a validated configuration: check it and, on success, move the
 * representor to the CONFIGURED state.
 * Caller must hold the device lock (asserted below).
 * Returns 0 on success; negative errno from sfc_repr_check_conf() on failure.
 */
219 sfc_repr_configure(struct sfc_repr *sr, uint16_t nb_rx_queues,
220 const struct rte_eth_conf *conf)
224 sfcr_info(sr, "entry");
226 SFC_ASSERT(sfc_repr_lock_is_locked(sr));
228 ret = sfc_repr_check_conf(sr, nb_rx_queues, conf);
/* Presumably 'if (ret != 0)' guards this goto -- elided in this view */
230 goto fail_check_conf;
232 sr->state = SFC_ETHDEV_CONFIGURED;
234 sfcr_info(sr, "done");
/* fail_check_conf: error exit path */
239 sfcr_info(sr, "failed %s", rte_strerror(-ret));
/*
 * ethdev .dev_configure callback.
 * Dispatches on the current state: re-configuration from CONFIGURED and
 * first configuration from INITIALIZED are allowed; any other state is an
 * error. Locking around the switch is elided in this view -- presumably
 * sfc_repr_lock()/sfc_repr_unlock() bracket it; confirm in the full file.
 */
244 sfc_repr_dev_configure(struct rte_eth_dev *dev)
246 struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
247 struct rte_eth_dev_data *dev_data = dev->data;
250 sfcr_info(sr, "entry n_rxq=%u n_txq=%u",
251 dev_data->nb_rx_queues, dev_data->nb_tx_queues);
255 case SFC_ETHDEV_CONFIGURED:
/* fallthrough to (re)configure -- confirm against the full file */
257 case SFC_ETHDEV_INITIALIZED:
258 ret = sfc_repr_configure(sr, dev_data->nb_rx_queues,
259 &dev_data->dev_conf);
/* default: unexpected state */
262 sfcr_err(sr, "unexpected adapter state %u to configure",
269 sfcr_info(sr, "done %s", rte_strerror(-ret));
/*
 * ethdev .dev_infos_get callback.
 * Reports queue limits, drop-enable default and the switch (domain, port)
 * identifiers assigned at init time.
 */
275 sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
276 struct rte_eth_dev_info *dev_info)
278 struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
280 dev_info->device = dev->device;
282 dev_info->max_rx_queues = SFC_REPR_RXQ_MAX;
283 dev_info->max_tx_queues = SFC_REPR_TXQ_MAX;
/* Representor Rx always drops on overflow (see rx_qcheck_conf warning) */
284 dev_info->default_rxconf.rx_drop_en = 1;
285 dev_info->switch_info.domain_id = srs->switch_domain_id;
286 dev_info->switch_info.port_id = srs->switch_port_id;
/*
 * Create the SP/SC ring connecting a representor queue with the
 * representor proxy. The ring name encodes PF port, representor id,
 * direction ("rx"/"tx") and queue id for uniqueness.
 * Returns 0 on success, -ENAMETOOLONG if the name does not fit
 * (rte_ring_create failure handling is elided in this view).
 * NOTE(review): a negative snprintf() error return is not handled by the
 * visible check -- only truncation is detected; confirm in the full file.
 */
292 sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
293 const char *type_name, uint16_t qid, uint16_t nb_desc,
294 unsigned int socket_id, struct rte_ring **ring)
296 char ring_name[RTE_RING_NAMESIZE];
299 ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
300 pf_port_id, repr_id, type_name, qid);
301 if (ret >= (int)sizeof(ring_name))
302 return -ENAMETOOLONG;
305 * Single producer/consumer rings are used since the API for Tx/Rx
306 * packet burst for representors are guaranteed to be called from
307 * a single thread, and the user of the other end (representor proxy)
308 * is also single-threaded.
310 *ring = rte_ring_create(ring_name, nb_desc, socket_id,
311 RING_F_SP_ENQ | RING_F_SC_DEQ);
/*
 * Validate RxQ configuration for a representor.
 * Unsupported thresholds and drop-disable only warn (best effort);
 * deferred start is a hard error. Returns 0 or negative errno
 * (ret assignment on the error path is elided in this view).
 */
319 sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
320 const struct rte_eth_rxconf *rx_conf)
324 sfcr_info(sr, "entry");
326 if (rx_conf->rx_thresh.pthresh != 0 ||
327 rx_conf->rx_thresh.hthresh != 0 ||
328 rx_conf->rx_thresh.wthresh != 0) {
330 "RxQ prefetch/host/writeback thresholds are not supported");
333 if (rx_conf->rx_free_thresh != 0)
334 sfcr_warn(sr, "RxQ free threshold is not supported");
/* Drop on overflow is the only supported behaviour, see dev_infos_get() */
336 if (rx_conf->rx_drop_en == 0)
337 sfcr_warn(sr, "RxQ drop disable is not supported");
339 if (rx_conf->rx_deferred_start) {
340 sfcr_err(sr, "Deferred start is not supported");
344 sfcr_info(sr, "done: %s", rte_strerror(-ret));
/*
 * ethdev .rx_queue_setup callback.
 * Validates the configuration, allocates the RxQ control structure,
 * creates the SP/SC ring towards the proxy and registers the queue with
 * the representor proxy. Cleanup on failure unwinds in reverse order.
 * NOTE(review): rx_conf is annotated __rte_unused in the signature yet is
 * passed to sfc_repr_rx_qcheck_conf() below -- the annotation looks stale
 * and could be dropped; confirm in the full file.
 */
350 sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
351 uint16_t nb_rx_desc, unsigned int socket_id,
352 __rte_unused const struct rte_eth_rxconf *rx_conf,
353 struct rte_mempool *mb_pool)
355 struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
356 struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
357 struct sfc_repr_rxq *rxq;
360 sfcr_info(sr, "entry");
362 ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
364 goto fail_check_conf;
/* Control structure in shared memory so secondary processes can see it */
367 rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
368 RTE_CACHE_LINE_SIZE, socket_id);
370 sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
374 ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
375 "rx", rx_queue_id, nb_rx_desc,
376 socket_id, &rxq->ring);
378 sfcr_err(sr, "%s() failed to create ring", __func__);
379 goto fail_ring_create;
382 ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
383 rx_queue_id, rxq->ring, mb_pool);
387 sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
388 goto fail_proxy_add_rxq;
391 dev->data->rx_queues[rx_queue_id] = rxq;
393 sfcr_info(sr, "done");
/* fail_proxy_add_rxq: destroy the ring created above */
398 rte_ring_free(rxq->ring);
/* fail_ring_create / fail_check_conf: remaining unwind elided in this view */
405 sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
/*
 * ethdev .rx_queue_release callback.
 * Unregisters the queue from the proxy, then frees the ring
 * (the rte_free of the control structure is elided in this view).
 */
410 sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
412 struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
413 struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];
415 sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
416 rte_ring_free(rxq->ring);
/*
 * Validate TxQ configuration for a representor.
 * Unsupported thresholds only warn (best effort); deferred start is a
 * hard error. Returns 0 or negative errno (ret assignment on the error
 * path is elided in this view). Mirrors sfc_repr_rx_qcheck_conf().
 */
421 sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
422 const struct rte_eth_txconf *tx_conf)
426 sfcr_info(sr, "entry");
428 if (tx_conf->tx_rs_thresh != 0)
429 sfcr_warn(sr, "RS bit in transmit descriptor is not supported");
431 if (tx_conf->tx_free_thresh != 0)
432 sfcr_warn(sr, "TxQ free threshold is not supported");
434 if (tx_conf->tx_thresh.pthresh != 0 ||
435 tx_conf->tx_thresh.hthresh != 0 ||
436 tx_conf->tx_thresh.wthresh != 0) {
438 "prefetch/host/writeback thresholds are not supported");
441 if (tx_conf->tx_deferred_start) {
442 sfcr_err(sr, "Deferred start is not supported");
446 sfcr_info(sr, "done: %s", rte_strerror(-ret));
/*
 * ethdev .tx_queue_setup callback.
 * Validates the configuration, allocates the TxQ control structure,
 * creates the SP/SC ring towards the proxy and registers the queue with
 * the representor proxy. Structure parallels sfc_repr_rx_queue_setup().
 */
452 sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
453 uint16_t nb_tx_desc, unsigned int socket_id,
454 const struct rte_eth_txconf *tx_conf)
456 struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
457 struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
458 struct sfc_repr_txq *txq;
461 sfcr_info(sr, "entry");
463 ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
465 goto fail_check_conf;
/* Control structure in shared memory so secondary processes can see it */
468 txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
469 RTE_CACHE_LINE_SIZE, socket_id);
473 ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
474 "tx", tx_queue_id, nb_tx_desc,
475 socket_id, &txq->ring);
477 goto fail_ring_create;
/* Last argument (elided here) is presumably &txq->egress_mport -- verify */
479 ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
480 tx_queue_id, txq->ring,
483 goto fail_proxy_add_txq;
485 dev->data->tx_queues[tx_queue_id] = txq;
487 sfcr_info(sr, "done");
/* fail_proxy_add_txq: destroy the ring created above */
492 rte_ring_free(txq->ring);
/* fail_ring_create / fail_check_conf: remaining unwind elided in this view */
499 sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
/*
 * ethdev .tx_queue_release callback.
 * Unregisters the queue from the proxy, then frees the ring
 * (the rte_free of the control structure is elided in this view).
 */
504 sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
506 struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
507 struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];
509 sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
510 rte_ring_free(txq->ring);
/*
 * Move a CONFIGURED representor back to INITIALIZED via CLOSING.
 * Caller must hold the device lock (asserted below). Currently there are
 * no actual close actions between the two state changes.
 */
515 sfc_repr_close(struct sfc_repr *sr)
517 SFC_ASSERT(sfc_repr_lock_is_locked(sr));
519 SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
520 sr->state = SFC_ETHDEV_CLOSING;
522 /* Put representor close actions here */
524 sr->state = SFC_ETHDEV_INITIALIZED;
/*
 * ethdev .dev_close callback.
 * Closes the representor if configured, releases all Rx/Tx queues and
 * rolls back primary-process initialization (removes the proxy port and
 * finalizes the lock). Locking around the state switch is elided in this
 * view -- presumably taken via sfc_repr_lock(); confirm in the full file.
 */
528 sfc_repr_dev_close(struct rte_eth_dev *dev)
530 struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
531 struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
534 sfcr_info(sr, "entry");
538 case SFC_ETHDEV_CONFIGURED:
/* sfc_repr_close() call is elided here; it restores INITIALIZED state */
540 SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
/* fallthrough -- confirm against the full file */
542 case SFC_ETHDEV_INITIALIZED:
545 sfcr_err(sr, "unexpected adapter state %u on close", sr->state);
/* Release all queues set up earlier; clear the slots to avoid reuse */
549 for (i = 0; i < dev->data->nb_rx_queues; i++) {
550 sfc_repr_rx_queue_release(dev, i);
551 dev->data->rx_queues[i] = NULL;
554 for (i = 0; i < dev->data->nb_tx_queues; i++) {
555 sfc_repr_tx_queue_release(dev, i);
556 dev->data->tx_queues[i] = NULL;
560 * Cleanup all resources.
561 * Rollback primary process sfc_repr_eth_dev_init() below.
/* Best effort: the proxy port removal result is deliberately ignored */
564 (void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);
569 sfc_repr_lock_fini(sr);
571 sfcr_info(sr, "done");
/* Representor ethdev operations: configure/close/info and queue setup/release. */
578 static const struct eth_dev_ops sfc_repr_dev_ops = {
579 .dev_configure = sfc_repr_dev_configure,
580 .dev_close = sfc_repr_dev_close,
581 .dev_infos_get = sfc_repr_dev_infos_get,
582 .rx_queue_setup = sfc_repr_rx_queue_setup,
583 .rx_queue_release = sfc_repr_rx_queue_release,
584 .tx_queue_setup = sfc_repr_tx_queue_setup,
585 .tx_queue_release = sfc_repr_tx_queue_release,
/*
 * Parameters passed from sfc_repr_create() to sfc_repr_eth_dev_init()
 * via rte_eth_dev_create(). pf_port_id/repr_id members are elided in
 * this view (referenced below as repr_data->pf_port_id / ->repr_id).
 */
589 struct sfc_repr_init_data {
592 uint16_t switch_domain_id;
/* Entity m-port selector of the represented function */
593 efx_mport_sel_t mport_sel;
/*
 * Thin wrapper over sfc_mae_assign_switch_port(); presumably converts the
 * positive errno 'rc' to the negative convention used here -- confirm in
 * the full file (the return statement is elided in this view).
 */
597 sfc_repr_assign_mae_switch_port(uint16_t switch_domain_id,
598 const struct sfc_mae_switch_port_request *req,
599 uint16_t *switch_port_id)
603 rc = sfc_mae_assign_switch_port(switch_domain_id, req, switch_port_id);
/*
 * rte_eth_dev_create() init callback for a representor ethdev.
 * Assigns a MAE switch port, registers the representor with the proxy,
 * allocates per-process private data, initializes the lock, fills shared
 * data and ethdev fields, then marks the device INITIALIZED. Error paths
 * unwind in reverse order via the fail_* labels (some are elided here).
 */
610 sfc_repr_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
612 const struct sfc_repr_init_data *repr_data = init_params;
613 struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
614 struct sfc_mae_switch_port_request switch_port_request;
615 efx_mport_sel_t ethdev_mport_sel;
620 * Currently there is no mport we can use for representor's
621 * ethdev. Use an invalid one for now. This way representors
622 * can be instantiated.
624 efx_mae_mport_invalid(&ethdev_mport_sel);
626 memset(&switch_port_request, 0, sizeof(switch_port_request));
627 switch_port_request.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
628 switch_port_request.ethdev_mportp = &ethdev_mport_sel;
629 switch_port_request.entity_mportp = &repr_data->mport_sel;
630 switch_port_request.ethdev_port_id = dev->data->port_id;
632 ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
633 &switch_port_request,
634 &srs->switch_port_id);
637 "%s() failed to assign MAE switch port (domain id %u)",
638 __func__, repr_data->switch_domain_id);
639 goto fail_mae_assign_switch_port;
642 ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
645 &repr_data->mport_sel);
647 SFC_GENERIC_LOG(ERR, "%s() failed to add repr proxy port",
651 goto fail_create_port;
655 * Allocate process private data from heap, since it should not
656 * be located in shared memory allocated using rte_malloc() API.
658 sr = calloc(1, sizeof(*sr));
/* sr == NULL handling is elided in this view -- confirm in the full file */
664 sfc_repr_lock_init(sr);
667 dev->process_private = sr;
669 srs->pf_port_id = repr_data->pf_port_id;
670 srs->repr_id = repr_data->repr_id;
671 srs->switch_domain_id = repr_data->switch_domain_id;
673 dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
674 dev->data->representor_id = srs->repr_id;
675 dev->data->backer_port_id = srs->pf_port_id;
/* MAC address storage in shared memory; zeroed (no real MAC yet) */
677 dev->data->mac_addrs = rte_zmalloc("sfcr", RTE_ETHER_ADDR_LEN, 0);
678 if (dev->data->mac_addrs == NULL) {
683 dev->dev_ops = &sfc_repr_dev_ops;
685 sr->state = SFC_ETHDEV_INITIALIZED;
/* Error unwind: remove the proxy port registered above (best effort) */
695 (void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
699 fail_mae_assign_switch_port:
700 SFC_GENERIC_LOG(ERR, "%s() failed: %s", __func__, rte_strerror(-ret));
/*
 * Create a representor ethdev for the given parent PF port.
 * Builds the device name, packs init parameters and delegates to
 * rte_eth_dev_create() with sfc_repr_eth_dev_init() as the init callback.
 * Returns 0 on success, negative errno otherwise.
 * NOTE(review): a negative snprintf() error return is not distinguished
 * from success by the visible truncation check -- confirm in the full file.
 */
705 sfc_repr_create(struct rte_eth_dev *parent, uint16_t representor_id,
706 uint16_t switch_domain_id, const efx_mport_sel_t *mport_sel)
708 struct sfc_repr_init_data repr_data;
709 char name[RTE_ETH_NAME_MAX_LEN];
712 if (snprintf(name, sizeof(name), "net_%s_representor_%u",
713 parent->device->name, representor_id) >=
715 SFC_GENERIC_LOG(ERR, "%s() failed name too long", __func__);
716 return -ENAMETOOLONG;
719 memset(&repr_data, 0, sizeof(repr_data));
720 repr_data.pf_port_id = parent->data->port_id;
721 repr_data.repr_id = representor_id;
722 repr_data.switch_domain_id = switch_domain_id;
723 repr_data.mport_sel = *mport_sel;
/* Shared private data is sized here; process data is allocated in init */
725 ret = rte_eth_dev_create(parent->device, name,
726 sizeof(struct sfc_repr_shared),
728 sfc_repr_eth_dev_init, &repr_data);
730 SFC_GENERIC_LOG(ERR, "%s() failed to create device", __func__);
732 SFC_GENERIC_LOG(INFO, "%s() done: %s", __func__, rte_strerror(-ret));