drivers/net/sfc/sfc_repr.c

/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>

#include "efx.h"

#include "sfc_log.h"
#include "sfc_debug.h"
#include "sfc_repr.h"
#include "sfc_ethdev_state.h"
#include "sfc_repr_proxy_api.h"
#include "sfc_switch.h"
#include "sfc_dp_tx.h"

/** Multi-process shared representor private data */
struct sfc_repr_shared {
        uint16_t                pf_port_id;
        uint16_t                repr_id;
        uint16_t                switch_domain_id;
        uint16_t                switch_port_id;
};

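/*
 * Queue statistics are kept as a packets/bytes pair so that both counters
 * can be updated and read together; see sfc_pkts_bytes_add() and
 * sfc_pkts_bytes_get() usage below.
 */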
struct sfc_repr_queue_stats {
        union sfc_pkts_bytes            packets_bytes;
};

struct sfc_repr_rxq {
        /* Datapath members */
        struct rte_ring                 *ring;
        struct sfc_repr_queue_stats     stats;
};

struct sfc_repr_txq {
        /* Datapath members */
        struct rte_ring                 *ring;
        efx_mport_id_t                  egress_mport;
        struct sfc_repr_queue_stats     stats;
};

/** Primary process representor private data */
struct sfc_repr {
        /**
         * PMD setup and configuration is not thread safe. Since it is not
         * performance sensitive, it is better to guarantee thread safety
         * and add a device-level lock. Adapter control operations which
         * change its state should acquire the lock.
         */
        rte_spinlock_t                  lock;
        enum sfc_ethdev_state           state;
};

#define sfcr_err(sr, ...) \
        do {                                                            \
                const struct sfc_repr *_sr = (sr);                      \
                                                                        \
                (void)_sr;                                              \
                SFC_GENERIC_LOG(ERR, __VA_ARGS__);                      \
        } while (0)

#define sfcr_warn(sr, ...) \
        do {                                                            \
                const struct sfc_repr *_sr = (sr);                      \
                                                                        \
                (void)_sr;                                              \
                SFC_GENERIC_LOG(WARNING, __VA_ARGS__);                  \
        } while (0)

#define sfcr_info(sr, ...) \
        do {                                                            \
                const struct sfc_repr *_sr = (sr);                      \
                                                                        \
                (void)_sr;                                              \
                SFC_GENERIC_LOG(INFO,                                   \
                                RTE_FMT("%s() "                         \
                                RTE_FMT_HEAD(__VA_ARGS__ ,),            \
                                __func__,                               \
                                RTE_FMT_TAIL(__VA_ARGS__ ,)));          \
        } while (0)

static inline struct sfc_repr_shared *
sfc_repr_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
{
        struct sfc_repr_shared *srs = eth_dev->data->dev_private;

        return srs;
}

static inline struct sfc_repr *
sfc_repr_by_eth_dev(struct rte_eth_dev *eth_dev)
{
        struct sfc_repr *sr = eth_dev->process_private;

        return sr;
}

/*
 * Wrapper functions to acquire/release the lock so that the lock can be
 * removed or changed in one place.
 */

static inline void
sfc_repr_lock_init(struct sfc_repr *sr)
{
        rte_spinlock_init(&sr->lock);
}

#if defined(RTE_LIBRTE_SFC_EFX_DEBUG) || defined(RTE_ENABLE_ASSERT)

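/* Only used in SFC_ASSERT() checks, hence the debug-only build */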
static inline int
sfc_repr_lock_is_locked(struct sfc_repr *sr)
{
        return rte_spinlock_is_locked(&sr->lock);
}

#endif

static inline void
sfc_repr_lock(struct sfc_repr *sr)
{
        rte_spinlock_lock(&sr->lock);
}

static inline void
sfc_repr_unlock(struct sfc_repr *sr)
{
        rte_spinlock_unlock(&sr->lock);
}

static inline void
sfc_repr_lock_fini(__rte_unused struct sfc_repr *sr)
{
        /* Just for symmetry of the API */
}

static void
sfc_repr_rx_queue_stop(void *queue)
{
        struct sfc_repr_rxq *rxq = queue;

        if (rxq == NULL)
                return;

        rte_ring_reset(rxq->ring);
}

static void
sfc_repr_tx_queue_stop(void *queue)
{
        struct sfc_repr_txq *txq = queue;

        if (txq == NULL)
                return;

        rte_ring_reset(txq->ring);
}

static uint16_t
sfc_repr_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct sfc_repr_rxq *rxq = rx_queue;
        void **objs = (void *)&rx_pkts[0];
        unsigned int n_rx;

        /* The mbuf port is already filled in correctly by the representor proxy */
        n_rx = rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);

        if (n_rx > 0) {
                unsigned int n_bytes = 0;
                unsigned int i = 0;

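                /* n_rx > 0 is guaranteed here, so do/while needs no entry check */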
                do {
                        n_bytes += rx_pkts[i]->pkt_len;
                } while (++i < n_rx);

                sfc_pkts_bytes_add(&rxq->stats.packets_bytes, n_rx, n_bytes);
        }

        return n_rx;
}

static uint16_t
sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct sfc_repr_txq *txq = tx_queue;
        unsigned int n_bytes = 0;
        unsigned int n_tx;
        void **objs;
        uint16_t i;

        /*
         * The mbuf is likely cache-hot, so set the flag and the egress
         * m-port here instead of in the representor proxy. This should
         * also help to avoid cache bouncing and potentially allows one
         * multi-producer single-consumer ring to be used for all
         * representors.
         *
         * The only potential problem is doing this many times if the
         * enqueue fails and the sender retries.
         */
        for (i = 0; i < nb_pkts; ++i) {
                struct rte_mbuf *m = tx_pkts[i];

                m->ol_flags |= sfc_dp_mport_override;
                *RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset,
                                   efx_mport_id_t *) = txq->egress_mport;
                n_bytes += tx_pkts[i]->pkt_len;
        }

        objs = (void *)&tx_pkts[0];
        n_tx = rte_ring_sp_enqueue_burst(txq->ring, objs, nb_pkts, NULL);

        /*
         * Remove the m-port override flag from packets that were not
         * enqueued. Setting the flag only for enqueued packets after the
         * burst is not possible since the ownership of enqueued packets is
         * transferred to the representor proxy. The same logic applies to
         * counting the enqueued packets' bytes.
         */
        for (i = n_tx; i < nb_pkts; ++i) {
                struct rte_mbuf *m = tx_pkts[i];

                m->ol_flags &= ~sfc_dp_mport_override;
                n_bytes -= m->pkt_len;
        }

        sfc_pkts_bytes_add(&txq->stats.packets_bytes, n_tx, n_bytes);

        return n_tx;
}

static int
sfc_repr_start(struct rte_eth_dev *dev)
{
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct sfc_repr_shared *srs;
        int ret;

        sfcr_info(sr, "entry");

        SFC_ASSERT(sfc_repr_lock_is_locked(sr));

        switch (sr->state) {
        case SFC_ETHDEV_CONFIGURED:
                break;
        case SFC_ETHDEV_STARTED:
                sfcr_info(sr, "already started");
                return 0;
        default:
                ret = -EINVAL;
                goto fail_bad_state;
        }

        sr->state = SFC_ETHDEV_STARTING;

        srs = sfc_repr_shared_by_eth_dev(dev);
        ret = sfc_repr_proxy_start_repr(srs->pf_port_id, srs->repr_id);
        if (ret != 0) {
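                /* Proxy functions return positive errno values */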
                SFC_ASSERT(ret > 0);
                ret = -ret;
                goto fail_start;
        }

        sr->state = SFC_ETHDEV_STARTED;

        sfcr_info(sr, "done");

        return 0;

fail_start:
        sr->state = SFC_ETHDEV_CONFIGURED;

fail_bad_state:
        sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
        return ret;
}

static int
sfc_repr_dev_start(struct rte_eth_dev *dev)
{
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        int ret;

        sfcr_info(sr, "entry");

        sfc_repr_lock(sr);
        ret = sfc_repr_start(dev);
        sfc_repr_unlock(sr);

        if (ret != 0)
                goto fail_start;

        sfcr_info(sr, "done");

        return 0;

fail_start:
        sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
        return ret;
}

static int
sfc_repr_stop(struct rte_eth_dev *dev)
{
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct sfc_repr_shared *srs;
        unsigned int i;
        int ret;

        sfcr_info(sr, "entry");

        SFC_ASSERT(sfc_repr_lock_is_locked(sr));

        switch (sr->state) {
        case SFC_ETHDEV_STARTED:
                break;
        case SFC_ETHDEV_CONFIGURED:
                sfcr_info(sr, "already stopped");
                return 0;
        default:
                sfcr_err(sr, "stop in unexpected state %u", sr->state);
                SFC_ASSERT(B_FALSE);
                ret = -EINVAL;
                goto fail_bad_state;
        }

        srs = sfc_repr_shared_by_eth_dev(dev);
        ret = sfc_repr_proxy_stop_repr(srs->pf_port_id, srs->repr_id);
        if (ret != 0) {
                SFC_ASSERT(ret > 0);
                ret = -ret;
                goto fail_stop;
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++)
                sfc_repr_rx_queue_stop(dev->data->rx_queues[i]);

        for (i = 0; i < dev->data->nb_tx_queues; i++)
                sfc_repr_tx_queue_stop(dev->data->tx_queues[i]);

        sr->state = SFC_ETHDEV_CONFIGURED;
        sfcr_info(sr, "done");

        return 0;

fail_bad_state:
fail_stop:
        sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));

        return ret;
}

static int
sfc_repr_dev_stop(struct rte_eth_dev *dev)
{
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        int ret;

        sfcr_info(sr, "entry");

        sfc_repr_lock(sr);

        ret = sfc_repr_stop(dev);
        if (ret != 0) {
                sfcr_err(sr, "%s() failed to stop representor", __func__);
                goto fail_stop;
        }

        sfc_repr_unlock(sr);

        sfcr_info(sr, "done");

        return 0;

fail_stop:
        sfc_repr_unlock(sr);

        sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));

        return ret;
}

static int
sfc_repr_check_conf(struct sfc_repr *sr, uint16_t nb_rx_queues,
                    const struct rte_eth_conf *conf)
{
        const struct rte_eth_rss_conf *rss_conf;
        int ret = 0;

        sfcr_info(sr, "entry");

        if (conf->link_speeds != 0) {
                sfcr_err(sr, "specific link speeds not supported");
                ret = -EINVAL;
        }

        switch (conf->rxmode.mq_mode) {
        case ETH_MQ_RX_RSS:
                if (nb_rx_queues != 1) {
                        sfcr_err(sr, "Rx RSS is not supported with %u queues",
                                 nb_rx_queues);
                        ret = -EINVAL;
                        break;
                }

                rss_conf = &conf->rx_adv_conf.rss_conf;
                if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 ||
                    rss_conf->rss_hf != 0) {
                        sfcr_err(sr, "Rx RSS configuration is not supported");
                        ret = -EINVAL;
                }
                break;
        case ETH_MQ_RX_NONE:
                break;
        default:
                sfcr_err(sr, "Rx MQ modes other than RSS are not supported");
                ret = -EINVAL;
                break;
        }

        if (conf->txmode.mq_mode != ETH_MQ_TX_NONE) {
                sfcr_err(sr, "Tx MQ modes are not supported");
                ret = -EINVAL;
        }

        if (conf->lpbk_mode != 0) {
                sfcr_err(sr, "loopback not supported");
                ret = -EINVAL;
        }

        if (conf->dcb_capability_en != 0) {
                sfcr_err(sr, "priority-based flow control not supported");
                ret = -EINVAL;
        }

        if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
                sfcr_err(sr, "Flow Director not supported");
                ret = -EINVAL;
        }

        if (conf->intr_conf.lsc != 0) {
                sfcr_err(sr, "link status change interrupt not supported");
                ret = -EINVAL;
        }

        if (conf->intr_conf.rxq != 0) {
                sfcr_err(sr, "receive queue interrupt not supported");
                ret = -EINVAL;
        }

        if (conf->intr_conf.rmv != 0) {
                sfcr_err(sr, "remove interrupt not supported");
                ret = -EINVAL;
        }

        sfcr_info(sr, "done %d", ret);

        return ret;
}

static int
sfc_repr_configure(struct sfc_repr *sr, uint16_t nb_rx_queues,
                   const struct rte_eth_conf *conf)
{
        int ret;

        sfcr_info(sr, "entry");

        SFC_ASSERT(sfc_repr_lock_is_locked(sr));

        ret = sfc_repr_check_conf(sr, nb_rx_queues, conf);
        if (ret != 0)
                goto fail_check_conf;

        sr->state = SFC_ETHDEV_CONFIGURED;

        sfcr_info(sr, "done");

        return 0;

fail_check_conf:
        sfcr_info(sr, "failed: %s", rte_strerror(-ret));
        return ret;
}

static int
sfc_repr_dev_configure(struct rte_eth_dev *dev)
{
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct rte_eth_dev_data *dev_data = dev->data;
        int ret;

        sfcr_info(sr, "entry n_rxq=%u n_txq=%u",
                  dev_data->nb_rx_queues, dev_data->nb_tx_queues);

        sfc_repr_lock(sr);
        switch (sr->state) {
        case SFC_ETHDEV_CONFIGURED:
                /* FALLTHROUGH */
        case SFC_ETHDEV_INITIALIZED:
                ret = sfc_repr_configure(sr, dev_data->nb_rx_queues,
                                         &dev_data->dev_conf);
                break;
        default:
                sfcr_err(sr, "unexpected adapter state %u to configure",
                         sr->state);
                ret = -EINVAL;
                break;
        }
        sfc_repr_unlock(sr);

        sfcr_info(sr, "done %s", rte_strerror(-ret));

        return ret;
}

static int
sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
                       struct rte_eth_dev_info *dev_info)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);

        dev_info->device = dev->device;

        dev_info->max_rx_queues = SFC_REPR_RXQ_MAX;
        dev_info->max_tx_queues = SFC_REPR_TXQ_MAX;
        dev_info->default_rxconf.rx_drop_en = 1;
        dev_info->switch_info.domain_id = srs->switch_domain_id;
        dev_info->switch_info.port_id = srs->switch_port_id;

        return 0;
}

static int
sfc_repr_dev_link_update(struct rte_eth_dev *dev,
                         __rte_unused int wait_to_complete)
{
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct rte_eth_link link;

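        /* A stopped representor reports unknown link; a started one is up */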
        if (sr->state != SFC_ETHDEV_STARTED) {
                sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &link);
        } else {
                memset(&link, 0, sizeof(link));
                link.link_status = ETH_LINK_UP;
                link.link_speed = ETH_SPEED_NUM_UNKNOWN;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static int
sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
                     const char *type_name, uint16_t qid, uint16_t nb_desc,
                     unsigned int socket_id, struct rte_ring **ring)
{
        char ring_name[RTE_RING_NAMESIZE];
        int ret;

        ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
                       pf_port_id, repr_id, type_name, qid);
        if (ret >= (int)sizeof(ring_name))
                return -ENAMETOOLONG;

        /*
         * Single producer/consumer rings are used since the Tx/Rx packet
         * burst API for representors is guaranteed to be called from a
         * single thread, and the user of the other end (the representor
         * proxy) is also single-threaded.
         */
        *ring = rte_ring_create(ring_name, nb_desc, socket_id,
                               RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (*ring == NULL)
                return -rte_errno;

        return 0;
}

static int
sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
                        const struct rte_eth_rxconf *rx_conf)
{
        int ret = 0;

        sfcr_info(sr, "entry");

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                sfcr_warn(sr,
                        "RxQ prefetch/host/writeback thresholds are not supported");
        }

        if (rx_conf->rx_free_thresh != 0)
                sfcr_warn(sr, "RxQ free threshold is not supported");

        if (rx_conf->rx_drop_en == 0)
                sfcr_warn(sr, "RxQ drop disable is not supported");

        if (rx_conf->rx_deferred_start) {
                sfcr_err(sr, "Deferred start is not supported");
                ret = -EINVAL;
        }

        sfcr_info(sr, "done: %s", rte_strerror(-ret));

        return ret;
}

static int
sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                        uint16_t nb_rx_desc, unsigned int socket_id,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mb_pool)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct sfc_repr_rxq *rxq;
        int ret;

        sfcr_info(sr, "entry");

        ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
        if (ret != 0)
                goto fail_check_conf;

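        /* Preset the error code for the allocation failure path below */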
        ret = -ENOMEM;
        rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL) {
                sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
                goto fail_rxq_alloc;
        }

        ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
                                   "rx", rx_queue_id, nb_rx_desc,
                                   socket_id, &rxq->ring);
        if (ret != 0) {
                sfcr_err(sr, "%s() failed to create ring", __func__);
                goto fail_ring_create;
        }

        ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
                                     rx_queue_id, rxq->ring, mb_pool);
        if (ret != 0) {
                SFC_ASSERT(ret > 0);
                ret = -ret;
                sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
                goto fail_proxy_add_rxq;
        }

        dev->data->rx_queues[rx_queue_id] = rxq;

        sfcr_info(sr, "done");

        return 0;

fail_proxy_add_rxq:
        rte_ring_free(rxq->ring);

fail_ring_create:
        rte_free(rxq);

fail_rxq_alloc:
fail_check_conf:
        sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
        return ret;
}

static void
sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];

        sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
        rte_ring_free(rxq->ring);
        rte_free(rxq);
}

static int
sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
                        const struct rte_eth_txconf *tx_conf)
{
        int ret = 0;

        sfcr_info(sr, "entry");

        if (tx_conf->tx_rs_thresh != 0)
                sfcr_warn(sr, "RS bit in transmit descriptor is not supported");

        if (tx_conf->tx_free_thresh != 0)
                sfcr_warn(sr, "TxQ free threshold is not supported");

        if (tx_conf->tx_thresh.pthresh != 0 ||
            tx_conf->tx_thresh.hthresh != 0 ||
            tx_conf->tx_thresh.wthresh != 0) {
                sfcr_warn(sr,
                        "prefetch/host/writeback thresholds are not supported");
        }

        if (tx_conf->tx_deferred_start) {
                sfcr_err(sr, "Deferred start is not supported");
                ret = -EINVAL;
        }

        sfcr_info(sr, "done: %s", rte_strerror(-ret));

        return ret;
}

static int
sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                        uint16_t nb_tx_desc, unsigned int socket_id,
                        const struct rte_eth_txconf *tx_conf)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct sfc_repr_txq *txq;
        int ret;

        sfcr_info(sr, "entry");

        ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
        if (ret != 0)
                goto fail_check_conf;

        ret = -ENOMEM;
        txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
                goto fail_txq_alloc;

        ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
                                   "tx", tx_queue_id, nb_tx_desc,
                                   socket_id, &txq->ring);
        if (ret != 0)
                goto fail_ring_create;

        ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
                                     tx_queue_id, txq->ring,
                                     &txq->egress_mport);
        if (ret != 0)
                goto fail_proxy_add_txq;

        dev->data->tx_queues[tx_queue_id] = txq;

        sfcr_info(sr, "done");

        return 0;

fail_proxy_add_txq:
        rte_ring_free(txq->ring);

fail_ring_create:
        rte_free(txq);

fail_txq_alloc:
fail_check_conf:
        sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
        return ret;
}

static void
sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];

        sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
        rte_ring_free(txq->ring);
        rte_free(txq);
}

static void
sfc_repr_close(struct sfc_repr *sr)
{
        SFC_ASSERT(sfc_repr_lock_is_locked(sr));

        SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
        sr->state = SFC_ETHDEV_CLOSING;

        /* Put representor close actions here */

        sr->state = SFC_ETHDEV_INITIALIZED;
}

static int
sfc_repr_dev_close(struct rte_eth_dev *dev)
{
        struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        unsigned int i;

        sfcr_info(sr, "entry");

        sfc_repr_lock(sr);
        switch (sr->state) {
        case SFC_ETHDEV_STARTED:
                sfc_repr_stop(dev);
                SFC_ASSERT(sr->state == SFC_ETHDEV_CONFIGURED);
                /* FALLTHROUGH */
        case SFC_ETHDEV_CONFIGURED:
                sfc_repr_close(sr);
                SFC_ASSERT(sr->state == SFC_ETHDEV_INITIALIZED);
                /* FALLTHROUGH */
        case SFC_ETHDEV_INITIALIZED:
                break;
        default:
                sfcr_err(sr, "unexpected adapter state %u on close", sr->state);
                break;
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                sfc_repr_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                sfc_repr_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }

        /*
         * Clean up all resources.
         * Roll back primary process sfc_repr_eth_dev_init() below.
         */

        (void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id);

        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;
        dev->dev_ops = NULL;

        sfc_repr_unlock(sr);
        sfc_repr_lock_fini(sr);

        sfcr_info(sr, "done");

        free(sr);

        return 0;
}

static int
sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        union sfc_pkts_bytes queue_stats;
        uint16_t i;

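        /* Sum per-queue counters snapshotted with sfc_pkts_bytes_get() */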
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct sfc_repr_rxq *rxq = dev->data->rx_queues[i];

                sfc_pkts_bytes_get(&rxq->stats.packets_bytes,
                                   &queue_stats);

                stats->ipackets += queue_stats.pkts;
                stats->ibytes += queue_stats.bytes;
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct sfc_repr_txq *txq = dev->data->tx_queues[i];

                sfc_pkts_bytes_get(&txq->stats.packets_bytes,
                                   &queue_stats);

                stats->opackets += queue_stats.pkts;
                stats->obytes += queue_stats.bytes;
        }

        return 0;
}

static const struct eth_dev_ops sfc_repr_dev_ops = {
        .dev_configure                  = sfc_repr_dev_configure,
        .dev_start                      = sfc_repr_dev_start,
        .dev_stop                       = sfc_repr_dev_stop,
        .dev_close                      = sfc_repr_dev_close,
        .dev_infos_get                  = sfc_repr_dev_infos_get,
        .link_update                    = sfc_repr_dev_link_update,
        .stats_get                      = sfc_repr_stats_get,
        .rx_queue_setup                 = sfc_repr_rx_queue_setup,
        .rx_queue_release               = sfc_repr_rx_queue_release,
        .tx_queue_setup                 = sfc_repr_tx_queue_setup,
        .tx_queue_release               = sfc_repr_tx_queue_release,
};

struct sfc_repr_init_data {
        uint16_t                pf_port_id;
        uint16_t                switch_domain_id;
        efx_mport_sel_t         mport_sel;
        efx_pcie_interface_t    intf;
        uint16_t                pf;
        uint16_t                vf;
};

static int
sfc_repr_assign_mae_switch_port(uint16_t switch_domain_id,
                                const struct sfc_mae_switch_port_request *req,
                                uint16_t *switch_port_id)
{
        int rc;

        rc = sfc_mae_assign_switch_port(switch_domain_id, req, switch_port_id);

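        /* sfc_mae_assign_switch_port() returns a non-negative errno; negate it */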
        SFC_ASSERT(rc >= 0);
        return -rc;
}

static int
sfc_repr_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
{
        const struct sfc_repr_init_data *repr_data = init_params;
        struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
        struct sfc_mae_switch_port_request switch_port_request;
        efx_mport_sel_t ethdev_mport_sel;
        struct sfc_repr *sr;
        int ret;

        /*
         * Currently there is no m-port we can use for the representor's
         * ethdev. Use an invalid one for now so that representors can be
         * instantiated.
         */
        efx_mae_mport_invalid(&ethdev_mport_sel);

        memset(&switch_port_request, 0, sizeof(switch_port_request));
        switch_port_request.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
        switch_port_request.ethdev_mportp = &ethdev_mport_sel;
        switch_port_request.entity_mportp = &repr_data->mport_sel;
        switch_port_request.ethdev_port_id = dev->data->port_id;
        switch_port_request.port_data.repr.intf = repr_data->intf;
        switch_port_request.port_data.repr.pf = repr_data->pf;
        switch_port_request.port_data.repr.vf = repr_data->vf;

        ret = sfc_repr_assign_mae_switch_port(repr_data->switch_domain_id,
                                              &switch_port_request,
                                              &srs->switch_port_id);
        if (ret != 0) {
                SFC_GENERIC_LOG(ERR,
                        "%s() failed to assign MAE switch port (domain id %u)",
                        __func__, repr_data->switch_domain_id);
                goto fail_mae_assign_switch_port;
        }

        ret = sfc_repr_proxy_add_port(repr_data->pf_port_id,
                                      srs->switch_port_id,
                                      dev->data->port_id,
                                      &repr_data->mport_sel);
        if (ret != 0) {
                SFC_GENERIC_LOG(ERR, "%s() failed to add repr proxy port",
                                __func__);
                SFC_ASSERT(ret > 0);
                ret = -ret;
                goto fail_create_port;
        }

        /*
         * Allocate process-private data from the heap, since it must not
         * be located in shared memory allocated using the rte_malloc() API.
         */
        sr = calloc(1, sizeof(*sr));
        if (sr == NULL) {
                ret = -ENOMEM;
                goto fail_alloc_sr;
        }

        sfc_repr_lock_init(sr);
        sfc_repr_lock(sr);

        dev->process_private = sr;

        srs->pf_port_id = repr_data->pf_port_id;
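        /* The switch port ID doubles as the representor ID in proxy calls */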
        srs->repr_id = srs->switch_port_id;
        srs->switch_domain_id = repr_data->switch_domain_id;

        dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
        dev->data->representor_id = srs->repr_id;
        dev->data->backer_port_id = srs->pf_port_id;

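        /* No real MAC address is known yet; expose an all-zero address */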
        dev->data->mac_addrs = rte_zmalloc("sfcr", RTE_ETHER_ADDR_LEN, 0);
        if (dev->data->mac_addrs == NULL) {
                ret = -ENOMEM;
                goto fail_mac_addrs;
        }

        dev->rx_pkt_burst = sfc_repr_rx_burst;
        dev->tx_pkt_burst = sfc_repr_tx_burst;
        dev->dev_ops = &sfc_repr_dev_ops;

        sr->state = SFC_ETHDEV_INITIALIZED;
        sfc_repr_unlock(sr);

        return 0;

fail_mac_addrs:
        sfc_repr_unlock(sr);
        free(sr);

fail_alloc_sr:
        (void)sfc_repr_proxy_del_port(repr_data->pf_port_id,
                                      srs->switch_port_id);

fail_create_port:
fail_mae_assign_switch_port:
        SFC_GENERIC_LOG(ERR, "%s() failed: %s", __func__, rte_strerror(-ret));
        return ret;
}

int
sfc_repr_create(struct rte_eth_dev *parent,
                struct sfc_repr_entity_info *entity,
                uint16_t switch_domain_id,
                const efx_mport_sel_t *mport_sel)
{
        struct sfc_repr_init_data repr_data;
        char name[RTE_ETH_NAME_MAX_LEN];
        int controller;
        int ret;
        int rc;
        struct rte_eth_dev *dev;

        controller = -1;
        rc = sfc_mae_switch_domain_get_controller(switch_domain_id,
                                                  entity->intf, &controller);
        if (rc != 0) {
                SFC_GENERIC_LOG(ERR, "%s() failed to get DPDK controller for %d",
                                __func__, entity->intf);
                return -rc;
        }

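        /* Build the ethdev name: net_<parent>_representor_c<N>pf<N>[vf<N>] */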
        switch (entity->type) {
        case RTE_ETH_REPRESENTOR_VF:
                ret = snprintf(name, sizeof(name), "net_%s_representor_c%upf%uvf%u",
                               parent->device->name, controller, entity->pf,
                               entity->vf);
                break;
        case RTE_ETH_REPRESENTOR_PF:
                ret = snprintf(name, sizeof(name), "net_%s_representor_c%upf%u",
                               parent->device->name, controller, entity->pf);
                break;
        default:
                return -ENOTSUP;
        }

        if (ret >= (int)sizeof(name)) {
                SFC_GENERIC_LOG(ERR, "%s() failed: name too long", __func__);
                return -ENAMETOOLONG;
        }

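        /* Skip creation if the representor ethdev already exists */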
        dev = rte_eth_dev_allocated(name);
        if (dev == NULL) {
                memset(&repr_data, 0, sizeof(repr_data));
                repr_data.pf_port_id = parent->data->port_id;
                repr_data.switch_domain_id = switch_domain_id;
                repr_data.mport_sel = *mport_sel;
                repr_data.intf = entity->intf;
                repr_data.pf = entity->pf;
                repr_data.vf = entity->vf;

                ret = rte_eth_dev_create(parent->device, name,
                                         sizeof(struct sfc_repr_shared),
                                         NULL, NULL,
                                         sfc_repr_eth_dev_init, &repr_data);
                if (ret != 0) {
                        SFC_GENERIC_LOG(ERR, "%s() failed to create device",
                                        __func__);
                        return ret;
                }
        }

        return 0;
}