ethdev: separate driver APIs
[dpdk.git] / drivers / net / failsafe / failsafe_ops.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdbool.h>
#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
        /* Max possible number of elements */
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
        .max_hash_mac_addrs = UINT32_MAX,
        .max_vfs = UINT16_MAX,
        .max_vmdq_pools = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        /*
         * Set of capabilities that can be verified upon
         * configuring a sub-device.
         */
        .rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_TCP_LRO |
                DEV_RX_OFFLOAD_QINQ_STRIP |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_MACSEC_STRIP |
                DEV_RX_OFFLOAD_HEADER_SPLIT |
                DEV_RX_OFFLOAD_VLAN_FILTER |
                DEV_RX_OFFLOAD_VLAN_EXTEND |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_CRC_STRIP |
                DEV_RX_OFFLOAD_SCATTER |
                DEV_RX_OFFLOAD_TIMESTAMP |
                DEV_RX_OFFLOAD_SECURITY,
        .rx_queue_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_TCP_LRO |
                DEV_RX_OFFLOAD_QINQ_STRIP |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                DEV_RX_OFFLOAD_MACSEC_STRIP |
                DEV_RX_OFFLOAD_HEADER_SPLIT |
                DEV_RX_OFFLOAD_VLAN_FILTER |
                DEV_RX_OFFLOAD_VLAN_EXTEND |
                DEV_RX_OFFLOAD_JUMBO_FRAME |
                DEV_RX_OFFLOAD_CRC_STRIP |
                DEV_RX_OFFLOAD_SCATTER |
                DEV_RX_OFFLOAD_TIMESTAMP |
                DEV_RX_OFFLOAD_SECURITY,
        .tx_offload_capa = 0x0,
        .flow_type_rss_offloads = 0x0,
};

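/*
 * Check that the Tx offloads requested by the application are a subset of
 * what the aggregated sub-devices support, then configure every probed
 * sub-device with the fail-safe port configuration. RMV and LSC interrupts
 * are requested per sub-device only when the underlying device advertises
 * them, and the matching callbacks are registered so that hot-unplug and
 * link-status changes are tracked for each sub-device.
 */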
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint64_t supp_tx_offloads;
        uint64_t tx_offloads;
        uint8_t i;
        int ret;

        supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
        tx_offloads = dev->data->dev_conf.txmode.offloads;
        if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
                rte_errno = ENOTSUP;
                ERROR("Some Tx offloads are not supported, "
                      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
                      tx_offloads, supp_tx_offloads);
                return -rte_errno;
        }
        FOREACH_SUBDEV(sdev, i, dev) {
                int rmv_interrupt = 0;
                int lsc_interrupt = 0;
                int lsc_enabled;

                if (sdev->state != DEV_PROBED)
                        continue;

                rmv_interrupt = ETH(sdev)->data->dev_flags &
                                RTE_ETH_DEV_INTR_RMV;
                if (rmv_interrupt) {
                        DEBUG("Enabling RMV interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.rmv = 1;
                } else {
                        DEBUG("sub_device %d does not support RMV event", i);
                }
                lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
                lsc_interrupt = lsc_enabled &&
                                (ETH(sdev)->data->dev_flags &
                                 RTE_ETH_DEV_INTR_LSC);
                if (lsc_interrupt) {
                        DEBUG("Enabling LSC interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.lsc = 1;
                } else if (lsc_enabled && !lsc_interrupt) {
                        DEBUG("Disabling LSC interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.lsc = 0;
                }
                DEBUG("Configuring sub-device %d", i);
                sdev->remove = 0;
                ret = rte_eth_dev_configure(PORT_ID(sdev),
                                        dev->data->nb_rx_queues,
                                        dev->data->nb_tx_queues,
                                        &dev->data->dev_conf);
                if (ret) {
                        if (!fs_err(sdev, ret))
                                continue;
                        ERROR("Could not configure sub_device %d", i);
                        return ret;
                }
                if (rmv_interrupt) {
                        ret = rte_eth_dev_callback_register(PORT_ID(sdev),
                                        RTE_ETH_EVENT_INTR_RMV,
                                        failsafe_eth_rmv_event_callback,
                                        sdev);
                        if (ret)
                                WARN("Failed to register RMV callback for sub_device %d",
                                     SUB_ID(sdev));
                }
                dev->data->dev_conf.intr_conf.rmv = 0;
                if (lsc_interrupt) {
                        ret = rte_eth_dev_callback_register(PORT_ID(sdev),
                                                RTE_ETH_EVENT_INTR_LSC,
                                                failsafe_eth_lsc_event_callback,
                                                dev);
                        if (ret)
                                WARN("Failed to register LSC callback for sub_device %d",
                                     SUB_ID(sdev));
                }
                dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
                sdev->state = DEV_ACTIVE;
        }
        if (PRIV(dev)->state < DEV_ACTIVE)
                PRIV(dev)->state = DEV_ACTIVE;
        return 0;
}

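/*
 * Start every configured sub-device, then refresh the sub-device used for
 * the data path through fs_switch_dev().
 */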
static int
fs_dev_start(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_ACTIVE)
                        continue;
                DEBUG("Starting sub_device %d", i);
                ret = rte_eth_dev_start(PORT_ID(sdev));
                if (ret) {
                        if (!fs_err(sdev, ret))
                                continue;
                        return ret;
                }
                sdev->state = DEV_STARTED;
        }
        if (PRIV(dev)->state < DEV_STARTED)
                PRIV(dev)->state = DEV_STARTED;
        fs_switch_dev(dev, NULL);
        return 0;
}

static void
fs_dev_stop(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        PRIV(dev)->state = DEV_STARTED - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
                rte_eth_dev_stop(PORT_ID(sdev));
                sdev->state = DEV_STARTED - 1;
        }
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
                ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
                if ((ret = fs_err(sdev, ret))) {
                        ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
                ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
                if ((ret = fs_err(sdev, ret))) {
                        ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

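/*
 * Cancel the pending hot-plug alarm, stop the port if it is still running,
 * close every active sub-device and release all fail-safe Rx/Tx queues.
 */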
static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        failsafe_hotplug_alarm_cancel(dev);
        if (PRIV(dev)->state == DEV_STARTED)
                dev->dev_ops->dev_stop(dev);
        PRIV(dev)->state = DEV_ACTIVE - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Closing sub_device %d", i);
                rte_eth_dev_close(PORT_ID(sdev));
                sdev->state = DEV_ACTIVE - 1;
        }
        fs_dev_free_queues(dev);
}

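/*
 * An Rx queue offload set is valid when it is a subset of the combined
 * per-port and per-queue Rx capabilities, and when it does not change any
 * port-level offload already selected in rxmode.offloads (only pure
 * per-queue bits may differ from the port configuration).
 */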
static bool
fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
        uint64_t port_offloads;
        uint64_t queue_supp_offloads;
        uint64_t port_supp_offloads;

        port_offloads = dev->data->dev_conf.rxmode.offloads;
        queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
        port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
        if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
             offloads)
                return false;
        /* Verify we have no conflict with port offloads */
        if ((port_offloads ^ offloads) & port_supp_offloads)
                return false;
        return true;
}

static void
fs_rx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct rxq *rxq;

        if (queue == NULL)
                return;
        rxq = queue;
        dev = rxq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, rx_queue_release)
                        (ETH(sdev)->data->rx_queues[rxq->qid]);
        dev->data->rx_queues[rxq->qid] = NULL;
        rte_free(rxq);
}

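/*
 * Release any queue previously set up at this index, validate the requested
 * offloads, then allocate the fail-safe shadow Rx queue (including one
 * reference counter per potential sub-device) and create the real queue on
 * every active sub-device.
 */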
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t rx_queue_id,
                uint16_t nb_rx_desc,
                unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool)
{
        struct sub_device *sdev;
        struct rxq *rxq;
        uint8_t i;
        int ret;

        rxq = dev->data->rx_queues[rx_queue_id];
        if (rxq != NULL) {
                fs_rx_queue_release(rxq);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
        /* Verify application offloads are valid for our port and queue. */
        if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
                rte_errno = ENOTSUP;
                ERROR("Rx queue offloads 0x%" PRIx64
                      " don't match port offloads 0x%" PRIx64
                      " or supported offloads 0x%" PRIx64,
                      rx_conf->offloads,
                      dev->data->dev_conf.rxmode.offloads,
                      PRIV(dev)->infos.rx_offload_capa |
                      PRIV(dev)->infos.rx_queue_offload_capa);
                return -rte_errno;
        }
        rxq = rte_zmalloc(NULL,
                          sizeof(*rxq) +
                          sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL)
                return -ENOMEM;
        FOREACH_SUBDEV(sdev, i, dev)
                rte_atomic64_init(&rxq->refcnt[i]);
        rxq->qid = rx_queue_id;
        rxq->socket_id = socket_id;
        rxq->info.mp = mb_pool;
        rxq->info.conf = *rx_conf;
        rxq->info.nb_desc = nb_rx_desc;
        rxq->priv = PRIV(dev);
        rxq->sdev = PRIV(dev)->subs;
        dev->data->rx_queues[rx_queue_id] = rxq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
                                rx_queue_id,
                                nb_rx_desc, socket_id,
                                rx_conf, mb_pool);
                if ((ret = fs_err(sdev, ret))) {
                        ERROR("RX queue setup failed for sub_device %d", i);
                        goto free_rxq;
                }
        }
        return 0;
free_rxq:
        fs_rx_queue_release(rxq);
        return ret;
}

static bool
fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
        uint64_t port_offloads;
        uint64_t queue_supp_offloads;
        uint64_t port_supp_offloads;

        port_offloads = dev->data->dev_conf.txmode.offloads;
        queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
        port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
        if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
             offloads)
                return false;
        /* Verify we have no conflict with port offloads */
        if ((port_offloads ^ offloads) & port_supp_offloads)
                return false;
        return true;
}

static void
fs_tx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct txq *txq;

        if (queue == NULL)
                return;
        txq = queue;
        dev = txq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, tx_queue_release)
                        (ETH(sdev)->data->tx_queues[txq->qid]);
        dev->data->tx_queues[txq->qid] = NULL;
        rte_free(txq);
}

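/*
 * Same logic as the Rx path: release any previous queue at this index,
 * check the offload request unless the application still uses the old
 * txq_flags API, then allocate the shadow Tx queue and create it on every
 * active sub-device.
 */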
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t tx_queue_id,
                uint16_t nb_tx_desc,
                unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf)
{
        struct sub_device *sdev;
        struct txq *txq;
        uint8_t i;
        int ret;

        txq = dev->data->tx_queues[tx_queue_id];
        if (txq != NULL) {
                fs_tx_queue_release(txq);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
        /*
         * Don't verify queue offloads for applications which
         * use the old API.
         */
        if (tx_conf != NULL &&
            (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
            fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
                rte_errno = ENOTSUP;
                ERROR("Tx queue offloads 0x%" PRIx64
                      " don't match port offloads 0x%" PRIx64
                      " or supported offloads 0x%" PRIx64,
                      tx_conf->offloads,
                      dev->data->dev_conf.txmode.offloads,
                      PRIV(dev)->infos.tx_offload_capa |
                      PRIV(dev)->infos.tx_queue_offload_capa);
                return -rte_errno;
        }
        txq = rte_zmalloc("ethdev TX queue",
                          sizeof(*txq) +
                          sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
                return -ENOMEM;
        FOREACH_SUBDEV(sdev, i, dev)
                rte_atomic64_init(&txq->refcnt[i]);
        txq->qid = tx_queue_id;
        txq->socket_id = socket_id;
        txq->info.conf = *tx_conf;
        txq->info.nb_desc = nb_tx_desc;
        txq->priv = PRIV(dev);
        dev->data->tx_queues[tx_queue_id] = txq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
                                tx_queue_id,
                                nb_tx_desc, socket_id,
                                tx_conf);
                if ((ret = fs_err(sdev, ret))) {
                        ERROR("TX queue setup failed for sub_device %d", i);
                        goto free_txq;
                }
        }
        return 0;
free_txq:
        fs_tx_queue_release(txq);
        return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
        uint16_t i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                fs_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                fs_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_disable(PORT_ID(sdev));
}

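/*
 * Refresh the link status of every active sub-device, ignoring failures on
 * sub-devices that are being removed, and report the link of the current Tx
 * sub-device as the fail-safe port link. Returns 0 when the reported link
 * changed, -1 otherwise.
 */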
static int
fs_link_update(struct rte_eth_dev *dev,
                int wait_to_complete)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling link_update on sub_device %d", i);
                ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
                if (ret && ret != -1 && sdev->remove == 0 &&
                    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
                        ERROR("Link update failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        if (TX_SUBDEV(dev)) {
                struct rte_eth_link *l1;
                struct rte_eth_link *l2;

                l1 = &dev->data->dev_link;
                l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
                if (memcmp(l1, l2, sizeof(*l1))) {
                        *l1 = *l2;
                        return 0;
                }
        }
        return -1;
}

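/*
 * Stats are the sum of the port-level accumulator and a snapshot taken from
 * each active sub-device. If reading a sub-device fails because it is being
 * removed, its previous snapshot is reused so the totals do not go
 * backwards.
 */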
static int
fs_stats_get(struct rte_eth_dev *dev,
             struct rte_eth_stats *stats)
{
        struct rte_eth_stats backup;
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
                uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

                rte_memcpy(&backup, snapshot, sizeof(backup));
                ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
                if (ret) {
                        if (!fs_err(sdev, ret)) {
                                rte_memcpy(snapshot, &backup, sizeof(backup));
                                goto inc;
                        }
                        ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
                              i, ret);
                        *timestamp = 0;
                        return ret;
                }
                *timestamp = rte_rdtsc();
inc:
                failsafe_stats_increment(stats, snapshot);
        }
        return 0;
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                rte_eth_stats_reset(PORT_ID(sdev));
                memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
        }
        memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerical limits:
 *      Use the maximum possible value for each field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those understood by the fail-safe PMD,
 *      i.e. those it is able to verify against the device
 *      configuration (struct rte_eth_conf) of a sub-device.
 *
 * At least one probed sub_device:
 *   Numerical limits:
 *      Use the values reported by the active probed sub_device.
 *      The rationale is that if any sub_device is less capable
 *      (for example regarding the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      This failure cannot be foreseen when the failing sub_device is
 *      plugged in later on, so the configuration step is the single
 *      point of failure and error reporting.
 *   Capabilities:
 *      Use the logical AND of the Rx capabilities of all sub_devices
 *      and the default capabilities.
 *      Use the logical AND of the Tx capabilities of the active probed
 *      sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
                  struct rte_eth_dev_info *infos)
{
        struct sub_device *sdev;
        uint8_t i;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL) {
                DEBUG("No probed device, using default infos");
                rte_memcpy(&PRIV(dev)->infos, &default_infos,
                           sizeof(default_infos));
        } else {
                uint64_t rx_offload_capa;
                uint64_t rxq_offload_capa;

                rx_offload_capa = default_infos.rx_offload_capa;
                rxq_offload_capa = default_infos.rx_queue_offload_capa;
                FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
                        rte_eth_dev_info_get(PORT_ID(sdev),
                                        &PRIV(dev)->infos);
                        rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
                        rxq_offload_capa &=
                                        PRIV(dev)->infos.rx_queue_offload_capa;
                }
                sdev = TX_SUBDEV(dev);
                rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
                PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
                PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
                PRIV(dev)->infos.tx_offload_capa &=
                                        default_infos.tx_offload_capa;
                PRIV(dev)->infos.tx_queue_offload_capa &=
                                        default_infos.tx_queue_offload_capa;
                PRIV(dev)->infos.flow_type_rss_offloads &=
                                        default_infos.flow_type_rss_offloads;
        }
        rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        struct rte_eth_dev *edev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return NULL;
        edev = ETH(sdev);
        /* ENOTSUP: counts as no supported ptypes */
        if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
                return NULL;
        /*
         * The API does not permit a clean AND of all ptypes; it is also
         * incomplete by design, and the best possible value does not
         * really matter in this context.
         * Simply return the ptypes of the highest-priority device,
         * usually the PREFERRED sub-device.
         */
        return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
                ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
                if ((ret = fs_err(sdev, ret))) {
                        ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
                ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
                if ((ret = fs_err(sdev, ret))) {
                        ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

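/*
 * Flow control settings are read only from the current Tx sub-device;
 * setting them, on the other hand, is applied to every active sub-device
 * (see fs_flow_ctrl_set below).
 */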
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return 0;
        if (SUBOPS(sdev, flow_ctrl_get) == NULL)
                return -ENOTSUP;
        return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
                ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
                if ((ret = fs_err(sdev, ret))) {
                        ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct sub_device *sdev;
        uint8_t i;

        /* No check: already done within the rte_eth_dev_mac_addr_remove
         * call for the fail-safe device.
         */
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
                                &dev->data->mac_addrs[index]);
        PRIV(dev)->mac_addr_pool[index] = 0;
}

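/*
 * Add the address on every active sub-device and record the VMDq pool in
 * mac_addr_pool so the entry can be re-applied to sub-devices that are
 * plugged in later.
 */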
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr,
                uint32_t index,
                uint32_t vmdq)
{
        struct sub_device *sdev;
        int ret;
        uint8_t i;

        RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
                if ((ret = fs_err(sdev, ret))) {
                        ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
                              PRIu8 " with error %d", i, ret);
                        return ret;
                }
        }
        if (index >= PRIV(dev)->nb_mac_addr) {
                DEBUG("Growing mac_addrs array");
                PRIV(dev)->nb_mac_addr = index;
        }
        PRIV(dev)->mac_addr_pool[index] = vmdq;
        return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

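/*
 * The generic filter GET operation exposes the fail-safe rte_flow ops;
 * every other filter request is forwarded to all active sub-devices.
 */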
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
                enum rte_filter_type type,
                enum rte_filter_op op,
                void *arg)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        if (type == RTE_ETH_FILTER_GENERIC &&
            op == RTE_ETH_FILTER_GET) {
                *(const void **)arg = &fs_flow_ops;
                return 0;
        }
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
                ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
                if ((ret = fs_err(sdev, ret))) {
                        ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

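/* Fail-safe control-path operations exposed to the ethdev layer. */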
const struct eth_dev_ops failsafe_ops = {
        .dev_configure = fs_dev_configure,
        .dev_start = fs_dev_start,
        .dev_stop = fs_dev_stop,
        .dev_set_link_down = fs_dev_set_link_down,
        .dev_set_link_up = fs_dev_set_link_up,
        .dev_close = fs_dev_close,
        .promiscuous_enable = fs_promiscuous_enable,
        .promiscuous_disable = fs_promiscuous_disable,
        .allmulticast_enable = fs_allmulticast_enable,
        .allmulticast_disable = fs_allmulticast_disable,
        .link_update = fs_link_update,
        .stats_get = fs_stats_get,
        .stats_reset = fs_stats_reset,
        .dev_infos_get = fs_dev_infos_get,
        .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
        .mtu_set = fs_mtu_set,
        .vlan_filter_set = fs_vlan_filter_set,
        .rx_queue_setup = fs_rx_queue_setup,
        .tx_queue_setup = fs_tx_queue_setup,
        .rx_queue_release = fs_rx_queue_release,
        .tx_queue_release = fs_tx_queue_release,
        .flow_ctrl_get = fs_flow_ctrl_get,
        .flow_ctrl_set = fs_flow_ctrl_set,
        .mac_addr_remove = fs_mac_addr_remove,
        .mac_addr_add = fs_mac_addr_add,
        .mac_addr_set = fs_mac_addr_set,
        .filter_ctrl = fs_filter_ctrl,
};