net/failsafe: add timestamp to stats snapshot
dpdk.git: drivers/net/failsafe/failsafe_ops.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
        /* Max possible number of elements */
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
        .max_hash_mac_addrs = UINT32_MAX,
        .max_vfs = UINT16_MAX,
        .max_vmdq_pools = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        /*
         * Set of capabilities that can be verified upon
         * configuring a sub-device.
         */
        .rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_TCP_LRO,
        .tx_offload_capa = 0x0,
        .flow_type_rss_offloads = 0x0,
};

/**
 * Check whether a specific offloading capability
 * is supported by a sub_device.
 *
 * @return
 *   0: all requested capabilities are supported by the sub_device
 *   positive value: this capability flag, at least, is not supported
 *   by the sub_device
 */
static int
fs_port_offload_validate(struct rte_eth_dev *dev,
                         struct sub_device *sdev)
{
        struct rte_eth_dev_info infos = {0};
        struct rte_eth_conf *cf;
        uint32_t cap;

        cf = &dev->data->dev_conf;
        SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
        /* RX capabilities */
        cap = infos.rx_offload_capa;
        if (cf->rxmode.hw_vlan_strip &&
            ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
                WARN("VLAN stripping offload requested but not supported by sub_device %d",
                     SUB_ID(sdev));
                return DEV_RX_OFFLOAD_VLAN_STRIP;
        }
        if (cf->rxmode.hw_ip_checksum &&
            ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
                     DEV_RX_OFFLOAD_UDP_CKSUM |
                     DEV_RX_OFFLOAD_TCP_CKSUM)) !=
             (DEV_RX_OFFLOAD_IPV4_CKSUM |
              DEV_RX_OFFLOAD_UDP_CKSUM |
              DEV_RX_OFFLOAD_TCP_CKSUM))) {
                WARN("IP checksum offload requested but not supported by sub_device %d",
                     SUB_ID(sdev));
                return DEV_RX_OFFLOAD_IPV4_CKSUM |
                       DEV_RX_OFFLOAD_UDP_CKSUM |
                       DEV_RX_OFFLOAD_TCP_CKSUM;
        }
        if (cf->rxmode.enable_lro &&
            ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
                WARN("TCP LRO offload requested but not supported by sub_device %d",
                     SUB_ID(sdev));
                return DEV_RX_OFFLOAD_TCP_LRO;
        }
        if (cf->rxmode.hw_vlan_extend &&
            ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
                WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
                     SUB_ID(sdev));
                return DEV_RX_OFFLOAD_QINQ_STRIP;
        }
        /* TX capabilities */
        /* Nothing to do, no tx capa supported */
        return 0;
}

/*
 * Disable the dev_conf option corresponding to an offload
 * capability flag within an ethdev configuration.
 */
static int
fs_port_disable_offload(struct rte_eth_conf *cf,
                        uint32_t ol_cap)
{
        switch (ol_cap) {
        case DEV_RX_OFFLOAD_VLAN_STRIP:
                INFO("Disabling VLAN stripping offload");
                cf->rxmode.hw_vlan_strip = 0;
                break;
        case DEV_RX_OFFLOAD_IPV4_CKSUM:
        case DEV_RX_OFFLOAD_UDP_CKSUM:
        case DEV_RX_OFFLOAD_TCP_CKSUM:
        case (DEV_RX_OFFLOAD_IPV4_CKSUM |
              DEV_RX_OFFLOAD_UDP_CKSUM |
              DEV_RX_OFFLOAD_TCP_CKSUM):
                INFO("Disabling IP checksum offload");
                cf->rxmode.hw_ip_checksum = 0;
                break;
        case DEV_RX_OFFLOAD_TCP_LRO:
                INFO("Disabling TCP LRO offload");
                cf->rxmode.enable_lro = 0;
                break;
        case DEV_RX_OFFLOAD_QINQ_STRIP:
                INFO("Disabling stacked VLAN stripping offload");
                cf->rxmode.hw_vlan_extend = 0;
                break;
        default:
                DEBUG("Unable to disable offload capability: %" PRIx32,
                      ol_cap);
                return -1;
        }
        return 0;
}

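/*
 * Two-pass configuration: first validate the requested offloads against
 * every probed sub_device, disabling unsupported ones in the fail-safe
 * dev_conf (refused once some sub_devices have already been configured);
 * then apply the resulting configuration to each sub_device, registering
 * RMV/LSC event callbacks where the underlying port advertises them.
 */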
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int capa_flag;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_PROBED)
                        continue;
                DEBUG("Checking capabilities for sub_device %d", i);
                while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
                        /*
                         * Refuse to change configuration if multiple devices
                         * are present and we already have configured at least
                         * some of them.
                         */
                        if (PRIV(dev)->state >= DEV_ACTIVE &&
                            PRIV(dev)->subs_tail > 1) {
                                ERROR("device already configured, cannot fix live configuration");
                                return -1;
                        }
                        ret = fs_port_disable_offload(&dev->data->dev_conf,
                                                      capa_flag);
                        if (ret) {
                                ERROR("Unable to disable offload capability");
                                return ret;
                        }
                }
        }
        FOREACH_SUBDEV(sdev, i, dev) {
                int rmv_interrupt = 0;
                int lsc_interrupt = 0;
                int lsc_enabled;

                if (sdev->state != DEV_PROBED)
                        continue;

                rmv_interrupt = ETH(sdev)->data->dev_flags &
                                RTE_ETH_DEV_INTR_RMV;
                if (rmv_interrupt) {
                        DEBUG("Enabling RMV interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.rmv = 1;
                } else {
                        DEBUG("sub_device %d does not support RMV event", i);
                }
                lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
                lsc_interrupt = lsc_enabled &&
                                (ETH(sdev)->data->dev_flags &
                                 RTE_ETH_DEV_INTR_LSC);
                if (lsc_interrupt) {
                        DEBUG("Enabling LSC interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.lsc = 1;
                } else if (lsc_enabled && !lsc_interrupt) {
                        DEBUG("Disabling LSC interrupts for sub_device %d", i);
                        dev->data->dev_conf.intr_conf.lsc = 0;
                }
                DEBUG("Configuring sub-device %d", i);
                sdev->remove = 0;
                ret = rte_eth_dev_configure(PORT_ID(sdev),
                                        dev->data->nb_rx_queues,
                                        dev->data->nb_tx_queues,
                                        &dev->data->dev_conf);
                if (ret) {
                        ERROR("Could not configure sub_device %d", i);
                        return ret;
                }
                if (rmv_interrupt) {
                        ret = rte_eth_dev_callback_register(PORT_ID(sdev),
                                        RTE_ETH_EVENT_INTR_RMV,
                                        failsafe_eth_rmv_event_callback,
                                        sdev);
                        if (ret)
                                WARN("Failed to register RMV callback for sub_device %d",
                                     SUB_ID(sdev));
                }
                dev->data->dev_conf.intr_conf.rmv = 0;
                if (lsc_interrupt) {
                        ret = rte_eth_dev_callback_register(PORT_ID(sdev),
                                                RTE_ETH_EVENT_INTR_LSC,
                                                failsafe_eth_lsc_event_callback,
                                                dev);
                        if (ret)
                                WARN("Failed to register LSC callback for sub_device %d",
                                     SUB_ID(sdev));
                }
                dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
                sdev->state = DEV_ACTIVE;
        }
        if (PRIV(dev)->state < DEV_ACTIVE)
                PRIV(dev)->state = DEV_ACTIVE;
        return 0;
}

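/*
 * Start every ACTIVE sub_device, then elect the current data-path
 * sub_device via fs_switch_dev().
 */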
static int
fs_dev_start(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_ACTIVE)
                        continue;
                DEBUG("Starting sub_device %d", i);
                ret = rte_eth_dev_start(PORT_ID(sdev));
                if (ret)
                        return ret;
                sdev->state = DEV_STARTED;
        }
        if (PRIV(dev)->state < DEV_STARTED)
                PRIV(dev)->state = DEV_STARTED;
        fs_switch_dev(dev, NULL);
        return 0;
}

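/*
 * Stop every STARTED sub_device. DEV_STARTED - 1 relies on the state
 * enum being ordered, stepping the state back to DEV_ACTIVE (judging
 * by the state checks elsewhere in this file).
 */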
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        PRIV(dev)->state = DEV_STARTED - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
                rte_eth_dev_stop(PORT_ID(sdev));
                sdev->state = DEV_STARTED - 1;
        }
}

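/*
 * Most of the ops below follow the same broadcast pattern: apply the
 * request to every ACTIVE sub_device and fail fast on the first error,
 * leaving already-updated sub_devices as they are.
 */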
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
                ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
                ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static void fs_dev_free_queues(struct rte_eth_dev *dev);
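
/*
 * Close the fail-safe port: cancel the hot-plug alarm, stop the port
 * if needed, close every ACTIVE sub_device and release the fail-safe
 * queue objects.
 */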
static void
fs_dev_close(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        failsafe_hotplug_alarm_cancel(dev);
        if (PRIV(dev)->state == DEV_STARTED)
                dev->dev_ops->dev_stop(dev);
        PRIV(dev)->state = DEV_ACTIVE - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Closing sub_device %d", i);
                rte_eth_dev_close(PORT_ID(sdev));
                sdev->state = DEV_ACTIVE - 1;
        }
        fs_dev_free_queues(dev);
}

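/*
 * Release an RX queue on every ACTIVE sub_device before freeing the
 * fail-safe queue object itself.
 */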
static void
fs_rx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct rxq *rxq;

        if (queue == NULL)
                return;
        rxq = queue;
        dev = rxq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, rx_queue_release)
                        (ETH(sdev)->data->rx_queues[rxq->qid]);
        dev->data->rx_queues[rxq->qid] = NULL;
        rte_free(rxq);
}

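/*
 * (Re)create an RX queue: release any previous instance, allocate a
 * fail-safe rxq carrying one reference counter per potential sub_device,
 * then mirror the setup on every ACTIVE sub_device.
 */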
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t rx_queue_id,
                uint16_t nb_rx_desc,
                unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool)
{
        struct sub_device *sdev;
        struct rxq *rxq;
        uint8_t i;
        int ret;

        rxq = dev->data->rx_queues[rx_queue_id];
        if (rxq != NULL) {
                fs_rx_queue_release(rxq);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
        rxq = rte_zmalloc(NULL,
                          sizeof(*rxq) +
                          sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL)
                return -ENOMEM;
        FOREACH_SUBDEV(sdev, i, dev)
                rte_atomic64_init(&rxq->refcnt[i]);
        rxq->qid = rx_queue_id;
        rxq->socket_id = socket_id;
        rxq->info.mp = mb_pool;
        rxq->info.conf = *rx_conf;
        rxq->info.nb_desc = nb_rx_desc;
        rxq->priv = PRIV(dev);
        dev->data->rx_queues[rx_queue_id] = rxq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
                                rx_queue_id,
                                nb_rx_desc, socket_id,
                                rx_conf, mb_pool);
                if (ret) {
                        ERROR("RX queue setup failed for sub_device %d", i);
                        goto free_rxq;
                }
        }
        return 0;
free_rxq:
        fs_rx_queue_release(rxq);
        return ret;
}

static void
fs_tx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct txq *txq;

        if (queue == NULL)
                return;
        txq = queue;
        dev = txq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, tx_queue_release)
                        (ETH(sdev)->data->tx_queues[txq->qid]);
        dev->data->tx_queues[txq->qid] = NULL;
        rte_free(txq);
}

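/*
 * TX counterpart of fs_rx_queue_setup(), following the same
 * release/allocate/mirror sequence.
 */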
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t tx_queue_id,
                uint16_t nb_tx_desc,
                unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf)
{
        struct sub_device *sdev;
        struct txq *txq;
        uint8_t i;
        int ret;

        txq = dev->data->tx_queues[tx_queue_id];
        if (txq != NULL) {
                fs_tx_queue_release(txq);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
        txq = rte_zmalloc("ethdev TX queue",
                          sizeof(*txq) +
                          sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
                return -ENOMEM;
        FOREACH_SUBDEV(sdev, i, dev)
                rte_atomic64_init(&txq->refcnt[i]);
        txq->qid = tx_queue_id;
        txq->socket_id = socket_id;
        txq->info.conf = *tx_conf;
        txq->info.nb_desc = nb_tx_desc;
        txq->priv = PRIV(dev);
        dev->data->tx_queues[tx_queue_id] = txq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
                                tx_queue_id,
                                nb_tx_desc, socket_id,
                                tx_conf);
                if (ret) {
                        ERROR("TX queue setup failed for sub_device %d", i);
                        goto free_txq;
                }
        }
        return 0;
free_txq:
        fs_tx_queue_release(txq);
        return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
        uint16_t i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                fs_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                fs_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_disable(PORT_ID(sdev));
}

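/*
 * Poll link_update on every ACTIVE sub_device, then report the link of
 * the current TX sub_device as the fail-safe link. A -1 return from a
 * sub_device is tolerated below; it appears to be treated as "link
 * unchanged" rather than as a failure.
 */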
static int
fs_link_update(struct rte_eth_dev *dev,
                int wait_to_complete)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling link_update on sub_device %d", i);
                ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
                if (ret && ret != -1) {
                        ERROR("Link update failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        if (TX_SUBDEV(dev)) {
                struct rte_eth_link *l1;
                struct rte_eth_link *l2;

                l1 = &dev->data->dev_link;
                l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
                if (memcmp(l1, l2, sizeof(*l1))) {
                        *l1 = *l2;
                        return 0;
                }
        }
        return -1;
}

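/*
 * Stats are the sum of the accumulator (counters inherited from removed
 * sub_devices) and a fresh snapshot of every ACTIVE sub_device. Each
 * successful snapshot is stamped with rte_rdtsc(), presumably so that
 * on a later removal the driver can tell how stale the last coherent
 * numbers are; a failed read zeroes the timestamp to mark the snapshot
 * as invalid.
 */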
static int
fs_stats_get(struct rte_eth_dev *dev,
             struct rte_eth_stats *stats)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
                uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

                ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
                if (ret) {
                        ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
                              i, ret);
                        *timestamp = 0;
                        return ret;
                }
                *timestamp = rte_rdtsc();
                failsafe_stats_increment(stats, snapshot);
        }
        return 0;
}

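/*
 * A minimal sketch (not part of the driver) of how the snapshot
 * timestamp could be consumed, assuming stats_snapshot pairs the
 * counters with the TSC value taken in fs_stats_get() above:
 *
 *     uint64_t age = rte_rdtsc() - sdev->stats_snapshot.timestamp;
 *     double age_sec = (double)age / rte_get_tsc_hz();
 *
 * rte_get_tsc_hz() is the <rte_cycles.h> helper matching rte_rdtsc().
 */

/*
 * Reset every ACTIVE sub_device, its snapshot and the accumulator.
 * Note that the memset below clears only the rte_eth_stats part of
 * each snapshot; the timestamp field is left as it was.
 */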
static void
fs_stats_reset(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                rte_eth_stats_reset(PORT_ID(sdev));
                memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
        }
        memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numeric limits:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those that the fail-safe PMD understands,
 *      meaning those whose presence it can verify within the device
 *      configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numeric limits:
 *      Use the values of the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing
 *      sub_device is supposed to be plugged in later on, so the
 *      configuration process is the single point of failure and
 *      error reporting.
 *   Capabilities:
 *      Use a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Use a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
                  struct rte_eth_dev_info *infos)
{
        struct sub_device *sdev;
        uint8_t i;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL) {
                DEBUG("No probed device, using default infos");
                rte_memcpy(&PRIV(dev)->infos, &default_infos,
                           sizeof(default_infos));
        } else {
                uint32_t rx_offload_capa;

                rx_offload_capa = default_infos.rx_offload_capa;
                FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
                        rte_eth_dev_info_get(PORT_ID(sdev),
                                        &PRIV(dev)->infos);
                        rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
                }
                sdev = TX_SUBDEV(dev);
                rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
                PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
                PRIV(dev)->infos.tx_offload_capa &=
                                        default_infos.tx_offload_capa;
                PRIV(dev)->infos.flow_type_rss_offloads &=
                                        default_infos.flow_type_rss_offloads;
        }
        rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        struct rte_eth_dev *edev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return NULL;
        edev = ETH(sdev);
        /* ENOTSUP: counts as no supported ptypes */
        if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
                return NULL;
        /*
         * The API does not permit a clean AND of all ptypes.
         * It is also incomplete by design, and we do not really need
         * the best possible value in this context.
         * We just return the ptypes of the device of highest
         * priority, usually the PREFERRED device.
         */
        return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
                ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
                ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
                if (ret) {
                        ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return 0;
        if (SUBOPS(sdev, flow_ctrl_get) == NULL)
                return -ENOTSUP;
        return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
                ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
                if (ret) {
                        ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct sub_device *sdev;
        uint8_t i;

        /* No check: already done within the rte_eth_dev_mac_addr_remove
         * call for the fail-safe device.
         */
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
                                &dev->data->mac_addrs[index]);
        PRIV(dev)->mac_addr_pool[index] = 0;
}

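/*
 * Add a MAC address on every ACTIVE sub_device and record the VMDq pool
 * in mac_addr_pool[], likely so the address can be replayed on
 * sub_devices plugged in later.
 */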
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr,
                uint32_t index,
                uint32_t vmdq)
{
        struct sub_device *sdev;
        int ret;
        uint8_t i;

        RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
                if (ret) {
                        ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
                              PRIu8 " with error %d", i, ret);
                        return ret;
                }
        }
        if (index >= PRIV(dev)->nb_mac_addr) {
                DEBUG("Growing mac_addrs array");
                PRIV(dev)->nb_mac_addr = index;
        }
        PRIV(dev)->mac_addr_pool[index] = vmdq;
        return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

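/*
 * Intercept the generic flow API query to expose the fail-safe's own
 * rte_flow ops; any other filter operation is broadcast to the ACTIVE
 * sub_devices.
 */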
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
                enum rte_filter_type type,
                enum rte_filter_op op,
                void *arg)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        if (type == RTE_ETH_FILTER_GENERIC &&
            op == RTE_ETH_FILTER_GET) {
                *(const void **)arg = &fs_flow_ops;
                return 0;
        }
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
                ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
                if (ret) {
                        ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

const struct eth_dev_ops failsafe_ops = {
        .dev_configure = fs_dev_configure,
        .dev_start = fs_dev_start,
        .dev_stop = fs_dev_stop,
        .dev_set_link_down = fs_dev_set_link_down,
        .dev_set_link_up = fs_dev_set_link_up,
        .dev_close = fs_dev_close,
        .promiscuous_enable = fs_promiscuous_enable,
        .promiscuous_disable = fs_promiscuous_disable,
        .allmulticast_enable = fs_allmulticast_enable,
        .allmulticast_disable = fs_allmulticast_disable,
        .link_update = fs_link_update,
        .stats_get = fs_stats_get,
        .stats_reset = fs_stats_reset,
        .dev_infos_get = fs_dev_infos_get,
        .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
        .mtu_set = fs_mtu_set,
        .vlan_filter_set = fs_vlan_filter_set,
        .rx_queue_setup = fs_rx_queue_setup,
        .tx_queue_setup = fs_tx_queue_setup,
        .rx_queue_release = fs_rx_queue_release,
        .tx_queue_release = fs_tx_queue_release,
        .flow_ctrl_get = fs_flow_ctrl_get,
        .flow_ctrl_set = fs_flow_ctrl_set,
        .mac_addr_remove = fs_mac_addr_remove,
        .mac_addr_add = fs_mac_addr_add,
        .mac_addr_set = fs_mac_addr_set,
        .filter_ctrl = fs_filter_ctrl,
};