net/failsafe: support Rx offload capabilities
drivers/net/failsafe/failsafe_ops.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>

#include "failsafe_private.h"

static struct rte_eth_dev_info default_infos = {
        /* Max possible number of elements */
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
        .max_hash_mac_addrs = UINT32_MAX,
        .max_vfs = UINT16_MAX,
        .max_vmdq_pools = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        /*
         * Set of capabilities that can be verified upon
         * configuring a sub-device.
         */
        .rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_TCP_LRO,
        .tx_offload_capa = 0x0,
        .flow_type_rss_offloads = 0x0,
};

/**
 * Check whether the offloading capabilities requested in the device
 * configuration are supported by a sub_device.
 *
 * @return
 *   0 if all requested capabilities are supported by the sub_device,
 *   otherwise the offload flag(s) that the sub_device does not support.
 */
static int
fs_port_offload_validate(struct rte_eth_dev *dev,
                         struct sub_device *sdev)
{
        struct rte_eth_dev_info infos = {0};
        struct rte_eth_conf *cf;
        uint32_t cap;

        cf = &dev->data->dev_conf;
        SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
        /* RX capabilities */
        cap = infos.rx_offload_capa;
        if (cf->rxmode.hw_vlan_strip &&
            ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
                WARN("VLAN stripping offload requested but not supported by sub_device %d",
                      SUB_ID(sdev));
                return DEV_RX_OFFLOAD_VLAN_STRIP;
        }
        if (cf->rxmode.hw_ip_checksum &&
            ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
                     DEV_RX_OFFLOAD_UDP_CKSUM |
                     DEV_RX_OFFLOAD_TCP_CKSUM)) !=
             (DEV_RX_OFFLOAD_IPV4_CKSUM |
              DEV_RX_OFFLOAD_UDP_CKSUM |
              DEV_RX_OFFLOAD_TCP_CKSUM))) {
                WARN("IP checksum offload requested but not supported by sub_device %d",
                      SUB_ID(sdev));
                return DEV_RX_OFFLOAD_IPV4_CKSUM |
                       DEV_RX_OFFLOAD_UDP_CKSUM |
                       DEV_RX_OFFLOAD_TCP_CKSUM;
        }
        if (cf->rxmode.enable_lro &&
            ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
                WARN("TCP LRO offload requested but not supported by sub_device %d",
                      SUB_ID(sdev));
                return DEV_RX_OFFLOAD_TCP_LRO;
        }
        if (cf->rxmode.hw_vlan_extend &&
            ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
                WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
                      SUB_ID(sdev));
                return DEV_RX_OFFLOAD_QINQ_STRIP;
        }
        /* TX capabilities: nothing to validate, as none are supported yet */
        return 0;
}

/*
 * Disable the dev_conf flag corresponding to an offload capability
 * within an ethdev configuration.
 */
static int
fs_port_disable_offload(struct rte_eth_conf *cf,
                        uint32_t ol_cap)
{
        switch (ol_cap) {
        case DEV_RX_OFFLOAD_VLAN_STRIP:
                INFO("Disabling VLAN stripping offload");
                cf->rxmode.hw_vlan_strip = 0;
                break;
        case DEV_RX_OFFLOAD_IPV4_CKSUM:
        case DEV_RX_OFFLOAD_UDP_CKSUM:
        case DEV_RX_OFFLOAD_TCP_CKSUM:
        case (DEV_RX_OFFLOAD_IPV4_CKSUM |
              DEV_RX_OFFLOAD_UDP_CKSUM |
              DEV_RX_OFFLOAD_TCP_CKSUM):
                INFO("Disabling IP checksum offload");
                cf->rxmode.hw_ip_checksum = 0;
                break;
        case DEV_RX_OFFLOAD_TCP_LRO:
                INFO("Disabling TCP LRO offload");
                cf->rxmode.enable_lro = 0;
                break;
        case DEV_RX_OFFLOAD_QINQ_STRIP:
                INFO("Disabling stacked VLAN stripping offload");
                cf->rxmode.hw_vlan_extend = 0;
                break;
        default:
                DEBUG("Unable to disable offload capability: %" PRIx32,
                      ol_cap);
                return -1;
        }
        return 0;
}

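/*
 * Configure the fail-safe device and all its probed sub-devices.
 *
 * First pass: validate the requested offloads against each probed
 * sub-device; unsupported offloads are dropped from the configuration,
 * but only as long as no sub-device has been configured yet.
 * Second pass: propagate the (possibly amended) configuration to the
 * sub-devices.
 */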
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int capa_flag;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_PROBED)
                        continue;
                DEBUG("Checking capabilities for sub_device %d", i);
                while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
                        /*
                         * Refuse to change configuration if multiple devices
                         * are present and we already have configured at least
                         * some of them.
                         */
                        if (PRIV(dev)->state >= DEV_ACTIVE &&
                            PRIV(dev)->subs_tail > 1) {
                                ERROR("device already configured, cannot fix live configuration");
                                return -1;
                        }
                        ret = fs_port_disable_offload(&dev->data->dev_conf,
                                                      capa_flag);
                        if (ret) {
                                ERROR("Unable to disable offload capability");
                                return ret;
                        }
                }
        }
        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_PROBED)
                        continue;
                DEBUG("Configuring sub-device %d", i);
                ret = rte_eth_dev_configure(PORT_ID(sdev),
                                        dev->data->nb_rx_queues,
                                        dev->data->nb_tx_queues,
                                        &dev->data->dev_conf);
                if (ret) {
                        ERROR("Could not configure sub_device %d", i);
                        return ret;
                }
                sdev->state = DEV_ACTIVE;
        }
        if (PRIV(dev)->state < DEV_ACTIVE)
                PRIV(dev)->state = DEV_ACTIVE;
        return 0;
}

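/*
 * Start every active sub-device, then re-evaluate which sub-device
 * carries the data path (fs_switch_dev).
 */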
static int
fs_dev_start(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_ACTIVE)
                        continue;
                DEBUG("Starting sub_device %d", i);
                ret = rte_eth_dev_start(PORT_ID(sdev));
                if (ret)
                        return ret;
                sdev->state = DEV_STARTED;
        }
        if (PRIV(dev)->state < DEV_STARTED)
                PRIV(dev)->state = DEV_STARTED;
        fs_switch_dev(dev);
        return 0;
}

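/*
 * Stop all started sub-devices and step their state (and the fail-safe
 * device's own state) back one notch from DEV_STARTED.
 */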
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        PRIV(dev)->state = DEV_STARTED - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
                rte_eth_dev_stop(PORT_ID(sdev));
                sdev->state = DEV_STARTED - 1;
        }
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
                ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
                ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

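/*
 * Close the fail-safe device: cancel the hot-plug alarm, stop the
 * device if it is still started, close every active sub-device and
 * release all queues.
 */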
static void fs_dev_free_queues(struct rte_eth_dev *dev);

static void
fs_dev_close(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        failsafe_hotplug_alarm_cancel(dev);
        if (PRIV(dev)->state == DEV_STARTED)
                dev->dev_ops->dev_stop(dev);
        PRIV(dev)->state = DEV_ACTIVE - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Closing sub_device %d", i);
                rte_eth_dev_close(PORT_ID(sdev));
                sdev->state = DEV_ACTIVE - 1;
        }
        fs_dev_free_queues(dev);
}

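/*
 * Release an RX queue on every active sub-device before freeing the
 * fail-safe shadow structure. The TX counterpart below is symmetric.
 */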
static void
fs_rx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct rxq *rxq;

        if (queue == NULL)
                return;
        rxq = queue;
        dev = rxq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, rx_queue_release)
                        (ETH(sdev)->data->rx_queues[rxq->qid]);
        dev->data->rx_queues[rxq->qid] = NULL;
        rte_free(rxq);
}

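/*
 * Set up an RX queue: record its parameters in a shadow rxq structure,
 * so that they can be re-applied to sub-devices plugged in later, then
 * create the queue on every active sub-device.
 */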
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t rx_queue_id,
                uint16_t nb_rx_desc,
                unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool)
{
        struct sub_device *sdev;
        struct rxq *rxq;
        uint8_t i;
        int ret;

        rxq = dev->data->rx_queues[rx_queue_id];
        if (rxq != NULL) {
                fs_rx_queue_release(rxq);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
        rxq = rte_zmalloc(NULL, sizeof(*rxq),
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL)
                return -ENOMEM;
        rxq->qid = rx_queue_id;
        rxq->socket_id = socket_id;
        rxq->info.mp = mb_pool;
        rxq->info.conf = *rx_conf;
        rxq->info.nb_desc = nb_rx_desc;
        rxq->priv = PRIV(dev);
        dev->data->rx_queues[rx_queue_id] = rxq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
                                rx_queue_id,
                                nb_rx_desc, socket_id,
                                rx_conf, mb_pool);
                if (ret) {
                        ERROR("RX queue setup failed for sub_device %d", i);
                        goto free_rxq;
                }
        }
        return 0;
free_rxq:
        fs_rx_queue_release(rxq);
        return ret;
}

static void
fs_tx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct txq *txq;

        if (queue == NULL)
                return;
        txq = queue;
        dev = txq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, tx_queue_release)
                        (ETH(sdev)->data->tx_queues[txq->qid]);
        dev->data->tx_queues[txq->qid] = NULL;
        rte_free(txq);
}

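/* TX counterpart of fs_rx_queue_setup(). */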
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t tx_queue_id,
                uint16_t nb_tx_desc,
                unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf)
{
        struct sub_device *sdev;
        struct txq *txq;
        uint8_t i;
        int ret;

        txq = dev->data->tx_queues[tx_queue_id];
        if (txq != NULL) {
                fs_tx_queue_release(txq);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
        txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
                return -ENOMEM;
        txq->qid = tx_queue_id;
        txq->socket_id = socket_id;
        txq->info.conf = *tx_conf;
        txq->info.nb_desc = nb_tx_desc;
        txq->priv = PRIV(dev);
        dev->data->tx_queues[tx_queue_id] = txq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
                                tx_queue_id,
                                nb_tx_desc, socket_id,
                                tx_conf);
                if (ret) {
                        ERROR("TX queue setup failed for sub_device %d", i);
                        goto free_txq;
                }
        }
        return 0;
free_txq:
        fs_tx_queue_release(txq);
        return ret;
}

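/* Release every RX and TX queue; called upon closing the device. */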
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
        uint16_t i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                fs_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                fs_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_disable(PORT_ID(sdev));
}

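/*
 * Update the link status of every active sub-device, then mirror the
 * link of the current TX sub-device into the fail-safe device.
 * Follows the link_update convention: returns 0 when the link status
 * changed, -1 otherwise.
 */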
static int
fs_link_update(struct rte_eth_dev *dev,
                int wait_to_complete)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling link_update on sub_device %d", i);
                ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
                if (ret && ret != -1) {
                        ERROR("Link update failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        if (TX_SUBDEV(dev)) {
                struct rte_eth_link *l1;
                struct rte_eth_link *l2;

                l1 = &dev->data->dev_link;
                l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
                if (memcmp(l1, l2, sizeof(*l1))) {
                        *l1 = *l2;
                        return 0;
                }
        }
        return -1;
}

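/* Report the statistics of the current TX sub-device only. */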
static void
fs_stats_get(struct rte_eth_dev *dev,
             struct rte_eth_stats *stats)
{
        if (TX_SUBDEV(dev) == NULL)
                return;
        rte_eth_stats_get(PORT_ID(TX_SUBDEV(dev)), stats);
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_stats_reset(PORT_ID(sdev));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numeric limits:
 *      Use the maximum possible value for each field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those understood by the fail-safe PMD,
 *      i.e. those it is able to verify against the device
 *      configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numeric limits:
 *      Use the values of the active probed sub_device.
 *      The rationale is that if any sub_device is less capable
 *      (for example regarding the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      This failure cannot be foreseen when the failing sub_device
 *      is supposed to be plugged in later on, so the configuration
 *      process is the single point of failure and error reporting.
 *   Capabilities:
 *      Use the logical AND of the RX capabilities of all sub_devices
 *      and the default capabilities.
 *      Use the logical AND of the TX capabilities of the active
 *      probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
                  struct rte_eth_dev_info *infos)
{
        struct sub_device *sdev;
        uint8_t i;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL) {
                DEBUG("No probed device, using default infos");
                rte_memcpy(&PRIV(dev)->infos, &default_infos,
                           sizeof(default_infos));
        } else {
                uint32_t rx_offload_capa;

                rx_offload_capa = default_infos.rx_offload_capa;
                FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
                        rte_eth_dev_info_get(PORT_ID(sdev),
                                        &PRIV(dev)->infos);
                        rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
                }
                sdev = TX_SUBDEV(dev);
                rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
                PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
                PRIV(dev)->infos.tx_offload_capa &=
                                        default_infos.tx_offload_capa;
                PRIV(dev)->infos.flow_type_rss_offloads &=
                                        default_infos.flow_type_rss_offloads;
        }
        rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        struct rte_eth_dev *edev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return NULL;
        edev = ETH(sdev);
        /* ENOTSUP: counts as no supported ptypes */
        if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
                return NULL;
        /*
         * The API does not allow a clean AND of all ptypes.
         * It is also incomplete by design, and we do not really need
         * the best possible value in this context.
         * We simply return the ptypes of the device of highest
         * priority, usually the PREFERRED device.
         */
        return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
                ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
                ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
                if (ret) {
                        ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

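/* Flow control configuration is read from the current TX sub-device. */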
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return 0;
        if (SUBOPS(sdev, flow_ctrl_get) == NULL)
                return -ENOTSUP;
        return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
                ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
                if (ret) {
                        ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct sub_device *sdev;
        uint8_t i;

        /* No index check needed here: it was already done by the
         * rte_eth_dev_mac_addr_remove call on the fail-safe device.
         */
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
                                &dev->data->mac_addrs[index]);
        PRIV(dev)->mac_addr_pool[index] = 0;
}

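/*
 * Add a MAC address to every active sub-device and record it in the
 * fail-safe MAC address pool, so that it can be re-applied to
 * sub-devices plugged in later.
 */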
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr,
                uint32_t index,
                uint32_t vmdq)
{
        struct sub_device *sdev;
        int ret;
        uint8_t i;

        RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
                if (ret) {
                        ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
                              PRIu8 " with error %d", i, ret);
                        return ret;
                }
        }
        if (index >= PRIV(dev)->nb_mac_addr) {
                DEBUG("Growing mac_addrs array");
                PRIV(dev)->nb_mac_addr = index;
        }
        PRIV(dev)->mac_addr_pool[index] = vmdq;
        return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

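/*
 * Filter control: queries for the generic flow API are answered with
 * the fail-safe flow operations; any other request is propagated to
 * every active sub-device.
 */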
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
                enum rte_filter_type type,
                enum rte_filter_op op,
                void *arg)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        if (type == RTE_ETH_FILTER_GENERIC &&
            op == RTE_ETH_FILTER_GET) {
                *(const void **)arg = &fs_flow_ops;
                return 0;
        }
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
                ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
                if (ret) {
                        ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

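/*
 * Ethdev operations exposed by the fail-safe PMD. Most callbacks fan
 * out to the sub-devices; a few (stats_get, flow_ctrl_get,
 * dev_supported_ptypes_get) rely on the current TX sub-device alone.
 */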
const struct eth_dev_ops failsafe_ops = {
        .dev_configure = fs_dev_configure,
        .dev_start = fs_dev_start,
        .dev_stop = fs_dev_stop,
        .dev_set_link_down = fs_dev_set_link_down,
        .dev_set_link_up = fs_dev_set_link_up,
        .dev_close = fs_dev_close,
        .promiscuous_enable = fs_promiscuous_enable,
        .promiscuous_disable = fs_promiscuous_disable,
        .allmulticast_enable = fs_allmulticast_enable,
        .allmulticast_disable = fs_allmulticast_disable,
        .link_update = fs_link_update,
        .stats_get = fs_stats_get,
        .stats_reset = fs_stats_reset,
        .dev_infos_get = fs_dev_infos_get,
        .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
        .mtu_set = fs_mtu_set,
        .vlan_filter_set = fs_vlan_filter_set,
        .rx_queue_setup = fs_rx_queue_setup,
        .tx_queue_setup = fs_tx_queue_setup,
        .rx_queue_release = fs_rx_queue_release,
        .tx_queue_release = fs_tx_queue_release,
        .flow_ctrl_get = fs_flow_ctrl_get,
        .flow_ctrl_set = fs_flow_ctrl_set,
        .mac_addr_remove = fs_mac_addr_remove,
        .mac_addr_add = fs_mac_addr_add,
        .mac_addr_set = fs_mac_addr_set,
        .filter_ctrl = fs_filter_ctrl,
};