net/failsafe: support flow API
[dpdk.git] / drivers / net / failsafe / failsafe_ops.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2017 6WIND S.A.
5  *   Copyright 2017 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdint.h>
35
36 #include <rte_debug.h>
37 #include <rte_ethdev.h>
38 #include <rte_malloc.h>
39 #include <rte_flow.h>
40
41 #include "failsafe_private.h"
42
/*
 * Device information reported while no sub-device has been probed:
 * numerable limits are maximized so as not to constrain a later
 * configuration, while capabilities are restricted to what the
 * fail-safe PMD itself understands (see fs_dev_infos_get).
 */
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/* Set of understood capabilities */
	.rx_offload_capa = 0x0,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};
71
/*
 * Propagate the fail-safe device configuration to every probed
 * sub-device, promoting each from DEV_PROBED to DEV_ACTIVE.
 * A sub-device failure aborts the whole operation; sub-devices
 * configured before the failure stay DEV_ACTIVE.
 */
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_PROBED)
			continue;
		DEBUG("Configuring sub-device %d", i);
		/* Replay the fail-safe port configuration verbatim. */
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		sdev->state = DEV_ACTIVE;
	}
	/* The fail-safe device itself is now at least DEV_ACTIVE. */
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}
97
/*
 * Start every configured (DEV_ACTIVE) sub-device, promoting each to
 * DEV_STARTED, then re-elect the preferred sub-device for the datapath.
 * A sub-device start failure aborts immediately, leaving earlier
 * sub-devices started.
 */
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret)
			return ret;
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	/* Pick the highest-priority started sub-device for TX/RX. */
	fs_switch_dev(dev);
	return 0;
}
119
120 static void
121 fs_dev_stop(struct rte_eth_dev *dev)
122 {
123         struct sub_device *sdev;
124         uint8_t i;
125
126         PRIV(dev)->state = DEV_STARTED - 1;
127         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
128                 rte_eth_dev_stop(PORT_ID(sdev));
129                 sdev->state = DEV_STARTED - 1;
130         }
131 }
132
133 static int
134 fs_dev_set_link_up(struct rte_eth_dev *dev)
135 {
136         struct sub_device *sdev;
137         uint8_t i;
138         int ret;
139
140         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
141                 DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
142                 ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
143                 if (ret) {
144                         ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
145                               " with error %d", i, ret);
146                         return ret;
147                 }
148         }
149         return 0;
150 }
151
152 static int
153 fs_dev_set_link_down(struct rte_eth_dev *dev)
154 {
155         struct sub_device *sdev;
156         uint8_t i;
157         int ret;
158
159         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
160                 DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
161                 ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
162                 if (ret) {
163                         ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
164                               " with error %d", i, ret);
165                         return ret;
166                 }
167         }
168         return 0;
169 }
170
/* Defined below; needed here to reclaim queue wrappers on close. */
static void fs_dev_free_queues(struct rte_eth_dev *dev);
/*
 * Close the fail-safe device: cancel the hotplug alarm, stop if
 * started, close every active sub-device, then free the queue
 * wrappers allocated by the setup callbacks.
 */
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	/* Prevent the hotplug alarm from racing with the teardown. */
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	/* Demote the fail-safe device below DEV_ACTIVE. */
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}
189
190 static void
191 fs_rx_queue_release(void *queue)
192 {
193         struct rte_eth_dev *dev;
194         struct sub_device *sdev;
195         uint8_t i;
196         struct rxq *rxq;
197
198         if (queue == NULL)
199                 return;
200         rxq = queue;
201         dev = rxq->priv->dev;
202         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
203                 SUBOPS(sdev, rx_queue_release)
204                         (ETH(sdev)->data->rx_queues[rxq->qid]);
205         dev->data->rx_queues[rxq->qid] = NULL;
206         rte_free(rxq);
207 }
208
209 static int
210 fs_rx_queue_setup(struct rte_eth_dev *dev,
211                 uint16_t rx_queue_id,
212                 uint16_t nb_rx_desc,
213                 unsigned int socket_id,
214                 const struct rte_eth_rxconf *rx_conf,
215                 struct rte_mempool *mb_pool)
216 {
217         struct sub_device *sdev;
218         struct rxq *rxq;
219         uint8_t i;
220         int ret;
221
222         rxq = dev->data->rx_queues[rx_queue_id];
223         if (rxq != NULL) {
224                 fs_rx_queue_release(rxq);
225                 dev->data->rx_queues[rx_queue_id] = NULL;
226         }
227         rxq = rte_zmalloc(NULL, sizeof(*rxq),
228                           RTE_CACHE_LINE_SIZE);
229         if (rxq == NULL)
230                 return -ENOMEM;
231         rxq->qid = rx_queue_id;
232         rxq->socket_id = socket_id;
233         rxq->info.mp = mb_pool;
234         rxq->info.conf = *rx_conf;
235         rxq->info.nb_desc = nb_rx_desc;
236         rxq->priv = PRIV(dev);
237         dev->data->rx_queues[rx_queue_id] = rxq;
238         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
239                 ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
240                                 rx_queue_id,
241                                 nb_rx_desc, socket_id,
242                                 rx_conf, mb_pool);
243                 if (ret) {
244                         ERROR("RX queue setup failed for sub_device %d", i);
245                         goto free_rxq;
246                 }
247         }
248         return 0;
249 free_rxq:
250         fs_rx_queue_release(rxq);
251         return ret;
252 }
253
254 static void
255 fs_tx_queue_release(void *queue)
256 {
257         struct rte_eth_dev *dev;
258         struct sub_device *sdev;
259         uint8_t i;
260         struct txq *txq;
261
262         if (queue == NULL)
263                 return;
264         txq = queue;
265         dev = txq->priv->dev;
266         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
267                 SUBOPS(sdev, tx_queue_release)
268                         (ETH(sdev)->data->tx_queues[txq->qid]);
269         dev->data->tx_queues[txq->qid] = NULL;
270         rte_free(txq);
271 }
272
/*
 * Set up a TX queue: allocate a wrapper recording the parameters
 * (so they can be replayed on sub-device hotplug), then configure the
 * queue on every active sub-device. On any sub-device failure, the
 * wrapper and already-configured sub-device queues are released.
 */
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	/* Re-setup: drop any previous wrapper and sub-device queues. */
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if (ret) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	/* Also clears dev->data->tx_queues[tx_queue_id]. */
	fs_tx_queue_release(txq);
	return ret;
}
315
316 static void
317 fs_dev_free_queues(struct rte_eth_dev *dev)
318 {
319         uint16_t i;
320
321         for (i = 0; i < dev->data->nb_rx_queues; i++) {
322                 fs_rx_queue_release(dev->data->rx_queues[i]);
323                 dev->data->rx_queues[i] = NULL;
324         }
325         dev->data->nb_rx_queues = 0;
326         for (i = 0; i < dev->data->nb_tx_queues; i++) {
327                 fs_tx_queue_release(dev->data->tx_queues[i]);
328                 dev->data->tx_queues[i] = NULL;
329         }
330         dev->data->nb_tx_queues = 0;
331 }
332
333 static void
334 fs_promiscuous_enable(struct rte_eth_dev *dev)
335 {
336         struct sub_device *sdev;
337         uint8_t i;
338
339         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
340                 rte_eth_promiscuous_enable(PORT_ID(sdev));
341 }
342
343 static void
344 fs_promiscuous_disable(struct rte_eth_dev *dev)
345 {
346         struct sub_device *sdev;
347         uint8_t i;
348
349         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
350                 rte_eth_promiscuous_disable(PORT_ID(sdev));
351 }
352
353 static void
354 fs_allmulticast_enable(struct rte_eth_dev *dev)
355 {
356         struct sub_device *sdev;
357         uint8_t i;
358
359         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
360                 rte_eth_allmulticast_enable(PORT_ID(sdev));
361 }
362
363 static void
364 fs_allmulticast_disable(struct rte_eth_dev *dev)
365 {
366         struct sub_device *sdev;
367         uint8_t i;
368
369         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
370                 rte_eth_allmulticast_disable(PORT_ID(sdev));
371 }
372
/*
 * Refresh the link status of every active sub-device, then mirror the
 * active TX sub-device's link state into the fail-safe device.
 * Follows the link_update convention: returns 0 when the link state
 * changed, -1 when it did not (or no sub-device is active).
 */
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		/* -1 means "link state unchanged", not an error. */
		if (ret && ret != -1) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		/* Copy only on change so the return value is accurate. */
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}
403
404 static void
405 fs_stats_get(struct rte_eth_dev *dev,
406              struct rte_eth_stats *stats)
407 {
408         if (TX_SUBDEV(dev) == NULL)
409                 return;
410         rte_eth_stats_get(PORT_ID(TX_SUBDEV(dev)), stats);
411 }
412
413 static void
414 fs_stats_reset(struct rte_eth_dev *dev)
415 {
416         struct sub_device *sdev;
417         uint8_t i;
418
419         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
420                 rte_eth_stats_reset(PORT_ID(sdev));
421 }
422
423 /**
424  * Fail-safe dev_infos_get rules:
425  *
426  * No sub_device:
427  *   Numerables:
428  *      Use the maximum possible values for any field, so as not
429  *      to impede any further configuration effort.
430  *   Capabilities:
431  *      Limits capabilities to those that are understood by the
432  *      fail-safe PMD. This understanding stems from the fail-safe
433  *      being capable of verifying that the related capability is
434  *      expressed within the device configuration (struct rte_eth_conf).
435  *
436  * At least one probed sub_device:
437  *   Numerables:
438  *      Uses values from the active probed sub_device
439  *      The rationale here is that if any sub_device is less capable
440  *      (for example concerning the number of queues) than the active
441  *      sub_device, then its subsequent configuration will fail.
442  *      It is impossible to foresee this failure when the failing sub_device
443  *      is supposed to be plugged-in later on, so the configuration process
444  *      is the single point of failure and error reporting.
445  *   Capabilities:
446  *      Uses a logical AND of RX capabilities among
447  *      all sub_devices and the default capabilities.
448  *      Uses a logical AND of TX capabilities among
449  *      the active probed sub_device and the default capabilities.
450  *
451  */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		  struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		/* No probed sub-device: fall back to the static defaults. */
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint32_t rx_offload_capa;

		/* Logical AND of RX capabilities over all probed devices. */
		rx_offload_capa = default_infos.rx_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
		}
		/* Numerables come from the active TX sub-device alone. */
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		/* TX/RSS capabilities limited to what fail-safe understands. */
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}
483
484 static const uint32_t *
485 fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
486 {
487         struct sub_device *sdev;
488         struct rte_eth_dev *edev;
489
490         sdev = TX_SUBDEV(dev);
491         if (sdev == NULL)
492                 return NULL;
493         edev = ETH(sdev);
494         /* ENOTSUP: counts as no supported ptypes */
495         if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
496                 return NULL;
497         /*
498          * The API does not permit to do a clean AND of all ptypes,
499          * It is also incomplete by design and we do not really care
500          * to have a best possible value in this context.
501          * We just return the ptypes of the device of highest
502          * priority, usually the PREFERRED device.
503          */
504         return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
505 }
506
507 static int
508 fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
509 {
510         struct sub_device *sdev;
511         uint8_t i;
512         int ret;
513
514         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
515                 DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
516                 ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
517                 if (ret) {
518                         ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
519                               i, ret);
520                         return ret;
521                 }
522         }
523         return 0;
524 }
525
526 static int
527 fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
528 {
529         struct sub_device *sdev;
530         uint8_t i;
531         int ret;
532
533         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
534                 DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
535                 ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
536                 if (ret) {
537                         ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
538                               " with error %d", i, ret);
539                         return ret;
540                 }
541         }
542         return 0;
543 }
544
545 static int
546 fs_flow_ctrl_get(struct rte_eth_dev *dev,
547                 struct rte_eth_fc_conf *fc_conf)
548 {
549         struct sub_device *sdev;
550
551         sdev = TX_SUBDEV(dev);
552         if (sdev == NULL)
553                 return 0;
554         if (SUBOPS(sdev, flow_ctrl_get) == NULL)
555                 return -ENOTSUP;
556         return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
557 }
558
559 static int
560 fs_flow_ctrl_set(struct rte_eth_dev *dev,
561                 struct rte_eth_fc_conf *fc_conf)
562 {
563         struct sub_device *sdev;
564         uint8_t i;
565         int ret;
566
567         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
568                 DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
569                 ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
570                 if (ret) {
571                         ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
572                               " with error %d", i, ret);
573                         return ret;
574                 }
575         }
576         return 0;
577 }
578
579 static void
580 fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
581 {
582         struct sub_device *sdev;
583         uint8_t i;
584
585         /* No check: already done within the rte_eth_dev_mac_addr_remove
586          * call for the fail-safe device.
587          */
588         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
589                 rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
590                                 &dev->data->mac_addrs[index]);
591         PRIV(dev)->mac_addr_pool[index] = 0;
592 }
593
/*
 * Add a MAC address to every active sub-device, then record it in the
 * fail-safe bookkeeping so it can be replayed on sub-device hotplug.
 */
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		/*
		 * NOTE(review): this stores `index`, not `index + 1`.
		 * If nb_mac_addr is a count, the entry at `index` may be
		 * skipped when replayed — confirm against the hotplug
		 * re-configuration path.
		 */
		PRIV(dev)->nb_mac_addr = index;
	}
	/* Remember the VMDq pool so the address can be replayed later. */
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}
620
621 static void
622 fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
623 {
624         struct sub_device *sdev;
625         uint8_t i;
626
627         FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
628                 rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
629 }
630
/*
 * Generic filter control entry point. The GENERIC/GET pair hands the
 * fail-safe rte_flow ops table back to the caller; everything else is
 * forwarded to each active sub-device, first failure wins.
 */
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Expose the fail-safe rte_flow ops through the filter API. */
	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if (ret) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
657
/* Fail-safe PMD callback table, installed as rte_eth_dev.dev_ops. */
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};