net/failsafe: add fail-safe PMD
[dpdk.git] / drivers/net/failsafe/failsafe_ops.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <inttypes.h>
#include <string.h>

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "failsafe_private.h"

/* Defined below, used by fs_dev_close() before its definition. */
static void fs_dev_free_queues(struct rte_eth_dev *dev);

static struct rte_eth_dev_info default_infos = {
        /* Max possible number of elements */
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
        .max_hash_mac_addrs = UINT32_MAX,
        .max_vfs = UINT16_MAX,
        .max_vmdq_pools = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        /* Set of understood capabilities */
        .rx_offload_capa = 0x0,
        .tx_offload_capa = 0x0,
        .flow_type_rss_offloads = 0x0,
};

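/*
 * Configure each probed sub_device with the fail-safe device
 * configuration. Sub-devices that configure successfully are
 * promoted to the DEV_ACTIVE state.
 */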
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_PROBED)
                        continue;
                DEBUG("Configuring sub-device %d", i);
                ret = rte_eth_dev_configure(PORT_ID(sdev),
                                        dev->data->nb_rx_queues,
                                        dev->data->nb_tx_queues,
                                        &dev->data->dev_conf);
                if (ret) {
                        ERROR("Could not configure sub_device %d", i);
                        return ret;
                }
                sdev->state = DEV_ACTIVE;
        }
        return 0;
}

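/*
 * Start every active sub_device, then elect the TX sub_device:
 * the preferred sub_device if it is started, otherwise the first
 * started sub_device found.
 */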
static int
fs_dev_start(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_ACTIVE)
                        continue;
                DEBUG("Starting sub_device %d", i);
                ret = rte_eth_dev_start(PORT_ID(sdev));
                if (ret)
                        return ret;
                sdev->state = DEV_STARTED;
        }
        if (PREFERRED_SUBDEV(dev)->state == DEV_STARTED) {
                if (TX_SUBDEV(dev) != PREFERRED_SUBDEV(dev)) {
                        DEBUG("Switching tx_dev to preferred sub_device");
                        PRIV(dev)->subs_tx = 0;
                }
        } else {
                if ((TX_SUBDEV(dev) && TX_SUBDEV(dev)->state < DEV_STARTED) ||
                    TX_SUBDEV(dev) == NULL) {
                        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
                                DEBUG("Switching tx_dev to sub_device %d", i);
                                PRIV(dev)->subs_tx = i;
                                break;
                        }
                }
        }
        return 0;
}

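/*
 * Stop every started sub_device, rolling its state back to DEV_ACTIVE.
 */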
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
                rte_eth_dev_stop(PORT_ID(sdev));
                sdev->state = DEV_STARTED - 1; /* back to DEV_ACTIVE */
        }
}

static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
                ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
                ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

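/*
 * Close every active sub_device, rolling its state back to DEV_PROBED,
 * then release the fail-safe queues.
 */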
static void
fs_dev_close(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Closing sub_device %d", i);
                rte_eth_dev_close(PORT_ID(sdev));
                sdev->state = DEV_ACTIVE - 1; /* back to DEV_PROBED */
        }
        fs_dev_free_queues(dev);
}

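/*
 * Release the matching RX queue of every active sub_device, then
 * free the fail-safe queue itself.
 */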
static void
fs_rx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct rxq *rxq;

        if (queue == NULL)
                return;
        rxq = queue;
        dev = rxq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, rx_queue_release)
                        (ETH(sdev)->data->rx_queues[rxq->qid]);
        dev->data->rx_queues[rxq->qid] = NULL;
        rte_free(rxq);
}

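/*
 * Allocate the fail-safe RX queue, then mirror its setup on every
 * active sub_device. Any sub_device failure releases the whole
 * queue again.
 */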
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t rx_queue_id,
                uint16_t nb_rx_desc,
                unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool)
{
        struct sub_device *sdev;
        struct rxq *rxq;
        uint8_t i;
        int ret;

        rxq = dev->data->rx_queues[rx_queue_id];
        if (rxq != NULL) {
                fs_rx_queue_release(rxq);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
        rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL)
                return -ENOMEM;
        rxq->qid = rx_queue_id;
        rxq->socket_id = socket_id;
        rxq->info.mp = mb_pool;
        rxq->info.conf = *rx_conf;
        rxq->info.nb_desc = nb_rx_desc;
        rxq->priv = PRIV(dev);
        dev->data->rx_queues[rx_queue_id] = rxq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
                                rx_queue_id,
                                nb_rx_desc, socket_id,
                                rx_conf, mb_pool);
                if (ret) {
                        ERROR("RX queue setup failed for sub_device %d", i);
                        goto free_rxq;
                }
        }
        return 0;
free_rxq:
        fs_rx_queue_release(rxq);
        return ret;
}

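/*
 * Release the matching TX queue of every active sub_device, then
 * free the fail-safe queue itself.
 */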
static void
fs_tx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct txq *txq;

        if (queue == NULL)
                return;
        txq = queue;
        dev = txq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, tx_queue_release)
                        (ETH(sdev)->data->tx_queues[txq->qid]);
        dev->data->tx_queues[txq->qid] = NULL;
        rte_free(txq);
}

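/*
 * Allocate the fail-safe TX queue, then mirror its setup on every
 * active sub_device. Any sub_device failure releases the whole
 * queue again.
 */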
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t tx_queue_id,
                uint16_t nb_tx_desc,
                unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf)
{
        struct sub_device *sdev;
        struct txq *txq;
        uint8_t i;
        int ret;

        txq = dev->data->tx_queues[tx_queue_id];
        if (txq != NULL) {
                fs_tx_queue_release(txq);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
        txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
                return -ENOMEM;
        txq->qid = tx_queue_id;
        txq->socket_id = socket_id;
        txq->info.conf = *tx_conf;
        txq->info.nb_desc = nb_tx_desc;
        txq->priv = PRIV(dev);
        dev->data->tx_queues[tx_queue_id] = txq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
                                tx_queue_id,
                                nb_tx_desc, socket_id,
                                tx_conf);
                if (ret) {
                        ERROR("TX queue setup failed for sub_device %d", i);
                        goto free_txq;
                }
        }
        return 0;
free_txq:
        fs_tx_queue_release(txq);
        return ret;
}

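/*
 * Release and forget every RX and TX queue of the fail-safe device.
 */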
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
        uint16_t i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                fs_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                fs_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

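/*
 * Promiscuous and all-multicast modes are simply mirrored
 * on every active sub_device.
 */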
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_disable(PORT_ID(sdev));
}

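/*
 * Refresh the link status of every active sub_device, then report
 * the link of the TX sub_device as the fail-safe link.
 */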
static int
fs_link_update(struct rte_eth_dev *dev,
                int wait_to_complete)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling link_update on sub_device %d", i);
                ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
                if (ret && ret != -1) {
                        ERROR("Link update failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        if (TX_SUBDEV(dev)) {
                struct rte_eth_link *l1;
                struct rte_eth_link *l2;

                l1 = &dev->data->dev_link;
                l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
                if (memcmp(l1, l2, sizeof(*l1))) {
                        *l1 = *l2;
                        return 0;
                }
        }
        return -1;
}

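/*
 * Only the statistics of the TX sub_device are reported;
 * resetting, however, applies to every active sub_device.
 */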
static void
fs_stats_get(struct rte_eth_dev *dev,
             struct rte_eth_stats *stats)
{
        if (TX_SUBDEV(dev) == NULL)
                return;
        rte_eth_stats_get(PORT_ID(TX_SUBDEV(dev)), stats);
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_stats_reset(PORT_ID(sdev));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numeric limits:
 *      Use the maximum possible value for each field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those understood by the fail-safe PMD.
 *      This understanding stems from the fail-safe being capable of
 *      verifying that the related capability is expressed within
 *      the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numeric limits:
 *      Use the values of the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example regarding the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing
 *      sub_device is supposed to be plugged in later on, so the
 *      configuration process is the single point of failure and
 *      error reporting.
 *   Capabilities:
 *      Use a logical AND of the RX capabilities of all sub_devices
 *      and the default capabilities.
 *      Use a logical AND of the TX capabilities of the active probed
 *      sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
                  struct rte_eth_dev_info *infos)
{
        struct sub_device *sdev;
        uint8_t i;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL) {
                DEBUG("No probed device, using default infos");
                rte_memcpy(&PRIV(dev)->infos, &default_infos,
                           sizeof(default_infos));
        } else {
                uint32_t rx_offload_capa;

                rx_offload_capa = default_infos.rx_offload_capa;
                FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
                        rte_eth_dev_info_get(PORT_ID(sdev),
                                        &PRIV(dev)->infos);
                        rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
                }
                sdev = TX_SUBDEV(dev);
                rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
                PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
                PRIV(dev)->infos.tx_offload_capa &=
                                        default_infos.tx_offload_capa;
                PRIV(dev)->infos.flow_type_rss_offloads &=
                                        default_infos.flow_type_rss_offloads;
        }
        rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        struct rte_eth_dev *edev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return NULL;
        edev = ETH(sdev);
        /* ENOTSUP: counts as no supported ptypes */
        if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
                return NULL;
        /*
         * The API does not permit a clean AND of the ptypes of all
         * sub_devices. It is also incomplete by design, and we do not
         * really care about having the best possible value in this
         * context.
         * We just return the ptypes of the highest-priority device,
         * usually the PREFERRED sub_device.
         */
        return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
                ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
                ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
                if (ret) {
                        ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return 0;
        if (SUBOPS(sdev, flow_ctrl_get) == NULL)
                return -ENOTSUP;
        return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
                ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
                if (ret) {
                        ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct sub_device *sdev;
        uint8_t i;

        /* No check: already done within the rte_eth_dev_mac_addr_remove
         * call for the fail-safe device.
         */
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
                                &dev->data->mac_addrs[index]);
        PRIV(dev)->mac_addr_pool[index] = 0;
}

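/*
 * Add the MAC address to every active sub_device and record its
 * VMDq pool in the fail-safe MAC address bookkeeping.
 */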
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr,
                uint32_t index,
                uint32_t vmdq)
{
        struct sub_device *sdev;
        int ret;
        uint8_t i;

        RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
                if (ret) {
                        ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
                              PRIu8 " with error %d", i, ret);
                        return ret;
                }
        }
        if (index >= PRIV(dev)->nb_mac_addr) {
                DEBUG("Growing mac_addrs array");
                /* The pool now holds indices 0..index, i.e. index + 1 entries. */
                PRIV(dev)->nb_mac_addr = index + 1;
        }
        PRIV(dev)->mac_addr_pool[index] = vmdq;
        return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

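/*
 * Fail-safe ethdev operations. Each callback dispatches to the
 * sub-devices according to the rules implemented above.
 */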
const struct eth_dev_ops failsafe_ops = {
        .dev_configure = fs_dev_configure,
        .dev_start = fs_dev_start,
        .dev_stop = fs_dev_stop,
        .dev_set_link_down = fs_dev_set_link_down,
        .dev_set_link_up = fs_dev_set_link_up,
        .dev_close = fs_dev_close,
        .promiscuous_enable = fs_promiscuous_enable,
        .promiscuous_disable = fs_promiscuous_disable,
        .allmulticast_enable = fs_allmulticast_enable,
        .allmulticast_disable = fs_allmulticast_disable,
        .link_update = fs_link_update,
        .stats_get = fs_stats_get,
        .stats_reset = fs_stats_reset,
        .dev_infos_get = fs_dev_infos_get,
        .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
        .mtu_set = fs_mtu_set,
        .vlan_filter_set = fs_vlan_filter_set,
        .rx_queue_setup = fs_rx_queue_setup,
        .tx_queue_setup = fs_tx_queue_setup,
        .rx_queue_release = fs_rx_queue_release,
        .tx_queue_release = fs_tx_queue_release,
        .flow_ctrl_get = fs_flow_ctrl_get,
        .flow_ctrl_set = fs_flow_ctrl_set,
        .mac_addr_remove = fs_mac_addr_remove,
        .mac_addr_add = fs_mac_addr_add,
        .mac_addr_set = fs_mac_addr_set,
};