net/failsafe: add plug-in support
drivers/net/failsafe/failsafe_ops.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <string.h>
#include <inttypes.h>

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "failsafe_private.h"

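/*
 * Device information advertised while no sub-device has been probed.
 * The values are deliberately permissive (see the dev_infos_get rules
 * below) so that an early configuration is not rejected before any
 * sub-device can be plugged in.
 */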
static struct rte_eth_dev_info default_infos = {
        /* Max possible number of elements */
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
        .max_mac_addrs = FAILSAFE_MAX_ETHADDR,
        .max_hash_mac_addrs = UINT32_MAX,
        .max_vfs = UINT16_MAX,
        .max_vmdq_pools = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
                .nb_seg_max = UINT16_MAX,
                .nb_mtu_seg_max = UINT16_MAX,
        },
        /* Set of understood capabilities */
        .rx_offload_capa = 0x0,
        .tx_offload_capa = 0x0,
        .flow_type_rss_offloads = 0x0,
};

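/*
 * Propagate the fail-safe configuration to each sub-device in the
 * DEV_PROBED state; every successfully configured sub-device moves to
 * DEV_ACTIVE.
 */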
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_PROBED)
                        continue;
                DEBUG("Configuring sub_device %d", i);
                ret = rte_eth_dev_configure(PORT_ID(sdev),
                                        dev->data->nb_rx_queues,
                                        dev->data->nb_tx_queues,
                                        &dev->data->dev_conf);
                if (ret) {
                        ERROR("Could not configure sub_device %d", i);
                        return ret;
                }
                sdev->state = DEV_ACTIVE;
        }
        if (PRIV(dev)->state < DEV_ACTIVE)
                PRIV(dev)->state = DEV_ACTIVE;
        return 0;
}

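/*
 * Start each DEV_ACTIVE sub-device, then let fs_switch_dev() select the
 * sub-device used for RX/TX.
 */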
static int
fs_dev_start(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV(sdev, i, dev) {
                if (sdev->state != DEV_ACTIVE)
                        continue;
                DEBUG("Starting sub_device %d", i);
                ret = rte_eth_dev_start(PORT_ID(sdev));
                if (ret)
                        return ret;
                sdev->state = DEV_STARTED;
        }
        if (PRIV(dev)->state < DEV_STARTED)
                PRIV(dev)->state = DEV_STARTED;
        fs_switch_dev(dev);
        return 0;
}

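/*
 * Stop every started sub-device. States are walked back one step:
 * DEV_STARTED - 1 is DEV_ACTIVE.
 */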
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        PRIV(dev)->state = DEV_STARTED - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
                rte_eth_dev_stop(PORT_ID(sdev));
                sdev->state = DEV_STARTED - 1;
        }
}

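/*
 * Link up/down requests are broadcast to every active sub-device and
 * fail on the first error reported.
 */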
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
                ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
                ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

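/*
 * Close the fail-safe device: cancel the hotplug alarm, stop the device
 * if needed, close every active sub-device, then release all queues.
 */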
static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        failsafe_hotplug_alarm_cancel(dev);
        if (PRIV(dev)->state == DEV_STARTED)
                dev->dev_ops->dev_stop(dev);
        PRIV(dev)->state = DEV_ACTIVE - 1;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Closing sub_device %d", i);
                rte_eth_dev_close(PORT_ID(sdev));
                sdev->state = DEV_ACTIVE - 1;
        }
        fs_dev_free_queues(dev);
}

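/*
 * Release an RX queue on every active sub-device before freeing the
 * fail-safe queue object itself.
 */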
static void
fs_rx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct rxq *rxq;

        if (queue == NULL)
                return;
        rxq = queue;
        dev = rxq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, rx_queue_release)
                        (ETH(sdev)->data->rx_queues[rxq->qid]);
        dev->data->rx_queues[rxq->qid] = NULL;
        rte_free(rxq);
}

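/*
 * Set up an RX queue. The parameters are stored in the fail-safe queue
 * object so that they can be replayed on a sub-device plugged in later,
 * then the queue is created on every currently active sub-device.
 */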
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t rx_queue_id,
                uint16_t nb_rx_desc,
                unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool)
{
        struct sub_device *sdev;
        struct rxq *rxq;
        uint8_t i;
        int ret;

        rxq = dev->data->rx_queues[rx_queue_id];
        if (rxq != NULL) {
                fs_rx_queue_release(rxq);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
        rxq = rte_zmalloc(NULL, sizeof(*rxq),
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL)
                return -ENOMEM;
        rxq->qid = rx_queue_id;
        rxq->socket_id = socket_id;
        rxq->info.mp = mb_pool;
        rxq->info.conf = *rx_conf;
        rxq->info.nb_desc = nb_rx_desc;
        rxq->priv = PRIV(dev);
        dev->data->rx_queues[rx_queue_id] = rxq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
                                rx_queue_id,
                                nb_rx_desc, socket_id,
                                rx_conf, mb_pool);
                if (ret) {
                        ERROR("RX queue setup failed for sub_device %d", i);
                        goto free_rxq;
                }
        }
        return 0;
free_rxq:
        fs_rx_queue_release(rxq);
        return ret;
}

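/* TX counterpart of fs_rx_queue_release(). */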
static void
fs_tx_queue_release(void *queue)
{
        struct rte_eth_dev *dev;
        struct sub_device *sdev;
        uint8_t i;
        struct txq *txq;

        if (queue == NULL)
                return;
        txq = queue;
        dev = txq->priv->dev;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                SUBOPS(sdev, tx_queue_release)
                        (ETH(sdev)->data->tx_queues[txq->qid]);
        dev->data->tx_queues[txq->qid] = NULL;
        rte_free(txq);
}

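/* TX counterpart of fs_rx_queue_setup(); no mempool is involved on TX. */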
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t tx_queue_id,
                uint16_t nb_tx_desc,
                unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf)
{
        struct sub_device *sdev;
        struct txq *txq;
        uint8_t i;
        int ret;

        txq = dev->data->tx_queues[tx_queue_id];
        if (txq != NULL) {
                fs_tx_queue_release(txq);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
        txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
                return -ENOMEM;
        txq->qid = tx_queue_id;
        txq->socket_id = socket_id;
        txq->info.conf = *tx_conf;
        txq->info.nb_desc = nb_tx_desc;
        txq->priv = PRIV(dev);
        dev->data->tx_queues[tx_queue_id] = txq;
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
                                tx_queue_id,
                                nb_tx_desc, socket_id,
                                tx_conf);
                if (ret) {
                        ERROR("TX queue setup failed for sub_device %d", i);
                        goto free_txq;
                }
        }
        return 0;
free_txq:
        fs_tx_queue_release(txq);
        return ret;
}

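/* Release every RX and TX queue; called from fs_dev_close(). */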
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
        uint16_t i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                fs_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                fs_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

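/*
 * Promiscuous and all-multicast mode changes are broadcast to every
 * active sub-device.
 */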
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_allmulticast_disable(PORT_ID(sdev));
}

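/*
 * Update the link status of every active sub-device, then report the
 * link of the current TX sub-device. Returns 0 when the reported link
 * changed, -1 otherwise.
 */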
static int
fs_link_update(struct rte_eth_dev *dev,
                int wait_to_complete)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling link_update on sub_device %d", i);
                ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
                if (ret && ret != -1) {
                        ERROR("Link update failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        if (TX_SUBDEV(dev)) {
                struct rte_eth_link *l1;
                struct rte_eth_link *l2;

                l1 = &dev->data->dev_link;
                l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
                if (memcmp(l1, l2, sizeof(*l1))) {
                        *l1 = *l2;
                        return 0;
                }
        }
        return -1;
}

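/*
 * Statistics are those of the current TX sub-device only; counters from
 * the other sub-devices are not accumulated.
 */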
static void
fs_stats_get(struct rte_eth_dev *dev,
             struct rte_eth_stats *stats)
{
        if (TX_SUBDEV(dev) == NULL)
                return;
        rte_eth_stats_get(PORT_ID(TX_SUBDEV(dev)), stats);
}

static void
fs_stats_reset(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_stats_reset(PORT_ID(sdev));
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerical limits:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limit capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerical limits:
 *      Use the values of the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example regarding the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Use the logical AND of the RX capabilities of
 *      all sub_devices and the default capabilities.
 *      Use the logical AND of the TX capabilities of
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
                  struct rte_eth_dev_info *infos)
{
        struct sub_device *sdev;
        uint8_t i;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL) {
                DEBUG("No probed device, using default infos");
                rte_memcpy(&PRIV(dev)->infos, &default_infos,
                           sizeof(default_infos));
        } else {
                uint32_t rx_offload_capa;

                rx_offload_capa = default_infos.rx_offload_capa;
                FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
                        rte_eth_dev_info_get(PORT_ID(sdev),
                                        &PRIV(dev)->infos);
                        rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
                }
                sdev = TX_SUBDEV(dev);
                rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
                PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
                PRIV(dev)->infos.tx_offload_capa &=
                                        default_infos.tx_offload_capa;
                PRIV(dev)->infos.flow_type_rss_offloads &=
                                        default_infos.flow_type_rss_offloads;
        }
        rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        struct sub_device *sdev;
        struct rte_eth_dev *edev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return NULL;
        edev = ETH(sdev);
        /* ENOTSUP: counts as no supported ptypes */
        if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
                return NULL;
        /*
         * The API does not permit a clean AND of all ptypes.
         * It is also incomplete by design, and we do not really need
         * the best possible value in this context.
         * We just return the ptypes of the device of highest
         * priority, usually the PREFERRED device.
         */
        return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}

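/* MTU and VLAN filter settings are broadcast to every active sub-device. */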
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
                ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
                if (ret) {
                        ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
                              i, ret);
                        return ret;
                }
        }
        return 0;
}

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
                ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
                if (ret) {
                        ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

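/*
 * Flow control configuration is read back from the current TX sub-device
 * only, but is applied to every active sub-device.
 */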
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;

        sdev = TX_SUBDEV(dev);
        if (sdev == NULL)
                return 0;
        if (SUBOPS(sdev, flow_ctrl_get) == NULL)
                return -ENOTSUP;
        return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_fc_conf *fc_conf)
{
        struct sub_device *sdev;
        uint8_t i;
        int ret;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
                ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
                if (ret) {
                        ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
                              " with error %d", i, ret);
                        return ret;
                }
        }
        return 0;
}

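/*
 * MAC address operations are broadcast to every active sub-device, while
 * mac_addr_pool records the VMDq pool of each address so that the
 * configuration can be replayed on a sub-device plugged in later.
 */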
static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct sub_device *sdev;
        uint8_t i;

        /* No check: already done within the rte_eth_dev_mac_addr_remove
         * call for the fail-safe device.
         */
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
                                &dev->data->mac_addrs[index]);
        PRIV(dev)->mac_addr_pool[index] = 0;
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr,
                uint32_t index,
                uint32_t vmdq)
{
        struct sub_device *sdev;
        int ret;
        uint8_t i;

        RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
                if (ret) {
                        ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
                              PRIu8 " with error %d", i, ret);
                        return ret;
                }
        }
        if (index >= PRIV(dev)->nb_mac_addr) {
                DEBUG("Growing mac_addrs array");
                PRIV(dev)->nb_mac_addr = index;
        }
        PRIV(dev)->mac_addr_pool[index] = vmdq;
        return 0;
}

static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct sub_device *sdev;
        uint8_t i;

        FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
                rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}

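/* Operations exported by the fail-safe PMD through the ethdev API. */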
const struct eth_dev_ops failsafe_ops = {
        .dev_configure = fs_dev_configure,
        .dev_start = fs_dev_start,
        .dev_stop = fs_dev_stop,
        .dev_set_link_down = fs_dev_set_link_down,
        .dev_set_link_up = fs_dev_set_link_up,
        .dev_close = fs_dev_close,
        .promiscuous_enable = fs_promiscuous_enable,
        .promiscuous_disable = fs_promiscuous_disable,
        .allmulticast_enable = fs_allmulticast_enable,
        .allmulticast_disable = fs_allmulticast_disable,
        .link_update = fs_link_update,
        .stats_get = fs_stats_get,
        .stats_reset = fs_stats_reset,
        .dev_infos_get = fs_dev_infos_get,
        .dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
        .mtu_set = fs_mtu_set,
        .vlan_filter_set = fs_vlan_filter_set,
        .rx_queue_setup = fs_rx_queue_setup,
        .tx_queue_setup = fs_tx_queue_setup,
        .rx_queue_release = fs_rx_queue_release,
        .tx_queue_release = fs_tx_queue_release,
        .flow_ctrl_get = fs_flow_ctrl_get,
        .flow_ctrl_set = fs_flow_ctrl_set,
        .mac_addr_remove = fs_mac_addr_remove,
        .mac_addr_add = fs_mac_addr_add,
        .mac_addr_set = fs_mac_addr_set,
};