/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>

#include "failsafe_private.h"
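/*
 * Default device information advertised while no sub-device has been
 * probed. Per the fail-safe dev_infos_get rules further below, numerable
 * limits are set to their maximum possible values so as not to impede
 * later configuration, while capabilities are restricted to those the
 * fail-safe PMD knows how to verify and disable.
 */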
static struct rte_eth_dev_info default_infos = {
	/* Max possible number of elements */
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_tx_queues = RTE_MAX_QUEUES_PER_PORT,
	.max_mac_addrs = FAILSAFE_MAX_ETHADDR,
	.max_hash_mac_addrs = UINT32_MAX,
	.max_vfs = UINT16_MAX,
	.max_vmdq_pools = UINT16_MAX,
	.rx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	.tx_desc_lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
		.nb_seg_max = UINT16_MAX,
		.nb_mtu_seg_max = UINT16_MAX,
	},
	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	.rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO,
	.tx_offload_capa = 0x0,
	.flow_type_rss_offloads = 0x0,
};
/**
 * Check whether a specific offloading capability
 * is supported by a sub_device.
 *
 * @return
 *   0: all requested capabilities are supported by the sub_device
 *   positive value: This flag at least is not supported by the sub_device
 */
static uint32_t
fs_port_offload_validate(struct rte_eth_dev *dev,
			 struct sub_device *sdev)
{
	struct rte_eth_dev_info infos = {0};
	struct rte_eth_conf *cf;
	uint32_t cap;

	cf = &dev->data->dev_conf;
	SUBOPS(sdev, dev_infos_get)(ETH(sdev), &infos);
	/* RX capabilities */
	cap = infos.rx_offload_capa;
	if (cf->rxmode.hw_vlan_strip &&
	    ((cap & DEV_RX_OFFLOAD_VLAN_STRIP) == 0)) {
		WARN("VLAN stripping offload requested but not supported by sub_device %d",
		     SUB_ID(sdev));
		return DEV_RX_OFFLOAD_VLAN_STRIP;
	}
	if (cf->rxmode.hw_ip_checksum &&
	    ((cap & (DEV_RX_OFFLOAD_IPV4_CKSUM |
		     DEV_RX_OFFLOAD_UDP_CKSUM |
		     DEV_RX_OFFLOAD_TCP_CKSUM)) !=
	     (DEV_RX_OFFLOAD_IPV4_CKSUM |
	      DEV_RX_OFFLOAD_UDP_CKSUM |
	      DEV_RX_OFFLOAD_TCP_CKSUM))) {
		WARN("IP checksum offload requested but not supported by sub_device %d",
		     SUB_ID(sdev));
		return DEV_RX_OFFLOAD_IPV4_CKSUM |
		       DEV_RX_OFFLOAD_UDP_CKSUM |
		       DEV_RX_OFFLOAD_TCP_CKSUM;
	}
	if (cf->rxmode.enable_lro &&
	    ((cap & DEV_RX_OFFLOAD_TCP_LRO) == 0)) {
		WARN("TCP LRO offload requested but not supported by sub_device %d",
		     SUB_ID(sdev));
		return DEV_RX_OFFLOAD_TCP_LRO;
	}
	if (cf->rxmode.hw_vlan_extend &&
	    ((cap & DEV_RX_OFFLOAD_QINQ_STRIP) == 0)) {
		WARN("Stacked VLAN stripping offload requested but not supported by sub_device %d",
		     SUB_ID(sdev));
		return DEV_RX_OFFLOAD_QINQ_STRIP;
	}
	/* TX capabilities */
	/* Nothing to do, no tx capa supported */
	return 0;
}
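/*
 * Illustrative sketch (not part of the driver logic): fs_dev_configure()
 * below drains unsupported offloads by pairing this check with
 * fs_port_disable_offload(), roughly:
 *
 *	while ((flag = fs_port_offload_validate(dev, sdev)))
 *		fs_port_disable_offload(&dev->data->dev_conf, flag);
 *
 * The real loop additionally refuses to touch an already-live
 * configuration and propagates errors; see fs_dev_configure().
 */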
/*
 * Disable the dev_conf flag related to an offload capability flag
 * within an ethdev configuration.
 */
static int
fs_port_disable_offload(struct rte_eth_conf *cf,
			uint32_t ol)
{
	switch (ol) {
	case DEV_RX_OFFLOAD_VLAN_STRIP:
		INFO("Disabling VLAN stripping offload");
		cf->rxmode.hw_vlan_strip = 0;
		break;
	case DEV_RX_OFFLOAD_IPV4_CKSUM:
	case DEV_RX_OFFLOAD_UDP_CKSUM:
	case DEV_RX_OFFLOAD_TCP_CKSUM:
	case (DEV_RX_OFFLOAD_IPV4_CKSUM |
	      DEV_RX_OFFLOAD_UDP_CKSUM |
	      DEV_RX_OFFLOAD_TCP_CKSUM):
		INFO("Disabling IP checksum offload");
		cf->rxmode.hw_ip_checksum = 0;
		break;
	case DEV_RX_OFFLOAD_TCP_LRO:
		INFO("Disabling TCP LRO offload");
		cf->rxmode.enable_lro = 0;
		break;
	case DEV_RX_OFFLOAD_QINQ_STRIP:
		INFO("Disabling stacked VLAN stripping offload");
		cf->rxmode.hw_vlan_extend = 0;
		break;
	default:
		DEBUG("Unable to disable offload capability: %" PRIx32,
		      ol);
		return -1;
	}
	return 0;
}
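/*
 * Two-pass configuration. First pass: make sure the requested offloads
 * can be satisfied by every probed sub-device, disabling (or refusing to
 * change, if already live) whatever is unsupported. Second pass: apply
 * the resulting configuration to each sub-device and register RMV/LSC
 * event callbacks where the underlying port supports them.
 */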
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int capa_flag;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_PROBED)
			continue;
		DEBUG("Checking capabilities for sub_device %d", i);
		while ((capa_flag = fs_port_offload_validate(dev, sdev))) {
			/*
			 * Refuse to change configuration if multiple devices
			 * are present and we already have configured at least
			 * some of them.
			 */
			if (PRIV(dev)->state >= DEV_ACTIVE &&
			    PRIV(dev)->subs_tail > 1) {
				ERROR("device already configured, cannot fix live configuration");
				return -1;
			}
			ret = fs_port_disable_offload(&dev->data->dev_conf,
						      capa_flag);
			if (ret) {
				ERROR("Unable to disable offload capability");
				return ret;
			}
		}
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED)
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
				(ETH(sdev)->data->dev_flags &
				 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		sdev->remove = 0;
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			ERROR("Could not configure sub_device %d", i);
			return ret;
		}
		if (rmv_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
						RTE_ETH_EVENT_INTR_LSC,
						failsafe_eth_lsc_event_callback,
						dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	return 0;
}
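/*
 * Start every active sub-device, then let fs_switch_dev() (re-)elect the
 * sub-device that actually carries traffic, usually the PREFERRED one
 * when it is available.
 */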
static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
		if (ret)
			return ret;
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED)
		PRIV(dev)->state = DEV_STARTED;
	fs_switch_dev(dev, NULL);
	return 0;
}
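/*
 * Note the "state - 1" idiom below: device states form an ordered enum,
 * so DEV_STARTED - 1 demotes a port to the state just below started
 * (i.e. active but stopped).
 */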
static void
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		rte_eth_dev_stop(PORT_ID(sdev));
		sdev->state = DEV_STARTED - 1;
	}
}
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if (ret) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static void fs_dev_free_queues(struct rte_eth_dev *dev);
static void
fs_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED)
		dev->dev_ops->dev_stop(dev);
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE - 1;
	}
	fs_dev_free_queues(dev);
}
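/*
 * Queue setup/release. A fail-safe queue object ends with a flexible
 * array of per-sub-device atomic reference counters, which is why the
 * allocations below reserve sizeof(*rxq) (resp. sizeof(*txq)) plus one
 * rte_atomic64_t per possible sub-device (subs_tail).
 */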
static void
fs_rx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq;

	if (queue == NULL)
		return;
	rxq = queue;
	dev = rxq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, rx_queue_release)
			(ETH(sdev)->data->rx_queues[rxq->qid]);
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
}
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(rxq);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if (ret) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	return 0;
free_rxq:
	fs_rx_queue_release(rxq);
	return ret;
}
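/*
 * The TX queue path below mirrors the RX one: release any previous
 * queue, allocate the wrapper with its refcnt[] tail, then set up the
 * queue on every active sub-device.
 */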
static void
fs_tx_queue_release(void *queue)
{
	struct rte_eth_dev *dev;
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq;

	if (queue == NULL)
		return;
	txq = queue;
	dev = txq->priv->dev;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		SUBOPS(sdev, tx_queue_release)
			(ETH(sdev)->data->tx_queues[txq->qid]);
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
}
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(txq);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if (ret) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	return 0;
free_txq:
	fs_tx_queue_release(txq);
	return ret;
}
static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
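/*
 * Simple fan-out operations: the promiscuous and allmulticast toggles
 * are propagated verbatim to every active sub-device.
 */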
static void
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_enable(PORT_ID(sdev));
}

static void
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_promiscuous_disable(PORT_ID(sdev));
}

static void
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_enable(PORT_ID(sdev));
}

static void
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_allmulticast_disable(PORT_ID(sdev));
}
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			return 0;
		}
	}
	return -1;
}
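/*
 * Statistics model: stats_accumulator is maintained outside this
 * function so counters survive sub-device removal; fs_stats_get() starts
 * from the accumulator and adds a fresh snapshot of every active
 * sub-device on top of it.
 */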
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct sub_device *sdev;
	uint8_t i;

	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_get(PORT_ID(sdev), &sdev->stats_snapshot);
		failsafe_stats_increment(stats, &sdev->stats_snapshot);
	}
	return 0;
}
static void
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		rte_eth_stats_reset(PORT_ID(sdev));
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
}
/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 */
static void
fs_dev_infos_get(struct rte_eth_dev *dev,
		  struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		DEBUG("No probed device, using default infos");
		rte_memcpy(&PRIV(dev)->infos, &default_infos,
			   sizeof(default_infos));
	} else {
		uint32_t rx_offload_capa;

		rx_offload_capa = default_infos.rx_offload_capa;
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			rte_eth_dev_info_get(PORT_ID(sdev),
					&PRIV(dev)->infos);
			rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
		}
		sdev = TX_SUBDEV(dev);
		rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
		PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
		PRIV(dev)->infos.tx_offload_capa &=
					default_infos.tx_offload_capa;
		PRIV(dev)->infos.flow_type_rss_offloads &=
					default_infos.flow_type_rss_offloads;
	}
	rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}
static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return NULL;
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
		return NULL;
	/*
	 * The API does not permit to do a clean AND of all ptypes,
	 * It is also incomplete by design and we do not really care
	 * to have a best possible value in this context.
	 * We just return the ptypes of the device of highest
	 * priority, usually the PREFERRED device.
	 */
	return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
}
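/*
 * The per-port setters below follow a common fail-fast pattern: apply
 * the request to every active sub-device and abort on the first error,
 * leaving earlier sub-devices already updated.
 */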
static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if (ret) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if (ret) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;

	sdev = TX_SUBDEV(dev);
	if (sdev == NULL)
		return 0;
	if (SUBOPS(sdev, flow_ctrl_get) == NULL)
		return -ENOTSUP;
	return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
}
static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if (ret) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
				&dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
}
static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	return 0;
}
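/*
 * mac_addr_pool[] above records the VMDq pool of each added address;
 * this bookkeeping lets the fail-safe replay MAC addresses on a
 * sub-device that is plugged back in later (handled outside this file).
 */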
static void
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
}
static int
fs_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type type,
		enum rte_filter_op op,
		void *arg)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	if (type == RTE_ETH_FILTER_GENERIC &&
	    op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &fs_flow_ops;
		return 0;
	}
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
		ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
		if (ret) {
			ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
			      " with error %d", i, ret);
			return ret;
		}
	}
	return 0;
}
const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = fs_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.filter_ctrl = fs_filter_ctrl,
};