/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <string.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>

#include "rte_eth_bond.h"
#include "eth_bond_private.h"
#include "eth_bond_8023ad_private.h"
19 check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
21 /* Check valid pointer */
22 if (eth_dev == NULL ||
23 eth_dev->device == NULL ||
24 eth_dev->device->driver == NULL ||
25 eth_dev->device->driver->name == NULL)
28 /* return 0 if driver name matches */
29 return eth_dev->device->driver->name != pmd_bond_drv.driver.name;
33 valid_bonded_port_id(uint16_t port_id)
35 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
36 return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
40 check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev)
43 struct bond_dev_private *internals;
45 if (check_for_bonded_ethdev(eth_dev) != 0)
48 internals = eth_dev->data->dev_private;
50 /* Check if any of slave devices is a bonded device */
51 for (i = 0; i < internals->slave_count; i++)
52 if (valid_bonded_port_id(internals->slaves[i].port_id) == 0)
59 valid_slave_port_id(struct bond_dev_private *internals, uint16_t slave_port_id)
61 RTE_ETH_VALID_PORTID_OR_ERR_RET(slave_port_id, -1);
63 /* Verify that slave_port_id refers to a non bonded port */
64 if (check_for_bonded_ethdev(&rte_eth_devices[slave_port_id]) == 0 &&
65 internals->mode == BONDING_MODE_8023AD) {
66 RTE_BOND_LOG(ERR, "Cannot add slave to bonded device in 802.3ad"
67 " mode as slave is also a bonded device, only "
68 "physical devices can be support in this mode.");
72 if (internals->port_id == slave_port_id) {
74 "Cannot add the bonded device itself as its slave.");
82 activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
84 struct bond_dev_private *internals = eth_dev->data->dev_private;
85 uint16_t active_count = internals->active_slave_count;
87 if (internals->mode == BONDING_MODE_8023AD)
88 bond_mode_8023ad_activate_slave(eth_dev, port_id);
90 if (internals->mode == BONDING_MODE_TLB
91 || internals->mode == BONDING_MODE_ALB) {
93 internals->tlb_slaves_order[active_count] = port_id;
96 RTE_ASSERT(internals->active_slave_count <
97 (RTE_DIM(internals->active_slaves) - 1));
99 internals->active_slaves[internals->active_slave_count] = port_id;
100 internals->active_slave_count++;
102 if (internals->mode == BONDING_MODE_TLB)
103 bond_tlb_activate_slave(internals);
104 if (internals->mode == BONDING_MODE_ALB)
105 bond_mode_alb_client_list_upd(eth_dev);
109 deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
112 struct bond_dev_private *internals = eth_dev->data->dev_private;
113 uint16_t active_count = internals->active_slave_count;
115 if (internals->mode == BONDING_MODE_8023AD) {
116 bond_mode_8023ad_stop(eth_dev);
117 bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
118 } else if (internals->mode == BONDING_MODE_TLB
119 || internals->mode == BONDING_MODE_ALB)
120 bond_tlb_disable(internals);
122 slave_pos = find_slave_by_id(internals->active_slaves, active_count,
125 /* If slave was not at the end of the list
126 * shift active slaves up active array list */
127 if (slave_pos < active_count) {
129 memmove(internals->active_slaves + slave_pos,
130 internals->active_slaves + slave_pos + 1,
131 (active_count - slave_pos) *
132 sizeof(internals->active_slaves[0]));
135 RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
136 internals->active_slave_count = active_count;
138 if (eth_dev->data->dev_started) {
139 if (internals->mode == BONDING_MODE_8023AD) {
140 bond_mode_8023ad_start(eth_dev);
141 } else if (internals->mode == BONDING_MODE_TLB) {
142 bond_tlb_enable(internals);
143 } else if (internals->mode == BONDING_MODE_ALB) {
144 bond_tlb_enable(internals);
145 bond_mode_alb_client_list_upd(eth_dev);
151 rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
153 struct bond_dev_private *internals;
159 RTE_BOND_LOG(ERR, "Invalid name specified");
163 ret = snprintf(devargs, sizeof(devargs),
164 "driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id);
165 if (ret < 0 || ret >= (int)sizeof(devargs))
168 ret = rte_vdev_init(name, devargs);
172 ret = rte_eth_dev_get_port_by_name(name, &port_id);
176 * To make bond_ethdev_configure() happy we need to free the
177 * internals->kvlist here.
179 * Also see comment in bond_ethdev_configure().
181 internals = rte_eth_devices[port_id].data->dev_private;
182 rte_kvargs_free(internals->kvlist);
183 internals->kvlist = NULL;
/*
 * Destroy a bonded device previously created with rte_eth_bond_create().
 * Returns the result of the vdev uninit (0 on success, negative on error).
 */
int
rte_eth_bond_free(const char *name)
{
	return rte_vdev_uninit(name);
}
195 slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
197 struct rte_eth_dev *bonded_eth_dev;
198 struct bond_dev_private *internals;
205 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
206 if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
207 DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
210 internals = bonded_eth_dev->data->dev_private;
211 found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab);
221 for (i = 0, mask = 1;
222 i < RTE_BITMAP_SLAB_BIT_SIZE;
224 if (unlikely(slab & mask)) {
225 uint16_t vlan_id = pos + i;
227 res = rte_eth_dev_vlan_filter(slave_port_id,
231 found = rte_bitmap_scan(internals->vlan_filter_bmp,
233 } while (found && first != pos && res == 0);
239 slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
241 struct rte_flow *flow;
242 struct rte_flow_error ferror;
243 uint16_t slave_port_id = internals->slaves[slave_id].port_id;
245 if (internals->flow_isolated_valid != 0) {
246 if (rte_eth_dev_stop(slave_port_id) != 0) {
247 RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
252 if (rte_flow_isolate(slave_port_id, internals->flow_isolated,
254 RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave"
255 " %d: %s", slave_id, ferror.message ?
256 ferror.message : "(no stated reason)");
260 TAILQ_FOREACH(flow, &internals->flow_list, next) {
261 flow->flows[slave_id] = rte_flow_create(slave_port_id,
266 if (flow->flows[slave_id] == NULL) {
267 RTE_BOND_LOG(ERR, "Cannot create flow for slave"
269 ferror.message ? ferror.message :
270 "(no stated reason)");
271 /* Destroy successful bond flows from the slave */
272 TAILQ_FOREACH(flow, &internals->flow_list, next) {
273 if (flow->flows[slave_id] != NULL) {
274 rte_flow_destroy(slave_port_id,
275 flow->flows[slave_id],
277 flow->flows[slave_id] = NULL;
287 eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals,
288 const struct rte_eth_dev_info *di)
290 struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
292 internals->reta_size = di->reta_size;
294 /* Inherit Rx offload capabilities from the first slave device */
295 internals->rx_offload_capa = di->rx_offload_capa;
296 internals->rx_queue_offload_capa = di->rx_queue_offload_capa;
297 internals->flow_type_rss_offloads = di->flow_type_rss_offloads;
299 /* Inherit maximum Rx packet size from the first slave device */
300 internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
302 /* Inherit default Rx queue settings from the first slave device */
303 memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i));
306 * Turn off descriptor prefetch and writeback by default for all
307 * slave devices. Applications may tweak this setting if need be.
309 rxconf_i->rx_thresh.pthresh = 0;
310 rxconf_i->rx_thresh.hthresh = 0;
311 rxconf_i->rx_thresh.wthresh = 0;
313 /* Setting this to zero should effectively enable default values */
314 rxconf_i->rx_free_thresh = 0;
316 /* Disable deferred start by default for all slave devices */
317 rxconf_i->rx_deferred_start = 0;
321 eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals,
322 const struct rte_eth_dev_info *di)
324 struct rte_eth_txconf *txconf_i = &internals->default_txconf;
326 /* Inherit Tx offload capabilities from the first slave device */
327 internals->tx_offload_capa = di->tx_offload_capa;
328 internals->tx_queue_offload_capa = di->tx_queue_offload_capa;
330 /* Inherit default Tx queue settings from the first slave device */
331 memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i));
334 * Turn off descriptor prefetch and writeback by default for all
335 * slave devices. Applications may tweak this setting if need be.
337 txconf_i->tx_thresh.pthresh = 0;
338 txconf_i->tx_thresh.hthresh = 0;
339 txconf_i->tx_thresh.wthresh = 0;
342 * Setting these parameters to zero assumes that default
343 * values will be configured implicitly by slave devices.
345 txconf_i->tx_free_thresh = 0;
346 txconf_i->tx_rs_thresh = 0;
348 /* Disable deferred start by default for all slave devices */
349 txconf_i->tx_deferred_start = 0;
353 eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,
354 const struct rte_eth_dev_info *di)
356 struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
357 const struct rte_eth_rxconf *rxconf = &di->default_rxconf;
359 internals->rx_offload_capa &= di->rx_offload_capa;
360 internals->rx_queue_offload_capa &= di->rx_queue_offload_capa;
361 internals->flow_type_rss_offloads &= di->flow_type_rss_offloads;
364 * If at least one slave device suggests enabling this
365 * setting by default, enable it for all slave devices
366 * since disabling it may not be necessarily supported.
368 if (rxconf->rx_drop_en == 1)
369 rxconf_i->rx_drop_en = 1;
372 * Adding a new slave device may cause some of previously inherited
373 * offloads to be withdrawn from the internal rx_queue_offload_capa
374 * value. Thus, the new internal value of default Rx queue offloads
375 * has to be masked by rx_queue_offload_capa to make sure that only
376 * commonly supported offloads are preserved from both the previous
377 * value and the value being inhereted from the new slave device.
379 rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
380 internals->rx_queue_offload_capa;
383 * RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
384 * the power of 2, the lower one is GCD
386 if (internals->reta_size > di->reta_size)
387 internals->reta_size = di->reta_size;
389 if (!internals->max_rx_pktlen &&
390 di->max_rx_pktlen < internals->candidate_max_rx_pktlen)
391 internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
395 eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,
396 const struct rte_eth_dev_info *di)
398 struct rte_eth_txconf *txconf_i = &internals->default_txconf;
399 const struct rte_eth_txconf *txconf = &di->default_txconf;
401 internals->tx_offload_capa &= di->tx_offload_capa;
402 internals->tx_queue_offload_capa &= di->tx_queue_offload_capa;
405 * Adding a new slave device may cause some of previously inherited
406 * offloads to be withdrawn from the internal tx_queue_offload_capa
407 * value. Thus, the new internal value of default Tx queue offloads
408 * has to be masked by tx_queue_offload_capa to make sure that only
409 * commonly supported offloads are preserved from both the previous
410 * value and the value being inhereted from the new slave device.
412 txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
413 internals->tx_queue_offload_capa;
417 eth_bond_slave_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim,
418 const struct rte_eth_desc_lim *slave_desc_lim)
420 memcpy(bond_desc_lim, slave_desc_lim, sizeof(*bond_desc_lim));
424 eth_bond_slave_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim,
425 const struct rte_eth_desc_lim *slave_desc_lim)
427 bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max,
428 slave_desc_lim->nb_max);
429 bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min,
430 slave_desc_lim->nb_min);
431 bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align,
432 slave_desc_lim->nb_align);
434 if (bond_desc_lim->nb_min > bond_desc_lim->nb_max ||
435 bond_desc_lim->nb_align > bond_desc_lim->nb_max) {
436 RTE_BOND_LOG(ERR, "Failed to inherit descriptor limits");
440 /* Treat maximum number of segments equal to 0 as unspecified */
441 if (slave_desc_lim->nb_seg_max != 0 &&
442 (bond_desc_lim->nb_seg_max == 0 ||
443 slave_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max))
444 bond_desc_lim->nb_seg_max = slave_desc_lim->nb_seg_max;
445 if (slave_desc_lim->nb_mtu_seg_max != 0 &&
446 (bond_desc_lim->nb_mtu_seg_max == 0 ||
447 slave_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max))
448 bond_desc_lim->nb_mtu_seg_max = slave_desc_lim->nb_mtu_seg_max;
454 __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
456 struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
457 struct bond_dev_private *internals;
458 struct rte_eth_link link_props;
459 struct rte_eth_dev_info dev_info;
462 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
463 internals = bonded_eth_dev->data->dev_private;
465 if (valid_slave_port_id(internals, slave_port_id) != 0)
468 slave_eth_dev = &rte_eth_devices[slave_port_id];
469 if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
470 RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
474 ret = rte_eth_dev_info_get(slave_port_id, &dev_info);
477 "%s: Error during getting device (port %u) info: %s\n",
478 __func__, slave_port_id, strerror(-ret));
482 if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
483 RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small",
488 slave_add(internals, slave_eth_dev);
490 /* We need to store slaves reta_size to be able to synchronize RETA for all
491 * slave devices even if its sizes are different.
493 internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;
495 if (internals->slave_count < 1) {
496 /* if MAC is not user defined then use MAC of first slave add to
498 if (!internals->user_defined_mac) {
499 if (mac_address_set(bonded_eth_dev,
500 slave_eth_dev->data->mac_addrs)) {
501 RTE_BOND_LOG(ERR, "Failed to set MAC address");
506 /* Make primary slave */
507 internals->primary_port = slave_port_id;
508 internals->current_primary_port = slave_port_id;
510 /* Inherit queues settings from first slave */
511 internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
512 internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
514 eth_bond_slave_inherit_dev_info_rx_first(internals, &dev_info);
515 eth_bond_slave_inherit_dev_info_tx_first(internals, &dev_info);
517 eth_bond_slave_inherit_desc_lim_first(&internals->rx_desc_lim,
518 &dev_info.rx_desc_lim);
519 eth_bond_slave_inherit_desc_lim_first(&internals->tx_desc_lim,
520 &dev_info.tx_desc_lim);
524 eth_bond_slave_inherit_dev_info_rx_next(internals, &dev_info);
525 eth_bond_slave_inherit_dev_info_tx_next(internals, &dev_info);
527 ret = eth_bond_slave_inherit_desc_lim_next(
528 &internals->rx_desc_lim, &dev_info.rx_desc_lim);
532 ret = eth_bond_slave_inherit_desc_lim_next(
533 &internals->tx_desc_lim, &dev_info.tx_desc_lim);
538 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
539 internals->flow_type_rss_offloads;
541 if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) {
542 RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d",
547 /* Add additional MAC addresses to the slave */
548 if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) {
549 RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu",
554 internals->slave_count++;
556 if (bonded_eth_dev->data->dev_started) {
557 if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
558 internals->slave_count--;
559 RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
565 /* Update all slave devices MACs */
566 mac_address_slaves_update(bonded_eth_dev);
568 /* Register link status change callback with bonded device pointer as
570 rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
571 bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);
573 /* If bonded device is started then we can add the slave to our active
575 if (bonded_eth_dev->data->dev_started) {
576 ret = rte_eth_link_get_nowait(slave_port_id, &link_props);
578 rte_eth_dev_callback_unregister(slave_port_id,
579 RTE_ETH_EVENT_INTR_LSC,
580 bond_ethdev_lsc_event_callback,
581 &bonded_eth_dev->data->port_id);
582 internals->slave_count--;
584 "Slave (port %u) link get failed: %s\n",
585 slave_port_id, rte_strerror(-ret));
589 if (link_props.link_status == ETH_LINK_UP) {
590 if (internals->active_slave_count == 0 &&
591 !internals->user_defined_primary_port)
592 bond_ethdev_primary_set(internals,
597 /* Add slave details to bonded device */
598 slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
600 slave_vlan_filter_set(bonded_port_id, slave_port_id);
607 rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id)
609 struct rte_eth_dev *bonded_eth_dev;
610 struct bond_dev_private *internals;
614 if (valid_bonded_port_id(bonded_port_id) != 0)
617 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
618 internals = bonded_eth_dev->data->dev_private;
620 if (valid_slave_port_id(internals, slave_port_id) != 0)
623 rte_spinlock_lock(&internals->lock);
625 retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);
627 rte_spinlock_unlock(&internals->lock);
633 __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
634 uint16_t slave_port_id)
636 struct rte_eth_dev *bonded_eth_dev;
637 struct bond_dev_private *internals;
638 struct rte_eth_dev *slave_eth_dev;
639 struct rte_flow_error flow_error;
640 struct rte_flow *flow;
643 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
644 internals = bonded_eth_dev->data->dev_private;
646 if (valid_slave_port_id(internals, slave_port_id) < 0)
649 /* first remove from active slave list */
650 slave_idx = find_slave_by_id(internals->active_slaves,
651 internals->active_slave_count, slave_port_id);
653 if (slave_idx < internals->active_slave_count)
654 deactivate_slave(bonded_eth_dev, slave_port_id);
657 /* now find in slave list */
658 for (i = 0; i < internals->slave_count; i++)
659 if (internals->slaves[i].port_id == slave_port_id) {
665 RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
666 internals->slave_count);
670 /* Un-register link status change callback with bonded device pointer as
672 rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
673 bond_ethdev_lsc_event_callback,
674 &rte_eth_devices[bonded_port_id].data->port_id);
676 /* Restore original MAC address of slave device */
677 rte_eth_dev_default_mac_addr_set(slave_port_id,
678 &(internals->slaves[slave_idx].persisted_mac_addr));
680 /* remove additional MAC addresses from the slave */
681 slave_remove_mac_addresses(bonded_eth_dev, slave_port_id);
684 * Remove bond device flows from slave device.
685 * Note: don't restore flow isolate mode.
687 TAILQ_FOREACH(flow, &internals->flow_list, next) {
688 if (flow->flows[slave_idx] != NULL) {
689 rte_flow_destroy(slave_port_id, flow->flows[slave_idx],
691 flow->flows[slave_idx] = NULL;
695 slave_eth_dev = &rte_eth_devices[slave_port_id];
696 slave_remove(internals, slave_eth_dev);
697 slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
699 /* first slave in the active list will be the primary by default,
700 * otherwise use first device in list */
701 if (internals->current_primary_port == slave_port_id) {
702 if (internals->active_slave_count > 0)
703 internals->current_primary_port = internals->active_slaves[0];
704 else if (internals->slave_count > 0)
705 internals->current_primary_port = internals->slaves[0].port_id;
707 internals->primary_port = 0;
708 mac_address_slaves_update(bonded_eth_dev);
711 if (internals->active_slave_count < 1) {
712 /* if no slaves are any longer attached to bonded device and MAC is not
713 * user defined then clear MAC of bonded device as it will be reset
714 * when a new slave is added */
715 if (internals->slave_count < 1 && !internals->user_defined_mac)
716 memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
717 sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
719 if (internals->slave_count == 0) {
720 internals->rx_offload_capa = 0;
721 internals->tx_offload_capa = 0;
722 internals->rx_queue_offload_capa = 0;
723 internals->tx_queue_offload_capa = 0;
724 internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
725 internals->reta_size = 0;
726 internals->candidate_max_rx_pktlen = 0;
727 internals->max_rx_pktlen = 0;
733 rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id)
735 struct rte_eth_dev *bonded_eth_dev;
736 struct bond_dev_private *internals;
739 if (valid_bonded_port_id(bonded_port_id) != 0)
742 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
743 internals = bonded_eth_dev->data->dev_private;
745 rte_spinlock_lock(&internals->lock);
747 retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);
749 rte_spinlock_unlock(&internals->lock);
755 rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode)
757 struct rte_eth_dev *bonded_eth_dev;
759 if (valid_bonded_port_id(bonded_port_id) != 0)
762 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
764 if (check_for_master_bonded_ethdev(bonded_eth_dev) != 0 &&
765 mode == BONDING_MODE_8023AD)
768 return bond_ethdev_mode_set(bonded_eth_dev, mode);
772 rte_eth_bond_mode_get(uint16_t bonded_port_id)
774 struct bond_dev_private *internals;
776 if (valid_bonded_port_id(bonded_port_id) != 0)
779 internals = rte_eth_devices[bonded_port_id].data->dev_private;
781 return internals->mode;
785 rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id)
787 struct bond_dev_private *internals;
789 if (valid_bonded_port_id(bonded_port_id) != 0)
792 internals = rte_eth_devices[bonded_port_id].data->dev_private;
794 if (valid_slave_port_id(internals, slave_port_id) != 0)
797 internals->user_defined_primary_port = 1;
798 internals->primary_port = slave_port_id;
800 bond_ethdev_primary_set(internals, slave_port_id);
806 rte_eth_bond_primary_get(uint16_t bonded_port_id)
808 struct bond_dev_private *internals;
810 if (valid_bonded_port_id(bonded_port_id) != 0)
813 internals = rte_eth_devices[bonded_port_id].data->dev_private;
815 if (internals->slave_count < 1)
818 return internals->current_primary_port;
822 rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
825 struct bond_dev_private *internals;
828 if (valid_bonded_port_id(bonded_port_id) != 0)
834 internals = rte_eth_devices[bonded_port_id].data->dev_private;
836 if (internals->slave_count > len)
839 for (i = 0; i < internals->slave_count; i++)
840 slaves[i] = internals->slaves[i].port_id;
842 return internals->slave_count;
846 rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
849 struct bond_dev_private *internals;
851 if (valid_bonded_port_id(bonded_port_id) != 0)
857 internals = rte_eth_devices[bonded_port_id].data->dev_private;
859 if (internals->active_slave_count > len)
862 memcpy(slaves, internals->active_slaves,
863 internals->active_slave_count * sizeof(internals->active_slaves[0]));
865 return internals->active_slave_count;
869 rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
870 struct rte_ether_addr *mac_addr)
872 struct rte_eth_dev *bonded_eth_dev;
873 struct bond_dev_private *internals;
875 if (valid_bonded_port_id(bonded_port_id) != 0)
878 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
879 internals = bonded_eth_dev->data->dev_private;
881 /* Set MAC Address of Bonded Device */
882 if (mac_address_set(bonded_eth_dev, mac_addr))
885 internals->user_defined_mac = 1;
887 /* Update all slave devices MACs*/
888 if (internals->slave_count > 0)
889 return mac_address_slaves_update(bonded_eth_dev);
895 rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
897 struct rte_eth_dev *bonded_eth_dev;
898 struct bond_dev_private *internals;
900 if (valid_bonded_port_id(bonded_port_id) != 0)
903 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
904 internals = bonded_eth_dev->data->dev_private;
906 internals->user_defined_mac = 0;
908 if (internals->slave_count > 0) {
910 /* Get the primary slave location based on the primary port
911 * number as, while slave_add(), we will keep the primary
912 * slave based on slave_count,but not based on the primary port.
914 for (slave_port = 0; slave_port < internals->slave_count;
916 if (internals->slaves[slave_port].port_id ==
917 internals->primary_port)
921 /* Set MAC Address of Bonded Device */
922 if (mac_address_set(bonded_eth_dev,
923 &internals->slaves[slave_port].persisted_mac_addr)
925 RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
928 /* Update all slave devices MAC addresses */
929 return mac_address_slaves_update(bonded_eth_dev);
931 /* No need to update anything as no slaves present */
936 rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy)
938 struct bond_dev_private *internals;
940 if (valid_bonded_port_id(bonded_port_id) != 0)
943 internals = rte_eth_devices[bonded_port_id].data->dev_private;
946 case BALANCE_XMIT_POLICY_LAYER2:
947 internals->balance_xmit_policy = policy;
948 internals->burst_xmit_hash = burst_xmit_l2_hash;
950 case BALANCE_XMIT_POLICY_LAYER23:
951 internals->balance_xmit_policy = policy;
952 internals->burst_xmit_hash = burst_xmit_l23_hash;
954 case BALANCE_XMIT_POLICY_LAYER34:
955 internals->balance_xmit_policy = policy;
956 internals->burst_xmit_hash = burst_xmit_l34_hash;
966 rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id)
968 struct bond_dev_private *internals;
970 if (valid_bonded_port_id(bonded_port_id) != 0)
973 internals = rte_eth_devices[bonded_port_id].data->dev_private;
975 return internals->balance_xmit_policy;
979 rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms)
981 struct bond_dev_private *internals;
983 if (valid_bonded_port_id(bonded_port_id) != 0)
986 internals = rte_eth_devices[bonded_port_id].data->dev_private;
987 internals->link_status_polling_interval_ms = internal_ms;
993 rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id)
995 struct bond_dev_private *internals;
997 if (valid_bonded_port_id(bonded_port_id) != 0)
1000 internals = rte_eth_devices[bonded_port_id].data->dev_private;
1002 return internals->link_status_polling_interval_ms;
1006 rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
1010 struct bond_dev_private *internals;
1012 if (valid_bonded_port_id(bonded_port_id) != 0)
1015 internals = rte_eth_devices[bonded_port_id].data->dev_private;
1016 internals->link_down_delay_ms = delay_ms;
1022 rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id)
1024 struct bond_dev_private *internals;
1026 if (valid_bonded_port_id(bonded_port_id) != 0)
1029 internals = rte_eth_devices[bonded_port_id].data->dev_private;
1031 return internals->link_down_delay_ms;
1035 rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
1038 struct bond_dev_private *internals;
1040 if (valid_bonded_port_id(bonded_port_id) != 0)
1043 internals = rte_eth_devices[bonded_port_id].data->dev_private;
1044 internals->link_up_delay_ms = delay_ms;
1050 rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)
1052 struct bond_dev_private *internals;
1054 if (valid_bonded_port_id(bonded_port_id) != 0)
1057 internals = rte_eth_devices[bonded_port_id].data->dev_private;
1059 return internals->link_up_delay_ms;