1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
8 #include <rte_malloc.h>
9 #include <rte_ethdev_driver.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_kvargs.h>
14 #include "rte_eth_bond.h"
15 #include "rte_eth_bond_private.h"
16 #include "rte_eth_bond_8023ad_private.h"
/*
 * Return 0 when @eth_dev is backed by the bonding PMD (its driver name
 * pointer is pmd_bond_drv.driver.name), non-zero otherwise.
 * NOTE(review): several interior lines (braces, the early-return for the
 * NULL-name case) are not visible in this view.
 */
19 check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
21 /* Check valid pointer */
22 if (eth_dev->device->driver->name == NULL)
25 /* return 0 if driver name matches */
26 return eth_dev->device->driver->name != pmd_bond_drv.driver.name;
/*
 * Validate that @port_id is a live ethdev port AND that it is a bonding
 * device. Returns -1 for an invalid port id, otherwise the result of
 * check_for_bonded_ethdev() (0 on success).
 */
30 valid_bonded_port_id(uint16_t port_id)
32 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
33 return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
/*
 * Check whether the bonded device @eth_dev has a slave that is itself a
 * bonded device (i.e. nested bonding). Iterates over the slave list and
 * probes each slave port with valid_bonded_port_id().
 * NOTE(review): the return statements for the match / no-match cases fall
 * outside this view; presumably non-zero means "no nested bond" — confirm.
 */
37 check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev)
40 struct bond_dev_private *internals;
42 if (check_for_bonded_ethdev(eth_dev) != 0)
45 internals = eth_dev->data->dev_private;
47 /* Check if any of slave devices is a bonded device */
48 for (i = 0; i < internals->slave_count; i++)
49 if (valid_bonded_port_id(internals->slaves[i].port_id) == 0)
/*
 * Validate @port_id as a candidate slave for a bond running in @mode.
 * Returns -1 for an invalid port id. In 802.3ad mode a slave must be a
 * physical (non-bonded) device, so a bonded slave is rejected with an
 * error log; the error return for that branch is outside this view.
 */
56 valid_slave_port_id(uint16_t port_id, uint8_t mode)
58 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
60 /* Verify that port_id refers to a non bonded port */
61 if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0 &&
62 mode == BONDING_MODE_8023AD) {
63 RTE_BOND_LOG(ERR, "Cannot add slave to bonded device in 802.3ad"
64 " mode as slave is also a bonded device, only "
65 "physical devices can be support in this mode.");
/*
 * Move slave @port_id onto the bond's active-slave list and perform the
 * mode-specific activation work:
 *  - 802.3ad: register the slave with the LACP state machine;
 *  - TLB/ALB: record the slave in tlb_slaves_order, then re-sort/refresh
 *    the mode's internal state after the slave is appended.
 * The slave is appended at active_slave_count, which is then incremented.
 */
73 activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
75 struct bond_dev_private *internals = eth_dev->data->dev_private;
76 uint8_t active_count = internals->active_slave_count;
78 if (internals->mode == BONDING_MODE_8023AD)
79 bond_mode_8023ad_activate_slave(eth_dev, port_id);
81 if (internals->mode == BONDING_MODE_TLB
82 || internals->mode == BONDING_MODE_ALB) {
84 internals->tlb_slaves_order[active_count] = port_id;
/* Guard against overflowing the fixed-size active_slaves[] array. */
87 RTE_ASSERT(internals->active_slave_count <
88 (RTE_DIM(internals->active_slaves) - 1));
90 internals->active_slaves[internals->active_slave_count] = port_id;
91 internals->active_slave_count++;
93 if (internals->mode == BONDING_MODE_TLB)
94 bond_tlb_activate_slave(internals);
95 if (internals->mode == BONDING_MODE_ALB)
96 bond_mode_alb_client_list_upd(eth_dev);
/*
 * Remove slave @port_id from the bond's active-slave list.
 * Mode-specific teardown first (802.3ad stops LACP and deregisters the
 * slave; TLB/ALB disable the TLB engine), then the slave is removed from
 * active_slaves[] by shifting the tail of the array down over it.
 * If the bonded device is started, the mode machinery is restarted at the
 * end so traffic continues on the remaining slaves.
 */
100 deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
103 struct bond_dev_private *internals = eth_dev->data->dev_private;
104 uint16_t active_count = internals->active_slave_count;
106 if (internals->mode == BONDING_MODE_8023AD) {
107 bond_mode_8023ad_stop(eth_dev);
108 bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
109 } else if (internals->mode == BONDING_MODE_TLB
110 || internals->mode == BONDING_MODE_ALB)
111 bond_tlb_disable(internals);
113 slave_pos = find_slave_by_id(internals->active_slaves, active_count,
116 /* If slave was not at the end of the list
117 * shift active slaves up active array list */
/*
 * NOTE(review): upstream decrements active_count before this memmove
 * (on a line not visible here), which makes the element count below
 * correct — confirm the decrement is present in the full file.
 */
118 if (slave_pos < active_count) {
120 memmove(internals->active_slaves + slave_pos,
121 internals->active_slaves + slave_pos + 1,
122 (active_count - slave_pos) *
123 sizeof(internals->active_slaves[0]));
126 RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
127 internals->active_slave_count = active_count;
129 if (eth_dev->data->dev_started) {
130 if (internals->mode == BONDING_MODE_8023AD) {
131 bond_mode_8023ad_start(eth_dev);
132 } else if (internals->mode == BONDING_MODE_TLB) {
133 bond_tlb_enable(internals);
134 } else if (internals->mode == BONDING_MODE_ALB) {
135 bond_tlb_enable(internals);
136 bond_mode_alb_client_list_upd(eth_dev);
/*
 * Public API: create a bonded vdev named @name with the given bonding
 * @mode on NUMA @socket_id. Builds a devargs string and delegates to
 * rte_vdev_init(), then resolves the new port id by name. The kvlist
 * allocated during probing is freed here so a later
 * bond_ethdev_configure() does not mistake it for user-supplied args.
 * NOTE(review): the return statements and error paths sit outside this
 * view; presumably returns the new port id on success, negative on error.
 */
142 rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
144 struct bond_dev_private *internals;
150 RTE_BOND_LOG(ERR, "Invalid name specified");
154 ret = snprintf(devargs, sizeof(devargs),
155 "driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id);
/* snprintf truncation or encoding error means the devargs are unusable. */
156 if (ret < 0 || ret >= (int)sizeof(devargs))
159 ret = rte_vdev_init(name, devargs);
163 ret = rte_eth_dev_get_port_by_name(name, &port_id);
167 * To make bond_ethdev_configure() happy we need to free the
168 * internals->kvlist here.
170 * Also see comment in bond_ethdev_configure().
172 internals = rte_eth_devices[port_id].data->dev_private;
173 rte_kvargs_free(internals->kvlist);
174 internals->kvlist = NULL;
/*
 * Public API: destroy the bonded vdev named @name.
 * Thin wrapper over rte_vdev_uninit(); returns its result directly.
 */
180 rte_eth_bond_free(const char *name)
182 return rte_vdev_uninit(name);
/*
 * Replay the bond's VLAN filter table onto slave @slave_port_id.
 * No-op when the bonded device does not have DEV_RX_OFFLOAD_VLAN_FILTER
 * enabled. Otherwise walks the vlan_filter_bmp bitmap slab by slab and
 * calls rte_eth_dev_vlan_filter() for every VLAN id whose bit is set,
 * stopping on the first error or once the scan wraps back to the start.
 */
186 slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
188 struct rte_eth_dev *bonded_eth_dev;
189 struct bond_dev_private *internals;
196 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
197 if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
198 DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
201 internals = bonded_eth_dev->data->dev_private;
202 found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab);
/* Each slab is a machine word of VLAN-presence bits starting at 'pos'. */
212 for (i = 0, mask = 1;
213 i < RTE_BITMAP_SLAB_BIT_SIZE;
215 if (unlikely(slab & mask)) {
216 uint16_t vlan_id = pos + i;
218 res = rte_eth_dev_vlan_filter(slave_port_id,
222 found = rte_bitmap_scan(internals->vlan_filter_bmp,
/* 'first' records the initial scan position so the loop stops after one full pass. */
224 } while (found && first != pos && res == 0);
/*
 * Replicate the bond's rte_flow state onto the slave at index @slave_id.
 * If flow isolation was configured on the bond, the slave is stopped and
 * put into the same isolation mode first. Every flow rule recorded in
 * internals->flow_list is then re-created on the slave; on the first
 * creation failure all rules already created on this slave are destroyed
 * again (rollback), keeping the slave clean.
 */
230 slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
232 struct rte_flow *flow;
233 struct rte_flow_error ferror;
234 uint16_t slave_port_id = internals->slaves[slave_id].port_id;
236 if (internals->flow_isolated_valid != 0) {
237 rte_eth_dev_stop(slave_port_id);
238 if (rte_flow_isolate(slave_port_id, internals->flow_isolated,
240 RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave"
241 " %d: %s", slave_id, ferror.message ?
242 ferror.message : "(no stated reason)");
246 TAILQ_FOREACH(flow, &internals->flow_list, next) {
247 flow->flows[slave_id] = rte_flow_create(slave_port_id,
252 if (flow->flows[slave_id] == NULL) {
253 RTE_BOND_LOG(ERR, "Cannot create flow for slave"
255 ferror.message ? ferror.message :
256 "(no stated reason)");
257 /* Destroy successful bond flows from the slave */
258 TAILQ_FOREACH(flow, &internals->flow_list, next) {
259 if (flow->flows[slave_id] != NULL) {
260 rte_flow_destroy(slave_port_id,
261 flow->flows[slave_id],
263 flow->flows[slave_id] = NULL;
/*
 * Seed the bond's Rx capabilities from the FIRST slave's dev_info @di:
 * RETA size, Rx offload capabilities, RSS offload types, candidate max
 * Rx packet length, and the default rxconf. Thresholds and deferred
 * start are then zeroed so slaves fall back to their own defaults.
 * Later slaves refine these values via the *_rx_next() counterpart.
 */
273 eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals,
274 const struct rte_eth_dev_info *di)
276 struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
278 internals->reta_size = di->reta_size;
280 /* Inherit Rx offload capabilities from the first slave device */
281 internals->rx_offload_capa = di->rx_offload_capa;
282 internals->rx_queue_offload_capa = di->rx_queue_offload_capa;
283 internals->flow_type_rss_offloads = di->flow_type_rss_offloads;
285 /* Inherit maximum Rx packet size from the first slave device */
286 internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
288 /* Inherit default Rx queue settings from the first slave device */
289 memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i));
292 * Turn off descriptor prefetch and writeback by default for all
293 * slave devices. Applications may tweak this setting if need be.
295 rxconf_i->rx_thresh.pthresh = 0;
296 rxconf_i->rx_thresh.hthresh = 0;
297 rxconf_i->rx_thresh.wthresh = 0;
299 /* Setting this to zero should effectively enable default values */
300 rxconf_i->rx_free_thresh = 0;
302 /* Disable deferred start by default for all slave devices */
303 rxconf_i->rx_deferred_start = 0;
/*
 * Seed the bond's Tx capabilities from the FIRST slave's dev_info @di:
 * Tx offload capabilities and the default txconf. Thresholds and
 * deferred start are zeroed so slaves use their own implicit defaults.
 * Later slaves refine these values via the *_tx_next() counterpart.
 */
307 eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals,
308 const struct rte_eth_dev_info *di)
310 struct rte_eth_txconf *txconf_i = &internals->default_txconf;
312 /* Inherit Tx offload capabilities from the first slave device */
313 internals->tx_offload_capa = di->tx_offload_capa;
314 internals->tx_queue_offload_capa = di->tx_queue_offload_capa;
316 /* Inherit default Tx queue settings from the first slave device */
317 memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i));
320 * Turn off descriptor prefetch and writeback by default for all
321 * slave devices. Applications may tweak this setting if need be.
323 txconf_i->tx_thresh.pthresh = 0;
324 txconf_i->tx_thresh.hthresh = 0;
325 txconf_i->tx_thresh.wthresh = 0;
328 * Setting these parameters to zero assumes that default
329 * values will be configured implicitly by slave devices.
331 txconf_i->tx_free_thresh = 0;
332 txconf_i->tx_rs_thresh = 0;
334 /* Disable deferred start by default for all slave devices */
335 txconf_i->tx_deferred_start = 0;
/*
 * Narrow the bond's Rx capabilities when a SUBSEQUENT slave is added:
 * offload capabilities are intersected (AND) so only commonly-supported
 * offloads remain, rx_drop_en is promoted if any slave defaults to it,
 * default Rx queue offloads are re-masked against the new intersection,
 * RETA size shrinks to the smaller value, and the candidate max Rx
 * packet length drops to the new slave's limit when the user has not
 * pinned max_rx_pktlen explicitly.
 */
339 eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,
340 const struct rte_eth_dev_info *di)
342 struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
343 const struct rte_eth_rxconf *rxconf = &di->default_rxconf;
345 internals->rx_offload_capa &= di->rx_offload_capa;
346 internals->rx_queue_offload_capa &= di->rx_queue_offload_capa;
347 internals->flow_type_rss_offloads &= di->flow_type_rss_offloads;
350 * If at least one slave device suggests enabling this
351 * setting by default, enable it for all slave devices
352 * since disabling it may not be necessarily supported.
354 if (rxconf->rx_drop_en == 1)
355 rxconf_i->rx_drop_en = 1;
358 * Adding a new slave device may cause some of previously inherited
359 * offloads to be withdrawn from the internal rx_queue_offload_capa
360 * value. Thus, the new internal value of default Rx queue offloads
361 * has to be masked by rx_queue_offload_capa to make sure that only
362 * commonly supported offloads are preserved from both the previous
363 * value and the value being inhereted from the new slave device.
365 rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
366 internals->rx_queue_offload_capa;
369 * RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
370 * the power of 2, the lower one is GCD
372 if (internals->reta_size > di->reta_size)
373 internals->reta_size = di->reta_size;
375 if (!internals->max_rx_pktlen &&
376 di->max_rx_pktlen < internals->candidate_max_rx_pktlen)
377 internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
/*
 * Narrow the bond's Tx capabilities when a SUBSEQUENT slave is added:
 * Tx offload capabilities are intersected (AND) and the default Tx
 * queue offloads are re-masked against the new intersection, mirroring
 * the Rx-side logic in eth_bond_slave_inherit_dev_info_rx_next().
 */
381 eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,
382 const struct rte_eth_dev_info *di)
384 struct rte_eth_txconf *txconf_i = &internals->default_txconf;
385 const struct rte_eth_txconf *txconf = &di->default_txconf;
387 internals->tx_offload_capa &= di->tx_offload_capa;
388 internals->tx_queue_offload_capa &= di->tx_queue_offload_capa;
391 * Adding a new slave device may cause some of previously inherited
392 * offloads to be withdrawn from the internal tx_queue_offload_capa
393 * value. Thus, the new internal value of default Tx queue offloads
394 * has to be masked by tx_queue_offload_capa to make sure that only
395 * commonly supported offloads are preserved from both the previous
396 * value and the value being inhereted from the new slave device.
398 txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
399 internals->tx_queue_offload_capa;
/*
 * Initialize the bond's descriptor limits by copying the FIRST slave's
 * limits wholesale; subsequent slaves tighten them via the *_next()
 * counterpart.
 */
403 eth_bond_slave_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim,
404 const struct rte_eth_desc_lim *slave_desc_lim)
406 memcpy(bond_desc_lim, slave_desc_lim, sizeof(*bond_desc_lim));
/*
 * Tighten the bond's descriptor limits against a SUBSEQUENT slave's:
 * nb_max takes the minimum, nb_min/nb_align the maximum, so the result
 * satisfies every slave. If the tightened range becomes empty
 * (min > max, or align > max) the combination is impossible and an
 * error is logged (the error return value sits outside this view).
 * Segment maxima of 0 mean "unspecified" and are skipped; otherwise
 * the smaller non-zero value wins.
 */
410 eth_bond_slave_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim,
411 const struct rte_eth_desc_lim *slave_desc_lim)
413 bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max,
414 slave_desc_lim->nb_max);
415 bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min,
416 slave_desc_lim->nb_min);
417 bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align,
418 slave_desc_lim->nb_align);
420 if (bond_desc_lim->nb_min > bond_desc_lim->nb_max ||
421 bond_desc_lim->nb_align > bond_desc_lim->nb_max) {
422 RTE_BOND_LOG(ERR, "Failed to inherit descriptor limits");
426 /* Treat maximum number of segments equal to 0 as unspecified */
427 if (slave_desc_lim->nb_seg_max != 0 &&
428 (bond_desc_lim->nb_seg_max == 0 ||
429 slave_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max))
430 bond_desc_lim->nb_seg_max = slave_desc_lim->nb_seg_max;
431 if (slave_desc_lim->nb_mtu_seg_max != 0 &&
432 (bond_desc_lim->nb_mtu_seg_max == 0 ||
433 slave_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max))
434 bond_desc_lim->nb_mtu_seg_max = slave_desc_lim->nb_mtu_seg_max;
/*
 * Core of slave addition; caller (rte_eth_bond_slave_add) must already
 * hold internals->lock — hence "_lock_free" (no locking done here).
 *
 * Steps visible in this view:
 *  1. Validate the slave port for the bond's mode and reject ports that
 *     are already slaves of some bond (RTE_ETH_DEV_BONDED_SLAVE flag).
 *  2. Reject slaves whose max_rx_pktlen is below the bond's requirement.
 *  3. slave_add() records the slave; its reta_size is stored so RETA can
 *     later be synchronized across slaves of differing table sizes.
 *  4. First slave only: inherit MAC (unless user-defined), link
 *     properties, primary port, queue counts, dev_info and descriptor
 *     limits; subsequent slaves intersect/tighten those instead.
 *  5. Mask the configured RSS hash by the common flow_type_rss_offloads,
 *     replicate rte_flow state and extra MAC addresses onto the slave.
 *  6. If the bond is started, configure the slave immediately (rolling
 *     back slave_count on failure).
 *  7. Mark the port as a bonded slave, refresh slave MACs, register the
 *     LSC callback, and — when started and link is up — possibly promote
 *     the slave to primary and activate it. Finally replay VLAN filters.
 *
 * NOTE(review): error-return lines and several closing braces fall
 * outside this view; the control flow above is inferred from the visible
 * statements only.
 */
440 __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
442 struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
443 struct bond_dev_private *internals;
444 struct rte_eth_link link_props;
445 struct rte_eth_dev_info dev_info;
447 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
448 internals = bonded_eth_dev->data->dev_private;
450 if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
453 slave_eth_dev = &rte_eth_devices[slave_port_id];
454 if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
455 RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
459 rte_eth_dev_info_get(slave_port_id, &dev_info);
460 if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
461 RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small",
466 slave_add(internals, slave_eth_dev);
468 /* We need to store slaves reta_size to be able to synchronize RETA for all
469 * slave devices even if its sizes are different.
471 internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;
473 if (internals->slave_count < 1) {
474 /* if MAC is not user defined then use MAC of first slave add to
476 if (!internals->user_defined_mac) {
477 if (mac_address_set(bonded_eth_dev,
478 slave_eth_dev->data->mac_addrs)) {
479 RTE_BOND_LOG(ERR, "Failed to set MAC address");
484 /* Inherit eth dev link properties from first slave */
485 link_properties_set(bonded_eth_dev,
486 &(slave_eth_dev->data->dev_link));
488 /* Make primary slave */
489 internals->primary_port = slave_port_id;
490 internals->current_primary_port = slave_port_id;
492 /* Inherit queues settings from first slave */
493 internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
494 internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
496 eth_bond_slave_inherit_dev_info_rx_first(internals, &dev_info);
497 eth_bond_slave_inherit_dev_info_tx_first(internals, &dev_info);
499 eth_bond_slave_inherit_desc_lim_first(&internals->rx_desc_lim,
500 &dev_info.rx_desc_lim);
501 eth_bond_slave_inherit_desc_lim_first(&internals->tx_desc_lim,
502 &dev_info.tx_desc_lim);
506 eth_bond_slave_inherit_dev_info_rx_next(internals, &dev_info);
507 eth_bond_slave_inherit_dev_info_tx_next(internals, &dev_info);
509 ret = eth_bond_slave_inherit_desc_lim_next(
510 &internals->rx_desc_lim, &dev_info.rx_desc_lim);
514 ret = eth_bond_slave_inherit_desc_lim_next(
515 &internals->tx_desc_lim, &dev_info.tx_desc_lim);
520 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
521 internals->flow_type_rss_offloads;
523 if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) {
524 RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d",
529 /* Add additional MAC addresses to the slave */
530 if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) {
531 RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu",
536 internals->slave_count++;
538 if (bonded_eth_dev->data->dev_started) {
539 if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
540 internals->slave_count--;
541 RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
547 /* Add slave details to bonded device */
548 slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
550 /* Update all slave devices MACs */
551 mac_address_slaves_update(bonded_eth_dev);
553 /* Register link status change callback with bonded device pointer as
555 rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
556 bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);
558 /* If bonded device is started then we can add the slave to our active
560 if (bonded_eth_dev->data->dev_started) {
561 rte_eth_link_get_nowait(slave_port_id, &link_props);
563 if (link_props.link_status == ETH_LINK_UP) {
564 if (internals->active_slave_count == 0 &&
565 !internals->user_defined_primary_port)
566 bond_ethdev_primary_set(internals,
571 slave_vlan_filter_set(bonded_port_id, slave_port_id);
/*
 * Public API: add @slave_port_id as a slave of bonded port
 * @bonded_port_id. Validates the bonded port, then performs the real
 * work in __eth_bond_slave_add_lock_free() under internals->lock.
 */
578 rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id)
580 struct rte_eth_dev *bonded_eth_dev;
581 struct bond_dev_private *internals;
585 /* Verify that port id's are valid bonded and slave ports */
586 if (valid_bonded_port_id(bonded_port_id) != 0)
589 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
590 internals = bonded_eth_dev->data->dev_private;
592 rte_spinlock_lock(&internals->lock);
594 retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);
596 rte_spinlock_unlock(&internals->lock);
/*
 * Core of slave removal; caller (rte_eth_bond_slave_remove) must already
 * hold internals->lock — hence "_lock_free" (no locking done here).
 *
 * Steps visible in this view:
 *  1. Deactivate the slave if it is currently on the active list.
 *  2. Locate it in the full slave list (error if absent).
 *  3. Unregister the LSC callback, restore the slave's persisted MAC,
 *     and remove the extra MAC addresses added by the bond.
 *  4. Destroy the bond's flow rules on the slave (isolation mode is
 *     deliberately NOT restored — see inline comment).
 *  5. slave_remove() drops it from the list and the BONDED_SLAVE flag
 *     is cleared so the port can be bonded again later.
 *  6. Re-elect the primary if the removed slave was primary.
 *  7. When no slaves remain, clear the inherited MAC (unless user
 *     defined) and reset all inherited capability fields to their
 *     "empty bond" defaults.
 */
602 __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
603 uint16_t slave_port_id)
605 struct rte_eth_dev *bonded_eth_dev;
606 struct bond_dev_private *internals;
607 struct rte_eth_dev *slave_eth_dev;
608 struct rte_flow_error flow_error;
609 struct rte_flow *flow;
612 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
613 internals = bonded_eth_dev->data->dev_private;
615 if (valid_slave_port_id(slave_port_id, internals->mode) < 0)
618 /* first remove from active slave list */
619 slave_idx = find_slave_by_id(internals->active_slaves,
620 internals->active_slave_count, slave_port_id);
622 if (slave_idx < internals->active_slave_count)
623 deactivate_slave(bonded_eth_dev, slave_port_id)
626 /* now find in slave list */
627 for (i = 0; i < internals->slave_count; i++)
628 if (internals->slaves[i].port_id == slave_port_id) {
634 RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
635 internals->slave_count);
639 /* Un-register link status change callback with bonded device pointer as
641 rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
642 bond_ethdev_lsc_event_callback,
643 &rte_eth_devices[bonded_port_id].data->port_id);
645 /* Restore original MAC address of slave device */
646 rte_eth_dev_default_mac_addr_set(slave_port_id,
647 &(internals->slaves[slave_idx].persisted_mac_addr));
649 /* remove additional MAC addresses from the slave */
650 slave_remove_mac_addresses(bonded_eth_dev, slave_port_id);
653 * Remove bond device flows from slave device.
654 * Note: don't restore flow isolate mode.
656 TAILQ_FOREACH(flow, &internals->flow_list, next) {
657 if (flow->flows[slave_idx] != NULL) {
658 rte_flow_destroy(slave_port_id, flow->flows[slave_idx],
660 flow->flows[slave_idx] = NULL;
664 slave_eth_dev = &rte_eth_devices[slave_port_id];
665 slave_remove(internals, slave_eth_dev);
666 slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
668 /* first slave in the active list will be the primary by default,
669 * otherwise use first device in list */
670 if (internals->current_primary_port == slave_port_id) {
671 if (internals->active_slave_count > 0)
672 internals->current_primary_port = internals->active_slaves[0];
673 else if (internals->slave_count > 0)
674 internals->current_primary_port = internals->slaves[0].port_id;
676 internals->primary_port = 0;
679 if (internals->active_slave_count < 1) {
680 /* if no slaves are any longer attached to bonded device and MAC is not
681 * user defined then clear MAC of bonded device as it will be reset
682 * when a new slave is added */
683 if (internals->slave_count < 1 && !internals->user_defined_mac)
684 memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
685 sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
687 if (internals->slave_count == 0) {
688 internals->rx_offload_capa = 0;
689 internals->tx_offload_capa = 0;
690 internals->rx_queue_offload_capa = 0;
691 internals->tx_queue_offload_capa = 0;
/* With no slaves, advertise the full RSS mask again until a new slave narrows it. */
692 internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
693 internals->reta_size = 0;
694 internals->candidate_max_rx_pktlen = 0;
695 internals->max_rx_pktlen = 0;
/*
 * Public API: remove @slave_port_id from bonded port @bonded_port_id.
 * Validates the bonded port, then performs the real work in
 * __eth_bond_slave_remove_lock_free() under internals->lock.
 */
701 rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id)
703 struct rte_eth_dev *bonded_eth_dev;
704 struct bond_dev_private *internals;
707 if (valid_bonded_port_id(bonded_port_id) != 0)
710 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
711 internals = bonded_eth_dev->data->dev_private;
713 rte_spinlock_lock(&internals->lock);
715 retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);
717 rte_spinlock_unlock(&internals->lock);
/*
 * Public API: change the bonding mode of @bonded_port_id.
 * 802.3ad mode is refused when any current slave is itself a bonded
 * device (nested bonding is unsupported for LACP); otherwise delegates
 * to bond_ethdev_mode_set().
 */
723 rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode)
725 struct rte_eth_dev *bonded_eth_dev;
727 if (valid_bonded_port_id(bonded_port_id) != 0)
730 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
732 if (check_for_master_bonded_ethdev(bonded_eth_dev) != 0 &&
733 mode == BONDING_MODE_8023AD)
736 return bond_ethdev_mode_set(bonded_eth_dev, mode);
/*
 * Public API: return the current bonding mode of @bonded_port_id,
 * or an error (line not visible here) for an invalid bonded port.
 */
740 rte_eth_bond_mode_get(uint16_t bonded_port_id)
742 struct bond_dev_private *internals;
744 if (valid_bonded_port_id(bonded_port_id) != 0)
747 internals = rte_eth_devices[bonded_port_id].data->dev_private;
749 return internals->mode;
/*
 * Public API: pin @slave_port_id as the user-defined primary slave of
 * @bonded_port_id. Sets the user_defined_primary_port flag so automatic
 * primary re-election is suppressed, then applies the change via
 * bond_ethdev_primary_set().
 */
753 rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id)
755 struct bond_dev_private *internals;
757 if (valid_bonded_port_id(bonded_port_id) != 0)
760 internals = rte_eth_devices[bonded_port_id].data->dev_private;
762 if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
765 internals->user_defined_primary_port = 1;
766 internals->primary_port = slave_port_id;
768 bond_ethdev_primary_set(internals, slave_port_id);
/*
 * Public API: return the current primary slave port of @bonded_port_id.
 * Fails (return line not visible here) when the bonded port is invalid
 * or the bond has no slaves.
 */
774 rte_eth_bond_primary_get(uint16_t bonded_port_id)
776 struct bond_dev_private *internals;
778 if (valid_bonded_port_id(bonded_port_id) != 0)
781 internals = rte_eth_devices[bonded_port_id].data->dev_private;
783 if (internals->slave_count < 1)
786 return internals->current_primary_port;
/*
 * Public API: copy the port ids of all slaves of @bonded_port_id into
 * @slaves (capacity @len). Returns the slave count on success; the
 * error returns for invalid port / too-small buffer are outside this view.
 */
790 rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
793 struct bond_dev_private *internals;
796 if (valid_bonded_port_id(bonded_port_id) != 0)
802 internals = rte_eth_devices[bonded_port_id].data->dev_private;
804 if (internals->slave_count > len)
807 for (i = 0; i < internals->slave_count; i++)
808 slaves[i] = internals->slaves[i].port_id;
810 return internals->slave_count;
/*
 * Public API: copy the port ids of the ACTIVE slaves of @bonded_port_id
 * into @slaves (capacity @len) with a single memcpy. Returns the active
 * slave count; error returns for invalid port / too-small buffer are
 * outside this view.
 */
814 rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
817 struct bond_dev_private *internals;
819 if (valid_bonded_port_id(bonded_port_id) != 0)
825 internals = rte_eth_devices[bonded_port_id].data->dev_private;
827 if (internals->active_slave_count > len)
830 memcpy(slaves, internals->active_slaves,
831 internals->active_slave_count * sizeof(internals->active_slaves[0]));
833 return internals->active_slave_count;
/*
 * Public API: set a user-defined MAC address on the bonded device.
 * Marks user_defined_mac so slave additions/removals no longer override
 * it, and immediately propagates the new address to all current slaves.
 */
837 rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
838 struct ether_addr *mac_addr)
840 struct rte_eth_dev *bonded_eth_dev;
841 struct bond_dev_private *internals;
843 if (valid_bonded_port_id(bonded_port_id) != 0)
846 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
847 internals = bonded_eth_dev->data->dev_private;
849 /* Set MAC Address of Bonded Device */
850 if (mac_address_set(bonded_eth_dev, mac_addr))
853 internals->user_defined_mac = 1;
855 /* Update all slave devices MACs*/
856 if (internals->slave_count > 0)
857 return mac_address_slaves_update(bonded_eth_dev);
/*
 * Public API: drop the user-defined MAC and fall back to the primary
 * slave's persisted MAC address. Searches the slave list for the entry
 * matching primary_port (slave_add() order does not track primary, so a
 * linear scan is required), re-applies that MAC to the bonded device,
 * and pushes the update to all slaves. With no slaves attached there is
 * nothing to restore.
 */
863 rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
865 struct rte_eth_dev *bonded_eth_dev;
866 struct bond_dev_private *internals;
868 if (valid_bonded_port_id(bonded_port_id) != 0)
871 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
872 internals = bonded_eth_dev->data->dev_private;
874 internals->user_defined_mac = 0;
876 if (internals->slave_count > 0) {
878 /* Get the primary slave location based on the primary port
879 * number as, while slave_add(), we will keep the primary
880 * slave based on slave_count,but not based on the primary port.
882 for (slave_port = 0; slave_port < internals->slave_count;
884 if (internals->slaves[slave_port].port_id ==
885 internals->primary_port)
889 /* Set MAC Address of Bonded Device */
890 if (mac_address_set(bonded_eth_dev,
891 &internals->slaves[slave_port].persisted_mac_addr)
893 RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
896 /* Update all slave devices MAC addresses */
897 return mac_address_slaves_update(bonded_eth_dev);
899 /* No need to update anything as no slaves present */
/*
 * Public API: select the transmit hashing policy for balance mode.
 * Stores the policy and binds the matching burst hash function:
 * LAYER2 -> L2 (MAC) hash, LAYER23 -> L2+L3, LAYER34 -> L3+L4.
 * The switch header, default case, and returns are outside this view.
 */
904 rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy)
906 struct bond_dev_private *internals;
908 if (valid_bonded_port_id(bonded_port_id) != 0)
911 internals = rte_eth_devices[bonded_port_id].data->dev_private;
914 case BALANCE_XMIT_POLICY_LAYER2:
915 internals->balance_xmit_policy = policy;
916 internals->burst_xmit_hash = burst_xmit_l2_hash;
918 case BALANCE_XMIT_POLICY_LAYER23:
919 internals->balance_xmit_policy = policy;
920 internals->burst_xmit_hash = burst_xmit_l23_hash;
922 case BALANCE_XMIT_POLICY_LAYER34:
923 internals->balance_xmit_policy = policy;
924 internals->burst_xmit_hash = burst_xmit_l34_hash;
/*
 * Public API: return the currently configured transmit hashing policy,
 * or an error (line not visible here) for an invalid bonded port.
 */
934 rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id)
936 struct bond_dev_private *internals;
938 if (valid_bonded_port_id(bonded_port_id) != 0)
941 internals = rte_eth_devices[bonded_port_id].data->dev_private;
943 return internals->balance_xmit_policy;
/*
 * Public API: set the link-status polling interval (milliseconds) used
 * to monitor slave links. The success return is outside this view.
 */
947 rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms)
949 struct bond_dev_private *internals;
951 if (valid_bonded_port_id(bonded_port_id) != 0)
954 internals = rte_eth_devices[bonded_port_id].data->dev_private;
955 internals->link_status_polling_interval_ms = internal_ms;
/*
 * Public API: return the link-status polling interval (milliseconds),
 * or an error (line not visible here) for an invalid bonded port.
 */
961 rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id)
963 struct bond_dev_private *internals;
965 if (valid_bonded_port_id(bonded_port_id) != 0)
968 internals = rte_eth_devices[bonded_port_id].data->dev_private;
970 return internals->link_status_polling_interval_ms;
/*
 * Public API: set the delay (milliseconds) before a slave's link-down
 * event is propagated to the bonded device. Success return is outside
 * this view.
 */
974 rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
978 struct bond_dev_private *internals;
980 if (valid_bonded_port_id(bonded_port_id) != 0)
983 internals = rte_eth_devices[bonded_port_id].data->dev_private;
984 internals->link_down_delay_ms = delay_ms;
/*
 * Public API: return the configured link-down propagation delay (ms),
 * or an error (line not visible here) for an invalid bonded port.
 */
990 rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id)
992 struct bond_dev_private *internals;
994 if (valid_bonded_port_id(bonded_port_id) != 0)
997 internals = rte_eth_devices[bonded_port_id].data->dev_private;
999 return internals->link_down_delay_ms;
/*
 * Public API: set the delay (milliseconds) before a slave's link-up
 * event is propagated to the bonded device. Success return is outside
 * this view.
 */
1003 rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
1006 struct bond_dev_private *internals;
1008 if (valid_bonded_port_id(bonded_port_id) != 0)
1011 internals = rte_eth_devices[bonded_port_id].data->dev_private;
1012 internals->link_up_delay_ms = delay_ms;
/*
 * Public API: return the configured link-up propagation delay (ms),
 * or an error (line not visible here) for an invalid bonded port.
 */
1018 rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)
1020 struct bond_dev_private *internals;
1022 if (valid_bonded_port_id(bonded_port_id) != 0)
1025 internals = rte_eth_devices[bonded_port_id].data->dev_private;
1027 return internals->link_up_delay_ms;