1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
8 #include <rte_malloc.h>
9 #include <rte_ethdev_driver.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_kvargs.h>
14 #include "rte_eth_bond.h"
15 #include "rte_eth_bond_private.h"
16 #include "rte_eth_bond_8023ad_private.h"
/*
 * Check whether eth_dev is a device created by this bonding PMD.
 * Returns 0 if it is a bonded ethdev, non-zero otherwise (negative
 * when the device/driver pointer chain is incomplete).
 */
19 check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
21 /* Check valid pointer */
22 if (eth_dev == NULL ||
23 eth_dev->device == NULL ||
24 eth_dev->device->driver == NULL ||
25 eth_dev->device->driver->name == NULL)
/* Pointer (not strcmp) comparison is deliberate: a device created by
 * this PMD references the very same driver-name string as pmd_bond_drv,
 * so identity of the pointers identifies the driver. */
28 /* return 0 if driver name matches */
29 return eth_dev->device->driver->name != pmd_bond_drv.driver.name;
/*
 * Validate that port_id refers to a live ethdev port AND that the port
 * is a bonded device. Returns 0 on success, negative otherwise.
 */
33 valid_bonded_port_id(uint16_t port_id)
35 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
36 return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
/*
 * Detect "stacked" bonding: returns 0 only when eth_dev is itself a
 * bonded device AND at least one of its slaves is also a bonded device.
 * Callers use this to reject such stacking for 802.3ad mode.
 */
40 check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev)
43 struct bond_dev_private *internals;
45 if (check_for_bonded_ethdev(eth_dev) != 0)
48 internals = eth_dev->data->dev_private;
50 /* Check if any of slave devices is a bonded device */
51 for (i = 0; i < internals->slave_count; i++)
52 if (valid_bonded_port_id(internals->slaves[i].port_id) == 0)
/*
 * Validate port_id as a candidate slave for the given bonding mode.
 * A port that is itself a bonded device is rejected when mode is
 * 802.3ad, because only physical devices are supported there.
 * Returns 0 if the port is acceptable, negative otherwise.
 */
59 valid_slave_port_id(uint16_t port_id, uint8_t mode)
61 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
63 /* Verify that port_id refers to a non bonded port */
64 if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0 &&
65 mode == BONDING_MODE_8023AD) {
66 RTE_BOND_LOG(ERR, "Cannot add slave to bonded device in 802.3ad"
67 " mode as slave is also a bonded device, only "
68 "physical devices can be support in this mode.");
/*
 * Append port_id to the bonded device's active-slave array and run the
 * mode-specific activation hooks (802.3ad state machine, TLB slave
 * ordering, ALB client list refresh).
 * NOTE(review): no locking is visible here -- presumably the caller
 * serializes access to internals; confirm against call sites.
 */
76 activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
78 struct bond_dev_private *internals = eth_dev->data->dev_private;
79 uint8_t active_count = internals->active_slave_count;
81 if (internals->mode == BONDING_MODE_8023AD)
82 bond_mode_8023ad_activate_slave(eth_dev, port_id);
84 if (internals->mode == BONDING_MODE_TLB
85 || internals->mode == BONDING_MODE_ALB) {
/* TLB/ALB keep their own transmit-order table, indexed by the
 * pre-increment active count so the new slave goes last. */
87 internals->tlb_slaves_order[active_count] = port_id;
/* Guard against overflowing the fixed-size active_slaves array. */
90 RTE_ASSERT(internals->active_slave_count <
91 (RTE_DIM(internals->active_slaves) - 1));
93 internals->active_slaves[internals->active_slave_count] = port_id;
94 internals->active_slave_count++;
96 if (internals->mode == BONDING_MODE_TLB)
97 bond_tlb_activate_slave(internals);
98 if (internals->mode == BONDING_MODE_ALB)
99 bond_mode_alb_client_list_upd(eth_dev);
/*
 * Remove port_id from the bonded device's active-slave array and run
 * the mode-specific teardown (stop 802.3ad on the bond and deactivate
 * the slave's state machine; disable TLB for TLB/ALB). If the bonded
 * device is still started, restart the mode machinery afterwards.
 */
103 deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
106 struct bond_dev_private *internals = eth_dev->data->dev_private;
107 uint16_t active_count = internals->active_slave_count;
109 if (internals->mode == BONDING_MODE_8023AD) {
110 bond_mode_8023ad_stop(eth_dev);
111 bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
112 } else if (internals->mode == BONDING_MODE_TLB
113 || internals->mode == BONDING_MODE_ALB)
114 bond_tlb_disable(internals);
116 slave_pos = find_slave_by_id(internals->active_slaves, active_count,
119 /* If slave was not at the end of the list
120 * shift active slaves up active array list */
121 if (slave_pos < active_count) {
/* NOTE(review): as shown, (active_count - slave_pos) elements would
 * copy one entry past the last valid slot; this only balances if
 * active_count is decremented before the memmove (a line not visible
 * in this view) -- confirm the decrement is present. */
123 memmove(internals->active_slaves + slave_pos,
124 internals->active_slaves + slave_pos + 1,
125 (active_count - slave_pos) *
126 sizeof(internals->active_slaves[0]));
129 RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
130 internals->active_slave_count = active_count;
/* Re-arm the active mode only if the bond as a whole is running. */
132 if (eth_dev->data->dev_started) {
133 if (internals->mode == BONDING_MODE_8023AD) {
134 bond_mode_8023ad_start(eth_dev);
135 } else if (internals->mode == BONDING_MODE_TLB) {
136 bond_tlb_enable(internals);
137 } else if (internals->mode == BONDING_MODE_ALB) {
138 bond_tlb_enable(internals);
139 bond_mode_alb_client_list_upd(eth_dev);
/*
 * Create a bonded vdev called 'name' with the requested mode on
 * socket_id. Implemented by composing a devargs string and probing it
 * through rte_vdev_init(), then resolving the resulting port id.
 * The kvlist left over from vdev probing is freed here so that a later
 * bond_ethdev_configure() does not re-apply stale kvargs.
 */
145 rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
147 struct bond_dev_private *internals;
153 RTE_BOND_LOG(ERR, "Invalid name specified");
/* Guard against devargs truncation as well as snprintf failure. */
157 ret = snprintf(devargs, sizeof(devargs),
158 "driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id);
159 if (ret < 0 || ret >= (int)sizeof(devargs))
162 ret = rte_vdev_init(name, devargs);
166 ret = rte_eth_dev_get_port_by_name(name, &port_id);
170 * To make bond_ethdev_configure() happy we need to free the
171 * internals->kvlist here.
173 * Also see comment in bond_ethdev_configure().
175 internals = rte_eth_devices[port_id].data->dev_private;
176 rte_kvargs_free(internals->kvlist);
177 internals->kvlist = NULL;
/*
 * Destroy a bonded device previously created with rte_eth_bond_create()
 * by uninitializing the underlying vdev. Returns rte_vdev_uninit()'s
 * result.
 */
183 rte_eth_bond_free(const char *name)
185 return rte_vdev_uninit(name);
/*
 * Replay the bonded device's VLAN filter table onto a slave port.
 * No-op unless DEV_RX_OFFLOAD_VLAN_FILTER is enabled on the bonded
 * device. Scans the vlan_filter_bmp bitmap slab by slab and programs
 * every set VLAN id onto the slave; the loop ends when the scan wraps
 * back to the first slab position or a filter call fails (res != 0).
 */
189 slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
191 struct rte_eth_dev *bonded_eth_dev;
192 struct bond_dev_private *internals;
199 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
200 if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
201 DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
204 internals = bonded_eth_dev->data->dev_private;
205 found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab);
/* Each slab is a machine word of VLAN-presence bits starting at 'pos';
 * walk its bits and translate set bits into VLAN ids. */
215 for (i = 0, mask = 1;
216 i < RTE_BITMAP_SLAB_BIT_SIZE;
218 if (unlikely(slab & mask)) {
219 uint16_t vlan_id = pos + i;
221 res = rte_eth_dev_vlan_filter(slave_port_id,
225 found = rte_bitmap_scan(internals->vlan_filter_bmp,
227 } while (found && first != pos && res == 0);
/*
 * Prepare a newly added slave for rte_flow usage: if flow isolation was
 * ever configured on the bond, stop the slave and apply the same
 * isolation mode, then re-create every flow from the bond's flow_list
 * on the slave. On a failed creation, the flows already created on
 * that slave by this call are destroyed again. Returns non-zero on
 * failure (exact value comes from lines elided in this view).
 */
233 slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
235 struct rte_flow *flow;
236 struct rte_flow_error ferror;
237 uint16_t slave_port_id = internals->slaves[slave_id].port_id;
239 if (internals->flow_isolated_valid != 0) {
/* The port is stopped before rte_flow_isolate() is applied. */
240 rte_eth_dev_stop(slave_port_id);
241 if (rte_flow_isolate(slave_port_id, internals->flow_isolated,
243 RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave"
244 " %d: %s", slave_id, ferror.message ?
245 ferror.message : "(no stated reason)");
249 TAILQ_FOREACH(flow, &internals->flow_list, next) {
250 flow->flows[slave_id] = rte_flow_create(slave_port_id,
255 if (flow->flows[slave_id] == NULL) {
256 RTE_BOND_LOG(ERR, "Cannot create flow for slave"
258 ferror.message ? ferror.message :
259 "(no stated reason)");
260 /* Destroy successful bond flows from the slave */
261 TAILQ_FOREACH(flow, &internals->flow_list, next) {
262 if (flow->flows[slave_id] != NULL) {
263 rte_flow_destroy(slave_port_id,
264 flow->flows[slave_id],
266 flow->flows[slave_id] = NULL;
/*
 * Seed the bonded device's aggregate Rx properties from the FIRST
 * slave's dev_info: offload capabilities, RSS offload types, RETA
 * size, candidate max Rx packet length and the default rxconf.
 * Threshold/free-threshold/deferred-start fields are then zeroed so
 * each slave driver falls back to its own defaults.
 */
276 eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals,
277 const struct rte_eth_dev_info *di)
279 struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
281 internals->reta_size = di->reta_size;
283 /* Inherit Rx offload capabilities from the first slave device */
284 internals->rx_offload_capa = di->rx_offload_capa;
285 internals->rx_queue_offload_capa = di->rx_queue_offload_capa;
286 internals->flow_type_rss_offloads = di->flow_type_rss_offloads;
288 /* Inherit maximum Rx packet size from the first slave device */
289 internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
291 /* Inherit default Rx queue settings from the first slave device */
292 memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i));
295 * Turn off descriptor prefetch and writeback by default for all
296 * slave devices. Applications may tweak this setting if need be.
298 rxconf_i->rx_thresh.pthresh = 0;
299 rxconf_i->rx_thresh.hthresh = 0;
300 rxconf_i->rx_thresh.wthresh = 0;
302 /* Setting this to zero should effectively enable default values */
303 rxconf_i->rx_free_thresh = 0;
305 /* Disable deferred start by default for all slave devices */
306 rxconf_i->rx_deferred_start = 0;
/*
 * Seed the bonded device's aggregate Tx properties from the FIRST
 * slave's dev_info: offload capabilities and the default txconf.
 * Threshold and threshold-like fields are zeroed afterwards so slave
 * drivers apply their own implicit defaults.
 */
310 eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals,
311 const struct rte_eth_dev_info *di)
313 struct rte_eth_txconf *txconf_i = &internals->default_txconf;
315 /* Inherit Tx offload capabilities from the first slave device */
316 internals->tx_offload_capa = di->tx_offload_capa;
317 internals->tx_queue_offload_capa = di->tx_queue_offload_capa;
319 /* Inherit default Tx queue settings from the first slave device */
320 memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i));
323 * Turn off descriptor prefetch and writeback by default for all
324 * slave devices. Applications may tweak this setting if need be.
326 txconf_i->tx_thresh.pthresh = 0;
327 txconf_i->tx_thresh.hthresh = 0;
328 txconf_i->tx_thresh.wthresh = 0;
331 * Setting these parameters to zero assumes that default
332 * values will be configured implicitly by slave devices.
334 txconf_i->tx_free_thresh = 0;
335 txconf_i->tx_rs_thresh = 0;
337 /* Disable deferred start by default for all slave devices */
338 txconf_i->tx_deferred_start = 0;
/*
 * Fold a SUBSEQUENT slave's Rx dev_info into the bonded aggregate:
 * capability sets are intersected (&=), rx_drop_en becomes sticky-on
 * if any slave defaults to it, default queue offloads are unioned and
 * then masked by the common queue capability, RETA size keeps the
 * smaller value, and the candidate max Rx packet length may shrink.
 */
342 eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,
343 const struct rte_eth_dev_info *di)
345 struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
346 const struct rte_eth_rxconf *rxconf = &di->default_rxconf;
348 internals->rx_offload_capa &= di->rx_offload_capa;
349 internals->rx_queue_offload_capa &= di->rx_queue_offload_capa;
350 internals->flow_type_rss_offloads &= di->flow_type_rss_offloads;
353 * If at least one slave device suggests enabling this
354 * setting by default, enable it for all slave devices
355 * since disabling it may not be necessarily supported.
357 if (rxconf->rx_drop_en == 1)
358 rxconf_i->rx_drop_en = 1;
361 * Adding a new slave device may cause some of previously inherited
362 * offloads to be withdrawn from the internal rx_queue_offload_capa
363 * value. Thus, the new internal value of default Rx queue offloads
364 * has to be masked by rx_queue_offload_capa to make sure that only
365 * commonly supported offloads are preserved from both the previous
366 * value and the value being inhereted from the new slave device.
368 rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
369 internals->rx_queue_offload_capa;
372 * RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
373 * the power of 2, the lower one is GCD
375 if (internals->reta_size > di->reta_size)
376 internals->reta_size = di->reta_size;
/* Only lower the candidate when the user has not pinned a
 * max_rx_pktlen of their own (max_rx_pktlen == 0 means "unset"). */
378 if (!internals->max_rx_pktlen &&
379 di->max_rx_pktlen < internals->candidate_max_rx_pktlen)
380 internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
/*
 * Fold a SUBSEQUENT slave's Tx dev_info into the bonded aggregate:
 * capability sets are intersected, and default queue offloads are
 * unioned then masked by the (possibly shrunken) common Tx queue
 * offload capability set.
 */
384 eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,
385 const struct rte_eth_dev_info *di)
387 struct rte_eth_txconf *txconf_i = &internals->default_txconf;
388 const struct rte_eth_txconf *txconf = &di->default_txconf;
390 internals->tx_offload_capa &= di->tx_offload_capa;
391 internals->tx_queue_offload_capa &= di->tx_queue_offload_capa;
394 * Adding a new slave device may cause some of previously inherited
395 * offloads to be withdrawn from the internal tx_queue_offload_capa
396 * value. Thus, the new internal value of default Tx queue offloads
397 * has to be masked by tx_queue_offload_capa to make sure that only
398 * commonly supported offloads are preserved from both the previous
399 * value and the value being inhereted from the new slave device.
401 txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
402 internals->tx_queue_offload_capa;
/*
 * Initialize the bond's descriptor limits verbatim from the FIRST
 * slave; later slaves tighten them via *_desc_lim_next().
 */
406 eth_bond_slave_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim,
407 const struct rte_eth_desc_lim *slave_desc_lim)
409 memcpy(bond_desc_lim, slave_desc_lim, sizeof(*bond_desc_lim));
/*
 * Merge a subsequent slave's descriptor limits into the bond's:
 * nb_max can only shrink, nb_min and nb_align can only grow. Fails
 * (via the error path after the log) if the merged range becomes
 * empty or the alignment exceeds the maximum. A segment limit of 0
 * means "unspecified" and never lowers the aggregate.
 */
413 eth_bond_slave_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim,
414 const struct rte_eth_desc_lim *slave_desc_lim)
416 bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max,
417 slave_desc_lim->nb_max);
418 bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min,
419 slave_desc_lim->nb_min);
420 bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align,
421 slave_desc_lim->nb_align);
423 if (bond_desc_lim->nb_min > bond_desc_lim->nb_max ||
424 bond_desc_lim->nb_align > bond_desc_lim->nb_max) {
425 RTE_BOND_LOG(ERR, "Failed to inherit descriptor limits");
429 /* Treat maximum number of segments equal to 0 as unspecified */
430 if (slave_desc_lim->nb_seg_max != 0 &&
431 (bond_desc_lim->nb_seg_max == 0 ||
432 slave_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max))
433 bond_desc_lim->nb_seg_max = slave_desc_lim->nb_seg_max;
434 if (slave_desc_lim->nb_mtu_seg_max != 0 &&
435 (bond_desc_lim->nb_mtu_seg_max == 0 ||
436 slave_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max))
437 bond_desc_lim->nb_mtu_seg_max = slave_desc_lim->nb_mtu_seg_max;
/*
 * Add slave_port_id to bonded_port_id WITHOUT taking internals->lock;
 * the public wrapper rte_eth_bond_slave_add() holds it. Validates the
 * candidate, records its dev_info, and either seeds the bond's state
 * from it (first slave: MAC, primary port, queue counts, dev_info,
 * descriptor limits) or merges it into the aggregate (later slaves).
 * Then prepares rte_flow state, extra MAC addresses, the LSC callback
 * and -- if the bond is running -- configures/activates the slave.
 * Returns 0 on success, negative on any validation/config failure.
 */
443 __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
445 struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
446 struct bond_dev_private *internals;
447 struct rte_eth_link link_props;
448 struct rte_eth_dev_info dev_info;
450 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
451 internals = bonded_eth_dev->data->dev_private;
453 if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
/* A port may belong to at most one bond at a time. */
456 slave_eth_dev = &rte_eth_devices[slave_port_id];
457 if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
458 RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
462 rte_eth_dev_info_get(slave_port_id, &dev_info);
463 if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
464 RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small",
469 slave_add(internals, slave_eth_dev);
471 /* We need to store slaves reta_size to be able to synchronize RETA for all
472 * slave devices even if its sizes are different.
474 internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;
476 if (internals->slave_count < 1) {
477 /* if MAC is not user defined then use MAC of first slave add to
479 if (!internals->user_defined_mac) {
480 if (mac_address_set(bonded_eth_dev,
481 slave_eth_dev->data->mac_addrs)) {
482 RTE_BOND_LOG(ERR, "Failed to set MAC address");
487 /* Make primary slave */
488 internals->primary_port = slave_port_id;
489 internals->current_primary_port = slave_port_id;
491 /* Inherit queues settings from first slave */
492 internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
493 internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
495 eth_bond_slave_inherit_dev_info_rx_first(internals, &dev_info);
496 eth_bond_slave_inherit_dev_info_tx_first(internals, &dev_info);
498 eth_bond_slave_inherit_desc_lim_first(&internals->rx_desc_lim,
499 &dev_info.rx_desc_lim);
500 eth_bond_slave_inherit_desc_lim_first(&internals->tx_desc_lim,
501 &dev_info.tx_desc_lim);
/* Not the first slave: merge capabilities into the aggregate. */
505 eth_bond_slave_inherit_dev_info_rx_next(internals, &dev_info);
506 eth_bond_slave_inherit_dev_info_tx_next(internals, &dev_info);
508 ret = eth_bond_slave_inherit_desc_lim_next(
509 &internals->rx_desc_lim, &dev_info.rx_desc_lim);
513 ret = eth_bond_slave_inherit_desc_lim_next(
514 &internals->tx_desc_lim, &dev_info.tx_desc_lim);
/* Clamp the configured RSS hash types to what every slave supports. */
519 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
520 internals->flow_type_rss_offloads;
522 if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) {
523 RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d",
528 /* Add additional MAC addresses to the slave */
529 if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) {
530 RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu",
535 internals->slave_count++;
537 if (bonded_eth_dev->data->dev_started) {
538 if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
/* Roll back the count on configuration failure. */
539 internals->slave_count--;
540 RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
546 /* Add slave details to bonded device */
547 slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
549 /* Update all slave devices MACs */
550 mac_address_slaves_update(bonded_eth_dev);
552 /* Register link status change callback with bonded device pointer as
554 rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
555 bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);
557 /* If bonded device is started then we can add the slave to our active
559 if (bonded_eth_dev->data->dev_started) {
560 rte_eth_link_get_nowait(slave_port_id, &link_props);
562 if (link_props.link_status == ETH_LINK_UP) {
/* First link-up slave becomes primary unless the user chose one. */
563 if (internals->active_slave_count == 0 &&
564 !internals->user_defined_primary_port)
565 bond_ethdev_primary_set(internals,
/* Propagate the bond's VLAN filter table onto the new slave. */
570 slave_vlan_filter_set(bonded_port_id, slave_port_id);
/*
 * Public API: add a slave to a bonded device. Validates the bonded
 * port, then performs the actual work in __eth_bond_slave_add_lock_free
 * while holding internals->lock to serialize slave list mutation.
 */
577 rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id)
579 struct rte_eth_dev *bonded_eth_dev;
580 struct bond_dev_private *internals;
584 /* Verify that port id's are valid bonded and slave ports */
585 if (valid_bonded_port_id(bonded_port_id) != 0)
588 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
589 internals = bonded_eth_dev->data->dev_private;
591 rte_spinlock_lock(&internals->lock);
593 retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);
595 rte_spinlock_unlock(&internals->lock);
/*
 * Remove slave_port_id from bonded_port_id WITHOUT taking the lock;
 * the public wrapper rte_eth_bond_slave_remove() holds it.
 * Deactivates the slave if it is in the active list, unregisters the
 * LSC callback, restores the slave's persisted MAC, removes extra MAC
 * addresses and bond-owned flows from it, clears its BONDED_SLAVE
 * flag, elects a new primary if the removed slave was primary, and
 * resets the aggregated capabilities once the last slave is gone.
 */
601 __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
602 uint16_t slave_port_id)
604 struct rte_eth_dev *bonded_eth_dev;
605 struct bond_dev_private *internals;
606 struct rte_eth_dev *slave_eth_dev;
607 struct rte_flow_error flow_error;
608 struct rte_flow *flow;
611 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
612 internals = bonded_eth_dev->data->dev_private;
614 if (valid_slave_port_id(slave_port_id, internals->mode) < 0)
617 /* first remove from active slave list */
618 slave_idx = find_slave_by_id(internals->active_slaves,
619 internals->active_slave_count, slave_port_id);
621 if (slave_idx < internals->active_slave_count)
622 deactivate_slave(bonded_eth_dev, slave_port_id);
625 /* now find in slave list */
626 for (i = 0; i < internals->slave_count; i++)
627 if (internals->slaves[i].port_id == slave_port_id) {
633 RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
634 internals->slave_count);
638 /* Un-register link status change callback with bonded device pointer as
640 rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
641 bond_ethdev_lsc_event_callback,
642 &rte_eth_devices[bonded_port_id].data->port_id);
644 /* Restore original MAC address of slave device */
645 rte_eth_dev_default_mac_addr_set(slave_port_id,
646 &(internals->slaves[slave_idx].persisted_mac_addr));
648 /* remove additional MAC addresses from the slave */
649 slave_remove_mac_addresses(bonded_eth_dev, slave_port_id);
652 * Remove bond device flows from slave device.
653 * Note: don't restore flow isolate mode.
655 TAILQ_FOREACH(flow, &internals->flow_list, next) {
656 if (flow->flows[slave_idx] != NULL) {
657 rte_flow_destroy(slave_port_id, flow->flows[slave_idx],
659 flow->flows[slave_idx] = NULL;
663 slave_eth_dev = &rte_eth_devices[slave_port_id];
664 slave_remove(internals, slave_eth_dev);
665 slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
667 /* first slave in the active list will be the primary by default,
668 * otherwise use first device in list */
669 if (internals->current_primary_port == slave_port_id) {
670 if (internals->active_slave_count > 0)
671 internals->current_primary_port = internals->active_slaves[0];
672 else if (internals->slave_count > 0)
673 internals->current_primary_port = internals->slaves[0].port_id;
675 internals->primary_port = 0;
678 if (internals->active_slave_count < 1) {
679 /* if no slaves are any longer attached to bonded device and MAC is not
680 * user defined then clear MAC of bonded device as it will be reset
681 * when a new slave is added */
682 if (internals->slave_count < 1 && !internals->user_defined_mac)
683 memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
684 sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
/* No slaves left: revert aggregates to their pristine "accept
 * anything" values so the next first slave can re-seed them. */
686 if (internals->slave_count == 0) {
687 internals->rx_offload_capa = 0;
688 internals->tx_offload_capa = 0;
689 internals->rx_queue_offload_capa = 0;
690 internals->tx_queue_offload_capa = 0;
691 internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
692 internals->reta_size = 0;
693 internals->candidate_max_rx_pktlen = 0;
694 internals->max_rx_pktlen = 0;
/*
 * Public API: remove a slave from a bonded device. Validates the
 * bonded port, then delegates to __eth_bond_slave_remove_lock_free()
 * while holding internals->lock.
 */
700 rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id)
702 struct rte_eth_dev *bonded_eth_dev;
703 struct bond_dev_private *internals;
706 if (valid_bonded_port_id(bonded_port_id) != 0)
709 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
710 internals = bonded_eth_dev->data->dev_private;
712 rte_spinlock_lock(&internals->lock);
714 retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);
716 rte_spinlock_unlock(&internals->lock);
/*
 * Public API: change the bonding mode of a bonded device. Switching to
 * 802.3ad is refused when the bond has a bonded device among its
 * slaves (stacked bonding), per check_for_master_bonded_ethdev().
 */
722 rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode)
724 struct rte_eth_dev *bonded_eth_dev;
726 if (valid_bonded_port_id(bonded_port_id) != 0)
729 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
731 if (check_for_master_bonded_ethdev(bonded_eth_dev) != 0 &&
732 mode == BONDING_MODE_8023AD)
735 return bond_ethdev_mode_set(bonded_eth_dev, mode);
/*
 * Public API: return the current bonding mode of the device, or a
 * negative value for an invalid bonded port id.
 */
739 rte_eth_bond_mode_get(uint16_t bonded_port_id)
741 struct bond_dev_private *internals;
743 if (valid_bonded_port_id(bonded_port_id) != 0)
746 internals = rte_eth_devices[bonded_port_id].data->dev_private;
748 return internals->mode;
/*
 * Public API: designate slave_port_id as the user-defined primary
 * slave of the bond. Records the choice (user_defined_primary_port)
 * and applies it immediately via bond_ethdev_primary_set().
 */
752 rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id)
754 struct bond_dev_private *internals;
756 if (valid_bonded_port_id(bonded_port_id) != 0)
759 internals = rte_eth_devices[bonded_port_id].data->dev_private;
761 if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
764 internals->user_defined_primary_port = 1;
765 internals->primary_port = slave_port_id;
767 bond_ethdev_primary_set(internals, slave_port_id);
/*
 * Public API: return the port id of the current primary slave, or a
 * negative value when the bonded port is invalid or has no slaves.
 */
773 rte_eth_bond_primary_get(uint16_t bonded_port_id)
775 struct bond_dev_private *internals;
777 if (valid_bonded_port_id(bonded_port_id) != 0)
780 internals = rte_eth_devices[bonded_port_id].data->dev_private;
782 if (internals->slave_count < 1)
785 return internals->current_primary_port;
/*
 * Public API: copy the port ids of all slaves into slaves[] (caller
 * supplies capacity 'len'). Returns the slave count on success, or a
 * negative value when the port is invalid or the buffer is too small.
 */
789 rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
792 struct bond_dev_private *internals;
795 if (valid_bonded_port_id(bonded_port_id) != 0)
801 internals = rte_eth_devices[bonded_port_id].data->dev_private;
803 if (internals->slave_count > len)
806 for (i = 0; i < internals->slave_count; i++)
807 slaves[i] = internals->slaves[i].port_id;
809 return internals->slave_count;
/*
 * Public API: copy the port ids of the ACTIVE slaves into slaves[]
 * (caller supplies capacity 'len'). Returns the active slave count on
 * success, negative when the port is invalid or the buffer too small.
 */
813 rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
816 struct bond_dev_private *internals;
818 if (valid_bonded_port_id(bonded_port_id) != 0)
824 internals = rte_eth_devices[bonded_port_id].data->dev_private;
826 if (internals->active_slave_count > len)
829 memcpy(slaves, internals->active_slaves,
830 internals->active_slave_count * sizeof(internals->active_slaves[0]));
832 return internals->active_slave_count;
/*
 * Public API: set a user-defined MAC address on the bonded device and
 * propagate it to all current slaves. Marks user_defined_mac so the
 * address survives slave add/remove cycles.
 */
836 rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
837 struct ether_addr *mac_addr)
839 struct rte_eth_dev *bonded_eth_dev;
840 struct bond_dev_private *internals;
842 if (valid_bonded_port_id(bonded_port_id) != 0)
845 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
846 internals = bonded_eth_dev->data->dev_private;
848 /* Set MAC Address of Bonded Device */
849 if (mac_address_set(bonded_eth_dev, mac_addr))
852 internals->user_defined_mac = 1;
854 /* Update all slave devices MACs*/
855 if (internals->slave_count > 0)
856 return mac_address_slaves_update(bonded_eth_dev);
/*
 * Public API: drop the user-defined MAC and revert the bonded device
 * to the persisted MAC of its primary slave, then refresh all slave
 * MACs. A no-op success when no slaves are attached.
 */
862 rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
864 struct rte_eth_dev *bonded_eth_dev;
865 struct bond_dev_private *internals;
867 if (valid_bonded_port_id(bonded_port_id) != 0)
870 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
871 internals = bonded_eth_dev->data->dev_private;
873 internals->user_defined_mac = 0;
875 if (internals->slave_count > 0) {
877 /* Get the primary slave location based on the primary port
878 * number as, while slave_add(), we will keep the primary
879 * slave based on slave_count,but not based on the primary port.
881 for (slave_port = 0; slave_port < internals->slave_count;
883 if (internals->slaves[slave_port].port_id ==
884 internals->primary_port)
888 /* Set MAC Address of Bonded Device */
889 if (mac_address_set(bonded_eth_dev,
890 &internals->slaves[slave_port].persisted_mac_addr)
892 RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
895 /* Update all slave devices MAC addresses */
896 return mac_address_slaves_update(bonded_eth_dev);
898 /* No need to update anything as no slaves present */
/*
 * Public API: select the transmit hashing policy for balance modes.
 * Each accepted policy also installs the matching burst hash function
 * (L2, L2+L3 or L3+L4). Unrecognized policies are rejected on the
 * (elided) default path.
 */
903 rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy)
905 struct bond_dev_private *internals;
907 if (valid_bonded_port_id(bonded_port_id) != 0)
910 internals = rte_eth_devices[bonded_port_id].data->dev_private;
913 case BALANCE_XMIT_POLICY_LAYER2:
914 internals->balance_xmit_policy = policy;
915 internals->burst_xmit_hash = burst_xmit_l2_hash;
917 case BALANCE_XMIT_POLICY_LAYER23:
918 internals->balance_xmit_policy = policy;
919 internals->burst_xmit_hash = burst_xmit_l23_hash;
921 case BALANCE_XMIT_POLICY_LAYER34:
922 internals->balance_xmit_policy = policy;
923 internals->burst_xmit_hash = burst_xmit_l34_hash;
/*
 * Public API: return the configured transmit hashing policy, or a
 * negative value for an invalid bonded port id.
 */
933 rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id)
935 struct bond_dev_private *internals;
937 if (valid_bonded_port_id(bonded_port_id) != 0)
940 internals = rte_eth_devices[bonded_port_id].data->dev_private;
942 return internals->balance_xmit_policy;
/*
 * Public API: set the link-status polling interval (milliseconds) used
 * to monitor slave links.
 */
946 rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms)
948 struct bond_dev_private *internals;
950 if (valid_bonded_port_id(bonded_port_id) != 0)
953 internals = rte_eth_devices[bonded_port_id].data->dev_private;
954 internals->link_status_polling_interval_ms = internal_ms;
/*
 * Public API: return the link-status polling interval (milliseconds),
 * or a negative value for an invalid bonded port id.
 */
960 rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id)
962 struct bond_dev_private *internals;
964 if (valid_bonded_port_id(bonded_port_id) != 0)
967 internals = rte_eth_devices[bonded_port_id].data->dev_private;
969 return internals->link_status_polling_interval_ms;
/*
 * Public API: set the delay (milliseconds) before propagating a slave
 * link-DOWN event to the bonded device.
 */
973 rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
977 struct bond_dev_private *internals;
979 if (valid_bonded_port_id(bonded_port_id) != 0)
982 internals = rte_eth_devices[bonded_port_id].data->dev_private;
983 internals->link_down_delay_ms = delay_ms;
/*
 * Public API: return the link-DOWN propagation delay (milliseconds),
 * or a negative value for an invalid bonded port id.
 */
989 rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id)
991 struct bond_dev_private *internals;
993 if (valid_bonded_port_id(bonded_port_id) != 0)
996 internals = rte_eth_devices[bonded_port_id].data->dev_private;
998 return internals->link_down_delay_ms;
/*
 * Public API: set the delay (milliseconds) before propagating a slave
 * link-UP event to the bonded device.
 */
1002 rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
1005 struct bond_dev_private *internals;
1007 if (valid_bonded_port_id(bonded_port_id) != 0)
1010 internals = rte_eth_devices[bonded_port_id].data->dev_private;
1011 internals->link_up_delay_ms = delay_ms;
/*
 * Public API: return the link-UP propagation delay (milliseconds), or
 * a negative value for an invalid bonded port id.
 */
1017 rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)
1019 struct bond_dev_private *internals;
1021 if (valid_bonded_port_id(bonded_port_id) != 0)
1024 internals = rte_eth_devices[bonded_port_id].data->dev_private;
1026 return internals->link_up_delay_ms;