/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_malloc.h>
#include <rte_ethdev.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
/* Default slave link-status polling interval, in milliseconds */
#define DEFAULT_POLLING_INTERVAL_10_MS (10)

/* Driver name assigned to every bonded ethdev; valid_bonded_ethdev()
 * identifies bonded devices by comparing against this array's address. */
const char pmd_bond_driver_name[] = "rte_bond_pmd";
50 valid_bonded_ethdev(const struct rte_eth_dev *eth_dev)
52 /* Check valid pointer */
53 if (eth_dev->driver->pci_drv.name == NULL)
56 /* return 0 if driver name matches */
57 return eth_dev->driver->pci_drv.name != pmd_bond_driver_name;
61 valid_bonded_port_id(uint8_t port_id)
63 if (!rte_eth_dev_is_valid_port(port_id))
66 return valid_bonded_ethdev(&rte_eth_devices[port_id]);
70 valid_slave_port_id(uint8_t port_id)
72 /* Verify that port id's are valid */
73 if (!rte_eth_dev_is_valid_port(port_id))
76 /* Verify that port_id refers to a non bonded port */
77 if (!valid_bonded_ethdev(&rte_eth_devices[port_id]))
84 activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
86 struct bond_dev_private *internals = eth_dev->data->dev_private;
87 uint8_t active_count = internals->active_slave_count;
89 if (internals->mode == BONDING_MODE_8023AD)
90 bond_mode_8023ad_activate_slave(eth_dev, port_id);
92 if (internals->mode == BONDING_MODE_TLB
93 || internals->mode == BONDING_MODE_ALB) {
95 internals->tlb_slaves_order[active_count] = port_id;
98 RTE_VERIFY(internals->active_slave_count <
99 (RTE_DIM(internals->active_slaves) - 1));
101 internals->active_slaves[internals->active_slave_count] = port_id;
102 internals->active_slave_count++;
104 if (internals->mode == BONDING_MODE_TLB)
105 bond_tlb_activate_slave(internals);
106 if (internals->mode == BONDING_MODE_ALB)
107 bond_mode_alb_client_list_upd(eth_dev);
111 deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
114 struct bond_dev_private *internals = eth_dev->data->dev_private;
115 uint8_t active_count = internals->active_slave_count;
117 if (internals->mode == BONDING_MODE_8023AD) {
118 bond_mode_8023ad_stop(eth_dev);
119 bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
120 } else if (internals->mode == BONDING_MODE_TLB
121 || internals->mode == BONDING_MODE_ALB)
122 bond_tlb_disable(internals);
124 slave_pos = find_slave_by_id(internals->active_slaves, active_count,
127 /* If slave was not at the end of the list
128 * shift active slaves up active array list */
129 if (slave_pos < active_count) {
131 memmove(internals->active_slaves + slave_pos,
132 internals->active_slaves + slave_pos + 1,
133 (active_count - slave_pos) *
134 sizeof(internals->active_slaves[0]));
137 RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
138 internals->active_slave_count = active_count;
140 if (eth_dev->data->dev_started) {
141 if (internals->mode == BONDING_MODE_8023AD) {
142 bond_mode_8023ad_start(eth_dev);
143 } else if (internals->mode == BONDING_MODE_TLB) {
144 bond_tlb_enable(internals);
145 } else if (internals->mode == BONDING_MODE_ALB) {
146 bond_tlb_enable(internals);
147 bond_mode_alb_client_list_upd(eth_dev);
153 number_of_sockets(void)
157 const struct rte_memseg *ms = rte_eal_get_physmem_layout();
159 for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
160 if (sockets < ms[i].socket_id)
161 sockets = ms[i].socket_id;
164 /* Number of sockets = maximum socket_id + 1 */
168 static struct rte_pci_id pci_id_table = {
169 .device_id = PCI_ANY_ID,
170 .subsystem_device_id = PCI_ANY_ID,
171 .vendor_id = PCI_ANY_ID,
172 .subsystem_vendor_id = PCI_ANY_ID,
175 static struct eth_driver rte_bond_pmd = {
177 .name = pmd_bond_driver_name,
178 .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_DETACHABLE,
179 .id_table = &pci_id_table,
184 rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
186 struct rte_pci_device *pci_dev = NULL;
187 struct bond_dev_private *internals = NULL;
188 struct rte_eth_dev *eth_dev = NULL;
189 struct rte_pci_driver *pci_drv = NULL;
191 /* now do all data allocation - for eth_dev structure, dummy pci driver
192 * and internal (private) data
196 RTE_BOND_LOG(ERR, "Invalid name specified");
200 if (socket_id >= number_of_sockets()) {
202 "Invalid socket id specified to create bonded device on.");
206 pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
207 if (pci_dev == NULL) {
208 RTE_BOND_LOG(ERR, "Unable to malloc pci dev on socket");
212 pci_drv = &rte_bond_pmd.pci_drv;
214 internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
215 if (internals == NULL) {
216 RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
220 /* reserve an ethdev entry */
221 eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
222 if (eth_dev == NULL) {
223 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
227 pci_dev->numa_node = socket_id;
228 pci_drv->name = pmd_bond_driver_name;
229 pci_dev->driver = pci_drv;
231 eth_dev->driver = &rte_bond_pmd;
232 eth_dev->data->dev_private = internals;
233 eth_dev->data->nb_rx_queues = (uint16_t)1;
234 eth_dev->data->nb_tx_queues = (uint16_t)1;
236 TAILQ_INIT(&(eth_dev->link_intr_cbs));
238 eth_dev->data->dev_link.link_status = 0;
240 eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
242 if (eth_dev->data->mac_addrs == NULL) {
243 RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
247 eth_dev->data->dev_started = 0;
248 eth_dev->data->promiscuous = 0;
249 eth_dev->data->scattered_rx = 0;
250 eth_dev->data->all_multicast = 0;
252 eth_dev->dev_ops = &default_dev_ops;
253 eth_dev->pci_dev = pci_dev;
255 eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
256 RTE_ETH_DEV_DETACHABLE;
257 eth_dev->driver = NULL;
258 eth_dev->data->kdrv = RTE_KDRV_NONE;
259 eth_dev->data->drv_name = pmd_bond_driver_name;
260 eth_dev->data->numa_node = socket_id;
262 rte_spinlock_init(&internals->lock);
264 internals->port_id = eth_dev->data->port_id;
265 internals->mode = BONDING_MODE_INVALID;
266 internals->current_primary_port = 0;
267 internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
268 internals->xmit_hash = xmit_l2_hash;
269 internals->user_defined_mac = 0;
270 internals->link_props_set = 0;
272 internals->link_status_polling_enabled = 0;
274 internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
275 internals->link_down_delay_ms = 0;
276 internals->link_up_delay_ms = 0;
278 internals->slave_count = 0;
279 internals->active_slave_count = 0;
280 internals->rx_offload_capa = 0;
281 internals->tx_offload_capa = 0;
283 /* Initially allow to choose any offload type */
284 internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
286 memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
287 memset(internals->slaves, 0, sizeof(internals->slaves));
289 /* Set mode 4 default configuration */
290 bond_mode_8023ad_setup(eth_dev, NULL);
291 if (bond_ethdev_mode_set(eth_dev, mode)) {
292 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
293 eth_dev->data->port_id, mode);
297 return eth_dev->data->port_id;
302 if (eth_dev != NULL) {
303 rte_free(eth_dev->data->mac_addrs);
304 rte_eth_dev_release_port(eth_dev);
310 rte_eth_bond_free(const char *name)
312 struct rte_eth_dev *eth_dev = NULL;
314 /* now free all data allocation - for eth_dev structure,
315 * dummy pci driver and internal (private) data
318 /* find an ethdev entry */
319 eth_dev = rte_eth_dev_allocated(name);
323 if (eth_dev->data->dev_started == 1) {
324 bond_ethdev_stop(eth_dev);
325 bond_ethdev_close(eth_dev);
328 eth_dev->dev_ops = NULL;
329 eth_dev->rx_pkt_burst = NULL;
330 eth_dev->tx_pkt_burst = NULL;
332 rte_free(eth_dev->pci_dev);
333 rte_free(eth_dev->data->dev_private);
334 rte_free(eth_dev->data->mac_addrs);
336 rte_eth_dev_release_port(eth_dev);
342 __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
344 struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
345 struct bond_dev_private *internals;
346 struct bond_dev_private *temp_internals;
347 struct rte_eth_link link_props;
348 struct rte_eth_dev_info dev_info;
352 if (valid_slave_port_id(slave_port_id) != 0)
355 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
356 internals = bonded_eth_dev->data->dev_private;
358 /* Verify that new slave device is not already a slave of another
360 for (i = rte_eth_dev_count()-1; i >= 0; i--) {
361 if (valid_bonded_ethdev(&rte_eth_devices[i]) == 0) {
362 temp_internals = rte_eth_devices[i].data->dev_private;
364 for (j = 0; j < temp_internals->slave_count; j++) {
365 /* Device already a slave of a bonded device */
366 if (temp_internals->slaves[j].port_id == slave_port_id) {
367 RTE_BOND_LOG(ERR, "Slave port %d is already a slave",
375 slave_eth_dev = &rte_eth_devices[slave_port_id];
377 /* Add slave details to bonded device */
378 slave_add(internals, slave_eth_dev);
380 rte_eth_dev_info_get(slave_port_id, &dev_info);
382 /* We need to store slaves reta_size to be able to synchronize RETA for all
383 * slave devices even if its sizes are different.
385 internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;
387 if (internals->slave_count < 1) {
388 /* if MAC is not user defined then use MAC of first slave add to
390 if (!internals->user_defined_mac)
391 mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);
393 /* Inherit eth dev link properties from first slave */
394 link_properties_set(bonded_eth_dev,
395 &(slave_eth_dev->data->dev_link));
397 /* Make primary slave */
398 internals->primary_port = slave_port_id;
400 /* Inherit queues settings from first slave */
401 internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
402 internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
404 internals->reta_size = dev_info.reta_size;
406 /* Take the first dev's offload capabilities */
407 internals->rx_offload_capa = dev_info.rx_offload_capa;
408 internals->tx_offload_capa = dev_info.tx_offload_capa;
409 internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;
412 /* Check slave link properties are supported if props are set,
413 * all slaves must be the same */
414 if (internals->link_props_set) {
415 if (link_properties_valid(&(bonded_eth_dev->data->dev_link),
416 &(slave_eth_dev->data->dev_link))) {
418 "Slave port %d link speed/duplex not supported",
423 link_properties_set(bonded_eth_dev,
424 &(slave_eth_dev->data->dev_link));
426 internals->rx_offload_capa &= dev_info.rx_offload_capa;
427 internals->tx_offload_capa &= dev_info.tx_offload_capa;
428 internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
430 /* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
431 * the power of 2, the lower one is GCD
433 if (internals->reta_size > dev_info.reta_size)
434 internals->reta_size = dev_info.reta_size;
438 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
439 internals->flow_type_rss_offloads;
441 internals->slave_count++;
443 /* Update all slave devices MACs*/
444 mac_address_slaves_update(bonded_eth_dev);
446 if (bonded_eth_dev->data->dev_started) {
447 if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
448 RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
454 /* Register link status change callback with bonded device pointer as
456 rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
457 bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);
459 /* If bonded device is started then we can add the slave to our active
461 if (bonded_eth_dev->data->dev_started) {
462 rte_eth_link_get_nowait(slave_port_id, &link_props);
464 if (link_props.link_status == 1)
465 activate_slave(bonded_eth_dev, slave_port_id);
472 rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
474 struct rte_eth_dev *bonded_eth_dev;
475 struct bond_dev_private *internals;
479 /* Verify that port id's are valid bonded and slave ports */
480 if (valid_bonded_port_id(bonded_port_id) != 0)
483 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
484 internals = bonded_eth_dev->data->dev_private;
486 rte_spinlock_lock(&internals->lock);
488 retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);
490 rte_spinlock_unlock(&internals->lock);
496 __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
498 struct rte_eth_dev *bonded_eth_dev;
499 struct bond_dev_private *internals;
503 if (valid_slave_port_id(slave_port_id) != 0)
506 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
507 internals = bonded_eth_dev->data->dev_private;
509 /* first remove from active slave list */
510 slave_idx = find_slave_by_id(internals->active_slaves,
511 internals->active_slave_count, slave_port_id);
513 if (slave_idx < internals->active_slave_count)
514 deactivate_slave(bonded_eth_dev, slave_port_id);
517 /* now find in slave list */
518 for (i = 0; i < internals->slave_count; i++)
519 if (internals->slaves[i].port_id == slave_port_id) {
525 RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
526 internals->slave_count);
530 /* Un-register link status change callback with bonded device pointer as
532 rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
533 bond_ethdev_lsc_event_callback,
534 &rte_eth_devices[bonded_port_id].data->port_id);
536 /* Restore original MAC address of slave device */
537 mac_address_set(&rte_eth_devices[slave_port_id],
538 &(internals->slaves[slave_idx].persisted_mac_addr));
540 slave_remove(internals, &rte_eth_devices[slave_port_id]);
542 /* first slave in the active list will be the primary by default,
543 * otherwise use first device in list */
544 if (internals->current_primary_port == slave_port_id) {
545 if (internals->active_slave_count > 0)
546 internals->current_primary_port = internals->active_slaves[0];
547 else if (internals->slave_count > 0)
548 internals->current_primary_port = internals->slaves[0].port_id;
550 internals->primary_port = 0;
553 if (internals->active_slave_count < 1) {
554 /* reset device link properties as no slaves are active */
555 link_properties_reset(&rte_eth_devices[bonded_port_id]);
557 /* if no slaves are any longer attached to bonded device and MAC is not
558 * user defined then clear MAC of bonded device as it will be reset
559 * when a new slave is added */
560 if (internals->slave_count < 1 && !internals->user_defined_mac)
561 memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
562 sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
564 if (internals->slave_count == 0) {
565 internals->rx_offload_capa = 0;
566 internals->tx_offload_capa = 0;
567 internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
568 internals->reta_size = 0;
574 rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
576 struct rte_eth_dev *bonded_eth_dev;
577 struct bond_dev_private *internals;
580 if (valid_bonded_port_id(bonded_port_id) != 0)
583 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
584 internals = bonded_eth_dev->data->dev_private;
586 rte_spinlock_lock(&internals->lock);
588 retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);
590 rte_spinlock_unlock(&internals->lock);
596 rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode)
598 if (valid_bonded_port_id(bonded_port_id) != 0)
601 return bond_ethdev_mode_set(&rte_eth_devices[bonded_port_id], mode);
605 rte_eth_bond_mode_get(uint8_t bonded_port_id)
607 struct bond_dev_private *internals;
609 if (valid_bonded_port_id(bonded_port_id) != 0)
612 internals = rte_eth_devices[bonded_port_id].data->dev_private;
614 return internals->mode;
618 rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
620 struct bond_dev_private *internals;
622 if (valid_bonded_port_id(bonded_port_id) != 0)
625 if (valid_slave_port_id(slave_port_id) != 0)
628 internals = rte_eth_devices[bonded_port_id].data->dev_private;
630 internals->user_defined_primary_port = 1;
631 internals->primary_port = slave_port_id;
633 bond_ethdev_primary_set(internals, slave_port_id);
639 rte_eth_bond_primary_get(uint8_t bonded_port_id)
641 struct bond_dev_private *internals;
643 if (valid_bonded_port_id(bonded_port_id) != 0)
646 internals = rte_eth_devices[bonded_port_id].data->dev_private;
648 if (internals->slave_count < 1)
651 return internals->current_primary_port;
655 rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len)
657 struct bond_dev_private *internals;
660 if (valid_bonded_port_id(bonded_port_id) != 0)
666 internals = rte_eth_devices[bonded_port_id].data->dev_private;
668 if (internals->slave_count > len)
671 for (i = 0; i < internals->slave_count; i++)
672 slaves[i] = internals->slaves[i].port_id;
674 return internals->slave_count;
678 rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
681 struct bond_dev_private *internals;
683 if (valid_bonded_port_id(bonded_port_id) != 0)
689 internals = rte_eth_devices[bonded_port_id].data->dev_private;
691 if (internals->active_slave_count > len)
694 memcpy(slaves, internals->active_slaves, internals->active_slave_count);
696 return internals->active_slave_count;
700 rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
701 struct ether_addr *mac_addr)
703 struct rte_eth_dev *bonded_eth_dev;
704 struct bond_dev_private *internals;
706 if (valid_bonded_port_id(bonded_port_id) != 0)
709 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
710 internals = bonded_eth_dev->data->dev_private;
712 /* Set MAC Address of Bonded Device */
713 if (mac_address_set(bonded_eth_dev, mac_addr))
716 internals->user_defined_mac = 1;
718 /* Update all slave devices MACs*/
719 if (internals->slave_count > 0)
720 return mac_address_slaves_update(bonded_eth_dev);
726 rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
728 struct rte_eth_dev *bonded_eth_dev;
729 struct bond_dev_private *internals;
731 if (valid_bonded_port_id(bonded_port_id) != 0)
734 bonded_eth_dev = &rte_eth_devices[bonded_port_id];
735 internals = bonded_eth_dev->data->dev_private;
737 internals->user_defined_mac = 0;
739 if (internals->slave_count > 0) {
740 /* Set MAC Address of Bonded Device */
741 if (mac_address_set(bonded_eth_dev,
742 &internals->slaves[internals->primary_port].persisted_mac_addr)
744 RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
747 /* Update all slave devices MAC addresses */
748 return mac_address_slaves_update(bonded_eth_dev);
750 /* No need to update anything as no slaves present */
755 rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
757 struct bond_dev_private *internals;
759 if (valid_bonded_port_id(bonded_port_id) != 0)
762 internals = rte_eth_devices[bonded_port_id].data->dev_private;
765 case BALANCE_XMIT_POLICY_LAYER2:
766 internals->balance_xmit_policy = policy;
767 internals->xmit_hash = xmit_l2_hash;
769 case BALANCE_XMIT_POLICY_LAYER23:
770 internals->balance_xmit_policy = policy;
771 internals->xmit_hash = xmit_l23_hash;
773 case BALANCE_XMIT_POLICY_LAYER34:
774 internals->balance_xmit_policy = policy;
775 internals->xmit_hash = xmit_l34_hash;
785 rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id)
787 struct bond_dev_private *internals;
789 if (valid_bonded_port_id(bonded_port_id) != 0)
792 internals = rte_eth_devices[bonded_port_id].data->dev_private;
794 return internals->balance_xmit_policy;
798 rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms)
800 struct bond_dev_private *internals;
802 if (valid_bonded_port_id(bonded_port_id) != 0)
805 internals = rte_eth_devices[bonded_port_id].data->dev_private;
806 internals->link_status_polling_interval_ms = internal_ms;
812 rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id)
814 struct bond_dev_private *internals;
816 if (valid_bonded_port_id(bonded_port_id) != 0)
819 internals = rte_eth_devices[bonded_port_id].data->dev_private;
821 return internals->link_status_polling_interval_ms;
825 rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
828 struct bond_dev_private *internals;
830 if (valid_bonded_port_id(bonded_port_id) != 0)
833 internals = rte_eth_devices[bonded_port_id].data->dev_private;
834 internals->link_down_delay_ms = delay_ms;
840 rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id)
842 struct bond_dev_private *internals;
844 if (valid_bonded_port_id(bonded_port_id) != 0)
847 internals = rte_eth_devices[bonded_port_id].data->dev_private;
849 return internals->link_down_delay_ms;
853 rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
856 struct bond_dev_private *internals;
858 if (valid_bonded_port_id(bonded_port_id) != 0)
861 internals = rte_eth_devices[bonded_port_id].data->dev_private;
862 internals->link_up_delay_ms = delay_ms;
868 rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id)
870 struct bond_dev_private *internals;
872 if (valid_bonded_port_id(bonded_port_id) != 0)
875 internals = rte_eth_devices[bonded_port_id].data->dev_private;
877 return internals->link_up_delay_ms;