net/bonding: provide default Rx/Tx configuration
[dpdk.git] / drivers / net / bonding / rte_eth_bond_api.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <string.h>
6
7 #include <rte_mbuf.h>
8 #include <rte_malloc.h>
9 #include <rte_ethdev_driver.h>
10 #include <rte_tcp.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_kvargs.h>
13
14 #include "rte_eth_bond.h"
15 #include "rte_eth_bond_private.h"
16 #include "rte_eth_bond_8023ad_private.h"
17
18 int
19 check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
20 {
21         /* Check valid pointer */
22         if (eth_dev->device->driver->name == NULL)
23                 return -1;
24
25         /* return 0 if driver name matches */
26         return eth_dev->device->driver->name != pmd_bond_drv.driver.name;
27 }
28
29 int
30 valid_bonded_port_id(uint16_t port_id)
31 {
32         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
33         return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
34 }
35
36 int
37 check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev)
38 {
39         int i;
40         struct bond_dev_private *internals;
41
42         if (check_for_bonded_ethdev(eth_dev) != 0)
43                 return 0;
44
45         internals = eth_dev->data->dev_private;
46
47         /* Check if any of slave devices is a bonded device */
48         for (i = 0; i < internals->slave_count; i++)
49                 if (valid_bonded_port_id(internals->slaves[i].port_id) == 0)
50                         return 1;
51
52         return 0;
53 }
54
55 int
56 valid_slave_port_id(uint16_t port_id, uint8_t mode)
57 {
58         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
59
60         /* Verify that port_id refers to a non bonded port */
61         if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0 &&
62                         mode == BONDING_MODE_8023AD) {
63                 RTE_BOND_LOG(ERR, "Cannot add slave to bonded device in 802.3ad"
64                                 " mode as slave is also a bonded device, only "
65                                 "physical devices can be support in this mode.");
66                 return -1;
67         }
68
69         return 0;
70 }
71
72 void
73 activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
74 {
75         struct bond_dev_private *internals = eth_dev->data->dev_private;
76         uint8_t active_count = internals->active_slave_count;
77
78         if (internals->mode == BONDING_MODE_8023AD)
79                 bond_mode_8023ad_activate_slave(eth_dev, port_id);
80
81         if (internals->mode == BONDING_MODE_TLB
82                         || internals->mode == BONDING_MODE_ALB) {
83
84                 internals->tlb_slaves_order[active_count] = port_id;
85         }
86
87         RTE_ASSERT(internals->active_slave_count <
88                         (RTE_DIM(internals->active_slaves) - 1));
89
90         internals->active_slaves[internals->active_slave_count] = port_id;
91         internals->active_slave_count++;
92
93         if (internals->mode == BONDING_MODE_TLB)
94                 bond_tlb_activate_slave(internals);
95         if (internals->mode == BONDING_MODE_ALB)
96                 bond_mode_alb_client_list_upd(eth_dev);
97 }
98
/*
 * Remove @port_id from the bond's active slave array, compacting the
 * array, and restart the mode-specific machinery if the device is running.
 */
void
deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
        uint16_t slave_pos;
        struct bond_dev_private *internals = eth_dev->data->dev_private;
        uint16_t active_count = internals->active_slave_count;

        if (internals->mode == BONDING_MODE_8023AD) {
                /* 802.3ad state machines must be stopped before a member
                 * is removed; they are restarted below if dev_started. */
                bond_mode_8023ad_stop(eth_dev);
                bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
        } else if (internals->mode == BONDING_MODE_TLB
                        || internals->mode == BONDING_MODE_ALB)
                bond_tlb_disable(internals);

        /* Locate the slave inside the active array; a position equal to
         * active_count means it was not found and nothing is shifted. */
        slave_pos = find_slave_by_id(internals->active_slaves, active_count,
                        port_id);

        /* If slave was not at the end of the list
         * shift active slaves up active array list */
        if (slave_pos < active_count) {
                active_count--;
                memmove(internals->active_slaves + slave_pos,
                                internals->active_slaves + slave_pos + 1,
                                (active_count - slave_pos) *
                                        sizeof(internals->active_slaves[0]));
        }

        RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
        internals->active_slave_count = active_count;

        /* Only restart the mode machinery when the bond itself is started */
        if (eth_dev->data->dev_started) {
                if (internals->mode == BONDING_MODE_8023AD) {
                        bond_mode_8023ad_start(eth_dev);
                } else if (internals->mode == BONDING_MODE_TLB) {
                        bond_tlb_enable(internals);
                } else if (internals->mode == BONDING_MODE_ALB) {
                        bond_tlb_enable(internals);
                        bond_mode_alb_client_list_upd(eth_dev);
                }
        }
}
140
141 int
142 rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
143 {
144         struct bond_dev_private *internals;
145         char devargs[52];
146         uint16_t port_id;
147         int ret;
148
149         if (name == NULL) {
150                 RTE_BOND_LOG(ERR, "Invalid name specified");
151                 return -EINVAL;
152         }
153
154         ret = snprintf(devargs, sizeof(devargs),
155                 "driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id);
156         if (ret < 0 || ret >= (int)sizeof(devargs))
157                 return -ENOMEM;
158
159         ret = rte_vdev_init(name, devargs);
160         if (ret)
161                 return -ENOMEM;
162
163         ret = rte_eth_dev_get_port_by_name(name, &port_id);
164         RTE_ASSERT(!ret);
165
166         /*
167          * To make bond_ethdev_configure() happy we need to free the
168          * internals->kvlist here.
169          *
170          * Also see comment in bond_ethdev_configure().
171          */
172         internals = rte_eth_devices[port_id].data->dev_private;
173         rte_kvargs_free(internals->kvlist);
174         internals->kvlist = NULL;
175
176         return port_id;
177 }
178
/**
 * Destroy a bonded device created with rte_eth_bond_create().
 *
 * @param name  device name used at creation time
 * @return result of rte_vdev_uninit() (0 on success, negative on failure)
 */
int
rte_eth_bond_free(const char *name)
{
        return rte_vdev_uninit(name);
}
184
185 static int
186 slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
187 {
188         struct rte_eth_dev *bonded_eth_dev;
189         struct bond_dev_private *internals;
190         int found;
191         int res = 0;
192         uint64_t slab = 0;
193         uint32_t pos = 0;
194         uint16_t first;
195
196         bonded_eth_dev = &rte_eth_devices[bonded_port_id];
197         if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
198                         DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
199                 return 0;
200
201         internals = bonded_eth_dev->data->dev_private;
202         found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab);
203         first = pos;
204
205         if (!found)
206                 return 0;
207
208         do {
209                 uint32_t i;
210                 uint64_t mask;
211
212                 for (i = 0, mask = 1;
213                      i < RTE_BITMAP_SLAB_BIT_SIZE;
214                      i ++, mask <<= 1) {
215                         if (unlikely(slab & mask)) {
216                                 uint16_t vlan_id = pos + i;
217
218                                 res = rte_eth_dev_vlan_filter(slave_port_id,
219                                                               vlan_id, 1);
220                         }
221                 }
222                 found = rte_bitmap_scan(internals->vlan_filter_bmp,
223                                         &pos, &slab);
224         } while (found && first != pos && res == 0);
225
226         return res;
227 }
228
/*
 * Re-create every flow rule held by the bonded device on slave @slave_id,
 * and apply the bond's flow isolation state first if one was recorded.
 * On any failure, flows already created on the slave are destroyed again.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
{
        struct rte_flow *flow;
        struct rte_flow_error ferror;
        uint16_t slave_port_id = internals->slaves[slave_id].port_id;

        if (internals->flow_isolated_valid != 0) {
                /* Isolation can only be changed on a stopped port */
                rte_eth_dev_stop(slave_port_id);
                if (rte_flow_isolate(slave_port_id, internals->flow_isolated,
                    &ferror)) {
                        RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave"
                                     " %d: %s", slave_id, ferror.message ?
                                     ferror.message : "(no stated reason)");
                        return -1;
                }
        }
        TAILQ_FOREACH(flow, &internals->flow_list, next) {
                flow->flows[slave_id] = rte_flow_create(slave_port_id,
                                                        &flow->fd->attr,
                                                        flow->fd->items,
                                                        flow->fd->actions,
                                                        &ferror);
                if (flow->flows[slave_id] == NULL) {
                        RTE_BOND_LOG(ERR, "Cannot create flow for slave"
                                     " %d: %s", slave_id,
                                     ferror.message ? ferror.message :
                                     "(no stated reason)");
                        /* Destroy successful bond flows from the slave */
                        /* NOTE: the outer loop variable is deliberately
                         * reused for rollback; the function returns right
                         * after, so the clobbered cursor is never used. */
                        TAILQ_FOREACH(flow, &internals->flow_list, next) {
                                if (flow->flows[slave_id] != NULL) {
                                        rte_flow_destroy(slave_port_id,
                                                         flow->flows[slave_id],
                                                         &ferror);
                                        flow->flows[slave_id] = NULL;
                                }
                        }
                        return -1;
                }
        }
        return 0;
}
271
272 static void
273 eth_bond_slave_inherit_dev_info_rx_first(struct bond_dev_private *internals,
274                                          const struct rte_eth_dev_info *di)
275 {
276         struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
277
278         internals->reta_size = di->reta_size;
279
280         /* Inherit Rx offload capabilities from the first slave device */
281         internals->rx_offload_capa = di->rx_offload_capa;
282         internals->rx_queue_offload_capa = di->rx_queue_offload_capa;
283         internals->flow_type_rss_offloads = di->flow_type_rss_offloads;
284
285         /* Inherit maximum Rx packet size from the first slave device */
286         internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
287
288         /* Inherit default Rx queue settings from the first slave device */
289         memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i));
290
291         /*
292          * Turn off descriptor prefetch and writeback by default for all
293          * slave devices. Applications may tweak this setting if need be.
294          */
295         rxconf_i->rx_thresh.pthresh = 0;
296         rxconf_i->rx_thresh.hthresh = 0;
297         rxconf_i->rx_thresh.wthresh = 0;
298
299         /* Setting this to zero should effectively enable default values */
300         rxconf_i->rx_free_thresh = 0;
301
302         /* Disable deferred start by default for all slave devices */
303         rxconf_i->rx_deferred_start = 0;
304 }
305
306 static void
307 eth_bond_slave_inherit_dev_info_tx_first(struct bond_dev_private *internals,
308                                          const struct rte_eth_dev_info *di)
309 {
310         struct rte_eth_txconf *txconf_i = &internals->default_txconf;
311
312         /* Inherit Tx offload capabilities from the first slave device */
313         internals->tx_offload_capa = di->tx_offload_capa;
314         internals->tx_queue_offload_capa = di->tx_queue_offload_capa;
315
316         /* Inherit default Tx queue settings from the first slave device */
317         memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i));
318
319         /*
320          * Turn off descriptor prefetch and writeback by default for all
321          * slave devices. Applications may tweak this setting if need be.
322          */
323         txconf_i->tx_thresh.pthresh = 0;
324         txconf_i->tx_thresh.hthresh = 0;
325         txconf_i->tx_thresh.wthresh = 0;
326
327         /*
328          * Setting these parameters to zero assumes that default
329          * values will be configured implicitly by slave devices.
330          */
331         txconf_i->tx_free_thresh = 0;
332         txconf_i->tx_rs_thresh = 0;
333
334         /* Disable deferred start by default for all slave devices */
335         txconf_i->tx_deferred_start = 0;
336 }
337
/*
 * Narrow the bond's inherited Rx settings with the capabilities of an
 * additional (non-first) slave device.
 */
static void
eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals,
                                        const struct rte_eth_dev_info *di)
{
        struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
        const struct rte_eth_rxconf *rxconf = &di->default_rxconf;

        /* Only offloads supported by every slave remain advertised */
        internals->rx_offload_capa &= di->rx_offload_capa;
        internals->rx_queue_offload_capa &= di->rx_queue_offload_capa;
        internals->flow_type_rss_offloads &= di->flow_type_rss_offloads;

        /*
         * If at least one slave device suggests enabling this
         * setting by default, enable it for all slave devices
         * since disabling it may not be necessarily supported.
         */
        if (rxconf->rx_drop_en == 1)
                rxconf_i->rx_drop_en = 1;

        /*
         * Adding a new slave device may cause some of previously inherited
         * offloads to be withdrawn from the internal rx_queue_offload_capa
         * value. Thus, the new internal value of default Rx queue offloads
         * has to be masked by rx_queue_offload_capa to make sure that only
         * commonly supported offloads are preserved from both the previous
         * value and the value being inherited from the new slave device.
         */
        rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
                             internals->rx_queue_offload_capa;

        /*
         * RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
         * the power of 2, the lower one is GCD
         */
        if (internals->reta_size > di->reta_size)
                internals->reta_size = di->reta_size;

        /* Only shrink the candidate while no explicit MTU has been fixed */
        if (!internals->max_rx_pktlen &&
            di->max_rx_pktlen < internals->candidate_max_rx_pktlen)
                internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
}
379
380 static void
381 eth_bond_slave_inherit_dev_info_tx_next(struct bond_dev_private *internals,
382                                         const struct rte_eth_dev_info *di)
383 {
384         struct rte_eth_txconf *txconf_i = &internals->default_txconf;
385         const struct rte_eth_txconf *txconf = &di->default_txconf;
386
387         internals->tx_offload_capa &= di->tx_offload_capa;
388         internals->tx_queue_offload_capa &= di->tx_queue_offload_capa;
389
390         /*
391          * Adding a new slave device may cause some of previously inherited
392          * offloads to be withdrawn from the internal tx_queue_offload_capa
393          * value. Thus, the new internal value of default Tx queue offloads
394          * has to be masked by tx_queue_offload_capa to make sure that only
395          * commonly supported offloads are preserved from both the previous
396          * value and the value being inhereted from the new slave device.
397          */
398         txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
399                              internals->tx_queue_offload_capa;
400 }
401
/*
 * Attach @slave_port_id to bonded device @bonded_port_id. Caller must hold
 * internals->lock. The first slave seeds the bond's MAC, link properties,
 * queue counts and offload defaults; later slaves only narrow them.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
__eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
{
        struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
        struct bond_dev_private *internals;
        struct rte_eth_link link_props;
        struct rte_eth_dev_info dev_info;

        bonded_eth_dev = &rte_eth_devices[bonded_port_id];
        internals = bonded_eth_dev->data->dev_private;

        if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
                return -1;

        /* A port can belong to at most one bond at a time */
        slave_eth_dev = &rte_eth_devices[slave_port_id];
        if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
                RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
                return -1;
        }

        /* The slave must be able to carry the bond's configured frame size */
        rte_eth_dev_info_get(slave_port_id, &dev_info);
        if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
                RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small",
                             slave_port_id);
                return -1;
        }

        slave_add(internals, slave_eth_dev);

        /* We need to store slaves reta_size to be able to synchronize RETA for all
         * slave devices even if its sizes are different.
         */
        internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;

        if (internals->slave_count < 1) {
                /* if MAC is not user defined then use MAC of first slave add to
                 * bonded device */
                if (!internals->user_defined_mac) {
                        if (mac_address_set(bonded_eth_dev,
                                            slave_eth_dev->data->mac_addrs)) {
                                RTE_BOND_LOG(ERR, "Failed to set MAC address");
                                return -1;
                        }
                }

                /* Inherit eth dev link properties from first slave */
                link_properties_set(bonded_eth_dev,
                                &(slave_eth_dev->data->dev_link));

                /* Make primary slave */
                internals->primary_port = slave_port_id;
                internals->current_primary_port = slave_port_id;

                /* Inherit queues settings from first slave */
                internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
                internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;

                eth_bond_slave_inherit_dev_info_rx_first(internals, &dev_info);
                eth_bond_slave_inherit_dev_info_tx_first(internals, &dev_info);
        } else {
                /* Subsequent slaves narrow the inherited capability set */
                eth_bond_slave_inherit_dev_info_rx_next(internals, &dev_info);
                eth_bond_slave_inherit_dev_info_tx_next(internals, &dev_info);
        }

        /* Keep the configured RSS hash within the common capability set */
        bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
                        internals->flow_type_rss_offloads;

        /* Replicate the bond's rte_flow rules onto the new slave */
        if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) {
                RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d",
                             slave_port_id);
                return -1;
        }

        /* Add additional MAC addresses to the slave */
        if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) {
                RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu",
                                slave_port_id);
                return -1;
        }

        internals->slave_count++;

        /* If the bond is already running, configure the slave right away;
         * roll back the count on failure. */
        if (bonded_eth_dev->data->dev_started) {
                if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
                        internals->slave_count--;
                        RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
                                        slave_port_id);
                        return -1;
                }
        }

        /* Add slave details to bonded device */
        slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;

        /* Update all slave devices MACs */
        mac_address_slaves_update(bonded_eth_dev);

        /* Register link status change callback with bonded device pointer as
         * argument*/
        rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
                        bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);

        /* If bonded device is started then we can add the slave to our active
         * slave array */
        if (bonded_eth_dev->data->dev_started) {
                rte_eth_link_get_nowait(slave_port_id, &link_props);

                 if (link_props.link_status == ETH_LINK_UP) {
                        /* First up slave becomes primary unless the user
                         * pinned one explicitly */
                        if (internals->active_slave_count == 0 &&
                            !internals->user_defined_primary_port)
                                bond_ethdev_primary_set(internals,
                                                        slave_port_id);
                }
        }

        /* Mirror the bond's VLAN filter table onto the slave (best effort) */
        slave_vlan_filter_set(bonded_port_id, slave_port_id);

        return 0;

}
522
523 int
524 rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id)
525 {
526         struct rte_eth_dev *bonded_eth_dev;
527         struct bond_dev_private *internals;
528
529         int retval;
530
531         /* Verify that port id's are valid bonded and slave ports */
532         if (valid_bonded_port_id(bonded_port_id) != 0)
533                 return -1;
534
535         bonded_eth_dev = &rte_eth_devices[bonded_port_id];
536         internals = bonded_eth_dev->data->dev_private;
537
538         rte_spinlock_lock(&internals->lock);
539
540         retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);
541
542         rte_spinlock_unlock(&internals->lock);
543
544         return retval;
545 }
546
/*
 * Detach @slave_port_id from bonded device @bonded_port_id. Caller must
 * hold internals->lock. Deactivates the slave if active, unwinds callbacks,
 * MAC addresses and flow rules, then resets bond-wide state when the last
 * slave is gone.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
__eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
                                   uint16_t slave_port_id)
{
        struct rte_eth_dev *bonded_eth_dev;
        struct bond_dev_private *internals;
        struct rte_eth_dev *slave_eth_dev;
        struct rte_flow_error flow_error;
        struct rte_flow *flow;
        int i, slave_idx;

        bonded_eth_dev = &rte_eth_devices[bonded_port_id];
        internals = bonded_eth_dev->data->dev_private;

        if (valid_slave_port_id(slave_port_id, internals->mode) < 0)
                return -1;

        /* first remove from active slave list */
        slave_idx = find_slave_by_id(internals->active_slaves,
                internals->active_slave_count, slave_port_id);

        /* find_slave_by_id() returns active_slave_count when not found */
        if (slave_idx < internals->active_slave_count)
                deactivate_slave(bonded_eth_dev, slave_port_id);

        slave_idx = -1;
        /* now find in slave list */
        for (i = 0; i < internals->slave_count; i++)
                if (internals->slaves[i].port_id == slave_port_id) {
                        slave_idx = i;
                        break;
                }

        if (slave_idx < 0) {
                RTE_BOND_LOG(ERR, "Couldn't find slave in port list, slave count %d",
                                internals->slave_count);
                return -1;
        }

        /* Un-register link status change callback with bonded device pointer as
         * argument*/
        rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
                        bond_ethdev_lsc_event_callback,
                        &rte_eth_devices[bonded_port_id].data->port_id);

        /* Restore original MAC address of slave device */
        rte_eth_dev_default_mac_addr_set(slave_port_id,
                        &(internals->slaves[slave_idx].persisted_mac_addr));

        /* remove additional MAC addresses from the slave */
        slave_remove_mac_addresses(bonded_eth_dev, slave_port_id);

        /*
         * Remove bond device flows from slave device.
         * Note: don't restore flow isolate mode.
         */
        TAILQ_FOREACH(flow, &internals->flow_list, next) {
                if (flow->flows[slave_idx] != NULL) {
                        rte_flow_destroy(slave_port_id, flow->flows[slave_idx],
                                         &flow_error);
                        flow->flows[slave_idx] = NULL;
                }
        }

        slave_eth_dev = &rte_eth_devices[slave_port_id];
        slave_remove(internals, slave_eth_dev);
        slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);

        /*  first slave in the active list will be the primary by default,
         *  otherwise use first device in list */
        if (internals->current_primary_port == slave_port_id) {
                if (internals->active_slave_count > 0)
                        internals->current_primary_port = internals->active_slaves[0];
                else if (internals->slave_count > 0)
                        internals->current_primary_port = internals->slaves[0].port_id;
                else
                        internals->primary_port = 0;
        }

        if (internals->active_slave_count < 1) {
                /* if no slaves are any longer attached to bonded device and MAC is not
                 * user defined then clear MAC of bonded device as it will be reset
                 * when a new slave is added */
                if (internals->slave_count < 1 && !internals->user_defined_mac)
                        memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
                                        sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
        }
        /* Last slave removed: reset inherited capabilities so the next
         * slave added re-seeds them from scratch */
        if (internals->slave_count == 0) {
                internals->rx_offload_capa = 0;
                internals->tx_offload_capa = 0;
                internals->rx_queue_offload_capa = 0;
                internals->tx_queue_offload_capa = 0;
                internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
                internals->reta_size = 0;
                internals->candidate_max_rx_pktlen = 0;
                internals->max_rx_pktlen = 0;
        }
        return 0;
}
645
646 int
647 rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id)
648 {
649         struct rte_eth_dev *bonded_eth_dev;
650         struct bond_dev_private *internals;
651         int retval;
652
653         if (valid_bonded_port_id(bonded_port_id) != 0)
654                 return -1;
655
656         bonded_eth_dev = &rte_eth_devices[bonded_port_id];
657         internals = bonded_eth_dev->data->dev_private;
658
659         rte_spinlock_lock(&internals->lock);
660
661         retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);
662
663         rte_spinlock_unlock(&internals->lock);
664
665         return retval;
666 }
667
668 int
669 rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode)
670 {
671         struct rte_eth_dev *bonded_eth_dev;
672
673         if (valid_bonded_port_id(bonded_port_id) != 0)
674                 return -1;
675
676         bonded_eth_dev = &rte_eth_devices[bonded_port_id];
677
678         if (check_for_master_bonded_ethdev(bonded_eth_dev) != 0 &&
679                         mode == BONDING_MODE_8023AD)
680                 return -1;
681
682         return bond_ethdev_mode_set(bonded_eth_dev, mode);
683 }
684
685 int
686 rte_eth_bond_mode_get(uint16_t bonded_port_id)
687 {
688         struct bond_dev_private *internals;
689
690         if (valid_bonded_port_id(bonded_port_id) != 0)
691                 return -1;
692
693         internals = rte_eth_devices[bonded_port_id].data->dev_private;
694
695         return internals->mode;
696 }
697
698 int
699 rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id)
700 {
701         struct bond_dev_private *internals;
702
703         if (valid_bonded_port_id(bonded_port_id) != 0)
704                 return -1;
705
706         internals = rte_eth_devices[bonded_port_id].data->dev_private;
707
708         if (valid_slave_port_id(slave_port_id, internals->mode) != 0)
709                 return -1;
710
711         internals->user_defined_primary_port = 1;
712         internals->primary_port = slave_port_id;
713
714         bond_ethdev_primary_set(internals, slave_port_id);
715
716         return 0;
717 }
718
719 int
720 rte_eth_bond_primary_get(uint16_t bonded_port_id)
721 {
722         struct bond_dev_private *internals;
723
724         if (valid_bonded_port_id(bonded_port_id) != 0)
725                 return -1;
726
727         internals = rte_eth_devices[bonded_port_id].data->dev_private;
728
729         if (internals->slave_count < 1)
730                 return -1;
731
732         return internals->current_primary_port;
733 }
734
735 int
736 rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
737                         uint16_t len)
738 {
739         struct bond_dev_private *internals;
740         uint8_t i;
741
742         if (valid_bonded_port_id(bonded_port_id) != 0)
743                 return -1;
744
745         if (slaves == NULL)
746                 return -1;
747
748         internals = rte_eth_devices[bonded_port_id].data->dev_private;
749
750         if (internals->slave_count > len)
751                 return -1;
752
753         for (i = 0; i < internals->slave_count; i++)
754                 slaves[i] = internals->slaves[i].port_id;
755
756         return internals->slave_count;
757 }
758
759 int
760 rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
761                 uint16_t len)
762 {
763         struct bond_dev_private *internals;
764
765         if (valid_bonded_port_id(bonded_port_id) != 0)
766                 return -1;
767
768         if (slaves == NULL)
769                 return -1;
770
771         internals = rte_eth_devices[bonded_port_id].data->dev_private;
772
773         if (internals->active_slave_count > len)
774                 return -1;
775
776         memcpy(slaves, internals->active_slaves,
777         internals->active_slave_count * sizeof(internals->active_slaves[0]));
778
779         return internals->active_slave_count;
780 }
781
782 int
783 rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
784                 struct ether_addr *mac_addr)
785 {
786         struct rte_eth_dev *bonded_eth_dev;
787         struct bond_dev_private *internals;
788
789         if (valid_bonded_port_id(bonded_port_id) != 0)
790                 return -1;
791
792         bonded_eth_dev = &rte_eth_devices[bonded_port_id];
793         internals = bonded_eth_dev->data->dev_private;
794
795         /* Set MAC Address of Bonded Device */
796         if (mac_address_set(bonded_eth_dev, mac_addr))
797                 return -1;
798
799         internals->user_defined_mac = 1;
800
801         /* Update all slave devices MACs*/
802         if (internals->slave_count > 0)
803                 return mac_address_slaves_update(bonded_eth_dev);
804
805         return 0;
806 }
807
808 int
809 rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
810 {
811         struct rte_eth_dev *bonded_eth_dev;
812         struct bond_dev_private *internals;
813
814         if (valid_bonded_port_id(bonded_port_id) != 0)
815                 return -1;
816
817         bonded_eth_dev = &rte_eth_devices[bonded_port_id];
818         internals = bonded_eth_dev->data->dev_private;
819
820         internals->user_defined_mac = 0;
821
822         if (internals->slave_count > 0) {
823                 int slave_port;
824                 /* Get the primary slave location based on the primary port
825                  * number as, while slave_add(), we will keep the primary
826                  * slave based on slave_count,but not based on the primary port.
827                  */
828                 for (slave_port = 0; slave_port < internals->slave_count;
829                      slave_port++) {
830                         if (internals->slaves[slave_port].port_id ==
831                             internals->primary_port)
832                                 break;
833                 }
834
835                 /* Set MAC Address of Bonded Device */
836                 if (mac_address_set(bonded_eth_dev,
837                         &internals->slaves[slave_port].persisted_mac_addr)
838                                 != 0) {
839                         RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
840                         return -1;
841                 }
842                 /* Update all slave devices MAC addresses */
843                 return mac_address_slaves_update(bonded_eth_dev);
844         }
845         /* No need to update anything as no slaves present */
846         return 0;
847 }
848
849 int
850 rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy)
851 {
852         struct bond_dev_private *internals;
853
854         if (valid_bonded_port_id(bonded_port_id) != 0)
855                 return -1;
856
857         internals = rte_eth_devices[bonded_port_id].data->dev_private;
858
859         switch (policy) {
860         case BALANCE_XMIT_POLICY_LAYER2:
861                 internals->balance_xmit_policy = policy;
862                 internals->burst_xmit_hash = burst_xmit_l2_hash;
863                 break;
864         case BALANCE_XMIT_POLICY_LAYER23:
865                 internals->balance_xmit_policy = policy;
866                 internals->burst_xmit_hash = burst_xmit_l23_hash;
867                 break;
868         case BALANCE_XMIT_POLICY_LAYER34:
869                 internals->balance_xmit_policy = policy;
870                 internals->burst_xmit_hash = burst_xmit_l34_hash;
871                 break;
872
873         default:
874                 return -1;
875         }
876         return 0;
877 }
878
879 int
880 rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id)
881 {
882         struct bond_dev_private *internals;
883
884         if (valid_bonded_port_id(bonded_port_id) != 0)
885                 return -1;
886
887         internals = rte_eth_devices[bonded_port_id].data->dev_private;
888
889         return internals->balance_xmit_policy;
890 }
891
892 int
893 rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms)
894 {
895         struct bond_dev_private *internals;
896
897         if (valid_bonded_port_id(bonded_port_id) != 0)
898                 return -1;
899
900         internals = rte_eth_devices[bonded_port_id].data->dev_private;
901         internals->link_status_polling_interval_ms = internal_ms;
902
903         return 0;
904 }
905
906 int
907 rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id)
908 {
909         struct bond_dev_private *internals;
910
911         if (valid_bonded_port_id(bonded_port_id) != 0)
912                 return -1;
913
914         internals = rte_eth_devices[bonded_port_id].data->dev_private;
915
916         return internals->link_status_polling_interval_ms;
917 }
918
919 int
920 rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
921                                        uint32_t delay_ms)
922
923 {
924         struct bond_dev_private *internals;
925
926         if (valid_bonded_port_id(bonded_port_id) != 0)
927                 return -1;
928
929         internals = rte_eth_devices[bonded_port_id].data->dev_private;
930         internals->link_down_delay_ms = delay_ms;
931
932         return 0;
933 }
934
935 int
936 rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id)
937 {
938         struct bond_dev_private *internals;
939
940         if (valid_bonded_port_id(bonded_port_id) != 0)
941                 return -1;
942
943         internals = rte_eth_devices[bonded_port_id].data->dev_private;
944
945         return internals->link_down_delay_ms;
946 }
947
948 int
949 rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
950
951 {
952         struct bond_dev_private *internals;
953
954         if (valid_bonded_port_id(bonded_port_id) != 0)
955                 return -1;
956
957         internals = rte_eth_devices[bonded_port_id].data->dev_private;
958         internals->link_up_delay_ms = delay_ms;
959
960         return 0;
961 }
962
963 int
964 rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)
965 {
966         struct bond_dev_private *internals;
967
968         if (valid_bonded_port_id(bonded_port_id) != 0)
969                 return -1;
970
971         internals = rte_eth_devices[bonded_port_id].data->dev_private;
972
973         return internals->link_up_delay_ms;
974 }