4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <rte_alarm.h>
39 #include <rte_malloc.h>
40 #include <rte_errno.h>
41 #include <rte_cycles.h>
42 #include <rte_compat.h>
44 #include "rte_eth_bond_private.h"
/* Forward declaration of the periodic callback used when an external
 * slow-protocol RX callback (mode4->slowrx_cb) is configured. */
46 static void bond_mode_8023ad_ext_periodic_cb(void *arg);
/* Debug build: MODE4_DEBUG() logs a timestamped, per-port message.
 * It relies on a local variable named 'slave_id' at every call site. */
48 #ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
49 #define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
50 bond_dbg_get_time_diff_ms(), slave_id, \
51 __func__, ##__VA_ARGS__)
/* TSC reference point for the debug timestamps produced by MODE4_DEBUG(). */
53 static uint64_t start_time;
/* Milliseconds elapsed since start_time, derived from the TSC frequency.
 * NOTE(review): 'now' is assigned on lines not visible in this chunk —
 * presumably rte_rdtsc(), with start_time latched on first call; confirm. */
56 bond_dbg_get_time_diff_ms(void)
64 return ((now - start_time) * 1000) / rte_get_tsc_hz();
/* Dump a received/sent LACPDU to the debug log in human-readable form:
 * actor/partner MAC addresses, TLV fields and decoded state-bit labels. */
68 bond_print_lacp(struct lacpdu *l)
72 char a_state[256] = { 0 };
73 char p_state[256] = { 0 };
/* One label per LACP state bit, LSB first (bit 0 = ACT ... bit 7 = EXP). */
75 static const char * const state_labels[] = {
76 "ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
/* Format actor system MAC address. */
84 addr = l->actor.port_params.system.addr_bytes;
85 snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
86 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
/* Format partner system MAC address. */
88 addr = l->partner.port_params.system.addr_bytes;
89 snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
90 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
/* Decode each set state bit into its label, space separated. */
92 for (i = 0; i < 8; i++) {
93 if ((l->actor.state >> i) & 1) {
94 a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ",
98 if ((l->partner.state >> i) & 1) {
99 p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ",
/* Strip the trailing space left by the loops above. */
104 if (a_len && a_state[a_len-1] == ' ')
105 a_state[a_len-1] = '\0';
107 if (p_len && p_state[p_len-1] == ' ')
108 p_state[p_len-1] = '\0';
110 RTE_LOG(DEBUG, PMD, "LACP: {\n"\
113 " actor={ tlv=%02X, len=%02X\n"\
114 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
117 " partner={ tlv=%02X, len=%02X\n"\
118 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
121 " collector={info=%02X, length=%02X, max_delay=%04X\n, " \
122 "type_term=%02X, terminator_length = %02X}\n",\
125 l->actor.tlv_type_info,\
126 l->actor.info_length,\
127 l->actor.port_params.system_priority,\
129 l->actor.port_params.key,\
130 l->actor.port_params.port_priority,\
131 l->actor.port_params.port_number,\
133 l->partner.tlv_type_info,\
134 l->partner.info_length,\
135 l->partner.port_params.system_priority,\
137 l->partner.port_params.key,\
138 l->partner.port_params.port_priority,\
139 l->partner.port_params.port_number,\
141 l->tlv_type_collector_info,\
142 l->collector_info_length,\
143 l->collector_max_delay,\
144 l->tlv_type_terminator,\
145 l->terminator_length);
/* Debug build: real printer. */
148 #define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
/* Non-debug build (the #else is on lines not shown here): both debug
 * macros compile to no-ops. */
150 #define BOND_PRINT_LACP(lacpdu) do { } while (0)
151 #define MODE4_DEBUG(fmt, ...) do { } while (0)
/* IEEE 802.3 Slow Protocols multicast destination MAC (01:80:C2:00:00:02),
 * used as the destination of every LACPDU this driver transmits. */
154 static const struct ether_addr lacp_mac_addr = {
155 .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
/* Per-slave 802.3ad state machine context, indexed by ethdev port id. */
158 struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
/* TSC-based one-shot timers: a timer is a deadline in TSC ticks.
 * 0 (set on lines not shown here) means "stopped". */
161 timer_cancel(uint64_t *timer)
/* Arm the timer: deadline = now + timeout (both in TSC ticks). */
167 timer_set(uint64_t *timer, uint64_t timeout)
169 *timer = rte_rdtsc() + timeout;
172 /* Forces given timer to be in expired state. */
174 timer_force_expired(uint64_t *timer)
176 *timer = rte_rdtsc();
/* True if the timer was cancelled/never armed. */
180 timer_is_stopped(uint64_t *timer)
/* True if the deadline has passed. NOTE(review): a stopped timer (0) also
 * compares as expired here; callers use timer_is_running() to distinguish. */
186 timer_is_expired(uint64_t *timer)
188 return *timer < rte_rdtsc();
191 /* Timer is in running state if it is not stopped nor expired */
193 timer_is_running(uint64_t *timer)
195 return !timer_is_stopped(timer) && !timer_is_expired(timer);
/* Atomically OR 'flags' into port->warnings_to_show using a CAS retry loop,
 * so data-path and control-path can raise warnings concurrently. The flags
 * are drained and printed later by show_warnings(). */
199 set_warning_flags(struct port *port, uint16_t flags)
203 uint16_t new_flag = 0;
206 old = port->warnings_to_show;
207 new_flag = old | flags;
/* Retry until no other thread modified the flags between read and CAS. */
208 retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag);
209 } while (unlikely(retval == 0));
/* Drain and log the accumulated warning flags for one slave port.
 * Output is rate limited by port->warning_timer so a persistent condition
 * does not flood the log. */
213 show_warnings(uint8_t slave_id)
215 struct port *port = &mode_8023ad_ports[slave_id];
/* Atomically fetch-and-clear the pending warnings. */
219 warnings = port->warnings_to_show;
220 } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0);
/* Rate limit: skip logging until the previous warning period elapsed. */
225 if (!timer_is_expired(&port->warning_timer))
/* Re-arm the rate-limit window (BOND_8023AD_WARNINGS_PERIOD_MS). */
229 timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS *
230 rte_get_tsc_hz() / 1000);
232 if (warnings & WRN_RX_QUEUE_FULL) {
234 "Slave %u: failed to enqueue LACP packet into RX ring.\n"
235 "Receive and transmit functions must be invoked on bonded\n"
236 "interface at least 10 times per second or LACP will not\n"
237 "work correctly\n", slave_id);
240 if (warnings & WRN_TX_QUEUE_FULL) {
242 "Slave %u: failed to enqueue LACP packet into TX ring.\n"
243 "Receive and transmit functions must be invoked on bonded\n"
244 "interface at least 10 times per second or LACP will not\n"
245 "work correctly\n", slave_id);
/* NOTE(review): message text "marker to early" should read "too early",
 * and the two messages below lack a trailing '\n' — runtime strings, left
 * unchanged here. */
248 if (warnings & WRN_RX_MARKER_TO_FAST)
249 RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id);
251 if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
253 "Slave %u: ignoring unknown slow protocol frame type", slave_id);
256 if (warnings & WRN_UNKNOWN_MARKER_TYPE)
257 RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id);
259 if (warnings & WRN_NOT_LACP_CAPABLE)
260 MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
/* Reset partner knowledge to defaults (802.1AX recordDefault). */
264 record_default(struct port *port)
266 /* Record default parameters for partner. Partner admin parameters
267 * are not implemented so set them to arbitrary default (last known) and
268 * mark actor that partner is in defaulted state. */
269 port->partner_state = STATE_LACP_ACTIVE;
270 ACTOR_STATE_SET(port, DEFAULTED);
273 /** Function handles rx state machine.
275 * This function implements Receive State Machine from point 5.4.12 in
276 * 802.1AX documentation. It should be called periodically.
 *
 * NOTE(review): several lines of this function (else branches, the lacp ==
 * NULL early-out, local declarations) are not visible in this chunk.
278 * @param lacpdu LACPDU received.
279 * @param port Port on which LACPDU was received.
282 rx_machine(struct bond_dev_private *internals, uint8_t slave_id,
285 struct port *agg, *port = &mode_8023ad_ports[slave_id];
/* BEGIN: first pass after (re)initialization — reset to defaults. */
288 if (SM_FLAG(port, BEGIN)) {
289 /* Initialize stuff */
290 MODE4_DEBUG("-> INITIALIZE\n");
291 SM_FLAG_CLR(port, MOVED);
292 port->selected = UNSELECTED;
294 record_default(port);
296 ACTOR_STATE_CLR(port, EXPIRED);
297 timer_cancel(&port->current_while_timer);
299 /* DISABLED: On initialization partner is out of sync */
300 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
302 /* LACP DISABLED stuff if LACP not enabled on this port */
303 if (!SM_FLAG(port, LACP_ENABLED))
304 PARTNER_STATE_CLR(port, AGGREGATION);
306 PARTNER_STATE_SET(port, AGGREGATION);
/* LACP disabled on this port: fall back to defaulted partner state. */
309 if (!SM_FLAG(port, LACP_ENABLED)) {
310 /* Update parameters only if state changed */
311 if (!timer_is_stopped(&port->current_while_timer)) {
312 port->selected = UNSELECTED;
313 record_default(port);
314 PARTNER_STATE_CLR(port, AGGREGATION);
315 ACTOR_STATE_CLR(port, EXPIRED);
316 timer_cancel(&port->current_while_timer);
/* A LACPDU was received: CURRENT state processing. */
322 MODE4_DEBUG("LACP -> CURRENT\n");
323 BOND_PRINT_LACP(lacp);
324 /* Update selected flag. If partner parameters are defaulted assume they
325 * match. If not defaulted compare LACP actor with this port's recorded
 * partner parameters; any mismatch forces re-selection. */
327 if (!ACTOR_STATE(port, DEFAULTED) &&
328 (ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
329 || memcmp(&port->partner, &lacp->actor.port_params,
330 sizeof(port->partner)) != 0)) {
331 MODE4_DEBUG("selected <- UNSELECTED\n");
332 port->selected = UNSELECTED;
335 /* Record this PDU actor params as partner params */
336 memcpy(&port->partner, &lacp->actor.port_params,
337 sizeof(struct port_params));
338 port->partner_state = lacp->actor.state;
340 /* Partner parameters are not defaulted any more */
341 ACTOR_STATE_CLR(port, DEFAULTED);
343 /* If LACP partner params match this port actor params */
344 agg = &mode_8023ad_ports[port->aggregator_port_id];
345 bool match = port->actor.system_priority ==
346 lacp->partner.port_params.system_priority &&
347 is_same_ether_addr(&agg->actor.system,
348 &lacp->partner.port_params.system) &&
349 port->actor.port_priority ==
350 lacp->partner.port_params.port_priority &&
351 port->actor.port_number ==
352 lacp->partner.port_params.port_number;
354 /* Update NTT if partners information are outdated (xored and masked
 * against the state bits that matter for agreement). */
356 uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
357 STATE_SYNCHRONIZATION | STATE_AGGREGATION;
359 if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
361 SM_FLAG_SET(port, NTT);
364 /* If LACP partner params match this port actor params */
365 if (match == true && ACTOR_STATE(port, AGGREGATION) ==
366 PARTNER_STATE(port, AGGREGATION))
367 PARTNER_STATE_SET(port, SYNCHRONIZATION);
368 else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
370 PARTNER_STATE_SET(port, SYNCHRONIZATION);
372 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
/* Re-arm current_while with short or long timeout per actor setting. */
374 if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
375 timeout = internals->mode4.short_timeout;
377 timeout = internals->mode4.long_timeout;
379 timer_set(&port->current_while_timer, timeout);
380 ACTOR_STATE_CLR(port, EXPIRED);
381 return; /* No state change */
384 /* If CURRENT state timer is not running (stopped or expired)
385 * transit to EXPIRED state from DISABLED or CURRENT */
386 if (!timer_is_running(&port->current_while_timer)) {
387 ACTOR_STATE_SET(port, EXPIRED);
388 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
389 PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
390 timer_set(&port->current_while_timer, internals->mode4.short_timeout);
395 * Function handles periodic tx state machine.
397 * Function implements Periodic Transmission state machine from point 5.4.13
398 * in 802.1AX documentation. It should be called periodically.
400 * @param port Port to handle state machine.
403 periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
405 struct port *port = &mode_8023ad_ports[slave_id];
406 /* Calculate if either side is LACP enabled */
408 uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
409 PARTNER_STATE(port, LACP_ACTIVE);
411 uint8_t is_partner_fast, was_partner_fast;
412 /* No periodic is on BEGIN, LACP DISABLE or when both sides are passive */
413 if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
414 timer_cancel(&port->periodic_timer);
/* Force the tx machine timer so a frame can go out immediately once
 * periodic transmission resumes. */
415 timer_force_expired(&port->tx_machine_timer);
416 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
418 MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
419 SM_FLAG(port, BEGIN) ? "begind " : "",
420 SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
421 active ? "LACP active " : "LACP pasive ");
425 is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
426 was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);
428 /* If periodic timer is not started, transit from NO PERIODIC to FAST/SLOW.
429 * Other case: check if timer expired or partner's settings changed. */
430 if (!timer_is_stopped(&port->periodic_timer)) {
431 if (timer_is_expired(&port->periodic_timer)) {
432 SM_FLAG_SET(port, NTT);
433 } else if (is_partner_fast != was_partner_fast) {
434 /* Partner's timeout was slow and now it is fast -> send LACP.
435 * In other case (was fast and now it is slow) just switch
436 * timeout to slow without forcing send of LACP (because standard
 * says so). */
439 SM_FLAG_SET(port, NTT);
441 return; /* Nothing changed */
444 /* Handle state transition to FAST/SLOW LACP timeout */
445 if (is_partner_fast) {
446 timeout = internals->mode4.fast_periodic_timeout;
447 SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
449 timeout = internals->mode4.slow_periodic_timeout;
450 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
453 timer_set(&port->periodic_timer, timeout);
457 * Function handles mux state machine.
459 * Function implements Mux Machine from point 5.4.15 in 802.1AX documentation.
460 * It should be called periodically.
462 * @param port Port to handle state machine.
465 mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
467 struct port *port = &mode_8023ad_ports[slave_id];
469 /* Save current state for later use */
470 const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
473 /* Enter DETACHED state on BEGIN condition or from any other state if
474 * port was unselected */
475 if (SM_FLAG(port, BEGIN) ||
476 port->selected == UNSELECTED || (port->selected == STANDBY &&
477 (port->actor_state & state_mask) != 0)) {
478 /* detach mux from aggregator */
479 port->actor_state &= ~state_mask;
480 /* Set ntt to true if BEGIN condition or transition from any other state
481 * which is indicated that wait_while_timer was started */
482 if (SM_FLAG(port, BEGIN) ||
483 !timer_is_stopped(&port->wait_while_timer)) {
484 SM_FLAG_SET(port, NTT);
485 MODE4_DEBUG("-> DETACHED\n");
487 timer_cancel(&port->wait_while_timer);
/* DETACHED -> WAITING: start aggregate-wait timer once (re)selected. */
490 if (timer_is_stopped(&port->wait_while_timer)) {
491 if (port->selected == SELECTED || port->selected == STANDBY) {
492 timer_set(&port->wait_while_timer,
493 internals->mode4.aggregate_wait_timeout);
495 MODE4_DEBUG("DETACHED -> WAITING\n");
497 /* Waiting state entered */
501 /* Transit next state if port is ready */
502 if (!timer_is_expired(&port->wait_while_timer))
505 if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
506 !PARTNER_STATE(port, SYNCHRONIZATION)) {
507 /* If in COLLECTING or DISTRIBUTING state and partner becomes out of
508 * sync transit to ATTACHED state. */
509 ACTOR_STATE_CLR(port, DISTRIBUTING);
510 ACTOR_STATE_CLR(port, COLLECTING);
511 /* Clear actor sync to activate transit to ATTACHED in condition below */
512 ACTOR_STATE_CLR(port, SYNCHRONIZATION);
513 MODE4_DEBUG("Out of sync -> ATTACHED\n");
516 if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
517 /* attach mux to aggregator */
518 RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
519 STATE_DISTRIBUTING)) == 0);
521 ACTOR_STATE_SET(port, SYNCHRONIZATION);
522 SM_FLAG_SET(port, NTT);
523 MODE4_DEBUG("ATTACHED Entered\n");
524 } else if (!ACTOR_STATE(port, COLLECTING)) {
525 /* Start collecting if in sync */
526 if (PARTNER_STATE(port, SYNCHRONIZATION)) {
527 MODE4_DEBUG("ATTACHED -> COLLECTING\n");
528 ACTOR_STATE_SET(port, COLLECTING);
529 SM_FLAG_SET(port, NTT);
531 } else if (ACTOR_STATE(port, COLLECTING)) {
532 /* Check if partner is in COLLECTING state. If so this port can
533 * distribute frames to it */
534 if (!ACTOR_STATE(port, DISTRIBUTING)) {
535 if (PARTNER_STATE(port, COLLECTING)) {
536 /* Enable DISTRIBUTING if partner is collecting */
537 ACTOR_STATE_SET(port, DISTRIBUTING);
538 SM_FLAG_SET(port, NTT);
539 MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
541 "Bond %u: slave id %u distributing started.\n",
542 internals->port_id, slave_id);
545 if (!PARTNER_STATE(port, COLLECTING)) {
546 /* Disable DISTRIBUTING (enter COLLECTING state) if partner
547 * is not collecting */
548 ACTOR_STATE_CLR(port, DISTRIBUTING);
549 SM_FLAG_SET(port, NTT);
550 MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
552 "Bond %u: slave id %u distributing stopped.\n",
553 internals->port_id, slave_id);
560 * Function handles transmit state machine.
562 * Function implements Transmit Machine from point 5.4.16 in 802.1AX
 * documentation: builds a LACPDU from the port's actor/partner state and
 * queues it (internal TX ring, or dedicated HW queue when enabled).
568 tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
570 struct port *agg, *port = &mode_8023ad_ports[slave_id];
572 struct rte_mbuf *lacp_pkt = NULL;
573 struct lacpdu_header *hdr;
574 struct lacpdu *lacpdu;
576 /* If periodic timer is not running periodic machine is in NO PERIODIC and
577 * according to 802.3ax standard tx machine should not transmit any frames
578 * and set ntt to false. */
579 if (timer_is_stopped(&port->periodic_timer))
580 SM_FLAG_CLR(port, NTT);
/* Nothing to transmit, or tx rate limiter still active. */
582 if (!SM_FLAG(port, NTT))
585 if (!timer_is_expired(&port->tx_machine_timer))
588 lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
589 if (lacp_pkt == NULL) {
590 RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n");
594 lacp_pkt->data_len = sizeof(*hdr);
595 lacp_pkt->pkt_len = sizeof(*hdr);
597 hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
599 /* Source and destination MAC */
600 ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
601 rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
602 hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
604 lacpdu = &hdr->lacpdu;
605 memset(lacpdu, 0, sizeof(*lacpdu));
607 /* Initialize LACP part */
608 lacpdu->subtype = SLOW_SUBTYPE_LACP;
609 lacpdu->version_number = 1;
/* Actor TLV: this port's parameters, with the aggregator's system MAC. */
612 lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
613 lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
614 memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
615 sizeof(port->actor));
616 agg = &mode_8023ad_ports[port->aggregator_port_id];
617 ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
618 lacpdu->actor.state = port->actor_state;
/* Partner TLV: last parameters recorded from the partner's LACPDUs. */
621 lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
622 lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
623 memcpy(&lacpdu->partner.port_params, &port->partner,
624 sizeof(struct port_params));
625 lacpdu->partner.state = port->partner_state;
/* Collector TLV (fixed-length per 802.1AX) and terminator TLV. */
628 lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
629 lacpdu->collector_info_length = 0x10;
630 lacpdu->collector_max_delay = 0;
632 lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
633 lacpdu->terminator_length = 0;
635 MODE4_DEBUG("Sending LACP frame\n");
636 BOND_PRINT_LACP(lacpdu);
638 if (internals->mode4.dedicated_queues.enabled == 0) {
639 int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);
641 /* If TX ring full, drop packet and free message.
642 Retransmission will happen in next function call. */
643 rte_pktmbuf_free(lacp_pkt);
644 set_warning_flags(port, WRN_TX_QUEUE_FULL);
/* Dedicated queue mode: transmit directly on the slave's LACP TX queue. */
648 uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
649 internals->mode4.dedicated_queues.tx_qid,
651 if (pkts_sent != 1) {
652 rte_pktmbuf_free(lacp_pkt);
653 set_warning_flags(port, WRN_TX_QUEUE_FULL);
/* Frame handed off: rate-limit the next transmission and clear NTT. */
659 timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
660 SM_FLAG_CLR(port, NTT);
664 * Function assigns port to aggregator.
666 * @param bond_dev_private Pointer to bond_dev_private structure.
667 * @param port_pos Port to assign.
670 selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
672 struct port *agg, *port;
673 uint8_t slaves_count, new_agg_id, i;
676 slaves = internals->active_slaves;
677 slaves_count = internals->active_slave_count;
678 port = &mode_8023ad_ports[slave_id];
680 /* Search for aggregator suitable for this port */
681 for (i = 0; i < slaves_count; ++i) {
682 agg = &mode_8023ad_ports[slaves[i]];
683 /* Skip ports that are not aggregators (a port is its own aggregator
 * only when aggregator_port_id equals its own id) */
684 if (agg->aggregator_port_id != slaves[i])
687 /* Actor's system ID is not checked since all slave devices have the same
688 * ID (MAC address). */
689 if ((agg->actor.key == port->actor.key &&
690 agg->partner.system_priority == port->partner.system_priority &&
691 is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1
692 && (agg->partner.key == port->partner.key)) &&
693 is_zero_ether_addr(&port->partner.system) != 1 &&
695 rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
701 /* By default, port uses itself as aggregator */
702 if (i == slaves_count)
703 new_agg_id = slave_id;
705 new_agg_id = slaves[i];
707 if (new_agg_id != port->aggregator_port_id) {
708 port->aggregator_port_id = new_agg_id;
710 MODE4_DEBUG("-> SELECTED: ID=%3u\n"
711 "\t%s aggregator ID=%3u\n",
712 port->aggregator_port_id,
713 port->aggregator_port_id == slave_id ?
714 "aggregator not found, using default" : "aggregator found",
715 port->aggregator_port_id);
718 port->selected = SELECTED;
721 /* Function maps DPDK speed to bonding speed stored in key field */
723 link_speed_key(uint16_t speed) {
727 case ETH_SPEED_NUM_NONE:
/* Unknown/none speed: key_speed handling is on a line not shown here. */
730 case ETH_SPEED_NUM_10M:
731 key_speed = BOND_LINK_SPEED_KEY_10M;
733 case ETH_SPEED_NUM_100M:
734 key_speed = BOND_LINK_SPEED_KEY_100M;
736 case ETH_SPEED_NUM_1G:
737 key_speed = BOND_LINK_SPEED_KEY_1000M;
739 case ETH_SPEED_NUM_10G:
740 key_speed = BOND_LINK_SPEED_KEY_10G;
742 case ETH_SPEED_NUM_20G:
743 key_speed = BOND_LINK_SPEED_KEY_20G;
745 case ETH_SPEED_NUM_40G:
746 key_speed = BOND_LINK_SPEED_KEY_40G;
/* Feed one (possibly absent) LACP frame into the RX state machine and free
 * the mbuf. Passing NULL still steps the machine (timeout handling). */
757 rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
758 struct rte_mbuf *lacp_pkt) {
759 struct lacpdu_header *lacp;
761 if (lacp_pkt != NULL) {
762 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
763 RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
765 /* This is LACP frame so pass it to rx_machine */
766 rx_machine(internals, slave_id, &lacp->lacpdu);
767 rte_pktmbuf_free(lacp_pkt);
769 rx_machine(internals, slave_id, NULL);
/* Main mode-4 periodic worker, driven by an EAL alarm: refreshes per-slave
 * link keys/MACs, dequeues LACP frames and runs all four state machines for
 * every active slave, then re-arms itself. */
773 bond_mode_8023ad_periodic_cb(void *arg)
775 struct rte_eth_dev *bond_dev = arg;
776 struct bond_dev_private *internals = bond_dev->data->dev_private;
778 struct rte_eth_link link_info;
779 struct ether_addr slave_addr;
780 struct rte_mbuf *lacp_pkt = NULL;
785 /* Update link status on each port */
786 for (i = 0; i < internals->active_slave_count; i++) {
789 slave_id = internals->active_slaves[i];
790 rte_eth_link_get_nowait(slave_id, &link_info);
791 rte_eth_macaddr_get(slave_id, &slave_addr);
/* Encode link speed (upper bits) and duplex (LSB) into the LACP key. */
793 if (link_info.link_status != 0) {
794 key = link_speed_key(link_info.link_speed) << 1;
795 if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
796 key |= BOND_LINK_FULL_DUPLEX_KEY;
800 port = &mode_8023ad_ports[slave_id];
802 key = rte_cpu_to_be_16(key);
803 if (key != port->actor.key) {
804 if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
805 set_warning_flags(port, WRN_NOT_LACP_CAPABLE);
807 port->actor.key = key;
808 SM_FLAG_SET(port, NTT);
/* Track slave MAC changes; NTT only if this port is its own aggregator. */
811 if (!is_same_ether_addr(&port->actor.system, &slave_addr)) {
812 ether_addr_copy(&slave_addr, &port->actor.system);
813 if (port->aggregator_port_id == slave_id)
814 SM_FLAG_SET(port, NTT);
/* Second pass: run the state machines for each active slave. */
818 for (i = 0; i < internals->active_slave_count; i++) {
819 slave_id = internals->active_slaves[i];
820 port = &mode_8023ad_ports[slave_id];
822 if ((port->actor.key &
823 rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
825 SM_FLAG_SET(port, BEGIN);
827 /* LACP is disabled on half duplex or link is down */
828 if (SM_FLAG(port, LACP_ENABLED)) {
829 /* If port was enabled set it to BEGIN state */
830 SM_FLAG_CLR(port, LACP_ENABLED);
831 ACTOR_STATE_CLR(port, DISTRIBUTING);
832 ACTOR_STATE_CLR(port, COLLECTING);
835 /* Skip this port processing */
839 SM_FLAG_SET(port, LACP_ENABLED);
841 if (internals->mode4.dedicated_queues.enabled == 0) {
842 /* Find LACP packet to this port. Do not check subtype,
843 * it is done in function that queued packet
845 int retval = rte_ring_dequeue(port->rx_ring,
851 rx_machine_update(internals, slave_id, lacp_pkt);
/* Dedicated queue mode: poll the slave's LACP RX queue directly. */
853 uint16_t rx_count = rte_eth_rx_burst(slave_id,
854 internals->mode4.dedicated_queues.rx_qid,
858 bond_mode_8023ad_handle_slow_pkt(internals,
861 rx_machine_update(internals, slave_id, NULL);
864 periodic_machine(internals, slave_id);
865 mux_machine(internals, slave_id);
866 tx_machine(internals, slave_id);
867 selection_logic(internals, slave_id);
869 SM_FLAG_CLR(port, BEGIN);
870 show_warnings(slave_id);
/* Re-arm this callback for the next update period. */
873 rte_eal_alarm_set(internals->mode4.update_timeout_us,
874 bond_mode_8023ad_periodic_cb, arg);
/* Initialize mode-4 state for a newly activated slave: seed actor/partner
 * parameters, enable promiscuous mode, and create the per-slave mbuf pool
 * plus RX/TX slow-protocol rings. Allocation failure is fatal (rte_panic)
 * because these resources cannot be re-created later. */
878 bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
880 struct bond_dev_private *internals = bond_dev->data->dev_private;
882 struct port *port = &mode_8023ad_ports[slave_id];
883 struct port_params initial = {
885 .system_priority = rte_cpu_to_be_16(0xFFFF),
886 .key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
887 .port_priority = rte_cpu_to_be_16(0x00FF),
891 char mem_name[RTE_ETH_NAME_MAX_LEN];
893 unsigned element_size;
894 uint32_t total_tx_desc;
895 struct bond_tx_queue *bd_tx_q;
898 /* Given slave must not be in active list */
899 RTE_ASSERT(find_slave_by_id(internals->active_slaves,
900 internals->active_slave_count, slave_id) == internals->active_slave_count);
901 RTE_SET_USED(internals); /* used only for assert when enabled */
903 memcpy(&port->actor, &initial, sizeof(struct port_params));
904 /* Standard requires that port ID must be greater than 0.
905 * Add 1 to get corresponding port_number */
906 port->actor.port_number = rte_cpu_to_be_16((uint16_t)slave_id + 1);
908 memcpy(&port->partner, &initial, sizeof(struct port_params));
/* Start with defaulted partner; actor active and aggregatable. */
911 port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
912 port->partner_state = STATE_LACP_ACTIVE;
913 port->sm_flags = SM_FLAGS_BEGIN;
915 /* use this port as aggregator */
916 port->aggregator_port_id = slave_id;
917 rte_eth_promiscuous_enable(slave_id);
919 timer_cancel(&port->warning_timer);
/* Resources already created on a previous activation: nothing to do. */
921 if (port->mbuf_pool != NULL)
924 RTE_ASSERT(port->rx_ring == NULL);
925 RTE_ASSERT(port->tx_ring == NULL);
927 socket_id = rte_eth_dev_socket_id(slave_id);
928 if (socket_id == (int)LCORE_ID_ANY)
929 socket_id = rte_socket_id();
931 element_size = sizeof(struct slow_protocol_frame) +
932 RTE_PKTMBUF_HEADROOM;
934 /* The size of the mempool should be at least:
935 * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
936 total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS;
937 for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) {
938 bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id];
939 total_tx_desc += bd_tx_q->nb_tx_desc;
942 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
943 port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
944 RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
945 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
946 0, element_size, socket_id);
948 /* Any memory allocation failure in initialization is critical because
949 * resources can't be freed, so reinitialization is impossible. */
950 if (port->mbuf_pool == NULL) {
951 rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
952 slave_id, mem_name, rte_strerror(rte_errno));
955 snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
956 port->rx_ring = rte_ring_create(mem_name,
957 rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);
959 if (port->rx_ring == NULL) {
960 rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
961 mem_name, rte_strerror(rte_errno));
964 /* TX ring is at least one pkt longer to make room for marker packet. */
965 snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
966 port->tx_ring = rte_ring_create(mem_name,
967 rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);
969 if (port->tx_ring == NULL) {
970 rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
971 mem_name, rte_strerror(rte_errno));
/* Remove a slave from the mode-4 transmit policy: unselect it (and any
 * ports that used it as aggregator), clear its mux state bits, and drain
 * both slow-protocol rings, freeing any queued mbufs. */
976 bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
979 struct bond_dev_private *internals = bond_dev->data->dev_private;
984 /* Given slave must be in active list */
985 RTE_ASSERT(find_slave_by_id(internals->active_slaves,
986 internals->active_slave_count, slave_id) < internals->active_slave_count);
988 /* Exclude slave from transmit policy. If this slave is an aggregator
989 * make all aggregated slaves unselected to force selection logic
990 * to select suitable aggregator for this port. */
991 for (i = 0; i < internals->active_slave_count; i++) {
992 port = &mode_8023ad_ports[internals->active_slaves[i]];
993 if (port->aggregator_port_id != slave_id)
996 port->selected = UNSELECTED;
998 /* Use default aggregator */
999 port->aggregator_port_id = internals->active_slaves[i];
1002 port = &mode_8023ad_ports[slave_id];
1003 port->selected = UNSELECTED;
1004 port->actor_state &= ~(STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
/* Free any slow-protocol frames still queued for this slave. */
1007 while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
1008 rte_pktmbuf_free((struct rte_mbuf *)pkt);
1010 while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)
1011 rte_pktmbuf_free((struct rte_mbuf *)pkt);
/* Re-read each active slave's MAC and propagate changes into the mode-4
 * state; the periodic machinery is stopped during the update and restarted
 * only if the bonded device is running. */
1016 bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
1018 struct bond_dev_private *internals = bond_dev->data->dev_private;
1019 struct ether_addr slave_addr;
1020 struct port *slave, *agg_slave;
1021 uint8_t slave_id, i, j;
1023 bond_mode_8023ad_stop(bond_dev);
1025 for (i = 0; i < internals->active_slave_count; i++) {
1026 slave_id = internals->active_slaves[i];
1027 slave = &mode_8023ad_ports[slave_id];
1028 rte_eth_macaddr_get(slave_id, &slave_addr);
/* MAC unchanged: nothing to propagate for this slave. */
1030 if (is_same_ether_addr(&slave_addr, &slave->actor.system))
1033 ether_addr_copy(&slave_addr, &slave->actor.system);
1034 /* Do nothing if this port is not an aggregator. In other case
1035 * Set NTT flag on every port that use this aggregator. */
1036 if (slave->aggregator_port_id != slave_id)
1039 for (j = 0; j < internals->active_slave_count; j++) {
1040 agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
1041 if (agg_slave->aggregator_port_id == slave_id)
1042 SM_FLAG_SET(agg_slave, NTT);
1046 if (bond_dev->data->dev_started)
1047 bond_mode_8023ad_start(bond_dev);
/* Export the current mode-4 configuration, converting internal TSC-tick
 * timeouts back to milliseconds. */
1051 bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
1052 struct rte_eth_bond_8023ad_conf *conf)
1054 struct bond_dev_private *internals = dev->data->dev_private;
1055 struct mode8023ad_private *mode4 = &internals->mode4;
1056 uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
1058 conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks;
1059 conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks;
1060 conf->short_timeout_ms = mode4->short_timeout / ms_ticks;
1061 conf->long_timeout_ms = mode4->long_timeout / ms_ticks;
1062 conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks;
1063 conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
1064 conf->update_timeout_ms = mode4->update_timeout_us / 1000;
1065 conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks;
/* ABI v16.07 variant of conf_get: also exports the slow-RX callback. */
1069 bond_mode_8023ad_conf_get_v1607(struct rte_eth_dev *dev,
1070 struct rte_eth_bond_8023ad_conf *conf)
1072 struct bond_dev_private *internals = dev->data->dev_private;
1073 struct mode8023ad_private *mode4 = &internals->mode4;
1075 bond_mode_8023ad_conf_get(dev, conf);
1076 conf->slowrx_cb = mode4->slowrx_cb;
/* Fill 'conf' with the compile-time default mode-4 timing parameters and
 * no slow-RX callback. */
1080 bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
1082 conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
1083 conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
1084 conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
1085 conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
1086 conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
1087 conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
1088 conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
1089 conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
1090 conf->slowrx_cb = NULL;
/* Apply a user configuration: convert millisecond values to TSC ticks and
 * store them in mode4; dedicated queues start disabled. */
1094 bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
1095 struct rte_eth_bond_8023ad_conf *conf)
1097 uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
1099 mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
1100 mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
1101 mode4->short_timeout = conf->short_timeout_ms * ms_ticks;
1102 mode4->long_timeout = conf->long_timeout_ms * ms_ticks;
1103 mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks;
1104 mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
1105 mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
1106 mode4->update_timeout_us = conf->update_timeout_ms * 1000;
1108 mode4->dedicated_queues.enabled = 0;
1109 mode4->dedicated_queues.rx_qid = UINT16_MAX;
1110 mode4->dedicated_queues.tx_qid = UINT16_MAX;
/*
 * 2.0-ABI setup: apply *conf to the device's mode-4 state.
 * def_conf is declared for the default-configuration path; the
 * conditional that selects it is not visible here — presumably
 * conf is redirected to &def_conf when the caller passes NULL
 * (TODO confirm against full source).
 * The state machines are stopped while the new timeouts are
 * assigned and restarted only if the device was already started.
 */
1114 bond_mode_8023ad_setup_v20(struct rte_eth_dev *dev,
1115 struct rte_eth_bond_8023ad_conf *conf)
1117 struct rte_eth_bond_8023ad_conf def_conf;
1118 struct bond_dev_private *internals = dev->data->dev_private;
1119 struct mode8023ad_private *mode4 = &internals->mode4;
1123 bond_mode_8023ad_conf_get_default(conf);
1126 bond_mode_8023ad_stop(dev);
1127 bond_mode_8023ad_conf_assign(mode4, conf);
1129 if (dev->data->dev_started)
1130 bond_mode_8023ad_start(dev);
/*
 * Current setup variant: same stop/assign/restart sequence as
 * bond_mode_8023ad_setup_v20, but additionally installs the external
 * slow-RX callback from *conf (which switches the periodic work to
 * bond_mode_8023ad_ext_periodic_cb — see bond_mode_8023ad_start).
 * As in the v20 variant, def_conf presumably backs a NULL conf
 * (selection code not visible here — TODO confirm).
 */
1135 bond_mode_8023ad_setup(struct rte_eth_dev *dev,
1136 struct rte_eth_bond_8023ad_conf *conf)
1138 struct rte_eth_bond_8023ad_conf def_conf;
1139 struct bond_dev_private *internals = dev->data->dev_private;
1140 struct mode8023ad_private *mode4 = &internals->mode4;
1144 bond_mode_8023ad_conf_get_default(conf);
1147 bond_mode_8023ad_stop(dev);
1148 bond_mode_8023ad_conf_assign(mode4, conf);
1149 mode4->slowrx_cb = conf->slowrx_cb;
1151 if (dev->data->dev_started)
1152 bond_mode_8023ad_start(dev);
/*
 * (Re)activate mode-4 processing on every currently active slave.
 * NOTE(review): the loop passes the loop index i to
 * bond_mode_8023ad_activate_slave(), not internals->active_slaves[i].
 * If activate_slave expects a slave port id rather than an index into
 * the active-slave array, this is wrong — verify against its definition.
 */
1156 bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
1158 struct bond_dev_private *internals = bond_dev->data->dev_private;
1161 for (i = 0; i < internals->active_slave_count; i++)
1162 bond_mode_8023ad_activate_slave(bond_dev, i);
/*
 * Arm the periodic mode-4 alarm. If an external slow-RX callback is
 * installed, the "ext" callback (which forwards LACPDUs to the user)
 * is scheduled; otherwise the internal state-machine callback runs.
 * Returns the rte_eal_alarm_set() result.
 */
1168 bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
1170 struct bond_dev_private *internals = bond_dev->data->dev_private;
1171 struct mode8023ad_private *mode4 = &internals->mode4;
1172 static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;
1174 if (mode4->slowrx_cb)
1175 return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
1178 return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev);
/*
 * Cancel the periodic mode-4 alarm — the ext callback when an
 * external slow-RX callback is installed, the internal one otherwise.
 * Mirror image of bond_mode_8023ad_start().
 */
1182 bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
1184 struct bond_dev_private *internals = bond_dev->data->dev_private;
1185 struct mode8023ad_private *mode4 = &internals->mode4;
1187 if (mode4->slowrx_cb) {
1188 rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb,
1192 rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
/*
 * Classify and dispatch one received slow-protocol frame from a slave.
 * Marker (info) frames are answered in place by rewriting the frame
 * into a marker response; LACPDUs are queued (or processed directly)
 * for the RX state machine. On any warning path the packet is freed
 * and a warning flag is latched on the port.
 */
1196 bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
1197 uint8_t slave_id, struct rte_mbuf *pkt)
1199 struct mode8023ad_private *mode4 = &internals->mode4;
1200 struct port *port = &mode_8023ad_ports[slave_id];
1201 struct marker_header *m_hdr;
1202 uint64_t marker_timer, old_marker_timer;
1204 uint8_t wrn, subtype;
1205 /* If packet is a marker, we send response now by reusing given packet
1206 * and update only source MAC, destination MAC is multicast so don't
1207 * update it. Other frames will be handled later by state machines */
1208 subtype = rte_pktmbuf_mtod(pkt,
1209 struct slow_protocol_frame *)->slow_protocol.subtype;
1211 if (subtype == SLOW_SUBTYPE_MARKER) {
1212 m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *);
/* Only marker-information TLVs are answered; anything else is an
 * unknown marker type. NOTE(review): the likely() hint looks inverted
 * — INFO markers should be the common case, so the != branch is the
 * rare one. Behavior is correct either way; hint worth confirming. */
1214 if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) {
1215 wrn = WRN_UNKNOWN_MARKER_TYPE;
1219 /* Setup marker timer. Do it in loop in case concurrent access. */
1221 old_marker_timer = port->rx_marker_timer;
/* Rate-limit: refuse a new marker while the previous marker timer
 * has not yet expired. */
1222 if (!timer_is_expired(&old_marker_timer)) {
1223 wrn = WRN_RX_MARKER_TO_FAST;
1227 timer_set(&marker_timer, mode4->rx_marker_timeout);
/* CAS retry loop: publish the new timer only if no concurrent
 * reader/writer changed it since we sampled old_marker_timer. */
1228 retval = rte_atomic64_cmpset(&port->rx_marker_timer,
1229 old_marker_timer, marker_timer);
1230 } while (unlikely(retval == 0));
/* Turn the request into a response in place; destination MAC is the
 * slow-protocols multicast address and stays untouched. */
1232 m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
1233 rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);
/* Without dedicated queues the response goes through the software
 * slow-path tx_ring; otherwise it is transmitted immediately on the
 * dedicated TX queue. */
1235 if (internals->mode4.dedicated_queues.enabled == 0) {
1236 int retval = rte_ring_enqueue(port->tx_ring, pkt);
/* Enqueue failed: rearm marker timer (0 = expired) and warn. */
1239 port->rx_marker_timer = 0;
1240 wrn = WRN_TX_QUEUE_FULL;
1244 /* Send packet directly to the slow queue */
1245 uint16_t tx_count = rte_eth_tx_burst(slave_id,
1246 internals->mode4.dedicated_queues.tx_qid,
1248 if (tx_count != 1) {
1250 port->rx_marker_timer = 0;
1251 wrn = WRN_TX_QUEUE_FULL;
1255 } else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
/* LACPDU: queue for the periodic state machine, or — with dedicated
 * queues — feed the RX machine right away. */
1256 if (internals->mode4.dedicated_queues.enabled == 0) {
1257 int retval = rte_ring_enqueue(port->rx_ring, pkt);
1259 /* If RX ring is full, free lacpdu message and drop packet */
1260 wrn = WRN_RX_QUEUE_FULL;
1264 rx_machine_update(internals, slave_id, pkt);
/* Neither marker nor LACP: unknown slow-protocol subtype. */
1266 wrn = WRN_UNKNOWN_SLOW_TYPE;
/* Common warning exit: latch the flag and drop the packet. */
1273 set_warning_flags(port, wrn);
1274 rte_pktmbuf_free(pkt);
/*
 * Public 2.0-ABI entry point: validate the port id is a bonded device,
 * then fill *conf with the base (pre-16.07) configuration fields.
 * Bound to the 2.0 version of rte_eth_bond_8023ad_conf_get.
 */
1278 rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id,
1279 struct rte_eth_bond_8023ad_conf *conf)
1281 struct rte_eth_dev *bond_dev;
1283 if (valid_bonded_port_id(port_id) != 0)
1289 bond_dev = &rte_eth_devices[port_id];
1290 bond_mode_8023ad_conf_get(bond_dev, conf);
1293 VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v20, 2.0);
/*
 * Public 16.07-ABI entry point: same validation as the v20 variant,
 * but fills the extended configuration (including slowrx_cb).
 * Bound as the default symbol version and as the static-link target
 * for rte_eth_bond_8023ad_conf_get.
 */
1296 rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
1297 struct rte_eth_bond_8023ad_conf *conf)
1299 struct rte_eth_dev *bond_dev;
1301 if (valid_bonded_port_id(port_id) != 0)
1307 bond_dev = &rte_eth_devices[port_id];
1308 bond_mode_8023ad_conf_get_v1607(bond_dev, conf);
1311 BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
1312 MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_conf_get(uint8_t port_id,
1313 struct rte_eth_bond_8023ad_conf *conf),
1314 rte_eth_bond_8023ad_conf_get_v1607);
/*
 * Shared validation for the setup entry points: the port must be a
 * bonded device, and the supplied timeouts must be non-zero with
 * fast < slow periodic and short < long timeout. The NULL-conf
 * handling (defaults path) is not visible in this excerpt —
 * presumably a NULL conf skips these checks; confirm in full source.
 */
1317 bond_8023ad_setup_validate(uint8_t port_id,
1318 struct rte_eth_bond_8023ad_conf *conf)
1320 if (valid_bonded_port_id(port_id) != 0)
1324 /* Basic sanity check */
1325 if (conf->slow_periodic_ms == 0 ||
1326 conf->fast_periodic_ms >= conf->slow_periodic_ms ||
1327 conf->long_timeout_ms == 0 ||
1328 conf->short_timeout_ms >= conf->long_timeout_ms ||
1329 conf->aggregate_wait_timeout_ms == 0 ||
1330 conf->tx_period_ms == 0 ||
1331 conf->rx_marker_period_ms == 0 ||
1332 conf->update_timeout_ms == 0) {
1333 RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n");
/*
 * Public 2.0-ABI setup: validate port id and configuration, then
 * apply *conf via the internal v20 setup routine.
 */
1342 rte_eth_bond_8023ad_setup_v20(uint8_t port_id,
1343 struct rte_eth_bond_8023ad_conf *conf)
1345 struct rte_eth_dev *bond_dev;
1348 err = bond_8023ad_setup_validate(port_id, conf);
1352 bond_dev = &rte_eth_devices[port_id];
1353 bond_mode_8023ad_setup_v20(bond_dev, conf);
1357 VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v20, 2.0);
/*
 * Public 16.07-ABI setup: same validation, but applies the extended
 * configuration (including slowrx_cb) via the current internal setup.
 * Bound as the default symbol version and as the static-link target
 * for rte_eth_bond_8023ad_setup.
 */
1360 rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
1361 struct rte_eth_bond_8023ad_conf *conf)
1363 struct rte_eth_dev *bond_dev;
1366 err = bond_8023ad_setup_validate(port_id, conf);
1370 bond_dev = &rte_eth_devices[port_id];
1371 bond_mode_8023ad_setup(bond_dev, conf);
1375 BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
1376 MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_setup(uint8_t port_id,
1377 struct rte_eth_bond_8023ad_conf *conf),
1378 rte_eth_bond_8023ad_setup_v1607);
/*
 * Report LACP status for one slave of a mode-4 bonded device:
 * selection state, actor/partner parameters and states, and the
 * aggregator port id. Fails when info is NULL, the port is not a
 * bonded device in 802.3ad mode, or slave_id is not currently an
 * active slave.
 */
1381 rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
1382 struct rte_eth_bond_8023ad_slave_info *info)
1384 struct rte_eth_dev *bond_dev;
1385 struct bond_dev_private *internals;
1388 if (info == NULL || valid_bonded_port_id(port_id) != 0 ||
1389 rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1392 bond_dev = &rte_eth_devices[port_id];
1394 internals = bond_dev->data->dev_private;
/* find_slave_by_id returns the count when the id is not found. */
1395 if (find_slave_by_id(internals->active_slaves,
1396 internals->active_slave_count, slave_id) ==
1397 internals->active_slave_count)
1400 port = &mode_8023ad_ports[slave_id];
1401 info->selected = port->selected;
1403 info->actor_state = port->actor_state;
1404 rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));
1406 info->partner_state = port->partner_state;
1407 rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));
1409 info->agg_port_id = port->aggregator_port_id;
/*
 * Common precondition check for the external-mode (ext_*) API:
 * the port must be a bonded device in 802.3ad mode, started, with
 * slave_id among its active slaves, and an external slow-RX callback
 * must be installed (i.e. the device was set up for external control).
 */
1414 bond_8023ad_ext_validate(uint8_t port_id, uint8_t slave_id)
1416 struct rte_eth_dev *bond_dev;
1417 struct bond_dev_private *internals;
1418 struct mode8023ad_private *mode4;
1420 if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1423 bond_dev = &rte_eth_devices[port_id];
1425 if (!bond_dev->data->dev_started)
1428 internals = bond_dev->data->dev_private;
/* find_slave_by_id returns the count when the id is not found. */
1429 if (find_slave_by_id(internals->active_slaves,
1430 internals->active_slave_count, slave_id) ==
1431 internals->active_slave_count)
1434 mode4 = &internals->mode4;
1435 if (mode4->slowrx_cb == NULL)
/*
 * External-mode API: set or clear the COLLECTING bit in the slave's
 * actor state, after the usual ext-mode validation.
 */
1442 rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled)
1447 res = bond_8023ad_ext_validate(port_id, slave_id);
1451 port = &mode_8023ad_ports[slave_id];
1454 ACTOR_STATE_SET(port, COLLECTING);
1456 ACTOR_STATE_CLR(port, COLLECTING);
/*
 * External-mode API: set or clear the DISTRIBUTING bit in the slave's
 * actor state, after the usual ext-mode validation.
 */
1462 rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled)
1467 res = bond_8023ad_ext_validate(port_id, slave_id);
1471 port = &mode_8023ad_ports[slave_id];
1474 ACTOR_STATE_SET(port, DISTRIBUTING);
1476 ACTOR_STATE_CLR(port, DISTRIBUTING);
/*
 * External-mode API: query the DISTRIBUTING bit of the slave's actor
 * state, after the usual ext-mode validation.
 */
1482 rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id)
1487 err = bond_8023ad_ext_validate(port_id, slave_id);
1491 port = &mode_8023ad_ports[slave_id];
1492 return ACTOR_STATE(port, DISTRIBUTING);
/*
 * External-mode API: query the COLLECTING bit of the slave's actor
 * state, after the usual ext-mode validation.
 */
1496 rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id)
1501 err = bond_8023ad_ext_validate(port_id, slave_id);
1505 port = &mode_8023ad_ports[slave_id];
1506 return ACTOR_STATE(port, COLLECTING);
/*
 * External-mode API: enqueue a caller-built LACPDU on the slave's
 * slow-path tx_ring for transmission. The frame must be at least a
 * full lacpdu_header and carry the LACP slow-protocol subtype;
 * anything else is rejected before enqueue. On success the ring takes
 * ownership of the mbuf.
 */
1510 rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
1511 struct rte_mbuf *lacp_pkt)
1516 res = bond_8023ad_ext_validate(port_id, slave_id);
1520 port = &mode_8023ad_ports[slave_id];
1522 if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
1525 struct lacpdu_header *lacp;
1527 /* only enqueue LACPDUs */
1528 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
1529 if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP)
1532 MODE4_DEBUG("sending LACP frame\n");
1534 return rte_ring_enqueue(port->tx_ring, lacp_pkt);
/*
 * Periodic alarm callback used when an external slow-RX callback is
 * installed: for each active slave, drain one LACPDU (if any) from its
 * rx_ring and hand it to the user's slowrx_cb, then re-arm the alarm
 * with the configured update timeout. The user callback owns (and must
 * free) the mbuf.
 */
1538 bond_mode_8023ad_ext_periodic_cb(void *arg)
1540 struct rte_eth_dev *bond_dev = arg;
1541 struct bond_dev_private *internals = bond_dev->data->dev_private;
1542 struct mode8023ad_private *mode4 = &internals->mode4;
1545 uint16_t i, slave_id;
1547 for (i = 0; i < internals->active_slave_count; i++) {
1548 slave_id = internals->active_slaves[i];
1549 port = &mode_8023ad_ports[slave_id];
1551 if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
1552 struct rte_mbuf *lacp_pkt = pkt;
1553 struct lacpdu_header *lacp;
1555 lacp = rte_pktmbuf_mtod(lacp_pkt,
1556 struct lacpdu_header *);
/* Only LACPDUs are ever enqueued on rx_ring (see
 * bond_mode_8023ad_handle_slow_pkt), so this is an invariant. */
1557 RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
1559 /* This is LACP frame so pass it to rx callback.
1560 * Callback is responsible for freeing mbuf.
1562 mode4->slowrx_cb(slave_id, lacp_pkt);
/* Self re-arm: keeps the callback periodic until cancelled. */
1566 rte_eal_alarm_set(internals->mode4.update_timeout_us,
1567 bond_mode_8023ad_ext_periodic_cb, arg);
/*
 * Enable dedicated (hardware-filtered) RX/TX queues for slow-protocol
 * frames on a bonded device. Requires a bonded ethdev whose slaves
 * support slow-packet hardware filtering, and the device must be
 * stopped (queue layout changes on next configure). Re-applies the
 * bonding mode so the datapath picks up the new queue arrangement.
 */
1571 rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port)
1574 struct rte_eth_dev *dev = &rte_eth_devices[port];
1575 struct bond_dev_private *internals = (struct bond_dev_private *)
1576 dev->data->dev_private;
1578 if (check_for_bonded_ethdev(dev) != 0)
1581 if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0)
1584 /* Device must be stopped to set up slow queue */
1585 if (dev->data->dev_started)
1588 internals->mode4.dedicated_queues.enabled = 1;
1590 bond_ethdev_mode_set(dev, internals->mode);
1595 rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port)
1598 struct rte_eth_dev *dev = &rte_eth_devices[port];
1599 struct bond_dev_private *internals = (struct bond_dev_private *)
1600 dev->data->dev_private;
1602 if (check_for_bonded_ethdev(dev) != 0)
1605 /* Device must be stopped to set up slow queue */
1606 if (dev->data->dev_started)
1609 internals->mode4.dedicated_queues.enabled = 0;
1611 bond_ethdev_mode_set(dev, internals->mode);