/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
38 #include <rte_alarm.h>
39 #include <rte_malloc.h>
40 #include <rte_errno.h>
41 #include <rte_cycles.h>
42 #include <rte_compat.h>
44 #include "rte_eth_bond_private.h"
46 static void bond_mode_8023ad_ext_periodic_cb(void *arg);
48 #ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
49 #define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
50 bond_dbg_get_time_diff_ms(), slave_id, \
51 __func__, ##__VA_ARGS__)
53 static uint64_t start_time;
56 bond_dbg_get_time_diff_ms(void)
64 return ((now - start_time) * 1000) / rte_get_tsc_hz();
68 bond_print_lacp(struct lacpdu *l)
72 char a_state[256] = { 0 };
73 char p_state[256] = { 0 };
75 static const char * const state_labels[] = {
76 "ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
84 addr = l->actor.port_params.system.addr_bytes;
85 snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
86 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
88 addr = l->partner.port_params.system.addr_bytes;
89 snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
90 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
92 for (i = 0; i < 8; i++) {
93 if ((l->actor.state >> i) & 1) {
94 a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ",
98 if ((l->partner.state >> i) & 1) {
99 p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ",
104 if (a_len && a_state[a_len-1] == ' ')
105 a_state[a_len-1] = '\0';
107 if (p_len && p_state[p_len-1] == ' ')
108 p_state[p_len-1] = '\0';
110 RTE_LOG(DEBUG, PMD, "LACP: {\n"\
113 " actor={ tlv=%02X, len=%02X\n"\
114 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
117 " partner={ tlv=%02X, len=%02X\n"\
118 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
121 " collector={info=%02X, length=%02X, max_delay=%04X\n, " \
122 "type_term=%02X, terminator_length = %02X}\n",\
125 l->actor.tlv_type_info,\
126 l->actor.info_length,\
127 l->actor.port_params.system_priority,\
129 l->actor.port_params.key,\
130 l->actor.port_params.port_priority,\
131 l->actor.port_params.port_number,\
133 l->partner.tlv_type_info,\
134 l->partner.info_length,\
135 l->partner.port_params.system_priority,\
137 l->partner.port_params.key,\
138 l->partner.port_params.port_priority,\
139 l->partner.port_params.port_number,\
141 l->tlv_type_collector_info,\
142 l->collector_info_length,\
143 l->collector_max_delay,\
144 l->tlv_type_terminator,\
145 l->terminator_length);
148 #define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
150 #define BOND_PRINT_LACP(lacpdu) do { } while (0)
151 #define MODE4_DEBUG(fmt, ...) do { } while (0)
154 static const struct ether_addr lacp_mac_addr = {
155 .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
158 struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
/* Stop a timer; value 0 is the "stopped" sentinel (see timer_is_stopped). */
static void
timer_cancel(uint64_t *timer)
{
	*timer = 0;
}
/* Arm a timer to expire `timeout` TSC cycles from now. */
static void
timer_set(uint64_t *timer, uint64_t timeout)
{
	*timer = rte_rdtsc() + timeout;
}
/* Forces given timer to be in expired state. */
static void
timer_force_expired(uint64_t *timer)
{
	*timer = rte_rdtsc();
}
/* A timer holding 0 is stopped (see timer_cancel). */
static bool
timer_is_stopped(uint64_t *timer)
{
	return *timer == 0;
}
/* True once the stored deadline lies in the past relative to the TSC. */
static bool
timer_is_expired(uint64_t *timer)
{
	return *timer < rte_rdtsc();
}
/* Timer is in running state if it is not stopped nor expired */
static bool
timer_is_running(uint64_t *timer)
{
	return !timer_is_stopped(timer) && !timer_is_expired(timer);
}
199 set_warning_flags(struct port *port, uint16_t flags)
203 uint16_t new_flag = 0;
206 old = port->warnings_to_show;
207 new_flag = old | flags;
208 retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag);
209 } while (unlikely(retval == 0));
213 show_warnings(uint8_t slave_id)
215 struct port *port = &mode_8023ad_ports[slave_id];
219 warnings = port->warnings_to_show;
220 } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0);
225 if (!timer_is_expired(&port->warning_timer))
229 timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS *
230 rte_get_tsc_hz() / 1000);
232 if (warnings & WRN_RX_QUEUE_FULL) {
234 "Slave %u: failed to enqueue LACP packet into RX ring.\n"
235 "Receive and transmit functions must be invoked on bonded\n"
236 "interface at least 10 times per second or LACP will not\n"
237 "work correctly\n", slave_id);
240 if (warnings & WRN_TX_QUEUE_FULL) {
242 "Slave %u: failed to enqueue LACP packet into TX ring.\n"
243 "Receive and transmit functions must be invoked on bonded\n"
244 "interface at least 10 times per second or LACP will not\n"
245 "work correctly\n", slave_id);
248 if (warnings & WRN_RX_MARKER_TO_FAST)
249 RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id);
251 if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
253 "Slave %u: ignoring unknown slow protocol frame type", slave_id);
256 if (warnings & WRN_UNKNOWN_MARKER_TYPE)
257 RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id);
259 if (warnings & WRN_NOT_LACP_CAPABLE)
260 MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
264 record_default(struct port *port)
266 /* Record default parameters for partner. Partner admin parameters
267 * are not implemented so set them to arbitrary default (last known) and
268 * mark actor that parner is in defaulted state. */
269 port->partner_state = STATE_LACP_ACTIVE;
270 ACTOR_STATE_SET(port, DEFAULTED);
273 /** Function handles rx state machine.
275 * This function implements Receive State Machine from point 5.4.12 in
276 * 802.1AX documentation. It should be called periodically.
278 * @param lacpdu LACPDU received.
279 * @param port Port on which LACPDU was received.
282 rx_machine(struct bond_dev_private *internals, uint8_t slave_id,
285 struct port *agg, *port = &mode_8023ad_ports[slave_id];
288 if (SM_FLAG(port, BEGIN)) {
289 /* Initialize stuff */
290 MODE4_DEBUG("-> INITIALIZE\n");
291 SM_FLAG_CLR(port, MOVED);
292 port->selected = UNSELECTED;
294 record_default(port);
296 ACTOR_STATE_CLR(port, EXPIRED);
297 timer_cancel(&port->current_while_timer);
299 /* DISABLED: On initialization partner is out of sync */
300 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
302 /* LACP DISABLED stuff if LACP not enabled on this port */
303 if (!SM_FLAG(port, LACP_ENABLED))
304 PARTNER_STATE_CLR(port, AGGREGATION);
306 PARTNER_STATE_SET(port, AGGREGATION);
309 if (!SM_FLAG(port, LACP_ENABLED)) {
310 /* Update parameters only if state changed */
311 if (!timer_is_stopped(&port->current_while_timer)) {
312 port->selected = UNSELECTED;
313 record_default(port);
314 PARTNER_STATE_CLR(port, AGGREGATION);
315 ACTOR_STATE_CLR(port, EXPIRED);
316 timer_cancel(&port->current_while_timer);
322 MODE4_DEBUG("LACP -> CURRENT\n");
323 BOND_PRINT_LACP(lacp);
324 /* Update selected flag. If partner parameters are defaulted assume they
325 * are match. If not defaulted compare LACP actor with ports parner
327 if (!ACTOR_STATE(port, DEFAULTED) &&
328 (ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
329 || memcmp(&port->partner, &lacp->actor.port_params,
330 sizeof(port->partner)) != 0)) {
331 MODE4_DEBUG("selected <- UNSELECTED\n");
332 port->selected = UNSELECTED;
335 /* Record this PDU actor params as partner params */
336 memcpy(&port->partner, &lacp->actor.port_params,
337 sizeof(struct port_params));
338 port->partner_state = lacp->actor.state;
340 /* Partner parameters are not defaulted any more */
341 ACTOR_STATE_CLR(port, DEFAULTED);
343 /* If LACP partner params match this port actor params */
344 agg = &mode_8023ad_ports[port->aggregator_port_id];
345 bool match = port->actor.system_priority ==
346 lacp->partner.port_params.system_priority &&
347 is_same_ether_addr(&agg->actor.system,
348 &lacp->partner.port_params.system) &&
349 port->actor.port_priority ==
350 lacp->partner.port_params.port_priority &&
351 port->actor.port_number ==
352 lacp->partner.port_params.port_number;
354 /* Update NTT if partners information are outdated (xored and masked
356 uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
357 STATE_SYNCHRONIZATION | STATE_AGGREGATION;
359 if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
361 SM_FLAG_SET(port, NTT);
364 /* If LACP partner params match this port actor params */
365 if (match == true && ACTOR_STATE(port, AGGREGATION) ==
366 PARTNER_STATE(port, AGGREGATION))
367 PARTNER_STATE_SET(port, SYNCHRONIZATION);
368 else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
370 PARTNER_STATE_SET(port, SYNCHRONIZATION);
372 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
374 if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
375 timeout = internals->mode4.short_timeout;
377 timeout = internals->mode4.long_timeout;
379 timer_set(&port->current_while_timer, timeout);
380 ACTOR_STATE_CLR(port, EXPIRED);
381 return; /* No state change */
384 /* If CURRENT state timer is not running (stopped or expired)
385 * transit to EXPIRED state from DISABLED or CURRENT */
386 if (!timer_is_running(&port->current_while_timer)) {
387 ACTOR_STATE_SET(port, EXPIRED);
388 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
389 PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
390 timer_set(&port->current_while_timer, internals->mode4.short_timeout);
395 * Function handles periodic tx state machine.
397 * Function implements Periodic Transmission state machine from point 5.4.13
398 * in 802.1AX documentation. It should be called periodically.
400 * @param port Port to handle state machine.
403 periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
405 struct port *port = &mode_8023ad_ports[slave_id];
406 /* Calculate if either site is LACP enabled */
408 uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
409 PARTNER_STATE(port, LACP_ACTIVE);
411 uint8_t is_partner_fast, was_partner_fast;
412 /* No periodic is on BEGIN, LACP DISABLE or when both sides are pasive */
413 if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
414 timer_cancel(&port->periodic_timer);
415 timer_force_expired(&port->tx_machine_timer);
416 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
418 MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
419 SM_FLAG(port, BEGIN) ? "begind " : "",
420 SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
421 active ? "LACP active " : "LACP pasive ");
425 is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
426 was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);
428 /* If periodic timer is not started, transit from NO PERIODIC to FAST/SLOW.
429 * Other case: check if timer expire or partners settings changed. */
430 if (!timer_is_stopped(&port->periodic_timer)) {
431 if (timer_is_expired(&port->periodic_timer)) {
432 SM_FLAG_SET(port, NTT);
433 } else if (is_partner_fast != was_partner_fast) {
434 /* Partners timeout was slow and now it is fast -> send LACP.
435 * In other case (was fast and now it is slow) just switch
436 * timeout to slow without forcing send of LACP (because standard
439 SM_FLAG_SET(port, NTT);
441 return; /* Nothing changed */
444 /* Handle state transition to FAST/SLOW LACP timeout */
445 if (is_partner_fast) {
446 timeout = internals->mode4.fast_periodic_timeout;
447 SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
449 timeout = internals->mode4.slow_periodic_timeout;
450 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
453 timer_set(&port->periodic_timer, timeout);
457 * Function handles mux state machine.
459 * Function implements Mux Machine from point 5.4.15 in 802.1AX documentation.
460 * It should be called periodically.
462 * @param port Port to handle state machine.
465 mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
467 struct port *port = &mode_8023ad_ports[slave_id];
469 /* Save current state for later use */
470 const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
473 /* Enter DETACHED state on BEGIN condition or from any other state if
474 * port was unselected */
475 if (SM_FLAG(port, BEGIN) ||
476 port->selected == UNSELECTED || (port->selected == STANDBY &&
477 (port->actor_state & state_mask) != 0)) {
478 /* detach mux from aggregator */
479 port->actor_state &= ~state_mask;
480 /* Set ntt to true if BEGIN condition or transition from any other state
481 * which is indicated that wait_while_timer was started */
482 if (SM_FLAG(port, BEGIN) ||
483 !timer_is_stopped(&port->wait_while_timer)) {
484 SM_FLAG_SET(port, NTT);
485 MODE4_DEBUG("-> DETACHED\n");
487 timer_cancel(&port->wait_while_timer);
490 if (timer_is_stopped(&port->wait_while_timer)) {
491 if (port->selected == SELECTED || port->selected == STANDBY) {
492 timer_set(&port->wait_while_timer,
493 internals->mode4.aggregate_wait_timeout);
495 MODE4_DEBUG("DETACHED -> WAITING\n");
497 /* Waiting state entered */
501 /* Transit next state if port is ready */
502 if (!timer_is_expired(&port->wait_while_timer))
505 if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
506 !PARTNER_STATE(port, SYNCHRONIZATION)) {
507 /* If in COLLECTING or DISTRIBUTING state and partner becomes out of
508 * sync transit to ATACHED state. */
509 ACTOR_STATE_CLR(port, DISTRIBUTING);
510 ACTOR_STATE_CLR(port, COLLECTING);
511 /* Clear actor sync to activate transit ATACHED in condition bellow */
512 ACTOR_STATE_CLR(port, SYNCHRONIZATION);
513 MODE4_DEBUG("Out of sync -> ATTACHED\n");
516 if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
517 /* attach mux to aggregator */
518 RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
519 STATE_DISTRIBUTING)) == 0);
521 ACTOR_STATE_SET(port, SYNCHRONIZATION);
522 SM_FLAG_SET(port, NTT);
523 MODE4_DEBUG("ATTACHED Entered\n");
524 } else if (!ACTOR_STATE(port, COLLECTING)) {
525 /* Start collecting if in sync */
526 if (PARTNER_STATE(port, SYNCHRONIZATION)) {
527 MODE4_DEBUG("ATTACHED -> COLLECTING\n");
528 ACTOR_STATE_SET(port, COLLECTING);
529 SM_FLAG_SET(port, NTT);
531 } else if (ACTOR_STATE(port, COLLECTING)) {
532 /* Check if partner is in COLLECTING state. If so this port can
533 * distribute frames to it */
534 if (!ACTOR_STATE(port, DISTRIBUTING)) {
535 if (PARTNER_STATE(port, COLLECTING)) {
536 /* Enable DISTRIBUTING if partner is collecting */
537 ACTOR_STATE_SET(port, DISTRIBUTING);
538 SM_FLAG_SET(port, NTT);
539 MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
541 "Bond %u: slave id %u distributing started.\n",
542 internals->port_id, slave_id);
545 if (!PARTNER_STATE(port, COLLECTING)) {
546 /* Disable DISTRIBUTING (enter COLLECTING state) if partner
547 * is not collecting */
548 ACTOR_STATE_CLR(port, DISTRIBUTING);
549 SM_FLAG_SET(port, NTT);
550 MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
552 "Bond %u: slave id %u distributing stopped.\n",
553 internals->port_id, slave_id);
560 * Function handles transmit state machine.
562 * Function implements Transmit Machine from point 5.4.16 in 802.1AX
568 tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
570 struct port *agg, *port = &mode_8023ad_ports[slave_id];
572 struct rte_mbuf *lacp_pkt = NULL;
573 struct lacpdu_header *hdr;
574 struct lacpdu *lacpdu;
576 /* If periodic timer is not running periodic machine is in NO PERIODIC and
577 * according to 802.3ax standard tx machine should not transmit any frames
578 * and set ntt to false. */
579 if (timer_is_stopped(&port->periodic_timer))
580 SM_FLAG_CLR(port, NTT);
582 if (!SM_FLAG(port, NTT))
585 if (!timer_is_expired(&port->tx_machine_timer))
588 lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
589 if (lacp_pkt == NULL) {
590 RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n");
594 lacp_pkt->data_len = sizeof(*hdr);
595 lacp_pkt->pkt_len = sizeof(*hdr);
597 hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
599 /* Source and destination MAC */
600 ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
601 rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
602 hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
604 lacpdu = &hdr->lacpdu;
605 memset(lacpdu, 0, sizeof(*lacpdu));
607 /* Initialize LACP part */
608 lacpdu->subtype = SLOW_SUBTYPE_LACP;
609 lacpdu->version_number = 1;
612 lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
613 lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
614 memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
615 sizeof(port->actor));
616 agg = &mode_8023ad_ports[port->aggregator_port_id];
617 ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
618 lacpdu->actor.state = port->actor_state;
621 lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
622 lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
623 memcpy(&lacpdu->partner.port_params, &port->partner,
624 sizeof(struct port_params));
625 lacpdu->partner.state = port->partner_state;
628 lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
629 lacpdu->collector_info_length = 0x10;
630 lacpdu->collector_max_delay = 0;
632 lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
633 lacpdu->terminator_length = 0;
635 if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) {
636 /* If TX ring full, drop packet and free message. Retransmission
637 * will happen in next function call. */
638 rte_pktmbuf_free(lacp_pkt);
639 set_warning_flags(port, WRN_TX_QUEUE_FULL);
643 MODE4_DEBUG("sending LACP frame\n");
644 BOND_PRINT_LACP(lacpdu);
646 timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
647 SM_FLAG_CLR(port, NTT);
651 * Function assigns port to aggregator.
653 * @param bond_dev_private Pointer to bond_dev_private structure.
654 * @param port_pos Port to assign.
657 selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
659 struct port *agg, *port;
660 uint8_t slaves_count, new_agg_id, i;
663 slaves = internals->active_slaves;
664 slaves_count = internals->active_slave_count;
665 port = &mode_8023ad_ports[slave_id];
667 /* Search for aggregator suitable for this port */
668 for (i = 0; i < slaves_count; ++i) {
669 agg = &mode_8023ad_ports[slaves[i]];
670 /* Skip ports that are not aggreagators */
671 if (agg->aggregator_port_id != slaves[i])
674 /* Actors system ID is not checked since all slave device have the same
675 * ID (MAC address). */
676 if ((agg->actor.key == port->actor.key &&
677 agg->partner.system_priority == port->partner.system_priority &&
678 is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1
679 && (agg->partner.key == port->partner.key)) &&
680 is_zero_ether_addr(&port->partner.system) != 1 &&
682 rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
688 /* By default, port uses it self as agregator */
689 if (i == slaves_count)
690 new_agg_id = slave_id;
692 new_agg_id = slaves[i];
694 if (new_agg_id != port->aggregator_port_id) {
695 port->aggregator_port_id = new_agg_id;
697 MODE4_DEBUG("-> SELECTED: ID=%3u\n"
698 "\t%s aggregator ID=%3u\n",
699 port->aggregator_port_id,
700 port->aggregator_port_id == slave_id ?
701 "aggregator not found, using default" : "aggregator found",
702 port->aggregator_port_id);
705 port->selected = SELECTED;
708 /* Function maps DPDK speed to bonding speed stored in key field */
710 link_speed_key(uint16_t speed) {
714 case ETH_SPEED_NUM_NONE:
717 case ETH_SPEED_NUM_10M:
718 key_speed = BOND_LINK_SPEED_KEY_10M;
720 case ETH_SPEED_NUM_100M:
721 key_speed = BOND_LINK_SPEED_KEY_100M;
723 case ETH_SPEED_NUM_1G:
724 key_speed = BOND_LINK_SPEED_KEY_1000M;
726 case ETH_SPEED_NUM_10G:
727 key_speed = BOND_LINK_SPEED_KEY_10G;
729 case ETH_SPEED_NUM_20G:
730 key_speed = BOND_LINK_SPEED_KEY_20G;
732 case ETH_SPEED_NUM_40G:
733 key_speed = BOND_LINK_SPEED_KEY_40G;
744 bond_mode_8023ad_periodic_cb(void *arg)
746 struct rte_eth_dev *bond_dev = arg;
747 struct bond_dev_private *internals = bond_dev->data->dev_private;
749 struct rte_eth_link link_info;
750 struct ether_addr slave_addr;
756 /* Update link status on each port */
757 for (i = 0; i < internals->active_slave_count; i++) {
760 slave_id = internals->active_slaves[i];
761 rte_eth_link_get_nowait(slave_id, &link_info);
762 rte_eth_macaddr_get(slave_id, &slave_addr);
764 if (link_info.link_status != 0) {
765 key = link_speed_key(link_info.link_speed) << 1;
766 if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
767 key |= BOND_LINK_FULL_DUPLEX_KEY;
771 port = &mode_8023ad_ports[slave_id];
773 key = rte_cpu_to_be_16(key);
774 if (key != port->actor.key) {
775 if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
776 set_warning_flags(port, WRN_NOT_LACP_CAPABLE);
778 port->actor.key = key;
779 SM_FLAG_SET(port, NTT);
782 if (!is_same_ether_addr(&port->actor.system, &slave_addr)) {
783 ether_addr_copy(&slave_addr, &port->actor.system);
784 if (port->aggregator_port_id == slave_id)
785 SM_FLAG_SET(port, NTT);
789 for (i = 0; i < internals->active_slave_count; i++) {
790 slave_id = internals->active_slaves[i];
791 port = &mode_8023ad_ports[slave_id];
793 if ((port->actor.key &
794 rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
796 SM_FLAG_SET(port, BEGIN);
798 /* LACP is disabled on half duples or link is down */
799 if (SM_FLAG(port, LACP_ENABLED)) {
800 /* If port was enabled set it to BEGIN state */
801 SM_FLAG_CLR(port, LACP_ENABLED);
802 ACTOR_STATE_CLR(port, DISTRIBUTING);
803 ACTOR_STATE_CLR(port, COLLECTING);
806 /* Skip this port processing */
810 SM_FLAG_SET(port, LACP_ENABLED);
812 /* Find LACP packet to this port. Do not check subtype, it is done in
813 * function that queued packet */
814 if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
815 struct rte_mbuf *lacp_pkt = pkt;
816 struct lacpdu_header *lacp;
818 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
819 RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
821 /* This is LACP frame so pass it to rx_machine */
822 rx_machine(internals, slave_id, &lacp->lacpdu);
823 rte_pktmbuf_free(lacp_pkt);
825 rx_machine(internals, slave_id, NULL);
827 periodic_machine(internals, slave_id);
828 mux_machine(internals, slave_id);
829 tx_machine(internals, slave_id);
830 selection_logic(internals, slave_id);
832 SM_FLAG_CLR(port, BEGIN);
833 show_warnings(slave_id);
836 rte_eal_alarm_set(internals->mode4.update_timeout_us,
837 bond_mode_8023ad_periodic_cb, arg);
841 bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
843 struct bond_dev_private *internals = bond_dev->data->dev_private;
845 struct port *port = &mode_8023ad_ports[slave_id];
846 struct port_params initial = {
848 .system_priority = rte_cpu_to_be_16(0xFFFF),
849 .key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
850 .port_priority = rte_cpu_to_be_16(0x00FF),
854 char mem_name[RTE_ETH_NAME_MAX_LEN];
856 unsigned element_size;
857 uint32_t total_tx_desc;
858 struct bond_tx_queue *bd_tx_q;
861 /* Given slave mus not be in active list */
862 RTE_ASSERT(find_slave_by_id(internals->active_slaves,
863 internals->active_slave_count, slave_id) == internals->active_slave_count);
864 RTE_SET_USED(internals); /* used only for assert when enabled */
866 memcpy(&port->actor, &initial, sizeof(struct port_params));
867 /* Standard requires that port ID must be grater than 0.
868 * Add 1 do get corresponding port_number */
869 port->actor.port_number = rte_cpu_to_be_16((uint16_t)slave_id + 1);
871 memcpy(&port->partner, &initial, sizeof(struct port_params));
874 port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
875 port->partner_state = STATE_LACP_ACTIVE;
876 port->sm_flags = SM_FLAGS_BEGIN;
878 /* use this port as agregator */
879 port->aggregator_port_id = slave_id;
880 rte_eth_promiscuous_enable(slave_id);
882 timer_cancel(&port->warning_timer);
884 if (port->mbuf_pool != NULL)
887 RTE_ASSERT(port->rx_ring == NULL);
888 RTE_ASSERT(port->tx_ring == NULL);
890 socket_id = rte_eth_dev_socket_id(slave_id);
891 if (socket_id == (int)LCORE_ID_ANY)
892 socket_id = rte_socket_id();
894 element_size = sizeof(struct slow_protocol_frame) +
895 RTE_PKTMBUF_HEADROOM;
897 /* The size of the mempool should be at least:
898 * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
899 total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS;
900 for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) {
901 bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id];
902 total_tx_desc += bd_tx_q->nb_tx_desc;
905 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
906 port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
907 RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
908 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
909 0, element_size, socket_id);
911 /* Any memory allocation failure in initialization is critical because
912 * resources can't be free, so reinitialization is impossible. */
913 if (port->mbuf_pool == NULL) {
914 rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
915 slave_id, mem_name, rte_strerror(rte_errno));
918 snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
919 port->rx_ring = rte_ring_create(mem_name,
920 rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);
922 if (port->rx_ring == NULL) {
923 rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
924 mem_name, rte_strerror(rte_errno));
927 /* TX ring is at least one pkt longer to make room for marker packet. */
928 snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
929 port->tx_ring = rte_ring_create(mem_name,
930 rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);
932 if (port->tx_ring == NULL) {
933 rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
934 mem_name, rte_strerror(rte_errno));
939 bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
942 struct bond_dev_private *internals = bond_dev->data->dev_private;
947 /* Given slave must be in active list */
948 RTE_ASSERT(find_slave_by_id(internals->active_slaves,
949 internals->active_slave_count, slave_id) < internals->active_slave_count);
951 /* Exclude slave from transmit policy. If this slave is an aggregator
952 * make all aggregated slaves unselected to force selection logic
953 * to select suitable aggregator for this port. */
954 for (i = 0; i < internals->active_slave_count; i++) {
955 port = &mode_8023ad_ports[internals->active_slaves[i]];
956 if (port->aggregator_port_id != slave_id)
959 port->selected = UNSELECTED;
961 /* Use default aggregator */
962 port->aggregator_port_id = internals->active_slaves[i];
965 port = &mode_8023ad_ports[slave_id];
966 port->selected = UNSELECTED;
967 port->actor_state &= ~(STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
970 while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
971 rte_pktmbuf_free((struct rte_mbuf *)pkt);
973 while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)
974 rte_pktmbuf_free((struct rte_mbuf *)pkt);
979 bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
981 struct bond_dev_private *internals = bond_dev->data->dev_private;
982 struct ether_addr slave_addr;
983 struct port *slave, *agg_slave;
984 uint8_t slave_id, i, j;
986 bond_mode_8023ad_stop(bond_dev);
988 for (i = 0; i < internals->active_slave_count; i++) {
989 slave_id = internals->active_slaves[i];
990 slave = &mode_8023ad_ports[slave_id];
991 rte_eth_macaddr_get(slave_id, &slave_addr);
993 if (is_same_ether_addr(&slave_addr, &slave->actor.system))
996 ether_addr_copy(&slave_addr, &slave->actor.system);
997 /* Do nothing if this port is not an aggregator. In other case
998 * Set NTT flag on every port that use this aggregator. */
999 if (slave->aggregator_port_id != slave_id)
1002 for (j = 0; j < internals->active_slave_count; j++) {
1003 agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
1004 if (agg_slave->aggregator_port_id == slave_id)
1005 SM_FLAG_SET(agg_slave, NTT);
1009 if (bond_dev->data->dev_started)
1010 bond_mode_8023ad_start(bond_dev);
1014 bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
1015 struct rte_eth_bond_8023ad_conf *conf)
1017 struct bond_dev_private *internals = dev->data->dev_private;
1018 struct mode8023ad_private *mode4 = &internals->mode4;
1019 uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
1021 conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks;
1022 conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks;
1023 conf->short_timeout_ms = mode4->short_timeout / ms_ticks;
1024 conf->long_timeout_ms = mode4->long_timeout / ms_ticks;
1025 conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks;
1026 conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
1027 conf->update_timeout_ms = mode4->update_timeout_us / 1000;
1028 conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks;
1032 bond_mode_8023ad_conf_get_v1607(struct rte_eth_dev *dev,
1033 struct rte_eth_bond_8023ad_conf *conf)
1035 struct bond_dev_private *internals = dev->data->dev_private;
1036 struct mode8023ad_private *mode4 = &internals->mode4;
1038 bond_mode_8023ad_conf_get(dev, conf);
1039 conf->slowrx_cb = mode4->slowrx_cb;
1043 bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
1045 conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
1046 conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
1047 conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
1048 conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
1049 conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
1050 conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
1051 conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
1052 conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
1053 conf->slowrx_cb = NULL;
1057 bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
1058 struct rte_eth_bond_8023ad_conf *conf)
1060 uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
1062 mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
1063 mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
1064 mode4->short_timeout = conf->short_timeout_ms * ms_ticks;
1065 mode4->long_timeout = conf->long_timeout_ms * ms_ticks;
1066 mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks;
1067 mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
1068 mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
1069 mode4->update_timeout_us = conf->update_timeout_ms * 1000;
1073 bond_mode_8023ad_setup_v20(struct rte_eth_dev *dev,
1074 struct rte_eth_bond_8023ad_conf *conf)
1076 struct rte_eth_bond_8023ad_conf def_conf;
1077 struct bond_dev_private *internals = dev->data->dev_private;
1078 struct mode8023ad_private *mode4 = &internals->mode4;
1082 bond_mode_8023ad_conf_get_default(conf);
1085 bond_mode_8023ad_stop(dev);
1086 bond_mode_8023ad_conf_assign(mode4, conf);
1088 if (dev->data->dev_started)
1089 bond_mode_8023ad_start(dev);
1094 bond_mode_8023ad_setup(struct rte_eth_dev *dev,
1095 struct rte_eth_bond_8023ad_conf *conf)
1097 struct rte_eth_bond_8023ad_conf def_conf;
1098 struct bond_dev_private *internals = dev->data->dev_private;
1099 struct mode8023ad_private *mode4 = &internals->mode4;
1103 bond_mode_8023ad_conf_get_default(conf);
1106 bond_mode_8023ad_stop(dev);
1107 bond_mode_8023ad_conf_assign(mode4, conf);
1108 mode4->slowrx_cb = conf->slowrx_cb;
1110 if (dev->data->dev_started)
1111 bond_mode_8023ad_start(dev);
1115 bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
1117 struct bond_dev_private *internals = bond_dev->data->dev_private;
1120 for (i = 0; i < internals->active_slave_count; i++)
1121 bond_mode_8023ad_activate_slave(bond_dev, i);
1127 bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
1129 struct bond_dev_private *internals = bond_dev->data->dev_private;
1130 struct mode8023ad_private *mode4 = &internals->mode4;
1131 static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;
1133 if (mode4->slowrx_cb)
1134 return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
1137 return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev);
1141 bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
1143 struct bond_dev_private *internals = bond_dev->data->dev_private;
1144 struct mode8023ad_private *mode4 = &internals->mode4;
1146 if (mode4->slowrx_cb) {
1147 rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb,
1151 rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
1155 bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
1156 uint8_t slave_id, struct rte_mbuf *pkt)
1158 struct mode8023ad_private *mode4 = &internals->mode4;
1159 struct port *port = &mode_8023ad_ports[slave_id];
1160 struct marker_header *m_hdr;
1161 uint64_t marker_timer, old_marker_timer;
1163 uint8_t wrn, subtype;
1164 /* If packet is a marker, we send response now by reusing given packet
1165 * and update only source MAC, destination MAC is multicast so don't
1166 * update it. Other frames will be handled later by state machines */
1167 subtype = rte_pktmbuf_mtod(pkt,
1168 struct slow_protocol_frame *)->slow_protocol.subtype;
1170 if (subtype == SLOW_SUBTYPE_MARKER) {
1171 m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *);
1173 if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) {
1174 wrn = WRN_UNKNOWN_MARKER_TYPE;
1178 /* Setup marker timer. Do it in loop in case concurrent access. */
1180 old_marker_timer = port->rx_marker_timer;
1181 if (!timer_is_expired(&old_marker_timer)) {
1182 wrn = WRN_RX_MARKER_TO_FAST;
1186 timer_set(&marker_timer, mode4->rx_marker_timeout);
1187 retval = rte_atomic64_cmpset(&port->rx_marker_timer,
1188 old_marker_timer, marker_timer);
1189 } while (unlikely(retval == 0));
1191 m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
1192 rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);
1194 if (unlikely(rte_ring_enqueue(port->tx_ring, pkt) == -ENOBUFS)) {
1196 port->rx_marker_timer = 0;
1197 wrn = WRN_TX_QUEUE_FULL;
1200 } else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
1201 if (unlikely(rte_ring_enqueue(port->rx_ring, pkt) == -ENOBUFS)) {
1202 /* If RX fing full free lacpdu message and drop packet */
1203 wrn = WRN_RX_QUEUE_FULL;
1207 wrn = WRN_UNKNOWN_SLOW_TYPE;
1214 set_warning_flags(port, wrn);
1215 rte_pktmbuf_free(pkt);
1219 rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id,
1220 struct rte_eth_bond_8023ad_conf *conf)
1222 struct rte_eth_dev *bond_dev;
1224 if (valid_bonded_port_id(port_id) != 0)
1230 bond_dev = &rte_eth_devices[port_id];
1231 bond_mode_8023ad_conf_get(bond_dev, conf);
1234 VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v20, 2.0);
1237 rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
1238 struct rte_eth_bond_8023ad_conf *conf)
1240 struct rte_eth_dev *bond_dev;
1242 if (valid_bonded_port_id(port_id) != 0)
1248 bond_dev = &rte_eth_devices[port_id];
1249 bond_mode_8023ad_conf_get_v1607(bond_dev, conf);
1252 BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
1253 MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_conf_get(uint8_t port_id,
1254 struct rte_eth_bond_8023ad_conf *conf),
1255 rte_eth_bond_8023ad_conf_get_v1607);
1258 bond_8023ad_setup_validate(uint8_t port_id,
1259 struct rte_eth_bond_8023ad_conf *conf)
1261 if (valid_bonded_port_id(port_id) != 0)
1265 /* Basic sanity check */
1266 if (conf->slow_periodic_ms == 0 ||
1267 conf->fast_periodic_ms >= conf->slow_periodic_ms ||
1268 conf->long_timeout_ms == 0 ||
1269 conf->short_timeout_ms >= conf->long_timeout_ms ||
1270 conf->aggregate_wait_timeout_ms == 0 ||
1271 conf->tx_period_ms == 0 ||
1272 conf->rx_marker_period_ms == 0 ||
1273 conf->update_timeout_ms == 0) {
1274 RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n");
1283 rte_eth_bond_8023ad_setup_v20(uint8_t port_id,
1284 struct rte_eth_bond_8023ad_conf *conf)
1286 struct rte_eth_dev *bond_dev;
1289 err = bond_8023ad_setup_validate(port_id, conf);
1293 bond_dev = &rte_eth_devices[port_id];
1294 bond_mode_8023ad_setup_v20(bond_dev, conf);
1298 VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v20, 2.0);
1301 rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
1302 struct rte_eth_bond_8023ad_conf *conf)
1304 struct rte_eth_dev *bond_dev;
1307 err = bond_8023ad_setup_validate(port_id, conf);
1311 bond_dev = &rte_eth_devices[port_id];
1312 bond_mode_8023ad_setup(bond_dev, conf);
1316 BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
1317 MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_setup(uint8_t port_id,
1318 struct rte_eth_bond_8023ad_conf *conf),
1319 rte_eth_bond_8023ad_setup_v1607);
1322 rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
1323 struct rte_eth_bond_8023ad_slave_info *info)
1325 struct rte_eth_dev *bond_dev;
1326 struct bond_dev_private *internals;
1329 if (info == NULL || valid_bonded_port_id(port_id) != 0 ||
1330 rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1333 bond_dev = &rte_eth_devices[port_id];
1335 internals = bond_dev->data->dev_private;
1336 if (find_slave_by_id(internals->active_slaves,
1337 internals->active_slave_count, slave_id) ==
1338 internals->active_slave_count)
1341 port = &mode_8023ad_ports[slave_id];
1342 info->selected = port->selected;
1344 info->actor_state = port->actor_state;
1345 rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));
1347 info->partner_state = port->partner_state;
1348 rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));
1350 info->agg_port_id = port->aggregator_port_id;
1355 bond_8023ad_ext_validate(uint8_t port_id, uint8_t slave_id)
1357 struct rte_eth_dev *bond_dev;
1358 struct bond_dev_private *internals;
1359 struct mode8023ad_private *mode4;
1361 if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1364 bond_dev = &rte_eth_devices[port_id];
1366 if (!bond_dev->data->dev_started)
1369 internals = bond_dev->data->dev_private;
1370 if (find_slave_by_id(internals->active_slaves,
1371 internals->active_slave_count, slave_id) ==
1372 internals->active_slave_count)
1375 mode4 = &internals->mode4;
1376 if (mode4->slowrx_cb == NULL)
1383 rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled)
1388 res = bond_8023ad_ext_validate(port_id, slave_id);
1392 port = &mode_8023ad_ports[slave_id];
1395 ACTOR_STATE_SET(port, COLLECTING);
1397 ACTOR_STATE_CLR(port, COLLECTING);
1403 rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled)
1408 res = bond_8023ad_ext_validate(port_id, slave_id);
1412 port = &mode_8023ad_ports[slave_id];
1415 ACTOR_STATE_SET(port, DISTRIBUTING);
1417 ACTOR_STATE_CLR(port, DISTRIBUTING);
1423 rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id)
1428 err = bond_8023ad_ext_validate(port_id, slave_id);
1432 port = &mode_8023ad_ports[slave_id];
1433 return ACTOR_STATE(port, DISTRIBUTING);
1437 rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id)
1442 err = bond_8023ad_ext_validate(port_id, slave_id);
1446 port = &mode_8023ad_ports[slave_id];
1447 return ACTOR_STATE(port, COLLECTING);
1451 rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
1452 struct rte_mbuf *lacp_pkt)
1457 res = bond_8023ad_ext_validate(port_id, slave_id);
1461 port = &mode_8023ad_ports[slave_id];
1463 if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
1466 struct lacpdu_header *lacp;
1468 /* only enqueue LACPDUs */
1469 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
1470 if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP)
1473 MODE4_DEBUG("sending LACP frame\n");
1475 return rte_ring_enqueue(port->tx_ring, lacp_pkt);
1479 bond_mode_8023ad_ext_periodic_cb(void *arg)
1481 struct rte_eth_dev *bond_dev = arg;
1482 struct bond_dev_private *internals = bond_dev->data->dev_private;
1483 struct mode8023ad_private *mode4 = &internals->mode4;
1486 uint16_t i, slave_id;
1488 for (i = 0; i < internals->active_slave_count; i++) {
1489 slave_id = internals->active_slaves[i];
1490 port = &mode_8023ad_ports[slave_id];
1492 if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
1493 struct rte_mbuf *lacp_pkt = pkt;
1494 struct lacpdu_header *lacp;
1496 lacp = rte_pktmbuf_mtod(lacp_pkt,
1497 struct lacpdu_header *);
1498 RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
1500 /* This is LACP frame so pass it to rx callback.
1501 * Callback is responsible for freeing mbuf.
1503 mode4->slowrx_cb(slave_id, lacp_pkt);
1507 rte_eal_alarm_set(internals->mode4.update_timeout_us,
1508 bond_mode_8023ad_ext_periodic_cb, arg);