/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <string.h>
#include <stdbool.h>
#include <stdio.h>

#include <rte_alarm.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_cycles.h>
#include <rte_compat.h>

#include "rte_eth_bond_private.h"

static void bond_mode_8023ad_ext_periodic_cb(void *arg);
#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD

#define MODE4_DEBUG(fmt, ...) \
	rte_log(RTE_LOG_DEBUG, bond_logtype, \
		"%6u [Port %u: %s] " fmt, \
		bond_dbg_get_time_diff_ms(), slave_id, \
		__func__, ##__VA_ARGS__)

static uint64_t start_time;

/* Milliseconds elapsed since the first call, measured with the TSC. */
static unsigned
bond_dbg_get_time_diff_ms(void)
{
	uint64_t now = rte_rdtsc();

	if (start_time == 0)
		start_time = now;

	return ((now - start_time) * 1000) / rte_get_tsc_hz();
}

static void
bond_print_lacp(struct lacpdu *l)
{
	char a_address[18];
	char p_address[18];
	char a_state[256] = { 0 };
	char p_state[256] = { 0 };

	static const char * const state_labels[] = {
		"ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
	};

	int a_len = 0;
	int p_len = 0;
	uint8_t i;
	uint8_t *addr;

	addr = l->actor.port_params.system.addr_bytes;
	snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
		addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	addr = l->partner.port_params.system.addr_bytes;
	snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
		addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	for (i = 0; i < 8; i++) {
		if ((l->actor.state >> i) & 1) {
			a_len += snprintf(&a_state[a_len],
				RTE_DIM(a_state) - a_len, "%s ",
				state_labels[i]);
		}

		if ((l->partner.state >> i) & 1) {
			p_len += snprintf(&p_state[p_len],
				RTE_DIM(p_state) - p_len, "%s ",
				state_labels[i]);
		}
	}

	/* Strip trailing spaces */
	if (a_len && a_state[a_len - 1] == ' ')
		a_state[a_len - 1] = '\0';

	if (p_len && p_state[p_len - 1] == ' ')
		p_state[p_len - 1] = '\0';
87 " actor={ tlv=%02X, len=%02X\n"
88 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
91 " partner={ tlv=%02X, len=%02X\n"
92 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
95 " collector={info=%02X, length=%02X, max_delay=%04X\n, "
96 "type_term=%02X, terminator_length = %02X }",
99 l->actor.tlv_type_info,
100 l->actor.info_length,
101 l->actor.port_params.system_priority,
103 l->actor.port_params.key,
104 l->actor.port_params.port_priority,
105 l->actor.port_params.port_number,
107 l->partner.tlv_type_info,
108 l->partner.info_length,
109 l->partner.port_params.system_priority,
111 l->partner.port_params.key,
112 l->partner.port_params.port_priority,
113 l->partner.port_params.port_number,
115 l->tlv_type_collector_info,
116 l->collector_info_length,
117 l->collector_max_delay,
118 l->tlv_type_terminator,
119 l->terminator_length);
#define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
#else
#define BOND_PRINT_LACP(lacpdu) do { } while (0)
#define MODE4_DEBUG(fmt, ...) do { } while (0)
#endif

static const struct ether_addr lacp_mac_addr = {
	.addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
};

struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];

static void
timer_cancel(uint64_t *timer)
{
	*timer = 0;
}

static void
timer_set(uint64_t *timer, uint64_t timeout)
{
	*timer = rte_rdtsc() + timeout;
}

/* Forces given timer to be in expired state. */
static void
timer_force_expired(uint64_t *timer)
{
	*timer = rte_rdtsc();
}

static bool
timer_is_stopped(uint64_t *timer)
{
	return *timer == 0;
}

static bool
timer_is_expired(uint64_t *timer)
{
	return *timer < rte_rdtsc();
}

/* Timer is in running state if it is neither stopped nor expired */
static bool
timer_is_running(uint64_t *timer)
{
	return !timer_is_stopped(timer) && !timer_is_expired(timer);
}

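/*
 * Note on the helpers above: all timers are raw TSC values, where 0 means
 * "stopped" and any other value is an absolute rte_rdtsc() deadline.
 * Callers convert millisecond intervals to ticks before arming a timer,
 * e.g. (sketch mirroring how warning_timer is armed in show_warnings()
 * below):
 *
 *	timer_set(&port->warning_timer,
 *		BOND_8023AD_WARNINGS_PERIOD_MS * rte_get_tsc_hz() / 1000);
 */
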
static void
set_warning_flags(struct port *port, uint16_t flags)
{
	int retval;
	uint16_t old;
	uint16_t new_flag = 0;

	do {
		old = port->warnings_to_show;
		new_flag = old | flags;
		retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag);
	} while (unlikely(retval == 0));
}

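/*
 * The compare-and-set loop above lets datapath threads and the periodic
 * callback raise warning bits concurrently without a lock; show_warnings()
 * below drains the accumulated bits with the same CAS pattern.
 */
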
static void
show_warnings(uint16_t slave_id)
{
	struct port *port = &mode_8023ad_ports[slave_id];
	uint16_t warnings;

	do {
		warnings = port->warnings_to_show;
	} while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0);

	if (warnings == 0)
		return;

	if (!timer_is_expired(&port->warning_timer))
		return;

	timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS *
			rte_get_tsc_hz() / 1000);

	if (warnings & WRN_RX_QUEUE_FULL) {
		RTE_BOND_LOG(DEBUG,
			"Slave %u: failed to enqueue LACP packet into RX ring.\n"
			"Receive and transmit functions must be invoked on bonded"
			" interface at least 10 times per second or LACP will not work correctly",
			slave_id);
	}

	if (warnings & WRN_TX_QUEUE_FULL) {
		RTE_BOND_LOG(DEBUG,
			"Slave %u: failed to enqueue LACP packet into TX ring.\n"
			"Receive and transmit functions must be invoked on bonded"
			" interface at least 10 times per second or LACP will not work correctly",
			slave_id);
	}

	if (warnings & WRN_RX_MARKER_TO_FAST)
		RTE_BOND_LOG(INFO, "Slave %u: marker arrived too early - ignoring.",
			slave_id);

	if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
		RTE_BOND_LOG(INFO,
			"Slave %u: ignoring unknown slow protocol frame type",
			slave_id);
	}

	if (warnings & WRN_UNKNOWN_MARKER_TYPE)
		RTE_BOND_LOG(INFO, "Slave %u: ignoring unknown marker type",
			slave_id);

	if (warnings & WRN_NOT_LACP_CAPABLE)
		MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
}

static void
record_default(struct port *port)
{
	/* Record default parameters for the partner. Partner admin parameters
	 * are not implemented, so set them to arbitrary defaults (last known)
	 * and mark the actor as having a defaulted partner. */
	port->partner_state = STATE_LACP_ACTIVE;
	ACTOR_STATE_SET(port, DEFAULTED);
}

/** Function handles rx state machine.
 *
 * This function implements the Receive State Machine from point 5.4.12 of
 * the 802.1AX documentation. It should be called periodically.
 *
 * @param lacpdu	LACPDU received.
 * @param port		Port on which LACPDU was received.
 */
static void
rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
		struct lacpdu *lacp)
{
	struct port *agg, *port = &mode_8023ad_ports[slave_id];
	uint64_t timeout;

	if (SM_FLAG(port, BEGIN)) {
		/* Initialize stuff */
		MODE4_DEBUG("-> INITIALIZE\n");
		SM_FLAG_CLR(port, MOVED);
		port->selected = UNSELECTED;

		record_default(port);

		ACTOR_STATE_CLR(port, EXPIRED);
		timer_cancel(&port->current_while_timer);

		/* DISABLED: On initialization partner is out of sync */
		PARTNER_STATE_CLR(port, SYNCHRONIZATION);

		/* LACP DISABLED stuff if LACP not enabled on this port */
		if (!SM_FLAG(port, LACP_ENABLED))
			PARTNER_STATE_CLR(port, AGGREGATION);
		else
			PARTNER_STATE_SET(port, AGGREGATION);
	}

	if (!SM_FLAG(port, LACP_ENABLED)) {
		/* Update parameters only if state changed */
		if (!timer_is_stopped(&port->current_while_timer)) {
			port->selected = UNSELECTED;
			record_default(port);
			PARTNER_STATE_CLR(port, AGGREGATION);
			ACTOR_STATE_CLR(port, EXPIRED);
			timer_cancel(&port->current_while_timer);
		}
		return;
	}

	if (lacp) {
		MODE4_DEBUG("LACP -> CURRENT\n");
		BOND_PRINT_LACP(lacp);
		/* Update selected flag. If partner parameters are defaulted,
		 * assume they match. If not defaulted, compare the LACP actor
		 * with this port's recorded partner parameters. */
		if (!ACTOR_STATE(port, DEFAULTED) &&
			(ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
			|| memcmp(&port->partner, &lacp->actor.port_params,
				sizeof(port->partner)) != 0)) {
			MODE4_DEBUG("selected <- UNSELECTED\n");
			port->selected = UNSELECTED;
		}

		/* Record this PDU actor params as partner params */
		memcpy(&port->partner, &lacp->actor.port_params,
			sizeof(struct port_params));
		port->partner_state = lacp->actor.state;

		/* Partner parameters are not defaulted any more */
		ACTOR_STATE_CLR(port, DEFAULTED);

		/* If LACP partner params match this port actor params */
		agg = &mode_8023ad_ports[port->aggregator_port_id];
		bool match = port->actor.system_priority ==
			lacp->partner.port_params.system_priority &&
			is_same_ether_addr(&agg->actor.system,
				&lacp->partner.port_params.system) &&
			port->actor.port_priority ==
				lacp->partner.port_params.port_priority &&
			port->actor.port_number ==
				lacp->partner.port_params.port_number;

		/* Update NTT if the partner's information is outdated (XOR-ed
		 * and masked with the state mask below). */
		uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
			STATE_SYNCHRONIZATION | STATE_AGGREGATION;

		if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
				match == false) {
			SM_FLAG_SET(port, NTT);
		}

		/* If LACP partner params match this port actor params */
		if (match == true && ACTOR_STATE(port, AGGREGATION) ==
				PARTNER_STATE(port, AGGREGATION))
			PARTNER_STATE_SET(port, SYNCHRONIZATION);
		else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
				AGGREGATION))
			PARTNER_STATE_SET(port, SYNCHRONIZATION);
		else
			PARTNER_STATE_CLR(port, SYNCHRONIZATION);

		if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
			timeout = internals->mode4.short_timeout;
		else
			timeout = internals->mode4.long_timeout;

		timer_set(&port->current_while_timer, timeout);
		ACTOR_STATE_CLR(port, EXPIRED);
		return; /* No state change */
	}

	/* If the CURRENT state timer is not running (stopped or expired),
	 * transit to the EXPIRED state from DISABLED or CURRENT. */
	if (!timer_is_running(&port->current_while_timer)) {
		ACTOR_STATE_SET(port, EXPIRED);
		PARTNER_STATE_CLR(port, SYNCHRONIZATION);
		PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
		timer_set(&port->current_while_timer, internals->mode4.short_timeout);
	}
}

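/*
 * Note: rx_machine() is driven from two places: with a real LACPDU when a
 * slow-protocol frame is dequeued (see rx_machine_update() below), and with
 * lacp == NULL from the periodic callback, which lets the current_while
 * timer expire and move the port into the EXPIRED state.
 */
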
/**
 * Function handles periodic tx state machine.
 *
 * Function implements the Periodic Transmission state machine from point
 * 5.4.13 of the 802.1AX documentation. It should be called periodically.
 *
 * @param port	Port to handle state machine.
 */
static void
periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
	struct port *port = &mode_8023ad_ports[slave_id];
	/* Calculate if either side is LACP enabled */
	uint64_t timeout;
	uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
		PARTNER_STATE(port, LACP_ACTIVE);

	uint8_t is_partner_fast, was_partner_fast;
	/* No periodic transmission on BEGIN, when LACP is disabled, or when
	 * both sides are passive. */
	if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
		timer_cancel(&port->periodic_timer);
		timer_force_expired(&port->tx_machine_timer);
		SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);

		MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
			SM_FLAG(port, BEGIN) ? "begin " : "",
			SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
			active ? "LACP active " : "LACP passive ");
		return;
	}

	is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
	was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);

	/* If the periodic timer is not started, transit from NO PERIODIC to
	 * FAST/SLOW. Otherwise check whether the timer expired or the
	 * partner's settings changed. */
	if (!timer_is_stopped(&port->periodic_timer)) {
		if (timer_is_expired(&port->periodic_timer)) {
			SM_FLAG_SET(port, NTT);
		} else if (is_partner_fast != was_partner_fast) {
			/* Partner's timeout was slow and now it is fast ->
			 * send LACP. In the opposite case (was fast, now slow)
			 * just switch the timeout to slow without forcing a
			 * LACP send, because the standard says so. */
			if (is_partner_fast)
				SM_FLAG_SET(port, NTT);
		} else
			return; /* Nothing changed */
	}

	/* Handle state transition to FAST/SLOW LACP timeout */
	if (is_partner_fast) {
		timeout = internals->mode4.fast_periodic_timeout;
		SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
	} else {
		timeout = internals->mode4.slow_periodic_timeout;
		SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
	}

	timer_set(&port->periodic_timer, timeout);
}

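/*
 * The NTT ("Need To Transmit") flag set above is only a request: the
 * periodic machine decides *that* a LACPDU must go out, while tx_machine()
 * below actually builds and queues the frame on its next run.
 */
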
/**
 * Function handles mux state machine.
 *
 * Function implements the Mux Machine from point 5.4.15 of the 802.1AX
 * documentation. It should be called periodically.
 *
 * @param port	Port to handle state machine.
 */
static void
mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
	struct port *port = &mode_8023ad_ports[slave_id];

	/* Save current state for later use */
	const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
		STATE_COLLECTING;

	/* Enter DETACHED state on BEGIN condition or from any other state if
	 * port was unselected */
	if (SM_FLAG(port, BEGIN) ||
			port->selected == UNSELECTED || (port->selected == STANDBY &&
			(port->actor_state & state_mask) != 0)) {
		/* detach mux from aggregator */
		port->actor_state &= ~state_mask;
		/* Set ntt to true if BEGIN condition or transition from any
		 * other state, which is indicated by a started
		 * wait_while_timer. */
		if (SM_FLAG(port, BEGIN) ||
				!timer_is_stopped(&port->wait_while_timer)) {
			SM_FLAG_SET(port, NTT);
			MODE4_DEBUG("-> DETACHED\n");
		}
		timer_cancel(&port->wait_while_timer);
	}

	if (timer_is_stopped(&port->wait_while_timer)) {
		if (port->selected == SELECTED || port->selected == STANDBY) {
			timer_set(&port->wait_while_timer,
				internals->mode4.aggregate_wait_timeout);

			MODE4_DEBUG("DETACHED -> WAITING\n");
		}
		/* Waiting state entered */
		return;
	}

	/* Transit to next state if port is ready */
	if (!timer_is_expired(&port->wait_while_timer))
		return;

	if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
			!PARTNER_STATE(port, SYNCHRONIZATION)) {
		/* If in COLLECTING or DISTRIBUTING state and partner becomes
		 * out of sync, transit to the ATTACHED state. */
		ACTOR_STATE_CLR(port, DISTRIBUTING);
		ACTOR_STATE_CLR(port, COLLECTING);
		/* Clear actor sync to trigger the ATTACHED transition in the
		 * condition below. */
		ACTOR_STATE_CLR(port, SYNCHRONIZATION);
		MODE4_DEBUG("Out of sync -> ATTACHED\n");
	}

	if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
		/* attach mux to aggregator */
		RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
			STATE_DISTRIBUTING)) == 0);

		ACTOR_STATE_SET(port, SYNCHRONIZATION);
		SM_FLAG_SET(port, NTT);
		MODE4_DEBUG("ATTACHED Entered\n");
	} else if (!ACTOR_STATE(port, COLLECTING)) {
		/* Start collecting if in sync */
		if (PARTNER_STATE(port, SYNCHRONIZATION)) {
			MODE4_DEBUG("ATTACHED -> COLLECTING\n");
			ACTOR_STATE_SET(port, COLLECTING);
			SM_FLAG_SET(port, NTT);
		}
	} else if (ACTOR_STATE(port, COLLECTING)) {
		/* Check if partner is in COLLECTING state. If so this port can
		 * distribute frames to it */
		if (!ACTOR_STATE(port, DISTRIBUTING)) {
			if (PARTNER_STATE(port, COLLECTING)) {
				/* Enable DISTRIBUTING if partner is collecting */
				ACTOR_STATE_SET(port, DISTRIBUTING);
				SM_FLAG_SET(port, NTT);
				MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
				RTE_BOND_LOG(INFO,
					"Bond %u: slave id %u distributing started.",
					internals->port_id, slave_id);
			}
		} else {
			if (!PARTNER_STATE(port, COLLECTING)) {
				/* Disable DISTRIBUTING (enter COLLECTING state) if partner
				 * is not collecting */
				ACTOR_STATE_CLR(port, DISTRIBUTING);
				SM_FLAG_SET(port, NTT);
				MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
				RTE_BOND_LOG(INFO,
					"Bond %u: slave id %u distributing stopped.",
					internals->port_id, slave_id);
			}
		}
	}
}

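/*
 * The ladder above follows the standard mux ordering: a port first
 * synchronizes (ATTACHED), then enables reception (COLLECTING) and only
 * then transmission (DISTRIBUTING); teardown walks the same steps in
 * reverse.
 */
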
/**
 * Function handles transmit state machine.
 *
 * Function implements the Transmit Machine from point 5.4.16 of the 802.1AX
 * documentation. It should be called periodically.
 */
static void
tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
	struct port *agg, *port = &mode_8023ad_ports[slave_id];

	struct rte_mbuf *lacp_pkt = NULL;
	struct lacpdu_header *hdr;
	struct lacpdu *lacpdu;

	/* If the periodic timer is not running, the periodic machine is in
	 * NO PERIODIC state and, according to the 802.3ax standard, the tx
	 * machine should not transmit any frames and should set ntt to false. */
	if (timer_is_stopped(&port->periodic_timer))
		SM_FLAG_CLR(port, NTT);

	if (!SM_FLAG(port, NTT))
		return;

	if (!timer_is_expired(&port->tx_machine_timer))
		return;

	lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
	if (lacp_pkt == NULL) {
		RTE_BOND_LOG(ERR, "Failed to allocate LACP packet from pool");
		return;
	}

	lacp_pkt->data_len = sizeof(*hdr);
	lacp_pkt->pkt_len = sizeof(*hdr);

	hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);

	/* Source and destination MAC */
	ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
	rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
	hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);

	lacpdu = &hdr->lacpdu;
	memset(lacpdu, 0, sizeof(*lacpdu));

	/* Initialize LACP part */
	lacpdu->subtype = SLOW_SUBTYPE_LACP;
	lacpdu->version_number = 1;

	/* ACTOR */
	lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
	lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
	memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
		sizeof(port->actor));
	agg = &mode_8023ad_ports[port->aggregator_port_id];
	ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
	lacpdu->actor.state = port->actor_state;

	/* PARTNER */
	lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
	lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
	memcpy(&lacpdu->partner.port_params, &port->partner,
		sizeof(struct port_params));
	lacpdu->partner.state = port->partner_state;

	lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
	lacpdu->collector_info_length = 0x10;
	lacpdu->collector_max_delay = 0;

	lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
	lacpdu->terminator_length = 0;

	MODE4_DEBUG("Sending LACP frame\n");
	BOND_PRINT_LACP(lacpdu);

	if (internals->mode4.dedicated_queues.enabled == 0) {
		int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);
		if (retval != 0) {
			/* If TX ring full, drop packet and free message.
			 * Retransmission will happen in next function call. */
			rte_pktmbuf_free(lacp_pkt);
			set_warning_flags(port, WRN_TX_QUEUE_FULL);
			return;
		}
	} else {
		uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
				internals->mode4.dedicated_queues.tx_qid,
				&lacp_pkt, 1);
		if (pkts_sent != 1) {
			rte_pktmbuf_free(lacp_pkt);
			set_warning_flags(port, WRN_TX_QUEUE_FULL);
			return;
		}
	}

	timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
	SM_FLAG_CLR(port, NTT);
}

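/*
 * Dropping the LACPDU on a full TX ring is recoverable because NTT is only
 * cleared on success: the frame is simply rebuilt and re-sent on a later
 * invocation of the periodic callback.
 */
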
static int
max_index(uint64_t *a, int n)
{
	int i, max_i = 0;
	uint64_t max = a[0];

	/* Return the index of the largest element of a[0..n-1]. */
	for (i = 1; i < n; ++i) {
		if (a[i] > max) {
			max = a[i];
			max_i = i;
		}
	}

	return max_i;
}

/**
 * Function assigns port to aggregator.
 *
 * @param bond_dev_private	Pointer to bond_dev_private structure.
 * @param port_pos		Port to assign.
 */
static void
selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
{
	struct port *agg, *port;
	uint16_t slaves_count, new_agg_id, i, j = 0;
	uint16_t *slaves;
	uint64_t agg_bandwidth[8] = {0};
	uint64_t agg_count[8] = {0};
	uint16_t default_slave = 0;
	uint8_t mode_count_id, mode_band_id;
	struct rte_eth_link link_info;

	slaves = internals->active_slaves;
	slaves_count = internals->active_slave_count;
	port = &mode_8023ad_ports[slave_id];

	/* Search for aggregator suitable for this port */
	for (i = 0; i < slaves_count; ++i) {
		agg = &mode_8023ad_ports[slaves[i]];
		/* Skip ports that are not aggregators */
		if (agg->aggregator_port_id != slaves[i])
			continue;

		agg_count[agg->aggregator_port_id] += 1;
		rte_eth_link_get_nowait(slaves[i], &link_info);
		agg_bandwidth[agg->aggregator_port_id] += link_info.link_speed;

		/* The actor's system ID is not checked since all slave devices
		 * have the same ID (MAC address). */
		if ((agg->actor.key == port->actor.key &&
			agg->partner.system_priority == port->partner.system_priority &&
			is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1
			&& (agg->partner.key == port->partner.key)) &&
			is_zero_ether_addr(&port->partner.system) != 1 &&
			(agg->actor.key &
				rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {

			if (j == 0)
				default_slave = i;
			j++;
		}
	}

	switch (internals->mode4.agg_selection) {
	case AGG_COUNT:
		mode_count_id = max_index(
			(uint64_t *)agg_count, slaves_count);
		new_agg_id = mode_count_id;
		break;
	case AGG_BANDWIDTH:
		mode_band_id = max_index(
			(uint64_t *)agg_bandwidth, slaves_count);
		new_agg_id = mode_band_id;
		break;
	case AGG_STABLE:
		if (default_slave == slaves_count)
			new_agg_id = slave_id;
		else
			new_agg_id = slaves[default_slave];
		break;
	default:
		if (default_slave == slaves_count)
			new_agg_id = slave_id;
		else
			new_agg_id = slaves[default_slave];
		break;
	}

	if (new_agg_id != port->aggregator_port_id) {
		port->aggregator_port_id = new_agg_id;

		MODE4_DEBUG("-> SELECTED: ID=%3u\n"
			"\t%s aggregator ID=%3u\n",
			port->aggregator_port_id,
			port->aggregator_port_id == slave_id ?
			"aggregator not found, using default" : "aggregator found",
			port->aggregator_port_id);
	}

	port->selected = SELECTED;
}

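/*
 * Aggregator policies used above: AGG_COUNT picks the aggregator bound to
 * the most slaves, AGG_BANDWIDTH the one with the highest summed link
 * speed, and AGG_STABLE (the default) keeps the first suitable aggregator
 * found.
 */
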
/* Function maps DPDK speed to bonding speed stored in key field */
static uint16_t
link_speed_key(uint16_t speed) {
	uint16_t key_speed;

	switch (speed) {
	case ETH_SPEED_NUM_NONE:
		key_speed = 0x00;
		break;
	case ETH_SPEED_NUM_10M:
		key_speed = BOND_LINK_SPEED_KEY_10M;
		break;
	case ETH_SPEED_NUM_100M:
		key_speed = BOND_LINK_SPEED_KEY_100M;
		break;
	case ETH_SPEED_NUM_1G:
		key_speed = BOND_LINK_SPEED_KEY_1000M;
		break;
	case ETH_SPEED_NUM_10G:
		key_speed = BOND_LINK_SPEED_KEY_10G;
		break;
	case ETH_SPEED_NUM_20G:
		key_speed = BOND_LINK_SPEED_KEY_20G;
		break;
	case ETH_SPEED_NUM_40G:
		key_speed = BOND_LINK_SPEED_KEY_40G;
		break;
	default:
		/* Unknown speed */
		key_speed = 0xFFFF;
	}

	return key_speed;
}

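/*
 * The speed key returned here is shifted left by one and OR-ed with
 * BOND_LINK_FULL_DUPLEX_KEY in bond_mode_8023ad_periodic_cb() below, so in
 * host byte order bit 0 of the actor key encodes duplex and the upper bits
 * encode speed.
 */
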
static void
rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
	struct rte_mbuf *lacp_pkt) {
	struct lacpdu_header *lacp;

	if (lacp_pkt != NULL) {
		lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
		RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);

		/* This is LACP frame so pass it to rx_machine */
		rx_machine(internals, slave_id, &lacp->lacpdu);
		rte_pktmbuf_free(lacp_pkt);
	} else
		rx_machine(internals, slave_id, NULL);
}

static void
bond_mode_8023ad_periodic_cb(void *arg)
{
	struct rte_eth_dev *bond_dev = arg;
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct port *port;
	struct rte_eth_link link_info;
	struct ether_addr slave_addr;
	struct rte_mbuf *lacp_pkt = NULL;

	uint16_t i, slave_id;

	/* Update link status on each port */
	for (i = 0; i < internals->active_slave_count; i++) {
		uint16_t key;

		slave_id = internals->active_slaves[i];
		rte_eth_link_get_nowait(slave_id, &link_info);
		rte_eth_macaddr_get(slave_id, &slave_addr);

		if (link_info.link_status != 0) {
			key = link_speed_key(link_info.link_speed) << 1;
			if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
				key |= BOND_LINK_FULL_DUPLEX_KEY;
		} else
			key = 0;

		port = &mode_8023ad_ports[slave_id];

		key = rte_cpu_to_be_16(key);
		if (key != port->actor.key) {
			if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
				set_warning_flags(port, WRN_NOT_LACP_CAPABLE);

			port->actor.key = key;
			SM_FLAG_SET(port, NTT);
		}

		if (!is_same_ether_addr(&port->actor.system, &slave_addr)) {
			ether_addr_copy(&slave_addr, &port->actor.system);
			if (port->aggregator_port_id == slave_id)
				SM_FLAG_SET(port, NTT);
		}
	}

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		port = &mode_8023ad_ports[slave_id];

		if ((port->actor.key &
				rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {

			SM_FLAG_SET(port, BEGIN);

			/* LACP is disabled on half duplex or when link is down */
			if (SM_FLAG(port, LACP_ENABLED)) {
				/* If port was enabled set it to BEGIN state */
				SM_FLAG_CLR(port, LACP_ENABLED);
				ACTOR_STATE_CLR(port, DISTRIBUTING);
				ACTOR_STATE_CLR(port, COLLECTING);
			}

			/* Skip this port processing */
			continue;
		}

		SM_FLAG_SET(port, LACP_ENABLED);

		if (internals->mode4.dedicated_queues.enabled == 0) {
			/* Find LACP packet for this port. The subtype is not
			 * checked here; that is done by the function that
			 * queued the packet. */
			int retval = rte_ring_dequeue(port->rx_ring,
					(void **)&lacp_pkt);

			if (retval != 0)
				lacp_pkt = NULL;

			rx_machine_update(internals, slave_id, lacp_pkt);
		} else {
			uint16_t rx_count = rte_eth_rx_burst(slave_id,
					internals->mode4.dedicated_queues.rx_qid,
					&lacp_pkt, 1);

			if (rx_count == 1)
				bond_mode_8023ad_handle_slow_pkt(internals,
						slave_id, lacp_pkt);
			else
				rx_machine_update(internals, slave_id, NULL);
		}

		periodic_machine(internals, slave_id);
		mux_machine(internals, slave_id);
		tx_machine(internals, slave_id);
		selection_logic(internals, slave_id);

		SM_FLAG_CLR(port, BEGIN);
		show_warnings(slave_id);
	}

	rte_eal_alarm_set(internals->mode4.update_timeout_us,
			bond_mode_8023ad_periodic_cb, arg);
}

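/*
 * The callback above re-arms itself through rte_eal_alarm_set(), so all
 * four state machines run every update_timeout_us on each active slave
 * without needing a dedicated thread.
 */
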
void
bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
		uint16_t slave_id)
{
	struct bond_dev_private *internals = bond_dev->data->dev_private;

	struct port *port = &mode_8023ad_ports[slave_id];
	struct port_params initial = {
		.system_priority = rte_cpu_to_be_16(0xFFFF),
		.key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
		.port_priority = rte_cpu_to_be_16(0x00FF),
	};

	char mem_name[RTE_ETH_NAME_MAX_LEN];
	int socket_id;
	unsigned element_size;
	uint32_t total_tx_desc;
	struct bond_tx_queue *bd_tx_q;
	uint16_t q_id;

	/* Given slave must not be in active list */
	RTE_ASSERT(find_slave_by_id(internals->active_slaves,
	internals->active_slave_count, slave_id) == internals->active_slave_count);
	RTE_SET_USED(internals); /* used only for assert when enabled */

	memcpy(&port->actor, &initial, sizeof(struct port_params));
	/* Standard requires that the port ID must be greater than 0.
	 * Add 1 to get the corresponding port_number. */
	port->actor.port_number = rte_cpu_to_be_16(slave_id + 1);

	memcpy(&port->partner, &initial, sizeof(struct port_params));

	/* default states */
	port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
	port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
	port->sm_flags = SM_FLAGS_BEGIN;

	/* use this port as aggregator */
	port->aggregator_port_id = slave_id;
	rte_eth_promiscuous_enable(slave_id);

	timer_cancel(&port->warning_timer);

	if (port->mbuf_pool != NULL)
		return;

	RTE_ASSERT(port->rx_ring == NULL);
	RTE_ASSERT(port->tx_ring == NULL);

	socket_id = rte_eth_dev_socket_id(slave_id);
	if (socket_id == (int)LCORE_ID_ANY)
		socket_id = rte_socket_id();

	element_size = sizeof(struct slow_protocol_frame) +
			RTE_PKTMBUF_HEADROOM;

	/* The size of the mempool should be at least:
	 * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
	total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS;
	for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bond_dev->data->tx_queues[q_id];
		total_tx_desc += bd_tx_q->nb_tx_desc;
	}

	snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
	port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
		RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
			32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
		0, element_size, socket_id);

	/* Any memory allocation failure in initialization is critical because
	 * resources can't be freed, so reinitialization is impossible. */
	if (port->mbuf_pool == NULL) {
		rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
			slave_id, mem_name, rte_strerror(rte_errno));
	}

	snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
	port->rx_ring = rte_ring_create(mem_name,
			rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);

	if (port->rx_ring == NULL) {
		rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
			mem_name, rte_strerror(rte_errno));
	}

	/* TX ring is at least one pkt longer to make room for marker packet. */
	snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
	port->tx_ring = rte_ring_create(mem_name,
			rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);

	if (port->tx_ring == NULL) {
		rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
			mem_name, rte_strerror(rte_errno));
	}
}

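/*
 * Sizing note for the pool created above: it must cover every TX
 * descriptor of the bonded device plus BOND_MODE_8023AX_SLAVE_TX_PKTS,
 * because slow-protocol mbufs may sit in slave TX queues while others are
 * still waiting in the software TX ring.
 */
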
int
bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused,
		uint16_t slave_id)
{
	void *pkt = NULL;
	struct port *port = NULL;
	uint8_t old_partner_state;

	port = &mode_8023ad_ports[slave_id];

	ACTOR_STATE_CLR(port, AGGREGATION);
	port->selected = UNSELECTED;

	old_partner_state = port->partner_state;
	record_default(port);

	/* Cancel the current_while timer unless the partner timeout state
	 * was changed by record_default() above. */
	if (!((old_partner_state ^ port->partner_state) &
			STATE_LACP_SHORT_TIMEOUT))
		timer_cancel(&port->current_while_timer);

	PARTNER_STATE_CLR(port, AGGREGATION);
	ACTOR_STATE_CLR(port, EXPIRED);

	/* flush rx/tx rings */
	while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
		rte_pktmbuf_free((struct rte_mbuf *)pkt);

	while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)
		rte_pktmbuf_free((struct rte_mbuf *)pkt);

	return 0;
}

void
bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
{
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct ether_addr slave_addr;
	struct port *slave, *agg_slave;
	uint16_t slave_id, i, j;

	bond_mode_8023ad_stop(bond_dev);

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		slave = &mode_8023ad_ports[slave_id];
		rte_eth_macaddr_get(slave_id, &slave_addr);

		if (is_same_ether_addr(&slave_addr, &slave->actor.system))
			continue;

		ether_addr_copy(&slave_addr, &slave->actor.system);
		/* Do nothing if this port is not an aggregator. Otherwise set
		 * the NTT flag on every port that uses this aggregator. */
		if (slave->aggregator_port_id != slave_id)
			continue;

		for (j = 0; j < internals->active_slave_count; j++) {
			agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
			if (agg_slave->aggregator_port_id == slave_id)
				SM_FLAG_SET(agg_slave, NTT);
		}
	}

	if (bond_dev->data->dev_started)
		bond_mode_8023ad_start(bond_dev);
}

static void
bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_bond_8023ad_conf *conf)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct mode8023ad_private *mode4 = &internals->mode4;
	uint64_t ms_ticks = rte_get_tsc_hz() / 1000;

	conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks;
	conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks;
	conf->short_timeout_ms = mode4->short_timeout / ms_ticks;
	conf->long_timeout_ms = mode4->long_timeout / ms_ticks;
	conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks;
	conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
	conf->update_timeout_ms = mode4->update_timeout_us / 1000;
	conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks;
	conf->slowrx_cb = mode4->slowrx_cb;
	conf->agg_selection = mode4->agg_selection;
}

static void
bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
{
	conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
	conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
	conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
	conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
	conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
	conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
	conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
	conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
	conf->slowrx_cb = NULL;
	conf->agg_selection = AGG_STABLE;
}

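/*
 * Passing conf == NULL to bond_mode_8023ad_setup() (and therefore to the
 * public rte_eth_bond_8023ad_setup()) selects exactly these defaults.
 */
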
static void
bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
		struct rte_eth_bond_8023ad_conf *conf)
{
	uint64_t ms_ticks = rte_get_tsc_hz() / 1000;

	mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
	mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
	mode4->short_timeout = conf->short_timeout_ms * ms_ticks;
	mode4->long_timeout = conf->long_timeout_ms * ms_ticks;
	mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks;
	mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
	mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
	mode4->update_timeout_us = conf->update_timeout_ms * 1000;

	mode4->dedicated_queues.enabled = 0;
	mode4->dedicated_queues.rx_qid = UINT16_MAX;
	mode4->dedicated_queues.tx_qid = UINT16_MAX;
}

static void
bond_mode_8023ad_setup(struct rte_eth_dev *dev,
		struct rte_eth_bond_8023ad_conf *conf)
{
	struct rte_eth_bond_8023ad_conf def_conf;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct mode8023ad_private *mode4 = &internals->mode4;

	if (conf == NULL) {
		conf = &def_conf;
		bond_mode_8023ad_conf_get_default(conf);
	}

	bond_mode_8023ad_stop(dev);
	bond_mode_8023ad_conf_assign(mode4, conf);
	mode4->slowrx_cb = conf->slowrx_cb;
	mode4->agg_selection = AGG_STABLE;

	if (dev->data->dev_started)
		bond_mode_8023ad_start(dev);
}

void
bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
{
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	uint16_t i;

	for (i = 0; i < internals->active_slave_count; i++)
		bond_mode_8023ad_activate_slave(bond_dev,
				internals->active_slaves[i]);
}

int
bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
{
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct mode8023ad_private *mode4 = &internals->mode4;
	static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;

	if (mode4->slowrx_cb)
		return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
				bond_dev);

	return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev);
}

void
bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
{
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct mode8023ad_private *mode4 = &internals->mode4;

	if (mode4->slowrx_cb) {
		rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb,
				bond_dev);
		return;
	}

	rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
}

void
bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
		uint16_t slave_id, struct rte_mbuf *pkt)
{
	struct mode8023ad_private *mode4 = &internals->mode4;
	struct port *port = &mode_8023ad_ports[slave_id];
	struct marker_header *m_hdr;
	uint64_t marker_timer, old_marker_timer;
	int retval;
	uint8_t wrn, subtype;
	/* If the packet is a marker, send the response now by reusing the
	 * given packet and updating only the source MAC; the destination MAC
	 * is multicast, so don't touch it. Other frames will be handled later
	 * by the state machines. */
	subtype = rte_pktmbuf_mtod(pkt,
			struct slow_protocol_frame *)->slow_protocol.subtype;

	if (subtype == SLOW_SUBTYPE_MARKER) {
		m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *);

		if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) {
			wrn = WRN_UNKNOWN_MARKER_TYPE;
			goto free_out;
		}

		/* Setup marker timer. Do it in a loop in case of concurrent access. */
		do {
			old_marker_timer = port->rx_marker_timer;
			if (!timer_is_expired(&old_marker_timer)) {
				wrn = WRN_RX_MARKER_TO_FAST;
				goto free_out;
			}

			timer_set(&marker_timer, mode4->rx_marker_timeout);
			retval = rte_atomic64_cmpset(&port->rx_marker_timer,
				old_marker_timer, marker_timer);
		} while (unlikely(retval == 0));

		m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
		rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);

		if (internals->mode4.dedicated_queues.enabled == 0) {
			int retval = rte_ring_enqueue(port->tx_ring, pkt);
			if (retval != 0) {
				/* reset timer */
				port->rx_marker_timer = 0;
				wrn = WRN_TX_QUEUE_FULL;
				goto free_out;
			}
		} else {
			/* Send packet directly to the slow queue */
			uint16_t tx_count = rte_eth_tx_burst(slave_id,
					internals->mode4.dedicated_queues.tx_qid,
					&pkt, 1);
			if (tx_count != 1) {
				/* reset timer */
				port->rx_marker_timer = 0;
				wrn = WRN_TX_QUEUE_FULL;
				goto free_out;
			}
		}
	} else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
		if (internals->mode4.dedicated_queues.enabled == 0) {
			int retval = rte_ring_enqueue(port->rx_ring, pkt);
			if (retval != 0) {
				/* If the RX ring is full, free the LACPDU and
				 * drop the packet. */
				wrn = WRN_RX_QUEUE_FULL;
				goto free_out;
			}
		} else
			rx_machine_update(internals, slave_id, pkt);
	} else {
		wrn = WRN_UNKNOWN_SLOW_TYPE;
		goto free_out;
	}

	return;

free_out:
	set_warning_flags(port, wrn);
	rte_pktmbuf_free(pkt);
}

int
rte_eth_bond_8023ad_conf_get(uint16_t port_id,
		struct rte_eth_bond_8023ad_conf *conf)
{
	struct rte_eth_dev *bond_dev;

	if (valid_bonded_port_id(port_id) != 0)
		return -EINVAL;

	if (conf == NULL)
		return -EINVAL;

	bond_dev = &rte_eth_devices[port_id];
	bond_mode_8023ad_conf_get(bond_dev, conf);
	return 0;
}

int
rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
		enum rte_bond_8023ad_agg_selection agg_selection)
{
	struct rte_eth_dev *bond_dev;
	struct bond_dev_private *internals;
	struct mode8023ad_private *mode4;

	/* Validate the port id before touching rte_eth_devices[] */
	if (valid_bonded_port_id(port_id) != 0)
		return -EINVAL;

	bond_dev = &rte_eth_devices[port_id];
	internals = bond_dev->data->dev_private;

	if (internals->mode != 4)
		return -EINVAL;

	mode4 = &internals->mode4;
	if (agg_selection == AGG_COUNT || agg_selection == AGG_BANDWIDTH
			|| agg_selection == AGG_STABLE)
		mode4->agg_selection = agg_selection;
	return 0;
}

int
rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id)
{
	struct rte_eth_dev *bond_dev;
	struct bond_dev_private *internals;
	struct mode8023ad_private *mode4;

	/* Validate the port id before touching rte_eth_devices[] */
	if (valid_bonded_port_id(port_id) != 0)
		return -EINVAL;

	bond_dev = &rte_eth_devices[port_id];
	internals = bond_dev->data->dev_private;

	if (internals->mode != 4)
		return -EINVAL;

	mode4 = &internals->mode4;

	return mode4->agg_selection;
}

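/*
 * Usage sketch (illustrative; "bond_port_id" is a placeholder): switch the
 * aggregator selection policy at runtime and read it back:
 *
 *	rte_eth_bond_8023ad_agg_selection_set(bond_port_id, AGG_BANDWIDTH);
 *	int sel = rte_eth_bond_8023ad_agg_selection_get(bond_port_id);
 */
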
static int
bond_8023ad_setup_validate(uint16_t port_id,
		struct rte_eth_bond_8023ad_conf *conf)
{
	if (valid_bonded_port_id(port_id) != 0)
		return -EINVAL;

	if (conf != NULL) {
		/* Basic sanity check */
		if (conf->slow_periodic_ms == 0 ||
				conf->fast_periodic_ms >= conf->slow_periodic_ms ||
				conf->long_timeout_ms == 0 ||
				conf->short_timeout_ms >= conf->long_timeout_ms ||
				conf->aggregate_wait_timeout_ms == 0 ||
				conf->tx_period_ms == 0 ||
				conf->rx_marker_period_ms == 0 ||
				conf->update_timeout_ms == 0) {
			RTE_BOND_LOG(ERR, "given mode 4 configuration is invalid");
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_bond_8023ad_setup(uint16_t port_id,
		struct rte_eth_bond_8023ad_conf *conf)
{
	struct rte_eth_dev *bond_dev;
	int err;

	err = bond_8023ad_setup_validate(port_id, conf);
	if (err != 0)
		return err;

	bond_dev = &rte_eth_devices[port_id];
	bond_mode_8023ad_setup(bond_dev, conf);

	return 0;
}

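/*
 * Typical application-side usage (illustrative sketch; "bond_port_id" is a
 * placeholder): fetch the current configuration, adjust a field and apply
 * it, keeping the ordering constraints checked by
 * bond_8023ad_setup_validate() in mind:
 *
 *	struct rte_eth_bond_8023ad_conf conf;
 *
 *	rte_eth_bond_8023ad_conf_get(bond_port_id, &conf);
 *	conf.fast_periodic_ms = 100;	(must stay below slow_periodic_ms)
 *	rte_eth_bond_8023ad_setup(bond_port_id, &conf);
 */
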
int
rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
		struct rte_eth_bond_8023ad_slave_info *info)
{
	struct rte_eth_dev *bond_dev;
	struct bond_dev_private *internals;
	struct port *port;

	if (info == NULL || valid_bonded_port_id(port_id) != 0 ||
			rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
		return -EINVAL;

	bond_dev = &rte_eth_devices[port_id];

	internals = bond_dev->data->dev_private;
	if (find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, slave_id) ==
				internals->active_slave_count)
		return -EINVAL;

	port = &mode_8023ad_ports[slave_id];
	info->selected = port->selected;

	info->actor_state = port->actor_state;
	rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));

	info->partner_state = port->partner_state;
	rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));

	info->agg_port_id = port->aggregator_port_id;
	return 0;
}

static int
bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id)
{
	struct rte_eth_dev *bond_dev;
	struct bond_dev_private *internals;
	struct mode8023ad_private *mode4;

	if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
		return -EINVAL;

	bond_dev = &rte_eth_devices[port_id];

	if (!bond_dev->data->dev_started)
		return -EINVAL;

	internals = bond_dev->data->dev_private;
	if (find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, slave_id) ==
				internals->active_slave_count)
		return -EINVAL;

	mode4 = &internals->mode4;
	if (mode4->slowrx_cb == NULL)
		return -EINVAL;

	return 0;
}

int
rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
		int enabled)
{
	struct port *port;
	int res;

	res = bond_8023ad_ext_validate(port_id, slave_id);
	if (res != 0)
		return res;

	port = &mode_8023ad_ports[slave_id];

	if (enabled)
		ACTOR_STATE_SET(port, COLLECTING);
	else
		ACTOR_STATE_CLR(port, COLLECTING);

	return 0;
}

int
rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
		int enabled)
{
	struct port *port;
	int res;

	res = bond_8023ad_ext_validate(port_id, slave_id);
	if (res != 0)
		return res;

	port = &mode_8023ad_ports[slave_id];

	if (enabled)
		ACTOR_STATE_SET(port, DISTRIBUTING);
	else
		ACTOR_STATE_CLR(port, DISTRIBUTING);

	return 0;
}

int
rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id)
{
	struct port *port;
	int err;

	err = bond_8023ad_ext_validate(port_id, slave_id);
	if (err != 0)
		return err;

	port = &mode_8023ad_ports[slave_id];
	return ACTOR_STATE(port, DISTRIBUTING);
}

int
rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id)
{
	struct port *port;
	int err;

	err = bond_8023ad_ext_validate(port_id, slave_id);
	if (err != 0)
		return err;

	port = &mode_8023ad_ports[slave_id];
	return ACTOR_STATE(port, COLLECTING);
}

int
rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
		struct rte_mbuf *lacp_pkt)
{
	struct port *port;
	int res;

	res = bond_8023ad_ext_validate(port_id, slave_id);
	if (res != 0)
		return res;

	port = &mode_8023ad_ports[slave_id];

	if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
		return -EINVAL;

	struct lacpdu_header *lacp;

	/* only enqueue LACPDUs */
	lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
	if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP)
		return -EINVAL;

	MODE4_DEBUG("sending LACP frame\n");

	return rte_ring_enqueue(port->tx_ring, lacp_pkt);
}

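/*
 * External-mode usage sketch (illustrative; names are placeholders): an
 * application that registered a slowrx_cb drives the handshake itself,
 * e.g. after parsing a partner LACPDU:
 *
 *	if (rte_eth_bond_8023ad_ext_collect_get(bond_port_id, slave_id) == 0)
 *		rte_eth_bond_8023ad_ext_collect(bond_port_id, slave_id, 1);
 *	rte_eth_bond_8023ad_ext_slowtx(bond_port_id, slave_id, reply_mbuf);
 */
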
static void
bond_mode_8023ad_ext_periodic_cb(void *arg)
{
	struct rte_eth_dev *bond_dev = arg;
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct mode8023ad_private *mode4 = &internals->mode4;
	struct port *port;
	void *pkt = NULL;
	uint16_t i, slave_id;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		port = &mode_8023ad_ports[slave_id];

		if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
			struct rte_mbuf *lacp_pkt = pkt;
			struct lacpdu_header *lacp;

			lacp = rte_pktmbuf_mtod(lacp_pkt,
					struct lacpdu_header *);
			RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);

			/* This is LACP frame so pass it to rx callback.
			 * Callback is responsible for freeing mbuf.
			 */
			mode4->slowrx_cb(slave_id, lacp_pkt);
		}
	}

	rte_eal_alarm_set(internals->mode4.update_timeout_us,
			bond_mode_8023ad_ext_periodic_cb, arg);
}

int
rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port)
{
	int retval = 0;
	struct rte_eth_dev *dev = &rte_eth_devices[port];
	struct bond_dev_private *internals = (struct bond_dev_private *)
		dev->data->dev_private;

	if (check_for_bonded_ethdev(dev) != 0)
		return -1;

	if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0)
		return -1;

	/* Device must be stopped to set up slow queues */
	if (dev->data->dev_started)
		return -1;

	internals->mode4.dedicated_queues.enabled = 1;

	bond_ethdev_mode_set(dev, internals->mode);
	return retval;
}

int
rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port)
{
	int retval = 0;
	struct rte_eth_dev *dev = &rte_eth_devices[port];
	struct bond_dev_private *internals = (struct bond_dev_private *)
		dev->data->dev_private;

	if (check_for_bonded_ethdev(dev) != 0)
		return -1;

	/* Device must be stopped to set up slow queues */
	if (dev->data->dev_started)
		return -1;

	internals->mode4.dedicated_queues.enabled = 0;

	bond_ethdev_mode_set(dev, internals->mode);
	return retval;
}
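
/*
 * Usage sketch (illustrative; "bond_port_id" is a placeholder): dedicated
 * queues may only be toggled while the bonded device is stopped:
 *
 *	rte_eth_dev_stop(bond_port_id);
 *	rte_eth_bond_8023ad_dedicated_queues_enable(bond_port_id);
 *	rte_eth_dev_start(bond_port_id);
 *
 * With dedicated queues enabled, LACPDUs bypass the software rx/tx rings
 * and travel through a reserved hardware queue pair on each slave.
 */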