1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
10 #include <rte_malloc.h>
11 #include <rte_errno.h>
12 #include <rte_cycles.h>
13 #include <rte_compat.h>
15 #include "rte_eth_bond_private.h"
17 static void bond_mode_8023ad_ext_periodic_cb(void *arg);
18 #ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
/* Debug build: MODE4_DEBUG() prefixes each message with milliseconds since
 * driver start, the slave port id and the calling function name.
 * Requires `slave_id` to be in scope at every call site. */
19 #define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
20 bond_dbg_get_time_diff_ms(), slave_id, \
21 __func__, ##__VA_ARGS__)
/* TSC snapshot taken at startup; reference point for the debug timestamps. */
23 static uint64_t start_time;
/* Milliseconds elapsed since start_time, converted from TSC ticks.
 * NOTE(review): return-type line, braces and the initialization of `now`
 * are elided in this chunk — presumably `uint64_t now = rte_rdtsc();`. */
26 bond_dbg_get_time_diff_ms(void)
34 return ((now - start_time) * 1000) / rte_get_tsc_hz();
/* Debug helper: dump every field of a received/transmitted LACPDU,
 * including MAC addresses and a symbolic expansion of the actor/partner
 * state bitmasks. NOTE(review): several declarations (a_address, p_address,
 * i, a_len, p_len) and closing braces are elided in this chunk. */
38 bond_print_lacp(struct lacpdu *l)
42 char a_state[256] = { 0 };
43 char p_state[256] = { 0 };
/* One label per state bit, LSB first: bit 0 = ACT ... bit 7 = EXP. */
45 static const char * const state_labels[] = {
46 "ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
/* Format actor and partner system MAC addresses. */
54 addr = l->actor.port_params.system.addr_bytes;
55 snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
56 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
58 addr = l->partner.port_params.system.addr_bytes;
59 snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
60 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
/* Expand each set state bit into its label, space separated. */
62 for (i = 0; i < 8; i++) {
63 if ((l->actor.state >> i) & 1) {
64 a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ",
68 if ((l->partner.state >> i) & 1) {
69 p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ",
/* Trim the trailing space left by the loop above. */
74 if (a_len && a_state[a_len-1] == ' ')
75 a_state[a_len-1] = '\0';
77 if (p_len && p_state[p_len-1] == ' ')
78 p_state[p_len-1] = '\0';
/* Single multi-line log of the whole PDU (do not insert anything inside
 * this backslash-continued call). */
80 RTE_LOG(DEBUG, PMD, "LACP: {\n"\
83 " actor={ tlv=%02X, len=%02X\n"\
84 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
87 " partner={ tlv=%02X, len=%02X\n"\
88 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
91 " collector={info=%02X, length=%02X, max_delay=%04X\n, " \
92 "type_term=%02X, terminator_length = %02X}\n",\
95 l->actor.tlv_type_info,\
96 l->actor.info_length,\
97 l->actor.port_params.system_priority,\
99 l->actor.port_params.key,\
100 l->actor.port_params.port_priority,\
101 l->actor.port_params.port_number,\
103 l->partner.tlv_type_info,\
104 l->partner.info_length,\
105 l->partner.port_params.system_priority,\
107 l->partner.port_params.key,\
108 l->partner.port_params.port_priority,\
109 l->partner.port_params.port_number,\
111 l->tlv_type_collector_info,\
112 l->collector_info_length,\
113 l->collector_max_delay,\
114 l->tlv_type_terminator,\
115 l->terminator_length);
/* Debug build: real PDU dump; non-debug build: both macros compile to
 * no-ops. NOTE(review): the #else/#endif lines are elided in this chunk. */
118 #define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
120 #define BOND_PRINT_LACP(lacpdu) do { } while (0)
121 #define MODE4_DEBUG(fmt, ...) do { } while (0)
/* IEEE 802.3 Slow Protocols multicast destination address
 * (01-80-C2-00-00-02) used for all outgoing LACPDUs. */
124 static const struct ether_addr lacp_mac_addr = {
125 .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
/* Per-ethdev-port 802.3ad state, indexed by slave port id. */
128 struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
/* Timer helpers. A "timer" is a uint64_t holding an absolute TSC deadline.
 * NOTE(review): bodies of timer_cancel() and timer_is_stopped() are elided
 * in this chunk — presumably "stopped" is encoded as deadline == 0. */
131 timer_cancel(uint64_t *timer)
/* Arm the timer: deadline = now + timeout (both in TSC ticks). */
137 timer_set(uint64_t *timer, uint64_t timeout)
139 *timer = rte_rdtsc() + timeout;
142 /* Forces given timer to be in expired state. */
144 timer_force_expired(uint64_t *timer)
146 *timer = rte_rdtsc();
150 timer_is_stopped(uint64_t *timer)
/* Expired once the current TSC has passed the stored deadline. */
156 timer_is_expired(uint64_t *timer)
158 return *timer < rte_rdtsc();
161 /* Timer is in running state if it is not stopped nor expired */
163 timer_is_running(uint64_t *timer)
165 return !timer_is_stopped(timer) && !timer_is_expired(timer);
/* Lock-free OR of `flags` into port->warnings_to_show, retried via
 * compare-and-set until no concurrent writer interferes. Flags are drained
 * and reported later by show_warnings(). */
169 set_warning_flags(struct port *port, uint16_t flags)
173 uint16_t new_flag = 0;
176 old = port->warnings_to_show;
177 new_flag = old | flags;
178 retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag);
179 } while (unlikely(retval == 0));
/* Atomically drain the accumulated warning flags for a slave and log one
 * message per flag, rate-limited by warning_timer to once per
 * BOND_8023AD_WARNINGS_PERIOD_MS. */
183 show_warnings(uint16_t slave_id)
185 struct port *port = &mode_8023ad_ports[slave_id];
/* CAS loop: read-and-clear warnings_to_show without losing flags set
 * concurrently by the datapath. */
189 warnings = port->warnings_to_show;
190 } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0);
/* Rate limit: stay silent until the previous warning period elapsed. */
195 if (!timer_is_expired(&port->warning_timer))
199 timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS *
200 rte_get_tsc_hz() / 1000);
202 if (warnings & WRN_RX_QUEUE_FULL) {
204 "Slave %u: failed to enqueue LACP packet into RX ring.\n"
205 "Receive and transmit functions must be invoked on bonded\n"
206 "interface at least 10 times per second or LACP will not\n"
207 "work correctly\n", slave_id);
210 if (warnings & WRN_TX_QUEUE_FULL) {
212 "Slave %u: failed to enqueue LACP packet into TX ring.\n"
213 "Receive and transmit functions must be invoked on bonded\n"
214 "interface at least 10 times per second or LACP will not\n"
215 "work correctly\n", slave_id);
/* NOTE(review): runtime string says "marker to early"; should read
 * "marker too early" but changing it alters logged output. */
218 if (warnings & WRN_RX_MARKER_TO_FAST)
219 RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id);
221 if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
223 "Slave %u: ignoring unknown slow protocol frame type", slave_id);
226 if (warnings & WRN_UNKNOWN_MARKER_TYPE)
227 RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id);
229 if (warnings & WRN_NOT_LACP_CAPABLE)
230 MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
/* Reset partner view to defaults and mark the actor DEFAULTED; used on
 * (re)initialization and when partner information is lost. */
234 record_default(struct port *port)
236 /* Record default parameters for partner. Partner admin parameters
237 * are not implemented so set them to arbitrary default (last known) and
238 * mark actor that partner is in defaulted state. */
239 port->partner_state = STATE_LACP_ACTIVE;
240 ACTOR_STATE_SET(port, DEFAULTED);
243 /** Function handles rx state machine.
245 * This function implements Receive State Machine from point 5.4.12 in
246 * 802.1AX documentation. It should be called periodically.
248 * @param lacpdu LACPDU received.
249 * @param port Port on which LACPDU was received.
/* NOTE(review): several lines (third parameter `lacp`, else branches,
 * braces) are elided in this chunk; code kept byte-identical. */
252 rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
255 struct port *agg, *port = &mode_8023ad_ports[slave_id];
/* INITIALIZE: first pass after (re)activation of the port. */
258 if (SM_FLAG(port, BEGIN)) {
259 /* Initialize stuff */
260 MODE4_DEBUG("-> INITIALIZE\n");
261 SM_FLAG_CLR(port, MOVED);
262 port->selected = UNSELECTED;
264 record_default(port);
266 ACTOR_STATE_CLR(port, EXPIRED);
267 timer_cancel(&port->current_while_timer);
269 /* DISABLED: On initialization partner is out of sync */
270 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
272 /* LACP DISABLED stuff if LACP not enabled on this port */
273 if (!SM_FLAG(port, LACP_ENABLED))
274 PARTNER_STATE_CLR(port, AGGREGATION);
276 PARTNER_STATE_SET(port, AGGREGATION);
/* LACP disabled on this port: fall back to defaulted partner info. */
279 if (!SM_FLAG(port, LACP_ENABLED)) {
280 /* Update parameters only if state changed */
281 if (!timer_is_stopped(&port->current_while_timer)) {
282 port->selected = UNSELECTED;
283 record_default(port);
284 PARTNER_STATE_CLR(port, AGGREGATION);
285 ACTOR_STATE_CLR(port, EXPIRED);
286 timer_cancel(&port->current_while_timer);
/* CURRENT: a LACPDU was received — record it and refresh timers. */
292 MODE4_DEBUG("LACP -> CURRENT\n");
293 BOND_PRINT_LACP(lacp);
294 /* Update selected flag. If partner parameters are defaulted assume they
295 * match. If not defaulted compare LACP actor with port's partner
297 if (!ACTOR_STATE(port, DEFAULTED) &&
298 (ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
299 || memcmp(&port->partner, &lacp->actor.port_params,
300 sizeof(port->partner)) != 0)) {
301 MODE4_DEBUG("selected <- UNSELECTED\n");
302 port->selected = UNSELECTED;
305 /* Record this PDU actor params as partner params */
306 memcpy(&port->partner, &lacp->actor.port_params,
307 sizeof(struct port_params));
308 port->partner_state = lacp->actor.state;
310 /* Partner parameters are not defaulted any more */
311 ACTOR_STATE_CLR(port, DEFAULTED);
313 /* If LACP partner params match this port actor params */
314 agg = &mode_8023ad_ports[port->aggregator_port_id];
315 bool match = port->actor.system_priority ==
316 lacp->partner.port_params.system_priority &&
317 is_same_ether_addr(&agg->actor.system,
318 &lacp->partner.port_params.system) &&
319 port->actor.port_priority ==
320 lacp->partner.port_params.port_priority &&
321 port->actor.port_number ==
322 lacp->partner.port_params.port_number;
324 /* Update NTT if partners information are outdated (xored and masked
326 uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
327 STATE_SYNCHRONIZATION | STATE_AGGREGATION;
329 if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
331 SM_FLAG_SET(port, NTT);
334 /* If LACP partner params match this port actor params */
335 if (match == true && ACTOR_STATE(port, AGGREGATION) ==
336 PARTNER_STATE(port, AGGREGATION))
337 PARTNER_STATE_SET(port, SYNCHRONIZATION);
338 else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
340 PARTNER_STATE_SET(port, SYNCHRONIZATION);
342 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
/* Refresh current_while with short/long timeout per our own setting. */
344 if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
345 timeout = internals->mode4.short_timeout;
347 timeout = internals->mode4.long_timeout;
349 timer_set(&port->current_while_timer, timeout);
350 ACTOR_STATE_CLR(port, EXPIRED);
351 return; /* No state change */
354 /* If CURRENT state timer is not running (stopped or expired)
355 * transit to EXPIRED state from DISABLED or CURRENT */
356 if (!timer_is_running(&port->current_while_timer)) {
357 ACTOR_STATE_SET(port, EXPIRED);
358 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
359 PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
360 timer_set(&port->current_while_timer, internals->mode4.short_timeout);
365 * Function handles periodic tx state machine.
367 * Function implements Periodic Transmission state machine from point 5.4.13
368 * in 802.1AX documentation. It should be called periodically.
370 * @param port Port to handle state machine.
373 periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
375 struct port *port = &mode_8023ad_ports[slave_id];
376 /* Calculate if either side is LACP enabled */
378 uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
379 PARTNER_STATE(port, LACP_ACTIVE);
381 uint8_t is_partner_fast, was_partner_fast;
382 /* No periodic is on BEGIN, LACP DISABLE or when both sides are passive */
383 if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
384 timer_cancel(&port->periodic_timer);
/* Force the tx machine timer so a frame can go out as soon as NTT is
 * set again after leaving NO_PERIODIC. */
385 timer_force_expired(&port->tx_machine_timer);
386 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
388 MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
389 SM_FLAG(port, BEGIN) ? "begind " : "",
390 SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
391 active ? "LACP active " : "LACP pasive ");
395 is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
396 was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);
398 /* If periodic timer is not started, transit from NO PERIODIC to FAST/SLOW.
399 * Other case: check if timer expire or partners settings changed. */
400 if (!timer_is_stopped(&port->periodic_timer)) {
401 if (timer_is_expired(&port->periodic_timer)) {
402 SM_FLAG_SET(port, NTT);
403 } else if (is_partner_fast != was_partner_fast) {
404 /* Partners timeout was slow and now it is fast -> send LACP.
405 * In other case (was fast and now it is slow) just switch
406 * timeout to slow without forcing send of LACP (because standard
409 SM_FLAG_SET(port, NTT);
411 return; /* Nothing changed */
414 /* Handle state transition to FAST/SLOW LACP timeout */
415 if (is_partner_fast) {
416 timeout = internals->mode4.fast_periodic_timeout;
417 SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
419 timeout = internals->mode4.slow_periodic_timeout;
420 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
423 timer_set(&port->periodic_timer, timeout);
427 * Function handles mux state machine.
429 * Function implements Mux Machine from point 5.4.15 in 802.1AX documentation.
430 * It should be called periodically.
432 * @param port Port to handle state machine.
435 mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
437 struct port *port = &mode_8023ad_ports[slave_id];
439 /* Save current state for later use */
440 const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
443 /* Enter DETACHED state on BEGIN condition or from any other state if
444 * port was unselected */
445 if (SM_FLAG(port, BEGIN) ||
446 port->selected == UNSELECTED || (port->selected == STANDBY &&
447 (port->actor_state & state_mask) != 0)) {
448 /* detach mux from aggregator */
449 port->actor_state &= ~state_mask;
450 /* Set ntt to true if BEGIN condition or transition from any other state
451 * which is indicated that wait_while_timer was started */
452 if (SM_FLAG(port, BEGIN) ||
453 !timer_is_stopped(&port->wait_while_timer)) {
454 SM_FLAG_SET(port, NTT);
455 MODE4_DEBUG("-> DETACHED\n");
457 timer_cancel(&port->wait_while_timer);
460 if (timer_is_stopped(&port->wait_while_timer)) {
461 if (port->selected == SELECTED || port->selected == STANDBY) {
462 timer_set(&port->wait_while_timer,
463 internals->mode4.aggregate_wait_timeout);
465 MODE4_DEBUG("DETACHED -> WAITING\n");
467 /* Waiting state entered */
471 /* Transit next state if port is ready */
472 if (!timer_is_expired(&port->wait_while_timer))
475 if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
476 !PARTNER_STATE(port, SYNCHRONIZATION)) {
477 /* If in COLLECTING or DISTRIBUTING state and partner becomes out of
478 * sync transit to ATACHED state. */
479 ACTOR_STATE_CLR(port, DISTRIBUTING);
480 ACTOR_STATE_CLR(port, COLLECTING);
481 /* Clear actor sync to activate transit ATACHED in condition bellow */
482 ACTOR_STATE_CLR(port, SYNCHRONIZATION);
483 MODE4_DEBUG("Out of sync -> ATTACHED\n");
486 if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
487 /* attach mux to aggregator */
488 RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
489 STATE_DISTRIBUTING)) == 0);
491 ACTOR_STATE_SET(port, SYNCHRONIZATION);
492 SM_FLAG_SET(port, NTT);
493 MODE4_DEBUG("ATTACHED Entered\n");
494 } else if (!ACTOR_STATE(port, COLLECTING)) {
495 /* Start collecting if in sync */
496 if (PARTNER_STATE(port, SYNCHRONIZATION)) {
497 MODE4_DEBUG("ATTACHED -> COLLECTING\n");
498 ACTOR_STATE_SET(port, COLLECTING);
499 SM_FLAG_SET(port, NTT);
501 } else if (ACTOR_STATE(port, COLLECTING)) {
502 /* Check if partner is in COLLECTING state. If so this port can
503 * distribute frames to it */
504 if (!ACTOR_STATE(port, DISTRIBUTING)) {
505 if (PARTNER_STATE(port, COLLECTING)) {
506 /* Enable DISTRIBUTING if partner is collecting */
507 ACTOR_STATE_SET(port, DISTRIBUTING);
508 SM_FLAG_SET(port, NTT);
509 MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
511 "Bond %u: slave id %u distributing started.\n",
512 internals->port_id, slave_id);
515 if (!PARTNER_STATE(port, COLLECTING)) {
516 /* Disable DISTRIBUTING (enter COLLECTING state) if partner
517 * is not collecting */
518 ACTOR_STATE_CLR(port, DISTRIBUTING);
519 SM_FLAG_SET(port, NTT);
520 MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
522 "Bond %u: slave id %u distributing stopped.\n",
523 internals->port_id, slave_id);
530 * Function handles transmit state machine.
532 * Function implements Transmit Machine from point 5.4.16 in 802.1AX
/* NOTE(review): several lines (return statements, braces, rate-limit
 * comment) are elided in this chunk; code kept byte-identical. */
538 tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
540 struct port *agg, *port = &mode_8023ad_ports[slave_id];
542 struct rte_mbuf *lacp_pkt = NULL;
543 struct lacpdu_header *hdr;
544 struct lacpdu *lacpdu;
546 /* If periodic timer is not running periodic machine is in NO PERIODIC and
547 * according to 802.3ax standard tx machine should not transmit any frames
548 * and set ntt to false. */
549 if (timer_is_stopped(&port->periodic_timer))
550 SM_FLAG_CLR(port, NTT);
552 if (!SM_FLAG(port, NTT))
/* Rate limit: no more than one LACPDU per tx_period_timeout. */
555 if (!timer_is_expired(&port->tx_machine_timer))
558 lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
559 if (lacp_pkt == NULL) {
560 RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n");
564 lacp_pkt->data_len = sizeof(*hdr);
565 lacp_pkt->pkt_len = sizeof(*hdr);
567 hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
569 /* Source and destination MAC */
570 ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
571 rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
572 hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
574 lacpdu = &hdr->lacpdu;
575 memset(lacpdu, 0, sizeof(*lacpdu));
577 /* Initialize LACP part */
578 lacpdu->subtype = SLOW_SUBTYPE_LACP;
579 lacpdu->version_number = 1;
/* Actor TLV: our own parameters, system MAC taken from the aggregator. */
582 lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
583 lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
584 memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
585 sizeof(port->actor));
586 agg = &mode_8023ad_ports[port->aggregator_port_id];
587 ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
588 lacpdu->actor.state = port->actor_state;
/* Partner TLV: echo back what we last recorded from the partner. */
591 lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
592 lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
593 memcpy(&lacpdu->partner.port_params, &port->partner,
594 sizeof(struct port_params));
595 lacpdu->partner.state = port->partner_state;
598 lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
599 lacpdu->collector_info_length = 0x10;
600 lacpdu->collector_max_delay = 0;
602 lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
603 lacpdu->terminator_length = 0;
605 MODE4_DEBUG("Sending LACP frame\n");
606 BOND_PRINT_LACP(lacpdu);
/* Non-dedicated queues: hand the frame to the datapath via tx_ring;
 * dedicated queues: transmit directly on the slave. */
608 if (internals->mode4.dedicated_queues.enabled == 0) {
609 int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);
611 /* If TX ring full, drop packet and free message.
612 Retransmission will happen in next function call. */
613 rte_pktmbuf_free(lacp_pkt);
614 set_warning_flags(port, WRN_TX_QUEUE_FULL);
618 uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
619 internals->mode4.dedicated_queues.tx_qid,
621 if (pkts_sent != 1) {
622 rte_pktmbuf_free(lacp_pkt);
623 set_warning_flags(port, WRN_TX_QUEUE_FULL);
629 timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
630 SM_FLAG_CLR(port, NTT);
/* Presumably returns the index of the largest element of a[0..n-1] —
 * body is almost entirely elided in this chunk; TODO confirm against the
 * full source. */
634 max_index(uint64_t *a, int n)
642 for (i = 1; i < n; ++i) {
653 * Function assigns port to aggregator.
655 * @param bond_dev_private Pointer to bond_dev_private structure.
656 * @param port_pos Port to assign.
659 selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
661 struct port *agg, *port;
662 uint16_t slaves_count, new_agg_id, i, j = 0;
/* NOTE(review): agg_count/agg_bandwidth have only 8 entries but are
 * indexed by aggregator_port_id, which is an ethdev port id and can be
 * >= 8 — possible out-of-bounds write; verify against full source. */
664 uint64_t agg_bandwidth[8] = {0};
665 uint64_t agg_count[8] = {0};
666 uint16_t default_slave = 0;
667 uint8_t mode_count_id, mode_band_id;
668 struct rte_eth_link link_info;
670 slaves = internals->active_slaves;
671 slaves_count = internals->active_slave_count;
672 port = &mode_8023ad_ports[slave_id];
674 /* Search for aggregator suitable for this port */
675 for (i = 0; i < slaves_count; ++i) {
676 agg = &mode_8023ad_ports[slaves[i]];
677 /* Skip ports that are not aggregators */
678 if (agg->aggregator_port_id != slaves[i])
/* Tally member count and cumulative link speed per aggregator, used by
 * the COUNT/BANDWIDTH selection policies below. */
681 agg_count[agg->aggregator_port_id] += 1;
682 rte_eth_link_get_nowait(slaves[i], &link_info);
683 agg_bandwidth[agg->aggregator_port_id] += link_info.link_speed;
685 /* Actors system ID is not checked since all slave device have the same
686 * ID (MAC address). */
687 if ((agg->actor.key == port->actor.key &&
688 agg->partner.system_priority == port->partner.system_priority &&
689 is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1
690 && (agg->partner.key == port->partner.key)) &&
691 is_zero_ether_addr(&port->partner.system) != 1 &&
693 rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
/* Pick the aggregator according to the configured policy
 * (case labels are elided in this chunk). */
701 switch (internals->mode4.agg_selection) {
703 mode_count_id = max_index(
704 (uint64_t *)agg_count, slaves_count);
705 new_agg_id = mode_count_id;
708 mode_band_id = max_index(
709 (uint64_t *)agg_bandwidth, slaves_count);
710 new_agg_id = mode_band_id;
713 if (default_slave == slaves_count)
714 new_agg_id = slave_id;
716 new_agg_id = slaves[default_slave];
719 if (default_slave == slaves_count)
720 new_agg_id = slave_id;
722 new_agg_id = slaves[default_slave];
726 if (new_agg_id != port->aggregator_port_id) {
727 port->aggregator_port_id = new_agg_id;
729 MODE4_DEBUG("-> SELECTED: ID=%3u\n"
730 "\t%s aggregator ID=%3u\n",
731 port->aggregator_port_id,
732 port->aggregator_port_id == slave_id ?
733 "aggregator not found, using default" : "aggregator found",
734 port->aggregator_port_id);
737 port->selected = SELECTED;
740 /* Function maps DPDK speed to bonding speed stored in key field */
/* NOTE(review): return-type line, `key_speed` declaration, break
 * statements and the default case are elided in this chunk. */
742 link_speed_key(uint16_t speed) {
746 case ETH_SPEED_NUM_NONE:
749 case ETH_SPEED_NUM_10M:
750 key_speed = BOND_LINK_SPEED_KEY_10M;
752 case ETH_SPEED_NUM_100M:
753 key_speed = BOND_LINK_SPEED_KEY_100M;
755 case ETH_SPEED_NUM_1G:
756 key_speed = BOND_LINK_SPEED_KEY_1000M;
758 case ETH_SPEED_NUM_10G:
759 key_speed = BOND_LINK_SPEED_KEY_10G;
761 case ETH_SPEED_NUM_20G:
762 key_speed = BOND_LINK_SPEED_KEY_20G;
764 case ETH_SPEED_NUM_40G:
765 key_speed = BOND_LINK_SPEED_KEY_40G;
/* Feed the rx state machine with the dequeued LACPDU (taking ownership of
 * and freeing the mbuf), or with NULL when no frame was received so timer
 * expiry can still be processed. */
776 rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
777 struct rte_mbuf *lacp_pkt) {
778 struct lacpdu_header *lacp;
780 if (lacp_pkt != NULL) {
781 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
782 RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
784 /* This is LACP frame so pass it to rx_machine */
785 rx_machine(internals, slave_id, &lacp->lacpdu);
786 rte_pktmbuf_free(lacp_pkt);
788 rx_machine(internals, slave_id, NULL);
/* Periodic EAL alarm callback: refreshes per-slave link/key state, runs
 * all four LACP state machines for every active slave, then re-arms
 * itself. NOTE(review): several lines (declarations of i/slave_id/key/port,
 * else branches, braces) are elided in this chunk. */
792 bond_mode_8023ad_periodic_cb(void *arg)
794 struct rte_eth_dev *bond_dev = arg;
795 struct bond_dev_private *internals = bond_dev->data->dev_private;
797 struct rte_eth_link link_info;
798 struct ether_addr slave_addr;
799 struct rte_mbuf *lacp_pkt = NULL;
804 /* Update link status on each port */
805 for (i = 0; i < internals->active_slave_count; i++) {
808 slave_id = internals->active_slaves[i];
809 rte_eth_link_get_nowait(slave_id, &link_info);
810 rte_eth_macaddr_get(slave_id, &slave_addr);
/* Encode speed (shifted past the duplex bit) and duplex into the key. */
812 if (link_info.link_status != 0) {
813 key = link_speed_key(link_info.link_speed) << 1;
814 if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
815 key |= BOND_LINK_FULL_DUPLEX_KEY;
819 port = &mode_8023ad_ports[slave_id];
821 key = rte_cpu_to_be_16(key);
822 if (key != port->actor.key) {
823 if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
824 set_warning_flags(port, WRN_NOT_LACP_CAPABLE);
826 port->actor.key = key;
827 SM_FLAG_SET(port, NTT);
830 if (!is_same_ether_addr(&port->actor.system, &slave_addr)) {
831 ether_addr_copy(&slave_addr, &port->actor.system);
832 if (port->aggregator_port_id == slave_id)
833 SM_FLAG_SET(port, NTT);
/* Second pass: run the state machines for each active slave. */
837 for (i = 0; i < internals->active_slave_count; i++) {
838 slave_id = internals->active_slaves[i];
839 port = &mode_8023ad_ports[slave_id];
841 if ((port->actor.key &
842 rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
844 SM_FLAG_SET(port, BEGIN);
846 /* LACP is disabled on half duplex or link is down */
847 if (SM_FLAG(port, LACP_ENABLED)) {
848 /* If port was enabled set it to BEGIN state */
849 SM_FLAG_CLR(port, LACP_ENABLED);
850 ACTOR_STATE_CLR(port, DISTRIBUTING);
851 ACTOR_STATE_CLR(port, COLLECTING);
854 /* Skip this port processing */
858 SM_FLAG_SET(port, LACP_ENABLED);
860 if (internals->mode4.dedicated_queues.enabled == 0) {
861 /* Find LACP packet to this port. Do not check subtype,
862 * it is done in function that queued packet
864 int retval = rte_ring_dequeue(port->rx_ring,
870 rx_machine_update(internals, slave_id, lacp_pkt);
/* Dedicated queues: poll the reserved RX queue directly. */
872 uint16_t rx_count = rte_eth_rx_burst(slave_id,
873 internals->mode4.dedicated_queues.rx_qid,
877 bond_mode_8023ad_handle_slow_pkt(internals,
880 rx_machine_update(internals, slave_id, NULL);
883 periodic_machine(internals, slave_id);
884 mux_machine(internals, slave_id);
885 tx_machine(internals, slave_id);
886 selection_logic(internals, slave_id);
888 SM_FLAG_CLR(port, BEGIN);
889 show_warnings(slave_id);
/* Re-arm this callback for the next update period. */
892 rte_eal_alarm_set(internals->mode4.update_timeout_us,
893 bond_mode_8023ad_periodic_cb, arg);
/* Initialize 802.3ad state for a newly activated slave: default
 * actor/partner params, BEGIN state, and per-slave mbuf pool plus rx/tx
 * rings used to exchange slow-protocol frames with the datapath. Panics on
 * allocation failure (resources cannot be rolled back). NOTE(review): the
 * slave_id parameter line and several braces are elided in this chunk. */
897 bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
900 struct bond_dev_private *internals = bond_dev->data->dev_private;
902 struct port *port = &mode_8023ad_ports[slave_id];
903 struct port_params initial = {
905 .system_priority = rte_cpu_to_be_16(0xFFFF),
906 .key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
907 .port_priority = rte_cpu_to_be_16(0x00FF),
911 char mem_name[RTE_ETH_NAME_MAX_LEN];
913 unsigned element_size;
914 uint32_t total_tx_desc;
915 struct bond_tx_queue *bd_tx_q;
918 /* Given slave must not be in active list */
919 RTE_ASSERT(find_slave_by_id(internals->active_slaves,
920 internals->active_slave_count, slave_id) == internals->active_slave_count);
921 RTE_SET_USED(internals); /* used only for assert when enabled */
923 memcpy(&port->actor, &initial, sizeof(struct port_params));
924 /* Standard requires that port ID must be greater than 0.
925 * Add 1 to get corresponding port_number */
926 port->actor.port_number = rte_cpu_to_be_16(slave_id + 1);
928 memcpy(&port->partner, &initial, sizeof(struct port_params));
931 port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
932 port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
933 port->sm_flags = SM_FLAGS_BEGIN;
935 /* use this port as aggregator */
936 port->aggregator_port_id = slave_id;
937 rte_eth_promiscuous_enable(slave_id);
939 timer_cancel(&port->warning_timer);
/* Resources from a previous activation are reused; skip reallocation. */
941 if (port->mbuf_pool != NULL)
944 RTE_ASSERT(port->rx_ring == NULL);
945 RTE_ASSERT(port->tx_ring == NULL);
947 socket_id = rte_eth_dev_socket_id(slave_id);
948 if (socket_id == (int)LCORE_ID_ANY)
949 socket_id = rte_socket_id();
951 element_size = sizeof(struct slow_protocol_frame) +
952 RTE_PKTMBUF_HEADROOM;
954 /* The size of the mempool should be at least:
955 * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
956 total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS;
957 for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) {
958 bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id];
959 total_tx_desc += bd_tx_q->nb_tx_desc;
962 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
963 port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
964 RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
965 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
966 0, element_size, socket_id);
968 /* Any memory allocation failure in initialization is critical because
969 * resources can't be freed, so reinitialization is impossible. */
970 if (port->mbuf_pool == NULL) {
971 rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
972 slave_id, mem_name, rte_strerror(rte_errno));
975 snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
976 port->rx_ring = rte_ring_create(mem_name,
977 rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);
979 if (port->rx_ring == NULL) {
980 rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
981 mem_name, rte_strerror(rte_errno));
984 /* TX ring is at least one pkt longer to make room for marker packet. */
985 snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
986 port->tx_ring = rte_ring_create(mem_name,
987 rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);
989 if (port->tx_ring == NULL) {
990 rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
991 mem_name, rte_strerror(rte_errno));
/* Tear down LACP participation for a slave being deactivated: reset
 * actor/partner state to defaults and drain any pending mbufs from the
 * slow-protocol rx/tx rings. */
996 bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused,
1000 struct port *port = NULL;
1001 uint8_t old_partner_state;
1003 port = &mode_8023ad_ports[slave_id];
1005 ACTOR_STATE_CLR(port, AGGREGATION);
1006 port->selected = UNSELECTED;
1008 old_partner_state = port->partner_state;
1009 record_default(port);
1011 /* If partner timeout state changes then disable timer */
/* NOTE(review): the negation below cancels the timer when the timeout
 * state did NOT change, which contradicts the comment above — verify
 * intended polarity against the full source. */
1012 if (!((old_partner_state ^ port->partner_state) &
1013 STATE_LACP_SHORT_TIMEOUT))
1014 timer_cancel(&port->current_while_timer);
1016 PARTNER_STATE_CLR(port, AGGREGATION);
1017 ACTOR_STATE_CLR(port, EXPIRED);
1019 /* flush rx/tx rings */
1020 while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
1021 rte_pktmbuf_free((struct rte_mbuf *)pkt);
1023 while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)
1024 rte_pktmbuf_free((struct rte_mbuf *)pkt);
/* Propagate slave MAC address changes into the 802.3ad state: the periodic
 * callback is stopped, each changed actor.system is refreshed, and all
 * ports attached to a changed aggregator get NTT set so updated LACPDUs go
 * out. Restarted only if the device was running. */
1029 bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
1031 struct bond_dev_private *internals = bond_dev->data->dev_private;
1032 struct ether_addr slave_addr;
1033 struct port *slave, *agg_slave;
1034 uint16_t slave_id, i, j;
1036 bond_mode_8023ad_stop(bond_dev);
1038 for (i = 0; i < internals->active_slave_count; i++) {
1039 slave_id = internals->active_slaves[i];
1040 slave = &mode_8023ad_ports[slave_id];
1041 rte_eth_macaddr_get(slave_id, &slave_addr);
/* Skip slaves whose recorded system address is already current. */
1043 if (is_same_ether_addr(&slave_addr, &slave->actor.system))
1046 ether_addr_copy(&slave_addr, &slave->actor.system);
1047 /* Do nothing if this port is not an aggregator. In other case
1048 * Set NTT flag on every port that use this aggregator. */
1049 if (slave->aggregator_port_id != slave_id)
1052 for (j = 0; j < internals->active_slave_count; j++) {
1053 agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
1054 if (agg_slave->aggregator_port_id == slave_id)
1055 SM_FLAG_SET(agg_slave, NTT);
1059 if (bond_dev->data->dev_started)
1060 bond_mode_8023ad_start(bond_dev);
/* Export the current mode-4 configuration, converting internally stored
 * TSC-tick timeouts back to milliseconds. */
1064 bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
1065 struct rte_eth_bond_8023ad_conf *conf)
1067 struct bond_dev_private *internals = dev->data->dev_private;
1068 struct mode8023ad_private *mode4 = &internals->mode4;
1069 uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
1071 conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks;
1072 conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks;
1073 conf->short_timeout_ms = mode4->short_timeout / ms_ticks;
1074 conf->long_timeout_ms = mode4->long_timeout / ms_ticks;
1075 conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks;
1076 conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
1077 conf->update_timeout_ms = mode4->update_timeout_us / 1000;
1078 conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks;
1079 conf->slowrx_cb = mode4->slowrx_cb;
1080 conf->agg_selection = mode4->agg_selection;
/* Fill `conf` with the compile-time default mode-4 parameters (all in
 * milliseconds), no slow-rx callback, and stable aggregator selection. */
1084 bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
1086 conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
1087 conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
1088 conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
1089 conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
1090 conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
1091 conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
1092 conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
1093 conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
1094 conf->slowrx_cb = NULL;
1095 conf->agg_selection = AGG_STABLE;
/* Store a user-supplied configuration into the private mode-4 state,
 * converting millisecond values to TSC ticks, and disable dedicated
 * queues (queue ids reset to the UINT16_MAX sentinel). */
1099 bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
1100 struct rte_eth_bond_8023ad_conf *conf)
1102 uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
1104 mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
1105 mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
1106 mode4->short_timeout = conf->short_timeout_ms * ms_ticks;
1107 mode4->long_timeout = conf->long_timeout_ms * ms_ticks;
1108 mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks;
1109 mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
1110 mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
1111 mode4->update_timeout_us = conf->update_timeout_ms * 1000;
1113 mode4->dedicated_queues.enabled = 0;
1114 mode4->dedicated_queues.rx_qid = UINT16_MAX;
1115 mode4->dedicated_queues.tx_qid = UINT16_MAX;
/* Apply a mode-4 configuration (falling back to defaults when `conf` is
 * NULL — that branch is partially elided here), restarting the periodic
 * callback if the device was running. */
1119 bond_mode_8023ad_setup(struct rte_eth_dev *dev,
1120 struct rte_eth_bond_8023ad_conf *conf)
1122 struct rte_eth_bond_8023ad_conf def_conf;
1123 struct bond_dev_private *internals = dev->data->dev_private;
1124 struct mode8023ad_private *mode4 = &internals->mode4;
1128 bond_mode_8023ad_conf_get_default(conf);
1131 bond_mode_8023ad_stop(dev);
1132 bond_mode_8023ad_conf_assign(mode4, conf);
1133 mode4->slowrx_cb = conf->slowrx_cb;
/* NOTE(review): agg_selection is forced to AGG_STABLE here, silently
 * ignoring conf->agg_selection — confirm this is intentional. */
1134 mode4->agg_selection = AGG_STABLE;
1136 if (dev->data->dev_started)
1137 bond_mode_8023ad_start(dev);
1141 bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
1143 struct bond_dev_private *internals = bond_dev->data->dev_private;
1146 for (i = 0; i < internals->active_slave_count; i++)
1147 bond_mode_8023ad_activate_slave(bond_dev, i);
/* Arm the first periodic EAL alarm: the external callback when a slow-rx
 * callback is registered, the internal state-machine callback otherwise.
 * NOTE(review): the second argument line of the first alarm call is
 * elided in this chunk. */
1153 bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
1155 struct bond_dev_private *internals = bond_dev->data->dev_private;
1156 struct mode8023ad_private *mode4 = &internals->mode4;
1157 static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;
1159 if (mode4->slowrx_cb)
1160 return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
1163 return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev);
/*
 * Cancel the mode-4 periodic alarm — the extended callback if an
 * external slow-rx callback is registered, the internal one otherwise.
 * Mirror image of bond_mode_8023ad_start().
 */
1167 bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
1169 struct bond_dev_private *internals = bond_dev->data->dev_private;
1170 struct mode8023ad_private *mode4 = &internals->mode4;
1172 if (mode4->slowrx_cb) {
1173 rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb,
1177 rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
/*
 * Dispatch one slow-protocol frame (LACP or marker) received on slave
 * @slave_id.  Marker-INFO frames are answered in place: the mbuf is
 * reused as the response (only the source MAC is rewritten) and sent
 * through the SW tx_ring, or directly on the slave's dedicated slow TX
 * queue when dedicated queues are enabled.  LACPDUs go to the rx_ring,
 * or straight into the RX state machine with dedicated queues.  On any
 * failure a warning flag is recorded on the port and the mbuf is freed
 * (error/goto paths not fully visible in this view).
 */
1181 bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
1182 uint16_t slave_id, struct rte_mbuf *pkt)
1184 struct mode8023ad_private *mode4 = &internals->mode4;
1185 struct port *port = &mode_8023ad_ports[slave_id];
1186 struct marker_header *m_hdr;
1187 uint64_t marker_timer, old_marker_timer;
1189 uint8_t wrn, subtype;
1190 /* If packet is a marker, we send response now by reusing given packet
1191 * and update only source MAC, destination MAC is multicast so don't
1192 * update it. Other frames will be handled later by state machines */
1193 subtype = rte_pktmbuf_mtod(pkt,
1194 struct slow_protocol_frame *)->slow_protocol.subtype;
1196 if (subtype == SLOW_SUBTYPE_MARKER) {
1197 m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *);
/* NOTE(review): `likely` on the error branch looks inverted — a valid
 * INFO marker should be the common case, so != INFO ought to be
 * `unlikely`.  Confirm and fix upstream. */
1199 if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) {
1200 wrn = WRN_UNKNOWN_MARKER_TYPE;
/* CAS loop: rate-limit marker responses even under concurrent RX. */
1204 /* Setup marker timer. Do it in loop in case concurrent access. */
1206 old_marker_timer = port->rx_marker_timer;
1207 if (!timer_is_expired(&old_marker_timer)) {
/* Previous marker response is still "hot" — markers arriving faster
 * than rx_marker_timeout are flagged, not answered. */
1208 wrn = WRN_RX_MARKER_TO_FAST;
1212 timer_set(&marker_timer, mode4->rx_marker_timeout);
1213 retval = rte_atomic64_cmpset(&port->rx_marker_timer,
1214 old_marker_timer, marker_timer);
1215 } while (unlikely(retval == 0));
/* Turn the request into a response in place; source MAC becomes this
 * slave's own address. */
1217 m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
1218 rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);
1220 if (internals->mode4.dedicated_queues.enabled == 0) {
1221 int retval = rte_ring_enqueue(port->tx_ring, pkt);
/* Enqueue failed: drop the response and allow the next marker by
 * clearing the rate-limit timer. */
1224 port->rx_marker_timer = 0;
1225 wrn = WRN_TX_QUEUE_FULL;
1229 /* Send packet directly to the slow queue */
1230 uint16_t tx_count = rte_eth_tx_burst(slave_id,
1231 internals->mode4.dedicated_queues.tx_qid,
1233 if (tx_count != 1) {
1235 port->rx_marker_timer = 0;
1236 wrn = WRN_TX_QUEUE_FULL;
1240 } else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
1241 if (internals->mode4.dedicated_queues.enabled == 0) {
1242 int retval = rte_ring_enqueue(port->rx_ring, pkt);
1244 /* If RX ring full free lacpdu message and drop packet */
1245 wrn = WRN_RX_QUEUE_FULL;
/* Dedicated queues: feed the LACPDU straight to the RX state machine. */
1249 rx_machine_update(internals, slave_id, pkt);
/* Neither marker nor LACP: unknown slow-protocol subtype. */
1251 wrn = WRN_UNKNOWN_SLOW_TYPE;
/* Shared error exit: record the warning and free the mbuf. */
1258 set_warning_flags(port, wrn);
1259 rte_pktmbuf_free(pkt);
/*
 * Public API: copy the bond's current 802.3ad configuration into *conf
 * after validating that port_id refers to a bonding device.
 */
1263 rte_eth_bond_8023ad_conf_get(uint16_t port_id,
1264 struct rte_eth_bond_8023ad_conf *conf)
1266 struct rte_eth_dev *bond_dev;
/* Reject ports that are not bonded devices (error return elided). */
1268 if (valid_bonded_port_id(port_id) != 0)
1274 bond_dev = &rte_eth_devices[port_id];
1275 bond_mode_8023ad_conf_get(bond_dev, conf);
/*
 * Public API: select the aggregator-selection policy for a mode-4 bond.
 * Only the three known policies (stable / bandwidth / count) are
 * accepted.
 */
1280 rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
1281 enum rte_bond_8023ad_agg_selection agg_selection)
1283 struct rte_eth_dev *bond_dev;
1284 struct bond_dev_private *internals;
1285 struct mode8023ad_private *mode4;
/* NOTE(review): dev_private is dereferenced before port_id is
 * validated below — an invalid port_id would be read first.  Consider
 * validating before touching rte_eth_devices[port_id]. */
1287 bond_dev = &rte_eth_devices[port_id];
1288 internals = bond_dev->data->dev_private;
1290 if (valid_bonded_port_id(port_id) != 0)
/* Policy only makes sense in 802.3ad (mode 4). */
1292 if (internals->mode != 4)
1295 mode4 = &internals->mode4;
/* Apply only recognised policy values; others are ignored here. */
1296 if (agg_selection == AGG_COUNT || agg_selection == AGG_BANDWIDTH
1297 || agg_selection == AGG_STABLE)
1298 mode4->agg_selection = agg_selection;
/*
 * Public API: return the bond's current aggregator-selection policy,
 * or an error for a non-bonded port / a bond not in mode 4.
 */
1302 int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id)
1304 struct rte_eth_dev *bond_dev;
1305 struct bond_dev_private *internals;
1306 struct mode8023ad_private *mode4;
/* NOTE(review): same ordering issue as the setter — dev_private is
 * dereferenced before port_id validation. */
1308 bond_dev = &rte_eth_devices[port_id];
1309 internals = bond_dev->data->dev_private;
1311 if (valid_bonded_port_id(port_id) != 0)
1313 if (internals->mode != 4)
1315 mode4 = &internals->mode4;
1317 return mode4->agg_selection;
/*
 * Validate a user-provided mode-4 configuration before applying it:
 * the port must be a bonding port, every period/timeout must be
 * non-zero, and the fast/short values must be strictly smaller than
 * their slow/long counterparts.
 * NOTE(review): no NULL-conf guard is visible before the dereference
 * below — confirm conf is checked (or guaranteed non-NULL) upstream.
 */
1323 bond_8023ad_setup_validate(uint16_t port_id,
1324 struct rte_eth_bond_8023ad_conf *conf)
1326 if (valid_bonded_port_id(port_id) != 0)
1330 /* Basic sanity check */
1331 if (conf->slow_periodic_ms == 0 ||
1332 conf->fast_periodic_ms >= conf->slow_periodic_ms ||
1333 conf->long_timeout_ms == 0 ||
1334 conf->short_timeout_ms >= conf->long_timeout_ms ||
1335 conf->aggregate_wait_timeout_ms == 0 ||
1336 conf->tx_period_ms == 0 ||
1337 conf->rx_marker_period_ms == 0 ||
1338 conf->update_timeout_ms == 0) {
1339 RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n");
/*
 * Public API: validate conf for port_id and, on success, apply it via
 * bond_mode_8023ad_setup().
 */
1349 rte_eth_bond_8023ad_setup(uint16_t port_id,
1350 struct rte_eth_bond_8023ad_conf *conf)
1352 struct rte_eth_dev *bond_dev;
/* Reject invalid ports or out-of-range timeout combinations first. */
1355 err = bond_8023ad_setup_validate(port_id, conf);
1359 bond_dev = &rte_eth_devices[port_id];
1360 bond_mode_8023ad_setup(bond_dev, conf);
/*
 * Public API: snapshot the LACP state of one active slave — selection
 * status, actor/partner parameters and state bytes, and the aggregator
 * port id — into *info.  Fails for a NULL info pointer, a port that is
 * not an 802.3ad bond, or a slave_id that is not currently active
 * (error returns elided in this view).
 */
1370 rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
1371 struct rte_eth_bond_8023ad_slave_info *info)
1373 struct rte_eth_dev *bond_dev;
1374 struct bond_dev_private *internals;
1377 if (info == NULL || valid_bonded_port_id(port_id) != 0 ||
1378 rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1381 bond_dev = &rte_eth_devices[port_id];
1383 internals = bond_dev->data->dev_private;
/* find_slave_by_id() returns active_slave_count when not found. */
1384 if (find_slave_by_id(internals->active_slaves,
1385 internals->active_slave_count, slave_id) ==
1386 internals->active_slave_count)
1389 port = &mode_8023ad_ports[slave_id];
1390 info->selected = port->selected;
1392 info->actor_state = port->actor_state;
1393 rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));
1395 info->partner_state = port->partner_state;
1396 rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));
1398 info->agg_port_id = port->aggregator_port_id;
/*
 * Common guard for the ext_* external-state API: port_id must be an
 * 802.3ad bond that is started, slave_id must be a currently-active
 * slave, and an external slow-rx callback must be registered (i.e. the
 * bond runs in "external state machine" mode).  Error returns are not
 * visible in this view — presumably negative errno values; confirm.
 */
1403 bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id)
1405 struct rte_eth_dev *bond_dev;
1406 struct bond_dev_private *internals;
1407 struct mode8023ad_private *mode4;
1409 if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1412 bond_dev = &rte_eth_devices[port_id];
1414 if (!bond_dev->data->dev_started)
1417 internals = bond_dev->data->dev_private;
/* find_slave_by_id() returns active_slave_count when not found. */
1418 if (find_slave_by_id(internals->active_slaves,
1419 internals->active_slave_count, slave_id) ==
1420 internals->active_slave_count)
1423 mode4 = &internals->mode4;
/* ext_* API is only valid when an external callback drives mode 4. */
1424 if (mode4->slowrx_cb == NULL)
/*
 * Public API (ext mode): set or clear the COLLECTING bit in the
 * slave's actor state, after the common ext_* validation.
 */
1431 rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
1437 res = bond_8023ad_ext_validate(port_id, slave_id);
1441 port = &mode_8023ad_ports[slave_id];
1444 ACTOR_STATE_SET(port, COLLECTING);
1446 ACTOR_STATE_CLR(port, COLLECTING);
/*
 * Public API (ext mode): set or clear the DISTRIBUTING bit in the
 * slave's actor state, after the common ext_* validation.
 */
1452 rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
1458 res = bond_8023ad_ext_validate(port_id, slave_id);
1462 port = &mode_8023ad_ports[slave_id];
1465 ACTOR_STATE_SET(port, DISTRIBUTING);
1467 ACTOR_STATE_CLR(port, DISTRIBUTING);
/*
 * Public API (ext mode): read back the DISTRIBUTING bit of the slave's
 * actor state (non-zero if set), after the common ext_* validation.
 */
1473 rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id)
1478 err = bond_8023ad_ext_validate(port_id, slave_id);
1482 port = &mode_8023ad_ports[slave_id];
1483 return ACTOR_STATE(port, DISTRIBUTING);
/*
 * Public API (ext mode): read back the COLLECTING bit of the slave's
 * actor state (non-zero if set), after the common ext_* validation.
 */
1487 rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id)
1492 err = bond_8023ad_ext_validate(port_id, slave_id);
1496 port = &mode_8023ad_ports[slave_id];
1497 return ACTOR_STATE(port, COLLECTING);
/*
 * Public API (ext mode): enqueue a caller-built LACPDU on the slave's
 * slow-protocol TX ring.  The mbuf must hold at least a full
 * lacpdu_header and carry the LACP slow-protocol subtype; other frames
 * are rejected.  On successful enqueue the ring takes ownership of the
 * mbuf.
 */
1501 rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
1502 struct rte_mbuf *lacp_pkt)
1507 res = bond_8023ad_ext_validate(port_id, slave_id);
1511 port = &mode_8023ad_ports[slave_id];
/* Reject runt frames that cannot hold a complete LACPDU header. */
1513 if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
1516 struct lacpdu_header *lacp;
1518 /* only enqueue LACPDUs */
1519 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
1520 if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP)
1523 MODE4_DEBUG("sending LACP frame\n");
1525 return rte_ring_enqueue(port->tx_ring, lacp_pkt);
/*
 * Periodic alarm callback used when an external slow-rx callback is
 * registered: for each active slave, dequeue one pending LACPDU (if
 * any) from its rx_ring and hand it to the user callback, which takes
 * ownership of the mbuf.  Finally re-arms itself with the configured
 * update timeout, so the alarm keeps firing until cancelled by
 * bond_mode_8023ad_stop().
 */
1529 bond_mode_8023ad_ext_periodic_cb(void *arg)
1531 struct rte_eth_dev *bond_dev = arg;
1532 struct bond_dev_private *internals = bond_dev->data->dev_private;
1533 struct mode8023ad_private *mode4 = &internals->mode4;
1536 uint16_t i, slave_id;
1538 for (i = 0; i < internals->active_slave_count; i++) {
1539 slave_id = internals->active_slaves[i];
1540 port = &mode_8023ad_ports[slave_id];
1542 if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
1543 struct rte_mbuf *lacp_pkt = pkt;
1544 struct lacpdu_header *lacp;
1546 lacp = rte_pktmbuf_mtod(lacp_pkt,
1547 struct lacpdu_header *);
/* Only LACPDUs are ever enqueued on rx_ring in ext mode; anything
 * else indicates internal corruption, so hard-assert. */
1548 RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
1550 /* This is LACP frame so pass it to rx callback.
1551 * Callback is responsible for freeing mbuf.
1553 mode4->slowrx_cb(slave_id, lacp_pkt);
/* Self re-arm: period is update_timeout_us microseconds. */
1557 rte_eal_alarm_set(internals->mode4.update_timeout_us,
1558 bond_mode_8023ad_ext_periodic_cb, arg);
/*
 * Public API: enable dedicated (hardware-filtered) RX/TX queues for
 * slow-protocol frames.  Requires a bonded device that is stopped and
 * whose slaves support slow-packet HW filtering; the datapath is then
 * re-plumbed via bond_ethdev_mode_set().
 */
1562 rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port)
1565 struct rte_eth_dev *dev = &rte_eth_devices[port];
1566 struct bond_dev_private *internals = (struct bond_dev_private *)
1567 dev->data->dev_private;
1569 if (check_for_bonded_ethdev(dev) != 0)
/* All slaves must be able to steer slow frames to a separate queue. */
1572 if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0)
1575 /* Device must be stopped to set up slow queue */
1576 if (dev->data->dev_started)
1579 internals->mode4.dedicated_queues.enabled = 1;
/* Re-install RX/TX burst handlers for the new queue layout. */
1581 bond_ethdev_mode_set(dev, internals->mode);
/*
 * Public API: disable the dedicated slow-protocol queues and fall back
 * to software rings.  Like enable, this requires a bonded device that
 * is stopped, and re-plumbs the datapath via bond_ethdev_mode_set().
 */
1586 rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port)
1589 struct rte_eth_dev *dev = &rte_eth_devices[port];
1590 struct bond_dev_private *internals = (struct bond_dev_private *)
1591 dev->data->dev_private;
1593 if (check_for_bonded_ethdev(dev) != 0)
1596 /* Device must be stopped to set up slow queue */
1597 if (dev->data->dev_started)
1600 internals->mode4.dedicated_queues.enabled = 0;
/* Re-install RX/TX burst handlers for the software-ring path. */
1602 bond_ethdev_mode_set(dev, internals->mode)