1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
10 #include <rte_malloc.h>
11 #include <rte_errno.h>
12 #include <rte_cycles.h>
13 #include <rte_compat.h>
15 #include "rte_eth_bond_private.h"
17 static void bond_mode_8023ad_ext_periodic_cb(void *arg);
/* Debug-only helpers, compiled in only when RTE_LIBRTE_BOND_DEBUG_8023AD is
 * defined. MODE4_DEBUG() prefixes each message with the milliseconds elapsed
 * since start_time, the slave port id (taken from the enclosing scope) and
 * the calling function name. */
18 #ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
19 #define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
20 bond_dbg_get_time_diff_ms(), slave_id, \
21 __func__, ##__VA_ARGS__)
/* TSC value captured at start, used as the time base for debug timestamps. */
23 static uint64_t start_time;
/* Milliseconds elapsed since start_time, derived from the TSC frequency. */
26 bond_dbg_get_time_diff_ms(void)
34 return ((now - start_time) * 1000) / rte_get_tsc_hz();
/* Pretty-print every field of a received/sent LACPDU at DEBUG level. */
38 bond_print_lacp(struct lacpdu *l)
42 char a_state[256] = { 0 };
43 char p_state[256] = { 0 };
/* Labels for the 8 actor/partner state bits, in bit order (bit 0 = ACT). */
45 static const char * const state_labels[] = {
46 "ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
/* Format actor and partner system MAC addresses as XX:XX:XX:XX:XX:XX. */
54 addr = l->actor.port_params.system.addr_bytes;
55 snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
56 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
58 addr = l->partner.port_params.system.addr_bytes;
59 snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
60 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
/* Build space-separated strings of the state-bit labels that are set. */
62 for (i = 0; i < 8; i++) {
63 if ((l->actor.state >> i) & 1) {
64 a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ",
68 if ((l->partner.state >> i) & 1) {
69 p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ",
/* Strip the trailing space left by the label loop, if any. */
74 if (a_len && a_state[a_len-1] == ' ')
75 a_state[a_len-1] = '\0';
77 if (p_len && p_state[p_len-1] == ' ')
78 p_state[p_len-1] = '\0';
80 RTE_LOG(DEBUG, PMD, "LACP: {\n"\
83 " actor={ tlv=%02X, len=%02X\n"\
84 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
87 " partner={ tlv=%02X, len=%02X\n"\
88 " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
91 " collector={info=%02X, length=%02X, max_delay=%04X\n, " \
92 "type_term=%02X, terminator_length = %02X}\n",\
95 l->actor.tlv_type_info,\
96 l->actor.info_length,\
97 l->actor.port_params.system_priority,\
99 l->actor.port_params.key,\
100 l->actor.port_params.port_priority,\
101 l->actor.port_params.port_number,\
103 l->partner.tlv_type_info,\
104 l->partner.info_length,\
105 l->partner.port_params.system_priority,\
107 l->partner.port_params.key,\
108 l->partner.port_params.port_priority,\
109 l->partner.port_params.port_number,\
111 l->tlv_type_collector_info,\
112 l->collector_info_length,\
113 l->collector_max_delay,\
114 l->tlv_type_terminator,\
115 l->terminator_length);
118 #define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
/* Non-debug builds: both macros compile to no-ops. */
120 #define BOND_PRINT_LACP(lacpdu) do { } while (0)
121 #define MODE4_DEBUG(fmt, ...) do { } while (0)
/* IEEE 802.3 Slow Protocols multicast address (01:80:C2:00:00:02), used as
 * destination MAC for every LACPDU this driver transmits. */
124 static const struct ether_addr lacp_mac_addr = {
125 .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
/* Per-slave 802.3ad state machine context, indexed by DPDK port id. */
128 struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
/* Timer helpers: a "timer" is a uint64_t holding an absolute TSC deadline.
 * NOTE(review): timer_is_stopped()'s body is not visible here — presumably a
 * zeroed timer means "stopped"; confirm against timer_cancel()'s body. */
131 timer_cancel(uint64_t *timer)
/* Arm the timer: deadline = current TSC + timeout (timeout in TSC cycles). */
137 timer_set(uint64_t *timer, uint64_t timeout)
139 *timer = rte_rdtsc() + timeout;
142 /* Forces given timer to be in expired state. */
144 timer_force_expired(uint64_t *timer)
146 *timer = rte_rdtsc();
150 timer_is_stopped(uint64_t *timer)
/* Expired when the stored deadline is strictly before the current TSC. */
156 timer_is_expired(uint64_t *timer)
158 return *timer < rte_rdtsc();
161 /* Timer is in running state if it is not stopped nor expired */
163 timer_is_running(uint64_t *timer)
165 return !timer_is_stopped(timer) && !timer_is_expired(timer);
/* Atomically OR @flags into port->warnings_to_show.
 * Uses a CAS retry loop so that concurrent callers (e.g. datapath and the
 * periodic callback) cannot lose each other's warning bits. */
169 set_warning_flags(struct port *port, uint16_t flags)
173 uint16_t new_flag = 0;
176 old = port->warnings_to_show;
177 new_flag = old | flags;
/* Retry until the compare-and-set succeeds (returns non-zero). */
178 retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag);
179 } while (unlikely(retval == 0));
/* Log any warnings accumulated for @slave_id, then clear them.
 * Output is rate limited by warning_timer (BOND_8023AD_WARNINGS_PERIOD_MS). */
183 show_warnings(uint16_t slave_id)
185 struct port *port = &mode_8023ad_ports[slave_id];
/* Atomically fetch-and-clear the pending warning bits via CAS loop. */
189 warnings = port->warnings_to_show;
190 } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0);
/* Rate limit: stay silent until the previous warning window has elapsed. */
195 if (!timer_is_expired(&port->warning_timer))
/* Re-arm the window (convert ms to TSC cycles). */
199 timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS *
200 rte_get_tsc_hz() / 1000);
202 if (warnings & WRN_RX_QUEUE_FULL) {
204 "Slave %u: failed to enqueue LACP packet into RX ring.\n"
205 "Receive and transmit functions must be invoked on bonded\n"
206 "interface at least 10 times per second or LACP will not\n"
207 "work correctly\n", slave_id);
210 if (warnings & WRN_TX_QUEUE_FULL) {
212 "Slave %u: failed to enqueue LACP packet into TX ring.\n"
213 "Receive and transmit functions must be invoked on bonded\n"
214 "interface at least 10 times per second or LACP will not\n"
215 "work correctly\n", slave_id);
218 if (warnings & WRN_RX_MARKER_TO_FAST)
219 RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id);
221 if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
223 "Slave %u: ignoring unknown slow protocol frame type", slave_id);
226 if (warnings & WRN_UNKNOWN_MARKER_TYPE)
227 RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id);
229 if (warnings & WRN_NOT_LACP_CAPABLE)
230 MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
234 record_default(struct port *port)
236 /* Record default parameters for partner. Partner admin parameters
237 * are not implemented so set them to arbitrary default (last known) and
238 * mark actor that partner is in defaulted state. */
239 port->partner_state = STATE_LACP_ACTIVE;
240 ACTOR_STATE_SET(port, DEFAULTED);
243 /** Function handles rx state machine.
245 * This function implements Receive State Machine from point 5.4.12 in
246 * 802.1AX documentation. It should be called periodically.
248 * @param lacpdu LACPDU received.
249 * @param port Port on which LACPDU was received.
252 rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
255 struct port *agg, *port = &mode_8023ad_ports[slave_id];
/* BEGIN: (re)initialize the receive machine to its INITIALIZE state. */
258 if (SM_FLAG(port, BEGIN)) {
259 /* Initialize stuff */
260 MODE4_DEBUG("-> INITIALIZE\n");
261 SM_FLAG_CLR(port, MOVED);
262 port->selected = UNSELECTED;
264 record_default(port);
266 ACTOR_STATE_CLR(port, EXPIRED);
267 timer_cancel(&port->current_while_timer);
269 /* DISABLED: On initialization partner is out of sync */
270 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
272 /* LACP DISABLED stuff if LACP not enabled on this port */
273 if (!SM_FLAG(port, LACP_ENABLED))
274 PARTNER_STATE_CLR(port, AGGREGATION);
276 PARTNER_STATE_SET(port, AGGREGATION);
/* LACP disabled on this slave: fall back to defaulted partner info. */
279 if (!SM_FLAG(port, LACP_ENABLED)) {
280 /* Update parameters only if state changed */
281 if (!timer_is_stopped(&port->current_while_timer)) {
282 port->selected = UNSELECTED;
283 record_default(port);
284 PARTNER_STATE_CLR(port, AGGREGATION);
285 ACTOR_STATE_CLR(port, EXPIRED);
286 timer_cancel(&port->current_while_timer);
/* A LACPDU was received: enter/refresh the CURRENT state. */
292 MODE4_DEBUG("LACP -> CURRENT\n");
293 BOND_PRINT_LACP(lacp);
294 /* Update selected flag. If partner parameters are defaulted assume they
295 * match. If not defaulted compare LACP actor with port's partner
297 if (!ACTOR_STATE(port, DEFAULTED) &&
298 (ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
299 || memcmp(&port->partner, &lacp->actor.port_params,
300 sizeof(port->partner)) != 0)) {
301 MODE4_DEBUG("selected <- UNSELECTED\n");
302 port->selected = UNSELECTED;
305 /* Record this PDU actor params as partner params */
306 memcpy(&port->partner, &lacp->actor.port_params,
307 sizeof(struct port_params));
308 port->partner_state = lacp->actor.state;
310 /* Partner parameters are not defaulted any more */
311 ACTOR_STATE_CLR(port, DEFAULTED);
313 /* If LACP partner params match this port actor params */
314 agg = &mode_8023ad_ports[port->aggregator_port_id];
315 bool match = port->actor.system_priority ==
316 lacp->partner.port_params.system_priority &&
317 is_same_ether_addr(&agg->actor.system,
318 &lacp->partner.port_params.system) &&
319 port->actor.port_priority ==
320 lacp->partner.port_params.port_priority &&
321 port->actor.port_number ==
322 lacp->partner.port_params.port_number;
324 /* Update NTT if partners information are outdated (xored and masked
326 uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
327 STATE_SYNCHRONIZATION | STATE_AGGREGATION;
329 if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
331 SM_FLAG_SET(port, NTT);
334 /* If LACP partner params match this port actor params */
335 if (match == true && ACTOR_STATE(port, AGGREGATION) ==
336 PARTNER_STATE(port, AGGREGATION))
337 PARTNER_STATE_SET(port, SYNCHRONIZATION);
338 else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
340 PARTNER_STATE_SET(port, SYNCHRONIZATION);
342 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
/* Choose current_while timeout length from our own timeout mode. */
344 if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
345 timeout = internals->mode4.short_timeout;
347 timeout = internals->mode4.long_timeout;
349 timer_set(&port->current_while_timer, timeout);
350 ACTOR_STATE_CLR(port, EXPIRED);
351 return; /* No state change */
354 /* If CURRENT state timer is not running (stopped or expired)
355 * transit to EXPIRED state from DISABLED or CURRENT */
356 if (!timer_is_running(&port->current_while_timer)) {
357 ACTOR_STATE_SET(port, EXPIRED);
358 PARTNER_STATE_CLR(port, SYNCHRONIZATION);
359 PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
360 timer_set(&port->current_while_timer, internals->mode4.short_timeout);
365 * Function handles periodic tx state machine.
367 * Function implements Periodic Transmission state machine from point 5.4.13
368 * in 802.1AX documentation. It should be called periodically.
370 * @param port Port to handle state machine.
373 periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
375 struct port *port = &mode_8023ad_ports[slave_id];
376 /* Calculate if either site is LACP enabled */
378 uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
379 PARTNER_STATE(port, LACP_ACTIVE);
381 uint8_t is_partner_fast, was_partner_fast;
382 /* No periodic is on BEGIN, LACP DISABLE or when both sides are passive */
383 if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
384 timer_cancel(&port->periodic_timer);
/* Force the tx machine to be ready to send as soon as periodic resumes. */
385 timer_force_expired(&port->tx_machine_timer);
386 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
388 MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
389 SM_FLAG(port, BEGIN) ? "begind " : "",
390 SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
391 active ? "LACP active " : "LACP pasive ");
395 is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
396 was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);
398 /* If periodic timer is not started, transit from NO PERIODIC to FAST/SLOW.
399 * Other case: check if timer expire or partners settings changed. */
400 if (!timer_is_stopped(&port->periodic_timer)) {
401 if (timer_is_expired(&port->periodic_timer)) {
402 SM_FLAG_SET(port, NTT);
403 } else if (is_partner_fast != was_partner_fast) {
404 /* Partners timeout was slow and now it is fast -> send LACP.
405 * In other case (was fast and now it is slow) just switch
406 * timeout to slow without forcing send of LACP (because standard
409 SM_FLAG_SET(port, NTT);
411 return; /* Nothing changed */
414 /* Handle state transition to FAST/SLOW LACP timeout */
415 if (is_partner_fast) {
416 timeout = internals->mode4.fast_periodic_timeout;
417 SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
419 timeout = internals->mode4.slow_periodic_timeout;
420 SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
423 timer_set(&port->periodic_timer, timeout);
427 * Function handles mux state machine.
429 * Function implements Mux Machine from point 5.4.15 in 802.1AX documentation.
430 * It should be called periodically.
432 * @param port Port to handle state machine.
435 mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
437 struct port *port = &mode_8023ad_ports[slave_id];
439 /* Save current state for later use */
440 const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
443 /* Enter DETACHED state on BEGIN condition or from any other state if
444 * port was unselected */
445 if (SM_FLAG(port, BEGIN) ||
446 port->selected == UNSELECTED || (port->selected == STANDBY &&
447 (port->actor_state & state_mask) != 0)) {
448 /* detach mux from aggregator */
449 port->actor_state &= ~state_mask;
450 /* Set ntt to true if BEGIN condition or transition from any other state
451 * which is indicated that wait_while_timer was started */
452 if (SM_FLAG(port, BEGIN) ||
453 !timer_is_stopped(&port->wait_while_timer)) {
454 SM_FLAG_SET(port, NTT);
455 MODE4_DEBUG("-> DETACHED\n");
457 timer_cancel(&port->wait_while_timer);
/* DETACHED -> WAITING: start the aggregate-wait delay once selected. */
460 if (timer_is_stopped(&port->wait_while_timer)) {
461 if (port->selected == SELECTED || port->selected == STANDBY) {
462 timer_set(&port->wait_while_timer,
463 internals->mode4.aggregate_wait_timeout);
465 MODE4_DEBUG("DETACHED -> WAITING\n");
467 /* Waiting state entered */
471 /* Transit next state if port is ready */
472 if (!timer_is_expired(&port->wait_while_timer))
475 if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
476 !PARTNER_STATE(port, SYNCHRONIZATION)) {
477 /* If in COLLECTING or DISTRIBUTING state and partner becomes out of
478 * sync transit to ATTACHED state. */
479 ACTOR_STATE_CLR(port, DISTRIBUTING);
480 ACTOR_STATE_CLR(port, COLLECTING);
481 /* Clear actor sync to activate transit ATTACHED in condition below */
482 ACTOR_STATE_CLR(port, SYNCHRONIZATION);
483 MODE4_DEBUG("Out of sync -> ATTACHED\n");
486 if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
487 /* attach mux to aggregator */
488 RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
489 STATE_DISTRIBUTING)) == 0);
491 ACTOR_STATE_SET(port, SYNCHRONIZATION);
492 SM_FLAG_SET(port, NTT);
493 MODE4_DEBUG("ATTACHED Entered\n");
494 } else if (!ACTOR_STATE(port, COLLECTING)) {
495 /* Start collecting if in sync */
496 if (PARTNER_STATE(port, SYNCHRONIZATION)) {
497 MODE4_DEBUG("ATTACHED -> COLLECTING\n");
498 ACTOR_STATE_SET(port, COLLECTING);
499 SM_FLAG_SET(port, NTT);
501 } else if (ACTOR_STATE(port, COLLECTING)) {
502 /* Check if partner is in COLLECTING state. If so this port can
503 * distribute frames to it */
504 if (!ACTOR_STATE(port, DISTRIBUTING)) {
505 if (PARTNER_STATE(port, COLLECTING)) {
506 /* Enable DISTRIBUTING if partner is collecting */
507 ACTOR_STATE_SET(port, DISTRIBUTING);
508 SM_FLAG_SET(port, NTT);
509 MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
511 "Bond %u: slave id %u distributing started.\n",
512 internals->port_id, slave_id);
515 if (!PARTNER_STATE(port, COLLECTING)) {
516 /* Disable DISTRIBUTING (enter COLLECTING state) if partner
517 * is not collecting */
518 ACTOR_STATE_CLR(port, DISTRIBUTING);
519 SM_FLAG_SET(port, NTT);
520 MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
522 "Bond %u: slave id %u distributing stopped.\n",
523 internals->port_id, slave_id);
530 * Function handles transmit state machine.
532 * Function implements Transmit Machine from point 5.4.16 in 802.1AX
538 tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
540 struct port *agg, *port = &mode_8023ad_ports[slave_id];
542 struct rte_mbuf *lacp_pkt = NULL;
543 struct lacpdu_header *hdr;
544 struct lacpdu *lacpdu;
546 /* If periodic timer is not running periodic machine is in NO PERIODIC and
547 * according to 802.3ax standard tx machine should not transmit any frames
548 * and set ntt to false. */
549 if (timer_is_stopped(&port->periodic_timer))
550 SM_FLAG_CLR(port, NTT);
/* Nothing to send, or rate limit window (tx_machine_timer) still open. */
552 if (!SM_FLAG(port, NTT))
555 if (!timer_is_expired(&port->tx_machine_timer))
/* Allocate an mbuf for the LACPDU from this slave's private pool. */
558 lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
559 if (lacp_pkt == NULL) {
560 RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n");
564 lacp_pkt->data_len = sizeof(*hdr);
565 lacp_pkt->pkt_len = sizeof(*hdr);
567 hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
569 /* Source and destination MAC */
570 ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
571 rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
572 hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
574 lacpdu = &hdr->lacpdu;
575 memset(lacpdu, 0, sizeof(*lacpdu));
577 /* Initialize LACP part */
578 lacpdu->subtype = SLOW_SUBTYPE_LACP;
579 lacpdu->version_number = 1;
/* Actor TLV: our own parameters, with the system address taken from the
 * aggregator this port is attached to. */
582 lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
583 lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
584 memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
585 sizeof(port->actor));
586 agg = &mode_8023ad_ports[port->aggregator_port_id];
587 ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
588 lacpdu->actor.state = port->actor_state;
/* Partner TLV: echo back the last recorded partner parameters. */
591 lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
592 lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
593 memcpy(&lacpdu->partner.port_params, &port->partner,
594 sizeof(struct port_params));
595 lacpdu->partner.state = port->partner_state;
/* Collector and terminator TLVs. */
598 lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
599 lacpdu->collector_info_length = 0x10;
600 lacpdu->collector_max_delay = 0;
602 lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
603 lacpdu->terminator_length = 0;
605 MODE4_DEBUG("Sending LACP frame\n");
606 BOND_PRINT_LACP(lacpdu);
/* Hand the frame off: via the software TX ring (picked up by the bonded
 * device's tx_burst) or directly on the dedicated hardware queue. */
608 if (internals->mode4.dedicated_queues.enabled == 0) {
609 int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);
611 /* If TX ring full, drop packet and free message.
612 Retransmission will happen in next function call. */
613 rte_pktmbuf_free(lacp_pkt);
614 set_warning_flags(port, WRN_TX_QUEUE_FULL);
618 uint16_t pkts_sent = rte_eth_tx_burst(slave_id,
619 internals->mode4.dedicated_queues.tx_qid,
621 if (pkts_sent != 1) {
622 rte_pktmbuf_free(lacp_pkt);
623 set_warning_flags(port, WRN_TX_QUEUE_FULL);
/* Frame queued: re-arm the tx rate limiter and clear the NTT request. */
629 timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
630 SM_FLAG_CLR(port, NTT);
/* Return the index of the largest element of a[0..n-1].
 * NOTE(review): loop starts at 1, so index 0 serves as the initial maximum;
 * behavior for n <= 0 is not visible here — confirm callers pass n >= 1. */
634 max_index(uint64_t *a, int n)
642 for (i = 1; i < n; ++i) {
653 * Function assigns port to aggregator.
655 * @param bond_dev_private Pointer to bond_dev_private structure.
656 * @param port_pos Port to assign.
/* NOTE(review): slave_id is uint8_t here while most other functions in this
 * file take uint16_t slave ids — consider unifying. */
659 selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
661 struct port *agg, *port;
662 uint16_t slaves_count, new_agg_id, i, j = 0;
664 uint64_t agg_bandwidth[8] = {0};
665 uint64_t agg_count[8] = {0};
666 uint16_t default_slave = 0;
667 uint8_t mode_count_id, mode_band_id;
668 struct rte_eth_link link_info;
670 slaves = internals->active_slaves;
671 slaves_count = internals->active_slave_count;
672 port = &mode_8023ad_ports[slave_id];
674 /* Search for aggregator suitable for this port */
675 for (i = 0; i < slaves_count; ++i) {
676 agg = &mode_8023ad_ports[slaves[i]];
677 /* Skip ports that are not aggregators */
678 if (agg->aggregator_port_id != slaves[i])
/* Tally member count and summed link speed per aggregator, used by the
 * COUNT and BANDWIDTH selection policies below. */
681 agg_count[agg->aggregator_port_id] += 1;
682 rte_eth_link_get_nowait(slaves[i], &link_info);
683 agg_bandwidth[agg->aggregator_port_id] += link_info.link_speed;
685 /* Actors system ID is not checked since all slave device have the same
686 * ID (MAC address). */
687 if ((agg->actor.key == port->actor.key &&
688 agg->partner.system_priority == port->partner.system_priority &&
689 is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1
690 && (agg->partner.key == port->partner.key)) &&
691 is_zero_ether_addr(&port->partner.system) != 1 &&
693 rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {
/* Pick the aggregator according to the configured selection policy. */
701 switch (internals->mode4.agg_selection) {
703 mode_count_id = max_index(
704 (uint64_t *)agg_count, slaves_count);
705 new_agg_id = mode_count_id;
708 mode_band_id = max_index(
709 (uint64_t *)agg_bandwidth, slaves_count);
710 new_agg_id = mode_band_id;
/* STABLE/default: keep the first matching aggregator found above, or
 * fall back to self when no suitable aggregator exists. */
713 if (default_slave == slaves_count)
714 new_agg_id = slave_id;
716 new_agg_id = slaves[default_slave];
719 if (default_slave == slaves_count)
720 new_agg_id = slave_id;
722 new_agg_id = slaves[default_slave];
726 if (new_agg_id != port->aggregator_port_id) {
727 port->aggregator_port_id = new_agg_id;
729 MODE4_DEBUG("-> SELECTED: ID=%3u\n"
730 "\t%s aggregator ID=%3u\n",
731 port->aggregator_port_id,
732 port->aggregator_port_id == slave_id ?
733 "aggregator not found, using default" : "aggregator found",
734 port->aggregator_port_id);
737 port->selected = SELECTED;
740 /* Function maps DPDK speed to bonding speed stored in key field */
742 link_speed_key(uint16_t speed) {
746 case ETH_SPEED_NUM_NONE:
749 case ETH_SPEED_NUM_10M:
750 key_speed = BOND_LINK_SPEED_KEY_10M;
752 case ETH_SPEED_NUM_100M:
753 key_speed = BOND_LINK_SPEED_KEY_100M;
755 case ETH_SPEED_NUM_1G:
756 key_speed = BOND_LINK_SPEED_KEY_1000M;
758 case ETH_SPEED_NUM_10G:
759 key_speed = BOND_LINK_SPEED_KEY_10G;
761 case ETH_SPEED_NUM_20G:
762 key_speed = BOND_LINK_SPEED_KEY_20G;
764 case ETH_SPEED_NUM_40G:
765 key_speed = BOND_LINK_SPEED_KEY_40G;
/* Feed a received LACPDU (or NULL for "no frame this round") into the RX
 * state machine and free the mbuf afterwards.
 * NOTE(review): slave_id is uint8_t here but uint16_t elsewhere in this
 * file — consider unifying the type. */
776 rx_machine_update(struct bond_dev_private *internals, uint8_t slave_id,
777 struct rte_mbuf *lacp_pkt) {
778 struct lacpdu_header *lacp;
780 if (lacp_pkt != NULL) {
781 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
/* Subtype was already validated by whoever queued the packet. */
782 RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
784 /* This is LACP frame so pass it to rx_machine */
785 rx_machine(internals, slave_id, &lacp->lacpdu);
786 rte_pktmbuf_free(lacp_pkt);
/* No frame: still run the machine so timers can expire. */
788 rx_machine(internals, slave_id, NULL);
/* Main mode-4 driver: EAL alarm callback that refreshes link/key/MAC state
 * for every active slave, feeds received LACPDUs to the RX machine, runs
 * all per-port state machines, and re-arms itself. */
792 bond_mode_8023ad_periodic_cb(void *arg)
794 struct rte_eth_dev *bond_dev = arg;
795 struct bond_dev_private *internals = bond_dev->data->dev_private;
797 struct rte_eth_link link_info;
798 struct ether_addr slave_addr;
799 struct rte_mbuf *lacp_pkt = NULL;
804 /* Update link status on each port */
805 for (i = 0; i < internals->active_slave_count; i++) {
808 slave_id = internals->active_slaves[i];
809 rte_eth_link_get_nowait(slave_id, &link_info);
810 rte_eth_macaddr_get(slave_id, &slave_addr);
/* Derive the operational key from link speed and duplex. */
812 if (link_info.link_status != 0) {
813 key = link_speed_key(link_info.link_speed) << 1;
814 if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
815 key |= BOND_LINK_FULL_DUPLEX_KEY;
819 port = &mode_8023ad_ports[slave_id];
821 key = rte_cpu_to_be_16(key);
822 if (key != port->actor.key) {
823 if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
824 set_warning_flags(port, WRN_NOT_LACP_CAPABLE);
826 port->actor.key = key;
827 SM_FLAG_SET(port, NTT);
/* Track slave MAC changes; notify peers if this port is an aggregator. */
830 if (!is_same_ether_addr(&port->actor.system, &slave_addr)) {
831 ether_addr_copy(&slave_addr, &port->actor.system);
832 if (port->aggregator_port_id == slave_id)
833 SM_FLAG_SET(port, NTT);
/* Second pass: run the state machines for every active slave. */
837 for (i = 0; i < internals->active_slave_count; i++) {
838 slave_id = internals->active_slaves[i];
839 port = &mode_8023ad_ports[slave_id];
841 if ((port->actor.key &
842 rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
844 SM_FLAG_SET(port, BEGIN);
846 /* LACP is disabled on half duplex or link is down */
847 if (SM_FLAG(port, LACP_ENABLED)) {
848 /* If port was enabled set it to BEGIN state */
849 SM_FLAG_CLR(port, LACP_ENABLED);
850 ACTOR_STATE_CLR(port, DISTRIBUTING);
851 ACTOR_STATE_CLR(port, COLLECTING);
854 /* Skip this port processing */
858 SM_FLAG_SET(port, LACP_ENABLED);
860 if (internals->mode4.dedicated_queues.enabled == 0) {
861 /* Find LACP packet to this port. Do not check subtype,
862 * it is done in function that queued packet
864 int retval = rte_ring_dequeue(port->rx_ring,
870 rx_machine_update(internals, slave_id, lacp_pkt);
/* Dedicated queue mode: poll the slow-protocol RX queue directly. */
872 uint16_t rx_count = rte_eth_rx_burst(slave_id,
873 internals->mode4.dedicated_queues.rx_qid,
877 bond_mode_8023ad_handle_slow_pkt(internals,
880 rx_machine_update(internals, slave_id, NULL);
/* Run the remaining 802.1AX state machines in order. */
883 periodic_machine(internals, slave_id);
884 mux_machine(internals, slave_id);
885 tx_machine(internals, slave_id);
886 selection_logic(internals, slave_id);
888 SM_FLAG_CLR(port, BEGIN);
889 show_warnings(slave_id);
/* Re-arm this callback. */
892 rte_eal_alarm_set(internals->mode4.update_timeout_us,
893 bond_mode_8023ad_periodic_cb, arg);
/* Initialize mode-4 state and per-slave resources (mbuf pool, RX/TX rings)
 * when a slave is added to the bond. Allocation failures are fatal
 * (rte_panic) because the resources cannot be reclaimed afterwards. */
897 bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
900 struct bond_dev_private *internals = bond_dev->data->dev_private;
902 struct port *port = &mode_8023ad_ports[slave_id];
/* Default actor/partner parameters; all multi-byte fields are kept in
 * network byte order, matching the on-wire LACPDU layout. */
903 struct port_params initial = {
905 .system_priority = rte_cpu_to_be_16(0xFFFF),
906 .key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
907 .port_priority = rte_cpu_to_be_16(0x00FF),
911 char mem_name[RTE_ETH_NAME_MAX_LEN];
913 unsigned element_size;
914 uint32_t total_tx_desc;
915 struct bond_tx_queue *bd_tx_q;
918 /* Given slave must not be in active list */
919 RTE_ASSERT(find_slave_by_id(internals->active_slaves,
920 internals->active_slave_count, slave_id) == internals->active_slave_count);
921 RTE_SET_USED(internals); /* used only for assert when enabled */
923 memcpy(&port->actor, &initial, sizeof(struct port_params));
924 /* Standard requires that port ID must be greater than 0.
925 * Add 1 to get corresponding port_number */
926 port->actor.port_number = rte_cpu_to_be_16(slave_id + 1);
928 memcpy(&port->partner, &initial, sizeof(struct port_params));
/* Start as an active LACP participant with defaulted partner info. */
931 port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
932 port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
933 port->sm_flags = SM_FLAGS_BEGIN;
935 /* use this port as aggregator */
936 port->aggregator_port_id = slave_id;
/* Promiscuous mode is needed to receive the slow-protocols multicast. */
937 rte_eth_promiscuous_enable(slave_id);
939 timer_cancel(&port->warning_timer);
/* Resources already allocated on a previous activation: nothing to do. */
941 if (port->mbuf_pool != NULL)
944 RTE_ASSERT(port->rx_ring == NULL);
945 RTE_ASSERT(port->tx_ring == NULL);
947 socket_id = rte_eth_dev_socket_id(slave_id);
948 if (socket_id == (int)LCORE_ID_ANY)
949 socket_id = rte_socket_id();
951 element_size = sizeof(struct slow_protocol_frame) +
952 RTE_PKTMBUF_HEADROOM;
954 /* The size of the mempool should be at least:
955 * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
956 total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS;
957 for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) {
958 bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id];
959 total_tx_desc += bd_tx_q->nb_tx_desc;
962 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
963 port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
964 RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
965 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
966 0, element_size, socket_id);
968 /* Any memory allocation failure in initialization is critical because
969 * resources can't be free, so reinitialization is impossible. */
970 if (port->mbuf_pool == NULL) {
971 rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
972 slave_id, mem_name, rte_strerror(rte_errno));
975 snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
976 port->rx_ring = rte_ring_create(mem_name,
977 rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);
979 if (port->rx_ring == NULL) {
980 rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
981 mem_name, rte_strerror(rte_errno));
984 /* TX ring is at least one pkt longer to make room for marker packet. */
985 snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
986 port->tx_ring = rte_ring_create(mem_name,
987 rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);
989 if (port->tx_ring == NULL) {
990 rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
991 mem_name, rte_strerror(rte_errno));
/* Reset mode-4 state for a slave leaving the bond and drain its software
 * rings. The mbuf pool and rings themselves are kept for reactivation. */
996 bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused,
1000 struct port *port = NULL;
1001 uint8_t old_partner_state;
1003 port = &mode_8023ad_ports[slave_id];
1005 ACTOR_STATE_CLR(port, AGGREGATION);
1006 port->selected = UNSELECTED;
1008 old_partner_state = port->partner_state;
1009 record_default(port);
1011 /* If partner timeout state changes then disable timer */
1012 if (!((old_partner_state ^ port->partner_state) &
1013 STATE_LACP_SHORT_TIMEOUT))
1014 timer_cancel(&port->current_while_timer);
1016 PARTNER_STATE_CLR(port, AGGREGATION);
1017 ACTOR_STATE_CLR(port, EXPIRED);
1019 /* flush rx/tx rings */
1020 while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
1021 rte_pktmbuf_free((struct rte_mbuf *)pkt);
1023 while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)
1024 rte_pktmbuf_free((struct rte_mbuf *)pkt);
/* Propagate slave MAC address changes into mode-4 state. The periodic
 * callback is stopped for the duration of the update and restarted only if
 * the bonded device was running. */
1029 bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
1031 struct bond_dev_private *internals = bond_dev->data->dev_private;
1032 struct ether_addr slave_addr;
1033 struct port *slave, *agg_slave;
1034 uint16_t slave_id, i, j;
1036 bond_mode_8023ad_stop(bond_dev);
1038 for (i = 0; i < internals->active_slave_count; i++) {
1039 slave_id = internals->active_slaves[i];
1040 slave = &mode_8023ad_ports[slave_id];
1041 rte_eth_macaddr_get(slave_id, &slave_addr);
/* Unchanged address: nothing to do for this slave. */
1043 if (is_same_ether_addr(&slave_addr, &slave->actor.system))
1046 ether_addr_copy(&slave_addr, &slave->actor.system);
1047 /* Do nothing if this port is not an aggregator. In other case
1048 * Set NTT flag on every port that use this aggregator. */
1049 if (slave->aggregator_port_id != slave_id)
1052 for (j = 0; j < internals->active_slave_count; j++) {
1053 agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
1054 if (agg_slave->aggregator_port_id == slave_id)
1055 SM_FLAG_SET(agg_slave, NTT);
/* Restart the periodic callback only if the device was started. */
1059 if (bond_dev->data->dev_started)
1060 bond_mode_8023ad_start(bond_dev);
/* Export the current mode-4 configuration, converting internal TSC-cycle
 * timeouts back to milliseconds for the public API. */
1064 bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
1065 struct rte_eth_bond_8023ad_conf *conf)
1067 struct bond_dev_private *internals = dev->data->dev_private;
1068 struct mode8023ad_private *mode4 = &internals->mode4;
1069 uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
1071 conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks;
1072 conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks;
1073 conf->short_timeout_ms = mode4->short_timeout / ms_ticks;
1074 conf->long_timeout_ms = mode4->long_timeout / ms_ticks;
1075 conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks;
1076 conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks;
1077 conf->update_timeout_ms = mode4->update_timeout_us / 1000;
1078 conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks;
1079 conf->slowrx_cb = mode4->slowrx_cb;
1080 conf->agg_selection = mode4->agg_selection;
/* Fill @conf with the compile-time default mode-4 configuration
 * (all periods in milliseconds, no slow-RX callback, stable selection). */
1084 bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
1086 conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
1087 conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
1088 conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
1089 conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
1090 conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
1091 conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
1092 conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
1093 conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
1094 conf->slowrx_cb = NULL;
1095 conf->agg_selection = AGG_STABLE;
/* Apply @conf to the internal mode-4 state, converting millisecond periods
 * to TSC cycles. Dedicated slow-protocol queues start disabled. */
1099 bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
1100 struct rte_eth_bond_8023ad_conf *conf)
1102 uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
1104 mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
1105 mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
1106 mode4->short_timeout = conf->short_timeout_ms * ms_ticks;
1107 mode4->long_timeout = conf->long_timeout_ms * ms_ticks;
1108 mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks;
1109 mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks;
1110 mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks;
1111 mode4->update_timeout_us = conf->update_timeout_ms * 1000;
1113 mode4->dedicated_queues.enabled = 0;
1114 mode4->dedicated_queues.rx_qid = UINT16_MAX;
1115 mode4->dedicated_queues.tx_qid = UINT16_MAX;
/* (Re)configure mode 4 on @dev: stop the periodic callback, apply @conf
 * (defaults when none is supplied), and restart if the device was running. */
1119 bond_mode_8023ad_setup(struct rte_eth_dev *dev,
1120 struct rte_eth_bond_8023ad_conf *conf)
1122 struct rte_eth_bond_8023ad_conf def_conf;
1123 struct bond_dev_private *internals = dev->data->dev_private;
1124 struct mode8023ad_private *mode4 = &internals->mode4;
/* No configuration given: fall back to the built-in defaults. */
1128 bond_mode_8023ad_conf_get_default(conf);
1131 bond_mode_8023ad_stop(dev);
1132 bond_mode_8023ad_conf_assign(mode4, conf);
1133 mode4->slowrx_cb = conf->slowrx_cb;
/* NOTE(review): agg_selection is forced to AGG_STABLE here rather than
 * taken from conf->agg_selection — confirm this is intentional. */
1134 mode4->agg_selection = AGG_STABLE;
1136 if (dev->data->dev_started)
1137 bond_mode_8023ad_start(dev);
/* Activate mode-4 state for every currently active slave of @bond_dev. */
1141 bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
1143 struct bond_dev_private *internals = bond_dev->data->dev_private;
1146 for (i = 0; i < internals->active_slave_count; i++)
1147 bond_mode_8023ad_activate_slave(bond_dev,
1148 internals->active_slaves[i]);
/* Arm the periodic EAL alarm that drives the mode-4 state machines.
 * When an external slow-RX callback is registered, the "ext" variant of the
 * periodic handler is used instead of the default one. */
1154 bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
1156 struct bond_dev_private *internals = bond_dev->data->dev_private;
1157 struct mode8023ad_private *mode4 = &internals->mode4;
1158 static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;
1160 if (mode4->slowrx_cb)
1161 return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
1164 return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev);
1168 bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
1170 struct bond_dev_private *internals = bond_dev->data->dev_private;
1171 struct mode8023ad_private *mode4 = &internals->mode4;
1173 if (mode4->slowrx_cb) {
1174 rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb,
1178 rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
/*
 * Handle one slow-protocol frame received on @slave_id.
 *
 * Marker (SLOW_SUBTYPE_MARKER) INFO frames are answered immediately by
 * mutating @pkt in place into a marker RESPONSE (only the source MAC is
 * rewritten; the destination is the slow-protocol multicast address) and
 * transmitting it. LACP frames are queued (or processed directly when
 * dedicated queues are on) for the state machines. On any error path the
 * packet is freed and a warning flag is set on the port.
 *
 * Ownership: @pkt is either re-transmitted/enqueued or freed here.
 */
void
bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
		uint16_t slave_id, struct rte_mbuf *pkt)
{
	struct mode8023ad_private *mode4 = &internals->mode4;
	struct port *port = &mode_8023ad_ports[slave_id];
	struct marker_header *m_hdr;
	uint64_t marker_timer, old_marker_timer;
	int retval;
	uint8_t wrn, subtype;

	/* If packet is a marker, we send response now by reusing given packet
	 * and update only source MAC, destination MAC is multicast so don't
	 * update it. Other frames will be handled later by state machines */
	subtype = rte_pktmbuf_mtod(pkt,
			struct slow_protocol_frame *)->slow_protocol.subtype;

	if (subtype == SLOW_SUBTYPE_MARKER) {
		m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *);

		/* Only marker INFO requests are answered; anything else is
		 * dropped with a warning.
		 * NOTE(review): the likely() hint marks the *drop* branch as
		 * the expected case — confirm that is intentional. */
		if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) {
			wrn = WRN_UNKNOWN_MARKER_TYPE;
			goto free_out;
		}

		/* Setup marker timer. Do it in loop in case concurrent access. */
		do {
			old_marker_timer = port->rx_marker_timer;
			/* A still-running timer means markers arrive faster
			 * than rx_marker_timeout allows: rate-limit by drop. */
			if (!timer_is_expired(&old_marker_timer)) {
				wrn = WRN_RX_MARKER_TO_FAST;
				goto free_out;
			}

			timer_set(&marker_timer, mode4->rx_marker_timeout);
			/* CAS publishes the new deadline; retry if another
			 * thread updated the timer in between. */
			retval = rte_atomic64_cmpset(&port->rx_marker_timer,
				old_marker_timer, marker_timer);
		} while (unlikely(retval == 0));

		/* Turn the request into a response in place. */
		m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP;
		rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr);

		if (internals->mode4.dedicated_queues.enabled == 0) {
			int retval = rte_ring_enqueue(port->tx_ring, pkt);
			if (retval != 0) {
				/* reset timer so the next marker can be
				 * answered, then drop this one */
				port->rx_marker_timer = 0;
				wrn = WRN_TX_QUEUE_FULL;
				goto free_out;
			}
		} else {
			/* Send packet directly to the slow queue */
			uint16_t tx_count = rte_eth_tx_burst(slave_id,
					internals->mode4.dedicated_queues.tx_qid,
					&pkt, 1);
			if (tx_count != 1) {
				/* reset timer */
				port->rx_marker_timer = 0;
				wrn = WRN_TX_QUEUE_FULL;
				goto free_out;
			}
		}
	} else if (likely(subtype == SLOW_SUBTYPE_LACP)) {
		if (internals->mode4.dedicated_queues.enabled == 0) {
			int retval = rte_ring_enqueue(port->rx_ring, pkt);
			if (retval != 0) {
				/* If RX ring full, free lacpdu message and drop packet */
				wrn = WRN_RX_QUEUE_FULL;
				goto free_out;
			}
		} else
			/* Dedicated queues: run the RX state machine now. */
			rx_machine_update(internals, slave_id, pkt);
	} else {
		wrn = WRN_UNKNOWN_SLOW_TYPE;
		goto free_out;
	}

	return;

free_out:
	set_warning_flags(port, wrn);
	rte_pktmbuf_free(pkt);
}
1264 rte_eth_bond_8023ad_conf_get(uint16_t port_id,
1265 struct rte_eth_bond_8023ad_conf *conf)
1267 struct rte_eth_dev *bond_dev;
1269 if (valid_bonded_port_id(port_id) != 0)
1275 bond_dev = &rte_eth_devices[port_id];
1276 bond_mode_8023ad_conf_get(bond_dev, conf);
1281 rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
1282 enum rte_bond_8023ad_agg_selection agg_selection)
1284 struct rte_eth_dev *bond_dev;
1285 struct bond_dev_private *internals;
1286 struct mode8023ad_private *mode4;
1288 bond_dev = &rte_eth_devices[port_id];
1289 internals = bond_dev->data->dev_private;
1291 if (valid_bonded_port_id(port_id) != 0)
1293 if (internals->mode != 4)
1296 mode4 = &internals->mode4;
1297 if (agg_selection == AGG_COUNT || agg_selection == AGG_BANDWIDTH
1298 || agg_selection == AGG_STABLE)
1299 mode4->agg_selection = agg_selection;
1303 int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id)
1305 struct rte_eth_dev *bond_dev;
1306 struct bond_dev_private *internals;
1307 struct mode8023ad_private *mode4;
1309 bond_dev = &rte_eth_devices[port_id];
1310 internals = bond_dev->data->dev_private;
1312 if (valid_bonded_port_id(port_id) != 0)
1314 if (internals->mode != 4)
1316 mode4 = &internals->mode4;
1318 return mode4->agg_selection;
1324 bond_8023ad_setup_validate(uint16_t port_id,
1325 struct rte_eth_bond_8023ad_conf *conf)
1327 if (valid_bonded_port_id(port_id) != 0)
1331 /* Basic sanity check */
1332 if (conf->slow_periodic_ms == 0 ||
1333 conf->fast_periodic_ms >= conf->slow_periodic_ms ||
1334 conf->long_timeout_ms == 0 ||
1335 conf->short_timeout_ms >= conf->long_timeout_ms ||
1336 conf->aggregate_wait_timeout_ms == 0 ||
1337 conf->tx_period_ms == 0 ||
1338 conf->rx_marker_period_ms == 0 ||
1339 conf->update_timeout_ms == 0) {
1340 RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n");
1350 rte_eth_bond_8023ad_setup(uint16_t port_id,
1351 struct rte_eth_bond_8023ad_conf *conf)
1353 struct rte_eth_dev *bond_dev;
1356 err = bond_8023ad_setup_validate(port_id, conf);
1360 bond_dev = &rte_eth_devices[port_id];
1361 bond_mode_8023ad_setup(bond_dev, conf);
1371 rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
1372 struct rte_eth_bond_8023ad_slave_info *info)
1374 struct rte_eth_dev *bond_dev;
1375 struct bond_dev_private *internals;
1378 if (info == NULL || valid_bonded_port_id(port_id) != 0 ||
1379 rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1382 bond_dev = &rte_eth_devices[port_id];
1384 internals = bond_dev->data->dev_private;
1385 if (find_slave_by_id(internals->active_slaves,
1386 internals->active_slave_count, slave_id) ==
1387 internals->active_slave_count)
1390 port = &mode_8023ad_ports[slave_id];
1391 info->selected = port->selected;
1393 info->actor_state = port->actor_state;
1394 rte_memcpy(&info->actor, &port->actor, sizeof(port->actor));
1396 info->partner_state = port->partner_state;
1397 rte_memcpy(&info->partner, &port->partner, sizeof(port->partner));
1399 info->agg_port_id = port->aggregator_port_id;
1404 bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id)
1406 struct rte_eth_dev *bond_dev;
1407 struct bond_dev_private *internals;
1408 struct mode8023ad_private *mode4;
1410 if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
1413 bond_dev = &rte_eth_devices[port_id];
1415 if (!bond_dev->data->dev_started)
1418 internals = bond_dev->data->dev_private;
1419 if (find_slave_by_id(internals->active_slaves,
1420 internals->active_slave_count, slave_id) ==
1421 internals->active_slave_count)
1424 mode4 = &internals->mode4;
1425 if (mode4->slowrx_cb == NULL)
1432 rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
1438 res = bond_8023ad_ext_validate(port_id, slave_id);
1442 port = &mode_8023ad_ports[slave_id];
1445 ACTOR_STATE_SET(port, COLLECTING);
1447 ACTOR_STATE_CLR(port, COLLECTING);
1453 rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
1459 res = bond_8023ad_ext_validate(port_id, slave_id);
1463 port = &mode_8023ad_ports[slave_id];
1466 ACTOR_STATE_SET(port, DISTRIBUTING);
1468 ACTOR_STATE_CLR(port, DISTRIBUTING);
1474 rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id)
1479 err = bond_8023ad_ext_validate(port_id, slave_id);
1483 port = &mode_8023ad_ports[slave_id];
1484 return ACTOR_STATE(port, DISTRIBUTING);
1488 rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id)
1493 err = bond_8023ad_ext_validate(port_id, slave_id);
1497 port = &mode_8023ad_ports[slave_id];
1498 return ACTOR_STATE(port, COLLECTING);
1502 rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
1503 struct rte_mbuf *lacp_pkt)
1508 res = bond_8023ad_ext_validate(port_id, slave_id);
1512 port = &mode_8023ad_ports[slave_id];
1514 if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
1517 struct lacpdu_header *lacp;
1519 /* only enqueue LACPDUs */
1520 lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
1521 if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP)
1524 MODE4_DEBUG("sending LACP frame\n");
1526 return rte_ring_enqueue(port->tx_ring, lacp_pkt);
1530 bond_mode_8023ad_ext_periodic_cb(void *arg)
1532 struct rte_eth_dev *bond_dev = arg;
1533 struct bond_dev_private *internals = bond_dev->data->dev_private;
1534 struct mode8023ad_private *mode4 = &internals->mode4;
1537 uint16_t i, slave_id;
1539 for (i = 0; i < internals->active_slave_count; i++) {
1540 slave_id = internals->active_slaves[i];
1541 port = &mode_8023ad_ports[slave_id];
1543 if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
1544 struct rte_mbuf *lacp_pkt = pkt;
1545 struct lacpdu_header *lacp;
1547 lacp = rte_pktmbuf_mtod(lacp_pkt,
1548 struct lacpdu_header *);
1549 RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
1551 /* This is LACP frame so pass it to rx callback.
1552 * Callback is responsible for freeing mbuf.
1554 mode4->slowrx_cb(slave_id, lacp_pkt);
1558 rte_eal_alarm_set(internals->mode4.update_timeout_us,
1559 bond_mode_8023ad_ext_periodic_cb, arg);
1563 rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port)
1566 struct rte_eth_dev *dev = &rte_eth_devices[port];
1567 struct bond_dev_private *internals = (struct bond_dev_private *)
1568 dev->data->dev_private;
1570 if (check_for_bonded_ethdev(dev) != 0)
1573 if (bond_8023ad_slow_pkt_hw_filter_supported(port) != 0)
1576 /* Device must be stopped to set up slow queue */
1577 if (dev->data->dev_started)
1580 internals->mode4.dedicated_queues.enabled = 1;
1582 bond_ethdev_mode_set(dev, internals->mode);
1587 rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port)
1590 struct rte_eth_dev *dev = &rte_eth_devices[port];
1591 struct bond_dev_private *internals = (struct bond_dev_private *)
1592 dev->data->dev_private;
1594 if (check_for_bonded_ethdev(dev) != 0)
1597 /* Device must be stopped to set up slow queue */
1598 if (dev->data->dev_started)
1601 internals->mode4.dedicated_queues.enabled = 0;
1603 bond_ethdev_mode_set(dev, internals->mode);