/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
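
/*
 * Return the offset, in bytes, of any VLAN header(s) between the Ethernet
 * header and the L3 payload, updating *proto to the inner EtherType.
 * Up to two stacked VLAN tags (QinQ) are handled.
 */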
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}
	return vlan_offset;
}
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
		if (num_rx_slave) {
			num_rx_total += num_rx_slave;
			nb_pkts -= num_rx_slave;
		}
	}

	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
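
/*
 * Check whether an untagged frame is a slow-protocol (LACP or marker) PDU
 * that must be diverted to the mode 4 state machine rather than delivered
 * to the application.
 */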
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
{
	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);

	return !vlan_tci && (ethertype == ether_type_slow_be &&
		(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
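
/*
 * RX burst for mode 4 (802.3ad): reads from each active slave in turn,
 * strips LACP/marker PDUs out of the burst and hands them to the mode 4
 * state machine, and drops frames from slaves that are not collecting or
 * that are not addressed to the bonded port when not in promiscuous mode.
 */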
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct ether_addr bond_mac;

	struct ether_hdr *hdr;

	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint8_t slaves[RTE_MAX_ETHPORTS];
	uint8_t slave_count, idx;

	uint8_t collecting;  /* current slave collecting status */
	const uint8_t promisc = internals->promiscuous_en;
	uint8_t i, j, k;
	uint8_t subtype;

	rte_eth_macaddr_get(internals->port_id, &bond_mac);
	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	idx = internals->active_slave;
	if (idx >= slave_count) {
		internals->active_slave = 0;
		idx = 0;
	}
	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
				COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

			/* Remove packet from array if it is a slow packet, or the slave
			 * is not in collecting state, or the bonding interface is not in
			 * promiscuous mode and the packet address does not match. */
			if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
				!collecting || (!promisc &&
					!is_multicast_ether_addr(&hdr->d_addr) &&
					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(
						internals, slaves[idx], bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
							(num_rx_total - j));
				}
			} else
				j++;
		}
		if (unlikely(++idx == slave_count))
			idx = 0;
	}

	internals->active_slave = idx;
	return num_rx_total;
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;
#endif

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
static void
arp_op_name(uint16_t arp_op, char *buf)
{
	switch (arp_op) {
	case ARP_OP_REQUEST:
		snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
		return;
	case ARP_OP_REPLY:
		snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
		return;
	case ARP_OP_REVREQUEST:
		snprintf(buf, sizeof("Reverse ARP Request"), "%s",
				"Reverse ARP Request");
		return;
	case ARP_OP_REVREPLY:
		snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
				"Reverse ARP Reply");
		return;
	case ARP_OP_INVREQUEST:
		snprintf(buf, sizeof("Peer Identify Request"), "%s",
				"Peer Identify Request");
		return;
	case ARP_OP_INVREPLY:
		snprintf(buf, sizeof("Peer Identify Reply"), "%s",
				"Peer Identify Reply");
		return;
	default:
		break;
	}

	snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
}
#endif
#define MaxIPv4String	16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}

#define MAX_CLIENTS_NUMBER	128
uint8_t active_clients;
struct client_stats_t {
	uint8_t port;
	uint32_t ipv4_addr;
	uint32_t ipv4_rx_packets;
	uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
static void
update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
{
	int i = 0;

	for (; i < MAX_CLIENTS_NUMBER; i++) {
		if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
			/* Just update RX packets number for this client */
			if (TXorRXindicator == &burstnumberRX)
				client_stats[i].ipv4_rx_packets++;
			else
				client_stats[i].ipv4_tx_packets++;
			return;
		}
	}
	/* We have a new client. Insert it into the table and update the stats. */
	if (TXorRXindicator == &burstnumberRX)
		client_stats[active_clients].ipv4_rx_packets++;
	else
		client_stats[active_clients].ipv4_tx_packets++;
	client_stats[active_clients].ipv4_addr = addr;
	client_stats[active_clients].port = port;
	active_clients++;
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber)	\
	RTE_LOG(DEBUG, PMD, \
		"%s " \
		"port:%d " \
		"SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"SrcIP:%s " \
		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"DstIP:%s " \
		"%s " \
		"%d\n", \
		info, \
		port, \
		eth_h->s_addr.addr_bytes[0], \
		eth_h->s_addr.addr_bytes[1], \
		eth_h->s_addr.addr_bytes[2], \
		eth_h->s_addr.addr_bytes[3], \
		eth_h->s_addr.addr_bytes[4], \
		eth_h->s_addr.addr_bytes[5], \
		src_ip, \
		eth_h->d_addr.addr_bytes[0], \
		eth_h->d_addr.addr_bytes[1], \
		eth_h->d_addr.addr_bytes[2], \
		eth_h->d_addr.addr_bytes[3], \
		eth_h->d_addr.addr_bytes[4], \
		eth_h->d_addr.addr_bytes[5], \
		dst_ip, \
		arp_op, \
		++burstnumber)
#endif
static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
		uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
{
	struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	struct arp_hdr *arp_h;
	char dst_ip[16];
	char ArpOp[24];
	char buf[16];
#endif
	char src_ip[16];

	uint16_t ether_type = eth_h->ether_type;
	uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	snprintf(buf, 16, "%s", info);
#endif

	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
		update_client_stats(ipv4_h->src_addr, port, burstnumber);
	}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
		arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
		arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
	}
#endif
}
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;
	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;
	int i;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
			bond_mode_alb_arp_recv(eth_h, offset, internals);
		}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
		else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
	}

	return nb_recv_pkts;
}
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate slaves mbuf with which packets are to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
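
/*
 * Transmit hash helpers for BALANCE (mode 2) and 802.3ad (mode 4): fold
 * L2, L2+L3 or L3+L4 header fields into a slave index so that all packets
 * of a flow are consistently mapped to the same slave.
 */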
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	unaligned_uint16_t *word_src_addr =
		(unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
	unaligned_uint16_t *word_dst_addr =
		(unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	unaligned_uint32_t *word_src_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
	unaligned_uint32_t *word_dst_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

	uint32_t hash = ether_hash(eth_hdr);

	return (hash ^= hash >> 8) % slave_count;
}

uint16_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
	uint32_t hash, l3hash = 0;

	hash = ether_hash(eth_hdr);

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv4_hash(ipv4_hdr);

	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);
	}

	hash = hash ^ l3hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
uint16_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	struct udp_hdr *udp_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	uint32_t hash, l3hash = 0, l4hash = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		size_t ip_hdr_offset;

		l3hash = ipv4_hash(ipv4_hdr);

		/* there is no L4 header in fragmented packet */
		if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
			ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
					IPV4_IHL_MULTIPLIER;

			if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
				tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
						ip_hdr_offset);
				l4hash = HASH_L4_PORTS(tcp_hdr);
			} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
						ip_hdr_offset);
				l4hash = HASH_L4_PORTS(udp_hdr);
			}
		}
	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

		if (ipv6_hdr->proto == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	}

	hash = l3hash ^ l4hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
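
/* Per-slave bandwidth budget left in the current TLB reorder period, kept
 * as an integer part and a remainder so slaves can be sorted precisely. */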
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint8_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
	int i;

	for (i = 0; i < internals->active_slave_count; i++) {
		tlb_last_obytets[internals->active_slaves[i]] = 0;
	}
}

static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;
	if (diff > 0)
		return 1;
	else if (diff < 0)
		return -1;
	else if (diff2 > 0)
		return 1;
	else if (diff2 < 0)
		return -1;
	else
		return 0;
}
static void
bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;

	rte_eth_link_get_nowait(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
	if (link_bwg == 0)
		return;
	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint8_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint8_t i, slave_id;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats) {
			tlb_last_obytets[slave_id] = slave_stats.obytes;
		}
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint8_t i, j;

	uint8_t num_of_slaves = internals->active_slave_count;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	struct ether_hdr *ether_hdr;
	struct ether_addr primary_slave_addr;
	struct ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
void
bond_tlb_disable(struct bond_dev_private *internals)
{
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
	bond_ethdev_update_tlb_slave_cb(internals);
}
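
/*
 * TX burst for mode 6 (ALB): ARP packets are assigned to slaves by the ALB
 * logic, ARP update packets are generated for known clients when the client
 * table has changed, and all other traffic falls back to the TLB policy.
 */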
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;
	size_t pkt_size;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;
	uint8_t slave_idx;

	int i, j;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;
		} else {
			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
		}
	}

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
					continue;
				}
				pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
						+ client_info->vlan_count * sizeof(struct vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;
			}
		}
		internals->mode6.ntt = 0;
	}

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];
			}

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			/* Print TX stats including update packets */
			for (j = 0; j < slave_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);
			}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			for (j = 0; j < update_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
		}

		num_tx_total += num_send;
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate slaves mbuf with the packets which are to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += slave_tx_fail_count;
				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						slave_tx_fail_count * sizeof(bufs[0]));
			}

			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];
	/* positions in slaves, not ID */
	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
	uint8_t distributing_count;

	uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
	uint16_t i, j, op_slave_idx;
	const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

	/* Allocate additional packets in case 8023AD mode. */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
	void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

	/* Total amount of packets in slave_bufs */
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
	/* Slow packets placed in each slave */
	uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

	distributing_count = 0;
	for (i = 0; i < num_of_slaves; i++) {
		struct port *port = &mode_8023ad_ports[slaves[i]];

		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
				NULL);
		slave_nb_pkts[i] = slave_slow_nb_pkts[i];

		for (j = 0; j < slave_slow_nb_pkts[i]; j++)
			slave_bufs[i][j] = slow_pkts[j];

		if (ACTOR_STATE(port, DISTRIBUTING))
			distributing_offsets[distributing_count++] = i;
	}

	if (likely(distributing_count > 0)) {
		/* Populate slaves mbuf with the packets which are to be sent on it */
		for (i = 0; i < nb_pkts; i++) {
			/* Select output slave using hash based on xmit policy */
			op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

			/* Populate slave mbuf arrays with mbufs for that slave. Use only
			 * slaves that are currently distributing. */
			uint8_t slave_offset = distributing_offsets[op_slave_idx];
			slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
			slave_nb_pkts[slave_offset]++;
		}
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] == 0)
			continue;

		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				slave_bufs[i], slave_nb_pkts[i]);

		/* If tx burst fails drop slow packets */
		for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
			rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

		num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
		num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
			uint16_t j = nb_pkts - num_tx_fail_total;
			for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
				bufs[j] = slave_bufs[i][num_tx_slave];
		}
	}

	return num_tx_total;
}
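
/*
 * TX burst for mode 3 (broadcast): every mbuf is transmitted on every
 * active slave, so the reference count of each mbuf is bumped by
 * (number of slaves - 1) before the per-slave bursts are issued.
 */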
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t tx_failed_flag = 0, num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
		bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;
	}
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
		bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}
int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different to current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (mac_address_set(&rte_eth_devices[internals->primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					return -1;
				}
			} else {
				if (mac_address_set(
						&rte_eth_devices[internals->slaves[i].port_id],
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
					return -1;
				}
			}
		}
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
		RTE_LOG(WARNING, PMD,
				"Using mode 4, it is necessary to do TX burst and RX burst "
				"at least every 100ms.\n");
		break;
	case BONDING_MODE_TLB:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
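
/*
 * Stop, reconfigure and restart a slave so that its RSS, VLAN filtering
 * and queue configuration mirror the bonded device's configuration.
 */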
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	int errval;
	uint16_t q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* If RSS is enabled for bonding, try to enable it for slaves */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
				!= 0) {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
		} else {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		}

		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
	}

	slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
			bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	/* If RSS is enabled for bonding, synchronize RETA */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int i;
		struct bond_dev_private *internals;

		internals = bonded_eth_dev->data->dev_private;

		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
				errval = rte_eth_dev_rss_reta_update(
						slave_eth_dev->data->port_id,
						&internals->reta_conf[0],
						internals->slaves[i].reta_size);
				if (errval != 0) {
					RTE_LOG(WARNING, PMD,
							"rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
							" RSS Configuration for bonding may be inconsistent.\n",
							slave_eth_dev->data->port_id, errval);
				}
				break;
			}
		}
	}

	/* If lsc interrupt is set, check initial slave's link status */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
		bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
			RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
			NULL);
	}

	return 0;
}
void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	uint8_t i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1))
		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));

	internals->slave_count--;

	/* force reconfiguration of slave interfaces */
	_rte_eth_dev_reset(slave_eth_dev);
}

static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* Mark slave devices that don't support interrupts so we can
	 * compensate when we start the bond
	 */
	if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;
	}

	slave_details->link_status_wait_to_complete = 0;
	/* clean tlb_last_obytes when adding port for bonding device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}

void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);

static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (check_for_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		return -1;
	}

	if (internals->user_defined_mac == 0) {
		struct ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			return -1;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			return -1;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev,
				&(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
			RTE_BOND_LOG(ERR,
					"bonded port (%d) failed to reconfigure slave device (%d)",
					eth_dev->data->port_id, internals->slaves[i].port_id);
			return -1;
		}
		/* We will need to poll for link status if any slave doesn't
		 * support interrupts
		 */
		if (internals->slaves[i].link_status_poll_enabled)
			internals->link_status_polling_enabled = 1;
	}
	/* start polling if needed */
	if (internals->link_status_polling_enabled) {
		rte_eal_alarm_set(
			internals->link_status_polling_interval_ms * 1000,
			bond_ethdev_slave_link_status_change_monitor,
			(void *)&rte_eth_devices[internals->port_id]);
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);

	return 0;
}
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
	uint8_t i;

	if (dev->data->rx_queues != NULL) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rte_free(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->nb_rx_queues = 0;
	}

	if (dev->data->tx_queues != NULL) {
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			rte_free(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->nb_tx_queues = 0;
	}
}
void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t i;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->active_slave_count; i++) {
			port = &mode_8023ad_ports[internals->active_slaves[i]];

			RTE_ASSERT(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_ASSERT(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;
	}

	internals->active_slave_count = 0;
	internals->link_status_polling_enabled = 0;
	for (i = 0; i < internals->slave_count; i++)
		internals->slaves[i].last_link_status = 0;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;
}
void
bond_ethdev_close(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	uint8_t bond_port_id = internals->port_id;
	int skipped = 0;

	RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name);
	while (internals->slave_count != skipped) {
		uint8_t port_id = internals->slaves[skipped].port_id;

		rte_eth_dev_stop(port_id);

		if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
			RTE_LOG(ERR, EAL,
				"Failed to remove port %d from bonded device "
				"%s\n", port_id, dev->device->name);
			skipped++;
		}
	}
	bond_ethdev_free_queues(dev);
	rte_bitmap_reset(internals->vlan_filter_bmp);
}

/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen
				  ? internals->candidate_max_rx_pktlen
				  : ETHER_MAX_JUMBO_FRAME_LEN;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	dev_info->reta_size = internals->reta_size;
}
static int
bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int res;
	uint8_t i;
	struct bond_dev_private *internals = dev->data->dev_private;

	/* don't do this while a slave is being added */
	rte_spinlock_lock(&internals->lock);

	if (on)
		rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
	else
		rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);

	for (i = 0; i < internals->slave_count; i++) {
		uint8_t port_id = internals->slaves[i].port_id;

		res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
		if (res == ENOTSUP)
			RTE_LOG(WARNING, PMD,
				"Setting VLAN filter on slave port %u not supported.\n",
				port_id);
	}

	rte_spinlock_unlock(&internals->lock);
	return 0;
}
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->data->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}

static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->data->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}
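
/*
 * Periodic alarm callback that polls link status of slaves which do not
 * support link-status-change interrupts and raises the LSC callback for
 * any slave whose status changed since the last poll.
 */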
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id,
						NULL);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdev's */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
		internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
				link_up = 1;
				break;
			}
		}

		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;
	int i, j;

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->imissed += slave_stats.imissed;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->rx_nombuf += slave_stats.rx_nombuf;

		for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
			stats->q_ipackets[j] += slave_stats.q_ipackets[j];
			stats->q_opackets[j] += slave_stats.q_opackets[j];
			stats->q_ibytes[j] += slave_stats.q_ibytes[j];
			stats->q_obytes[j] += slave_stats.q_obytes[j];
			stats->q_errors[j] += slave_stats.q_errors[j];
		}
	}
}

static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i].port_id);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
		break;
	/* In mode4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
	}
}

static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
		break;
	/* In mode4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
	}
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
}
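
/*
 * Link-status-change handler for slave ports: activates/deactivates the
 * slave, elects a new primary port if needed, and propagates the (possibly
 * delayed) LSC event to users of the bonded device.
 */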
static int
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param, void *ret_param __rte_unused)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0;
	uint8_t active_pos;
	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return -1;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (check_for_bonded_ethdev(bonded_eth_dev))
		return -1;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return -1;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return -1;

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		/* Slave is already active, nothing to do */
		if (active_pos < internals->active_slave_count)
			return 0;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);
		}

		/* Inherit eth dev link properties from first active slave */
		link_properties_set(bonded_eth_dev,
				&(slave_eth_dev->data->dev_link));

		/* If the slave's link properties don't match, drop it from the
		 * bonded set */
		if (link_properties_valid(
				&bonded_eth_dev->data->dev_link, &link) != 0) {
			slave_eth_dev->data->dev_flags &=
					(~RTE_ETH_DEV_BONDED_SLAVE);
			RTE_LOG(ERR, PMD,
					"port %u invalid speed/duplex\n",
					port_id);
			return -1;
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If the user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		/* Slave is already inactive, nothing to do */
		if (active_pos == internals->active_slave_count)
			return 0;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or if none
		 * available set to the user-defined primary port */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC,
						NULL, NULL);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC,
						NULL, NULL);
		}
	}
	return 0;
}
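
/*
 * Illustrative sketch only: an application can observe the bonded device's
 * (possibly delayed) link state changes with the standard ethdev callback
 * API; lsc_cb and cb_arg are placeholder names:
 *
 *	rte_eth_dev_callback_register(bond_port_id,
 *			RTE_ETH_EVENT_INTR_LSC, lsc_cb, cb_arg);
 */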
static int
bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	unsigned i, j;
	int result = 0;
	int slave_reta_size;
	unsigned reta_count;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	reta_count = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < reta_count; i++) {
		internals->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	/* Fill rest of array */
	for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
		memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
				sizeof(internals->reta_conf[0]) * reta_count);

	/* Propagate RETA over slaves */
	for (i = 0; i < internals->slave_count; i++) {
		slave_reta_size = internals->slaves[i].reta_size;
		result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
				&internals->reta_conf[0], slave_reta_size);
		if (result < 0)
			return result;
	}

	return 0;
}
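
/*
 * Illustrative sketch only: a minimal application-side RETA update on the
 * bonded port, assuming a 128-entry table spread over two Rx queues;
 * bond_port_id is a placeholder:
 *
 *	struct rte_eth_rss_reta_entry64 conf[2];
 *	unsigned n;
 *
 *	conf[0].mask = conf[1].mask = ~0ULL;
 *	for (n = 0; n < 128; n++)
 *		conf[n / RTE_RETA_GROUP_SIZE].reta[n % RTE_RETA_GROUP_SIZE] =
 *				n % 2;
 *	rte_eth_dev_rss_reta_update(bond_port_id, conf, 128);
 */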
static int
bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];

	return 0;
}
static int
bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	int i, result = 0;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_rss_conf bond_rss_conf;

	memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));

	bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;

	if (bond_rss_conf.rss_hf != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;

	if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
			sizeof(internals->rss_key)) {
		/* A zero key length selects the default 40-byte key */
		if (bond_rss_conf.rss_key_len == 0)
			bond_rss_conf.rss_key_len = 40;
		internals->rss_key_len = bond_rss_conf.rss_key_len;
		memcpy(internals->rss_key, bond_rss_conf.rss_key,
				internals->rss_key_len);
	}

	for (i = 0; i < internals->slave_count; i++) {
		result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
				&bond_rss_conf);
		if (result < 0)
			return result;
	}

	return 0;
}
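
/*
 * Illustrative sketch only: updating the RSS hash functions on the bonded
 * port; the call is forwarded to every slave as above. Passing a NULL key
 * keeps the current key:
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *	};
 *	rte_eth_dev_rss_hash_update(bond_port_id, &conf);
 */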
static int
bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_conf->rss_key_len = internals->rss_key_len;
	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);

	return 0;
}
const struct eth_dev_ops default_dev_ops = {
	.dev_start = bond_ethdev_start,
	.dev_stop = bond_ethdev_stop,
	.dev_close = bond_ethdev_close,
	.dev_configure = bond_ethdev_configure,
	.dev_infos_get = bond_ethdev_info,
	.vlan_filter_set = bond_ethdev_vlan_filter_set,
	.rx_queue_setup = bond_ethdev_rx_queue_setup,
	.tx_queue_setup = bond_ethdev_tx_queue_setup,
	.rx_queue_release = bond_ethdev_rx_queue_release,
	.tx_queue_release = bond_ethdev_tx_queue_release,
	.link_update = bond_ethdev_link_update,
	.stats_get = bond_ethdev_stats_get,
	.stats_reset = bond_ethdev_stats_reset,
	.promiscuous_enable = bond_ethdev_promiscuous_enable,
	.promiscuous_disable = bond_ethdev_promiscuous_disable,
	.reta_update = bond_ethdev_rss_reta_update,
	.reta_query = bond_ethdev_rss_reta_query,
	.rss_hash_update = bond_ethdev_rss_hash_update,
	.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get
};
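
/*
 * The ops table above is the dispatch point for the generic ethdev API: for
 * example, rte_eth_dev_configure() on the bonded port id lands in
 * bond_ethdev_configure() below, and rte_eth_dev_start() in
 * bond_ethdev_start().
 */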
static int
bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
{
	const char *name = rte_vdev_device_name(dev);
	uint8_t socket_id = dev->device.numa_node;
	struct bond_dev_private *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	uint32_t vlan_filter_bmp_size;

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */

	/* reserve an ethdev entry */
	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
		goto err;
	}

	internals = eth_dev->data->dev_private;
	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
			socket_id);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
		goto err;
	}

	eth_dev->dev_ops = &default_dev_ops;
	eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
			RTE_ETH_DEV_DETACHABLE;

	rte_spinlock_init(&internals->lock);

	internals->port_id = eth_dev->data->port_id;
	internals->mode = BONDING_MODE_INVALID;
	internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
	internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
	internals->xmit_hash = xmit_l2_hash;
	internals->user_defined_mac = 0;
	internals->link_props_set = 0;

	internals->link_status_polling_enabled = 0;

	internals->link_status_polling_interval_ms =
			DEFAULT_POLLING_INTERVAL_10_MS;
	internals->link_down_delay_ms = 0;
	internals->link_up_delay_ms = 0;

	internals->slave_count = 0;
	internals->active_slave_count = 0;
	internals->rx_offload_capa = 0;
	internals->tx_offload_capa = 0;
	internals->candidate_max_rx_pktlen = 0;
	internals->max_rx_pktlen = 0;

	/* Initially allow to choose any offload type */
	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;

	memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
	memset(internals->slaves, 0, sizeof(internals->slaves));

	/* Set mode 4 default configuration */
	bond_mode_8023ad_setup(eth_dev, NULL);
	if (bond_ethdev_mode_set(eth_dev, mode)) {
		RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
				eth_dev->data->port_id, mode);
		goto err;
	}

	vlan_filter_bmp_size =
			rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
	internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
			RTE_CACHE_LINE_SIZE);
	if (internals->vlan_filter_bmpmem == NULL) {
		RTE_BOND_LOG(ERR,
				"Failed to allocate vlan bitmap for bonded device %u\n",
				eth_dev->data->port_id);
		goto err;
	}

	internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
			internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
	if (internals->vlan_filter_bmp == NULL) {
		RTE_BOND_LOG(ERR,
				"Failed to init vlan bitmap for bonded device %u\n",
				eth_dev->data->port_id);
		rte_free(internals->vlan_filter_bmpmem);
		goto err;
	}

	return eth_dev->data->port_id;

err:
	rte_free(internals);
	if (eth_dev != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		rte_eth_dev_release_port(eth_dev);
	}
	return -1;
}
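
/*
 * Illustrative sketch only: the same device can also be created
 * programmatically through the bonding API rather than vdev arguments:
 *
 *	int port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 */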
static int
bond_probe(struct rte_vdev_device *dev)
{
	const char *name;
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
			pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
					name);
			goto parse_error;
		}
	} else {
		RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
				"device %s\n", name);
		goto parse_error;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
					"bonded device %s\n", name);
			goto parse_error;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
				"bonded device %s\n", name);
		goto parse_error;
	} else {
		socket_id = rte_socket_id();
	}

	dev->device.numa_node = socket_id;

	/* Create link bonding eth device */
	port_id = bond_alloc(dev, bonding_mode);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
				"socket %u.\n", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
			"socket %u.\n", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);
	return -1;
}
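
/*
 * Illustrative sketch only: probing is normally triggered from the EAL
 * command line; the PCI addresses below are placeholders:
 *
 *	--vdev 'net_bonding0,mode=1,slave=0000:0a:00.0,slave=0000:0a:00.1,primary=0000:0a:00.0'
 */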
static int
bond_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev;
	struct bond_dev_private *internals;
	const char *name;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);

	/* now free all data allocation - for eth_dev structure,
	 * dummy pci driver and internal (private) data
	 */

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	RTE_ASSERT(eth_dev->device == &dev->device);

	internals = eth_dev->data->dev_private;
	if (internals->slave_count != 0)
		return -EBUSY;

	if (eth_dev->data->dev_started == 1) {
		bond_ethdev_stop(eth_dev);
		bond_ethdev_close(eth_dev);
	}

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_bitmap_free(internals->vlan_filter_bmp);
	rte_free(internals->vlan_filter_bmpmem);
	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data->mac_addrs);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
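
/*
 * Illustrative sketch only: once all slaves have been removed, the bonded
 * device can be torn down through the hot-unplug API, e.g.
 *
 *	char devname[RTE_ETH_NAME_MAX_LEN];
 *	rte_eth_dev_detach(bond_port_id, devname);
 */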
/* this part will resolve the slave portids after all the other pdev and vdev
 * have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	const char *name = dev->device->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count;
	uint8_t port_id = dev - rte_eth_devices;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	unsigned i, j;

	/* If RSS is enabled, fill table and key with default values */
	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
		memcpy(internals->rss_key, default_rss_key, 40);

		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
			internals->reta_conf[i].mask = ~0LL;
			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
				internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
		}
	}

	/* set the max_rx_pktlen */
	internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(ERR, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
						0) {
			RTE_LOG(ERR, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(ERR, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}

		/* Set primary slave port id */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}

		/* Set link status monitor polling interval */
		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}
struct rte_vdev_driver pmd_bond_drv = {
	.probe = bond_probe,
	.remove = bond_remove,
};

RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);

RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
	"slave=<ifc> "
	"primary=<ifc> "
	"mode=[0-6] "
	"xmit_policy=[l2 | l23 | l34] "
	"socket_id=<int> "
	"mac=<mac addr> "
	"lsc_poll_period_ms=<int> "
	"up_delay=<int> "
	"down_delay=<int>");