4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <netinet/in.h>
37 #include <rte_malloc.h>
38 #include <rte_ethdev.h>
42 #include <rte_ip_frag.h>
43 #include <rte_devargs.h>
44 #include <rte_kvargs.h>
46 #include <rte_alarm.h>
47 #include <rte_cycles.h>
49 #include "rte_eth_bond.h"
50 #include "rte_eth_bond_private.h"
51 #include "rte_eth_bond_8023ad_private.h"
53 #define REORDER_PERIOD_MS 10
55 #define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
57 /* Table for statistics in mode 5 TLB */
58 static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
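/*
 * Skip over up to two stacked VLAN tags (QinQ): returns the byte offset
 * from the end of the Ethernet header to the L3 payload and leaves the
 * inner EtherType (big-endian) in *proto.
 */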
61 get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
63 size_t vlan_offset = 0;
65 if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
66 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
68 vlan_offset = sizeof(struct vlan_hdr);
69 *proto = vlan_hdr->eth_proto;
71 if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
72 vlan_hdr = vlan_hdr + 1;
73 *proto = vlan_hdr->eth_proto;
74 vlan_offset += sizeof(struct vlan_hdr);
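/*
 * Basic RX burst used by the round robin, balance and broadcast modes:
 * poll each active slave in turn, appending what it returns to bufs,
 * until nb_pkts packets have been collected or every slave was polled.
 */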
81 bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
83 struct bond_dev_private *internals;
85 uint16_t num_rx_slave = 0;
86 uint16_t num_rx_total = 0;
90 /* Cast to structure containing the bonded device's port id and queue id */
91 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
93 internals = bd_rx_q->dev_private;
96 for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
97 /* Offset of pointer to *bufs increases as packets are received
98 * from other slaves */
99 num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
100 bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
102 num_rx_total += num_rx_slave;
103 nb_pkts -= num_rx_slave;
111 bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
114 struct bond_dev_private *internals;
116 /* Cast to structure containing the bonded device's port id and queue id */
117 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
119 internals = bd_rx_q->dev_private;
121 return rte_eth_rx_burst(internals->current_primary_port,
122 bd_rx_q->queue_id, bufs, nb_pkts);
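/*
 * A frame is LACP traffic when it is untagged and its EtherType is the
 * slow-protocols type with a LACP or marker subtype.
 */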
125 static inline uint8_t
126 is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
128 const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
130 return !vlan_tci && (ethertype == ether_type_slow_be &&
131 (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
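/*
 * Mode 4 (802.3ad) RX burst: receive from every active slave, steal slow
 * protocol (LACP/marker) frames for the mode 4 state machine, and drop
 * frames received on a non-collecting slave or, when promiscuous mode is
 * off, frames not addressed to the bond.
 */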
135 bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
138 /* Cast to structure containing the bonded device's port id and queue id */
139 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
140 struct bond_dev_private *internals = bd_rx_q->dev_private;
141 struct ether_addr bond_mac;
143 struct ether_hdr *hdr;
145 const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
146 uint16_t num_rx_total = 0; /* Total number of received packets */
147 uint8_t slaves[RTE_MAX_ETHPORTS];
150 uint8_t collecting; /* current slave collecting status */
151 const uint8_t promisc = internals->promiscuous_en;
155 rte_eth_macaddr_get(internals->port_id, &bond_mac);
156 /* Copy slave list to protect against slave up/down changes during tx bursts */
158 slave_count = internals->active_slave_count;
159 memcpy(slaves, internals->active_slaves,
160 sizeof(internals->active_slaves[0]) * slave_count);
162 for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
164 collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
166 /* Read packets from this slave */
167 num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
168 &bufs[num_rx_total], nb_pkts - num_rx_total);
170 for (k = j; k < 2 && k < num_rx_total; k++)
171 rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
173 /* Handle slow protocol packets. */
174 while (j < num_rx_total) {
175 if (j + 3 < num_rx_total)
176 rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
178 hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
179 subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
181 /* Remove packet from array if it is a slow packet or the slave is not
182 * in collecting state, or the bonding interface is not in promiscuous
183 * mode and the packet address does not match. */
184 if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
185 !collecting || (!promisc &&
186 !is_multicast_ether_addr(&hdr->d_addr) &&
187 !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
189 if (hdr->ether_type == ether_type_slow_be) {
190 bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
193 rte_pktmbuf_free(bufs[j]);
195 /* Packet is managed by mode 4 or dropped, shift the array */
197 if (j < num_rx_total) {
198 memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
209 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
210 uint32_t burstnumberRX;
211 uint32_t burstnumberTX;
213 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
216 arp_op_name(uint16_t arp_op, char *buf)
220 snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
223 snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
225 case ARP_OP_REVREQUEST:
226 snprintf(buf, sizeof("Reverse ARP Request"), "%s",
227 "Reverse ARP Request");
229 case ARP_OP_REVREPLY:
230 snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
231 "Reverse ARP Reply");
233 case ARP_OP_INVREQUEST:
234 snprintf(buf, sizeof("Peer Identify Request"), "%s",
235 "Peer Identify Request");
237 case ARP_OP_INVREPLY:
238 snprintf(buf, sizeof("Peer Identify Reply"), "%s",
239 "Peer Identify Reply");
244 snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
248 #define MaxIPv4String 16
250 ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
254 ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
255 snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
256 (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
260 #define MAX_CLIENTS_NUMBER 128
261 uint8_t active_clients;
262 struct client_stats_t {
265 uint32_t ipv4_rx_packets;
266 uint32_t ipv4_tx_packets;
268 struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
271 update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
275 for (; i < MAX_CLIENTS_NUMBER; i++) {
276 if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
277 /* Just update RX packets number for this client */
278 if (TXorRXindicator == &burstnumberRX)
279 client_stats[i].ipv4_rx_packets++;
281 client_stats[i].ipv4_tx_packets++;
285 /* We have a new client. Insert it into the table and update its stats */
286 if (TXorRXindicator == &burstnumberRX)
287 client_stats[active_clients].ipv4_rx_packets++;
289 client_stats[active_clients].ipv4_tx_packets++;
290 client_stats[active_clients].ipv4_addr = addr;
291 client_stats[active_clients].port = port;
296 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
297 #define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
298 RTE_LOG(DEBUG, PMD, \
301 "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
303 "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
309 eth_h->s_addr.addr_bytes[0], \
310 eth_h->s_addr.addr_bytes[1], \
311 eth_h->s_addr.addr_bytes[2], \
312 eth_h->s_addr.addr_bytes[3], \
313 eth_h->s_addr.addr_bytes[4], \
314 eth_h->s_addr.addr_bytes[5], \
316 eth_h->d_addr.addr_bytes[0], \
317 eth_h->d_addr.addr_bytes[1], \
318 eth_h->d_addr.addr_bytes[2], \
319 eth_h->d_addr.addr_bytes[3], \
320 eth_h->d_addr.addr_bytes[4], \
321 eth_h->d_addr.addr_bytes[5], \
328 mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
329 uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
331 struct ipv4_hdr *ipv4_h;
332 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
333 struct arp_hdr *arp_h;
340 uint16_t ether_type = eth_h->ether_type;
341 uint16_t offset = get_vlan_offset(eth_h, ðer_type);
343 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
344 snprintf(buf, 16, "%s", info);
347 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
348 ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
349 ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
350 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
351 ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
352 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
354 update_client_stats(ipv4_h->src_addr, port, burstnumber);
356 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
357 else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
358 arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
359 ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
360 ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
361 arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
362 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
369 bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
371 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
372 struct bond_dev_private *internals = bd_tx_q->dev_private;
373 struct ether_hdr *eth_h;
374 uint16_t ether_type, offset;
375 uint16_t nb_recv_pkts;
378 nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
380 for (i = 0; i < nb_recv_pkts; i++) {
381 eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
382 ether_type = eth_h->ether_type;
383 offset = get_vlan_offset(eth_h, ðer_type);
385 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
386 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
387 mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
389 bond_mode_alb_arp_recv(eth_h, offset, internals);
391 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
392 else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
393 mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
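/*
 * Mode 0 (round robin) TX burst: deal packets one at a time across the
 * active slaves, starting from the slave after the one last used so the
 * rotation continues across calls; unsent packets are returned at the
 * tail of bufs.
 */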
401 bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
404 struct bond_dev_private *internals;
405 struct bond_tx_queue *bd_tx_q;
407 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
408 uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
410 uint8_t num_of_slaves;
411 uint8_t slaves[RTE_MAX_ETHPORTS];
413 uint16_t num_tx_total = 0, num_tx_slave;
415 static int slave_idx = 0;
416 int i, cslave_idx = 0, tx_fail_total = 0;
418 bd_tx_q = (struct bond_tx_queue *)queue;
419 internals = bd_tx_q->dev_private;
421 /* Copy slave list to protect against slave up/down changes during tx bursts */
423 num_of_slaves = internals->active_slave_count;
424 memcpy(slaves, internals->active_slaves,
425 sizeof(internals->active_slaves[0]) * num_of_slaves);
427 if (num_of_slaves < 1)
430 /* Populate each slave's mbuf array with the packets to be sent on it */
431 for (i = 0; i < nb_pkts; i++) {
432 cslave_idx = (slave_idx + i) % num_of_slaves;
433 slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
436 /* increment current slave index so the next call to tx burst starts on the next slave */
438 slave_idx = ++cslave_idx;
440 /* Send packet burst on each slave device */
441 for (i = 0; i < num_of_slaves; i++) {
442 if (slave_nb_pkts[i] > 0) {
443 num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
444 slave_bufs[i], slave_nb_pkts[i]);
446 /* if tx burst fails move packets to end of bufs */
447 if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
448 int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
450 tx_fail_total += tx_fail_slave;
452 memcpy(&bufs[nb_pkts - tx_fail_total],
453 &slave_bufs[i][num_tx_slave],
454 tx_fail_slave * sizeof(bufs[0]));
456 num_tx_total += num_tx_slave;
464 bond_ethdev_tx_burst_active_backup(void *queue,
465 struct rte_mbuf **bufs, uint16_t nb_pkts)
467 struct bond_dev_private *internals;
468 struct bond_tx_queue *bd_tx_q;
470 bd_tx_q = (struct bond_tx_queue *)queue;
471 internals = bd_tx_q->dev_private;
473 if (internals->active_slave_count < 1)
476 return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
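/*
 * Transmit hash helpers: ether_hash() folds the source and destination
 * MACs together by XORing their 16-bit words; ipv4_hash()/ipv6_hash()
 * below do the same over the L3 addresses.
 */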
480 static inline uint16_t
481 ether_hash(struct ether_hdr *eth_hdr)
483 unaligned_uint16_t *word_src_addr =
484 (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
485 unaligned_uint16_t *word_dst_addr =
486 (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;
488 return (word_src_addr[0] ^ word_dst_addr[0]) ^
489 (word_src_addr[1] ^ word_dst_addr[1]) ^
490 (word_src_addr[2] ^ word_dst_addr[2]);
493 static inline uint32_t
494 ipv4_hash(struct ipv4_hdr *ipv4_hdr)
496 return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
499 static inline uint32_t
500 ipv6_hash(struct ipv6_hdr *ipv6_hdr)
502 unaligned_uint32_t *word_src_addr =
503 (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
504 unaligned_uint32_t *word_dst_addr =
505 (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
507 return (word_src_addr[0] ^ word_dst_addr[0]) ^
508 (word_src_addr[1] ^ word_dst_addr[1]) ^
509 (word_src_addr[2] ^ word_dst_addr[2]) ^
510 (word_src_addr[3] ^ word_dst_addr[3]);
514 xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
516 struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
518 uint32_t hash = ether_hash(eth_hdr);
520 return (hash ^= hash >> 8) % slave_count;
524 xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
526 struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
527 uint16_t proto = eth_hdr->ether_type;
528 size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
529 uint32_t hash, l3hash = 0;
531 hash = ether_hash(eth_hdr);
533 if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
534 struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
535 ((char *)(eth_hdr + 1) + vlan_offset);
536 l3hash = ipv4_hash(ipv4_hdr);
538 } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
539 struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
540 ((char *)(eth_hdr + 1) + vlan_offset);
541 l3hash = ipv6_hash(ipv6_hdr);
544 hash = hash ^ l3hash;
548 return hash % slave_count;
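/*
 * Layer 3+4 policy: XOR the L3 address hash with a hash of the TCP/UDP
 * ports. A fragmented IPv4 packet carries no L4 header, so only its L3
 * hash contributes.
 */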
552 xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
554 struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
555 uint16_t proto = eth_hdr->ether_type;
556 size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
558 struct udp_hdr *udp_hdr = NULL;
559 struct tcp_hdr *tcp_hdr = NULL;
560 uint32_t hash, l3hash = 0, l4hash = 0;
562 if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
563 struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
564 ((char *)(eth_hdr + 1) + vlan_offset);
565 size_t ip_hdr_offset;
567 l3hash = ipv4_hash(ipv4_hdr);
569 /* there is no L4 header in a fragmented packet */
570 if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
571 ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
574 if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
575 tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
577 l4hash = HASH_L4_PORTS(tcp_hdr);
578 } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
579 udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
581 l4hash = HASH_L4_PORTS(udp_hdr);
584 } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
585 struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
586 ((char *)(eth_hdr + 1) + vlan_offset);
587 l3hash = ipv6_hash(ipv6_hdr);
589 if (ipv6_hdr->proto == IPPROTO_TCP) {
590 tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
591 l4hash = HASH_L4_PORTS(tcp_hdr);
592 } else if (ipv6_hdr->proto == IPPROTO_UDP) {
593 udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
594 l4hash = HASH_L4_PORTS(udp_hdr);
598 hash = l3hash ^ l4hash;
602 return hash % slave_count;
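/*
 * Per-slave bandwidth gauge for TLB mode: the amount of link capacity
 * left unused is stored as an integer quotient plus remainder so that
 * bandwidth_cmp() below can order slaves with equal quotients.
 */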
606 uint64_t bwg_left_int;
607 uint64_t bwg_left_remainder;
612 bond_tlb_activate_slave(struct bond_dev_private *internals) {
615 for (i = 0; i < internals->active_slave_count; i++) {
616 tlb_last_obytets[internals->active_slaves[i]] = 0;
621 bandwidth_cmp(const void *a, const void *b)
623 const struct bwg_slave *bwg_a = a;
624 const struct bwg_slave *bwg_b = b;
625 int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
626 int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
627 (int64_t)bwg_a->bwg_left_remainder;
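/*
 * Rough gauge of how much of a slave's link capacity went unused: the
 * negotiated link speed (Mbps) is converted to bytes and scaled to the
 * elapsed monitoring interval, then the bytes actually transmitted (load)
 * are subtracted; the quotient and remainder over the capacity feed
 * bandwidth_cmp() above.
 */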
641 bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
642 struct bwg_slave *bwg_slave)
644 struct rte_eth_link link_status;
646 rte_eth_link_get(port_id, &link_status);
647 uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
650 link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
651 bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
652 bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
656 bond_ethdev_update_tlb_slave_cb(void *arg)
658 struct bond_dev_private *internals = arg;
659 struct rte_eth_stats slave_stats;
660 struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
664 uint8_t update_stats = 0;
667 internals->slave_update_idx++;
670 if (internals->slave_update_idx >= REORDER_PERIOD_MS)
673 for (i = 0; i < internals->active_slave_count; i++) {
674 slave_id = internals->active_slaves[i];
675 rte_eth_stats_get(slave_id, &slave_stats);
676 tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
677 bandwidth_left(slave_id, tx_bytes,
678 internals->slave_update_idx, &bwg_array[i]);
679 bwg_array[i].slave = slave_id;
682 tlb_last_obytets[slave_id] = slave_stats.obytes;
686 if (update_stats == 1)
687 internals->slave_update_idx = 0;
690 qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
691 for (i = 0; i < slave_count; i++)
692 internals->tlb_slaves_order[i] = bwg_array[i].slave;
694 rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
695 (struct bond_dev_private *)internals);
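/*
 * Mode 5 (TLB) TX burst: walk the slaves in tlb_slaves_order (most spare
 * bandwidth first) and, for each packet still carrying the primary
 * slave's source MAC, substitute the transmitting slave's MAC before
 * sending.
 */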
699 bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
701 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
702 struct bond_dev_private *internals = bd_tx_q->dev_private;
704 struct rte_eth_dev *primary_port =
705 &rte_eth_devices[internals->primary_port];
706 uint16_t num_tx_total = 0;
709 uint8_t num_of_slaves = internals->active_slave_count;
710 uint8_t slaves[RTE_MAX_ETHPORTS];
712 struct ether_hdr *ether_hdr;
713 struct ether_addr primary_slave_addr;
714 struct ether_addr active_slave_addr;
716 if (num_of_slaves < 1)
719 memcpy(slaves, internals->tlb_slaves_order,
720 sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
723 ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
726 for (i = 0; i < 3; i++)
727 rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
730 for (i = 0; i < num_of_slaves; i++) {
731 rte_eth_macaddr_get(slaves[i], &active_slave_addr);
732 for (j = num_tx_total; j < nb_pkts; j++) {
734 rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
736 ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
737 if (is_same_ether_addr(ðer_hdr->s_addr, &primary_slave_addr))
738 ether_addr_copy(&active_slave_addr, ðer_hdr->s_addr);
739 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
740 mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
744 num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
745 bufs + num_tx_total, nb_pkts - num_tx_total);
747 if (num_tx_total == nb_pkts)
755 bond_tlb_disable(struct bond_dev_private *internals)
757 rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
761 bond_tlb_enable(struct bond_dev_private *internals)
763 bond_ethdev_update_tlb_slave_cb(internals);
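/*
 * Mode 6 (ALB) TX burst: ARP packets are assigned to slaves by the ALB
 * client table and sent with that slave's MAC as source; all other
 * traffic falls through to the TLB policy. If the client table is marked
 * "need to transmit" (ntt), ARP update packets are generated and sent
 * here as well.
 */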
767 bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
769 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
770 struct bond_dev_private *internals = bd_tx_q->dev_private;
772 struct ether_hdr *eth_h;
773 uint16_t ether_type, offset;
775 struct client_data *client_info;
778 * We create transmit buffers for every slave and one additional buffer to
779 * send through TLB. In the worst case every packet will be sent on one port.
781 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
782 uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
785 * We create separate transmit buffers for update packets as they won't be
786 * counted in num_tx_total.
788 struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
789 uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
791 struct rte_mbuf *upd_pkt;
794 uint16_t num_send, num_not_send = 0;
795 uint16_t num_tx_total = 0;
800 /* Search tx buffer for ARP packets and forward them to alb */
801 for (i = 0; i < nb_pkts; i++) {
802 eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
803 ether_type = eth_h->ether_type;
804 offset = get_vlan_offset(eth_h, ðer_type);
806 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
807 slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
809 /* Change src mac in eth header */
810 rte_eth_macaddr_get(slave_idx, ð_h->s_addr);
812 /* Add packet to slave tx buffer */
813 slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
814 slave_bufs_pkts[slave_idx]++;
816 /* If packet is not ARP, send it with TLB policy */
817 slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
819 slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
823 /* Update connected client ARP tables */
824 if (internals->mode6.ntt) {
825 for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
826 client_info = &internals->mode6.client_table[i];
828 if (client_info->in_use) {
829 /* Allocate new packet to send ARP update on current slave */
830 upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
831 if (upd_pkt == NULL) {
832 RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
835 pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
836 + client_info->vlan_count * sizeof(struct vlan_hdr);
837 upd_pkt->data_len = pkt_size;
838 upd_pkt->pkt_len = pkt_size;
840 slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
843 /* Add packet to update tx buffer */
844 update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
845 update_bufs_pkts[slave_idx]++;
848 internals->mode6.ntt = 0;
851 /* Send ARP packets on proper slaves */
852 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
853 if (slave_bufs_pkts[i] > 0) {
854 num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
855 slave_bufs[i], slave_bufs_pkts[i]);
856 for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
857 bufs[nb_pkts - 1 - num_not_send - j] =
858 slave_bufs[i][nb_pkts - 1 - j];
861 num_tx_total += num_send;
862 num_not_send += slave_bufs_pkts[i] - num_send;
864 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
865 /* Print TX stats including update packets */
866 for (j = 0; j < slave_bufs_pkts[i]; j++) {
867 eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
868 mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
874 /* Send update packets on proper slaves */
875 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
876 if (update_bufs_pkts[i] > 0) {
877 num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
878 update_bufs_pkts[i]);
879 for (j = num_send; j < update_bufs_pkts[i]; j++) {
880 rte_pktmbuf_free(update_bufs[i][j]);
882 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
883 for (j = 0; j < update_bufs_pkts[i]; j++) {
884 eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
885 mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
891 /* Send non-ARP packets using tlb policy */
892 if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
893 num_send = bond_ethdev_tx_burst_tlb(queue,
894 slave_bufs[RTE_MAX_ETHPORTS],
895 slave_bufs_pkts[RTE_MAX_ETHPORTS]);
897 for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
898 bufs[nb_pkts - 1 - num_not_send - j] =
899 slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
902 num_tx_total += num_send;
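/*
 * Mode 2 (balance) TX burst: each packet is assigned to a slave by the
 * configured xmit_hash policy (L2, L2+3 or L3+4), the per-slave arrays
 * are burst out, and any packets a slave failed to accept are moved to
 * the tail of bufs for the caller.
 */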
909 bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
912 struct bond_dev_private *internals;
913 struct bond_tx_queue *bd_tx_q;
915 uint8_t num_of_slaves;
916 uint8_t slaves[RTE_MAX_ETHPORTS];
918 uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
922 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
923 uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
925 bd_tx_q = (struct bond_tx_queue *)queue;
926 internals = bd_tx_q->dev_private;
928 /* Copy slave list to protect against slave up/down changes during tx bursts */
930 num_of_slaves = internals->active_slave_count;
931 memcpy(slaves, internals->active_slaves,
932 sizeof(internals->active_slaves[0]) * num_of_slaves);
934 if (num_of_slaves < 1)
937 /* Populate each slave's mbuf array with the packets to be sent on it */
938 for (i = 0; i < nb_pkts; i++) {
939 /* Select output slave using hash based on xmit policy */
940 op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);
942 /* Populate slave mbuf arrays with mbufs for that slave */
943 slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
946 /* Send packet burst on each slave device */
947 for (i = 0; i < num_of_slaves; i++) {
948 if (slave_nb_pkts[i] > 0) {
949 num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
950 slave_bufs[i], slave_nb_pkts[i]);
952 /* if tx burst fails move packets to end of bufs */
953 if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
954 int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;
956 tx_fail_total += slave_tx_fail_count;
957 memcpy(&bufs[nb_pkts - tx_fail_total],
958 &slave_bufs[i][num_tx_slave],
959 slave_tx_fail_count * sizeof(bufs[0]));
962 num_tx_total += num_tx_slave;
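/*
 * Mode 4 (802.3ad) TX burst: first drain each slave's tx_ring of slow
 * (LACP/marker) packets, then spread data packets with the xmit hash over
 * only the slaves currently in DISTRIBUTING state. Slow packets that fail
 * to send are freed rather than handed back to the caller.
 */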
970 bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
973 struct bond_dev_private *internals;
974 struct bond_tx_queue *bd_tx_q;
976 uint8_t num_of_slaves;
977 uint8_t slaves[RTE_MAX_ETHPORTS];
978 /* positions in the slaves array, not port IDs */
979 uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
980 uint8_t distributing_count;
982 uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
983 uint16_t i, j, op_slave_idx;
984 const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;
986 /* Allocate extra space for slow-protocol packets in 802.3ad mode. */
987 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
988 void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };
990 /* Total number of packets in slave_bufs */
991 uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
992 /* Number of slow packets placed in each slave's buffer */
993 uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
995 bd_tx_q = (struct bond_tx_queue *)queue;
996 internals = bd_tx_q->dev_private;
998 /* Copy slave list to protect against slave up/down changes during tx bursts */
1000 num_of_slaves = internals->active_slave_count;
1001 if (num_of_slaves < 1)
1002 return num_tx_total;
1004 memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
1006 distributing_count = 0;
1007 for (i = 0; i < num_of_slaves; i++) {
1008 struct port *port = &mode_8023ad_ports[slaves[i]];
1010 slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
1011 slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
1012 slave_nb_pkts[i] = slave_slow_nb_pkts[i];
1014 for (j = 0; j < slave_slow_nb_pkts[i]; j++)
1015 slave_bufs[i][j] = slow_pkts[j];
1017 if (ACTOR_STATE(port, DISTRIBUTING))
1018 distributing_offsets[distributing_count++] = i;
1021 if (likely(distributing_count > 0)) {
1022 /* Populate each slave's mbuf array with the packets to be sent on it */
1023 for (i = 0; i < nb_pkts; i++) {
1024 /* Select output slave using hash based on xmit policy */
1025 op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);
1027 /* Populate slave mbuf arrays with mbufs for that slave. Use only
1028 * slaves that are currently distributing. */
1029 uint8_t slave_offset = distributing_offsets[op_slave_idx];
1030 slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
1031 slave_nb_pkts[slave_offset]++;
1035 /* Send packet burst on each slave device */
1036 for (i = 0; i < num_of_slaves; i++) {
1037 if (slave_nb_pkts[i] == 0)
1040 num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
1041 slave_bufs[i], slave_nb_pkts[i]);
1043 /* If tx burst fails drop slow packets */
1044 for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
1045 rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);
1047 num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
1048 num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;
1050 /* If tx burst fails move packets to end of bufs */
1051 if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
1052 uint16_t j = nb_pkts - num_tx_fail_total;
1053 for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
1054 bufs[j] = slave_bufs[i][num_tx_slave];
1058 return num_tx_total;
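/*
 * Mode 3 (broadcast) TX burst: every mbuf's refcount is raised by
 * (num_of_slaves - 1) and the whole burst is sent on each active slave;
 * on partial failure only the most successful slave's count is returned
 * and the surplus references of the others are freed.
 */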
1062 bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
1065 struct bond_dev_private *internals;
1066 struct bond_tx_queue *bd_tx_q;
1068 uint8_t tx_failed_flag = 0, num_of_slaves;
1069 uint8_t slaves[RTE_MAX_ETHPORTS];
1071 uint16_t max_nb_of_tx_pkts = 0;
1073 int slave_tx_total[RTE_MAX_ETHPORTS];
1074 int i, most_successful_tx_slave = -1;
1076 bd_tx_q = (struct bond_tx_queue *)queue;
1077 internals = bd_tx_q->dev_private;
1079 /* Copy slave list to protect against slave up/down changes during tx bursts */
1081 num_of_slaves = internals->active_slave_count;
1082 memcpy(slaves, internals->active_slaves,
1083 sizeof(internals->active_slaves[0]) * num_of_slaves);
1085 if (num_of_slaves < 1)
1088 /* Increment reference count on mbufs */
1089 for (i = 0; i < nb_pkts; i++)
1090 rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
1092 /* Transmit burst on each active slave */
1093 for (i = 0; i < num_of_slaves; i++) {
1094 slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
1097 if (unlikely(slave_tx_total[i] < nb_pkts))
1100 /* record the value and slave index for the slave which transmits the
1101 * maximum number of packets */
1102 if (slave_tx_total[i] > max_nb_of_tx_pkts) {
1103 max_nb_of_tx_pkts = slave_tx_total[i];
1104 most_successful_tx_slave = i;
1108 /* if slaves fail to transmit packets from burst, the calling application
1109 * is not expected to know about multiple references to packets so we must
1110 * handle failures of all packets except those of the most successful slave */
1112 if (unlikely(tx_failed_flag))
1113 for (i = 0; i < num_of_slaves; i++)
1114 if (i != most_successful_tx_slave)
1115 while (slave_tx_total[i] < nb_pkts)
1116 rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
1118 return max_nb_of_tx_pkts;
1122 link_properties_set(struct rte_eth_dev *bonded_eth_dev,
1123 struct rte_eth_link *slave_dev_link)
1125 struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
1126 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1128 if (slave_dev_link->link_status &&
1129 bonded_eth_dev->data->dev_started) {
1130 bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
1131 bonded_dev_link->link_speed = slave_dev_link->link_speed;
1133 internals->link_props_set = 1;
1138 link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
1140 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1142 memset(&(bonded_eth_dev->data->dev_link), 0,
1143 sizeof(bonded_eth_dev->data->dev_link));
1145 internals->link_props_set = 0;
1149 link_properties_valid(struct rte_eth_link *bonded_dev_link,
1150 struct rte_eth_link *slave_dev_link)
1152 if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
1153 bonded_dev_link->link_speed != slave_dev_link->link_speed)
1160 mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
1162 struct ether_addr *mac_addr;
1164 if (eth_dev == NULL) {
1165 RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
1169 if (dst_mac_addr == NULL) {
1170 RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
1174 mac_addr = eth_dev->data->mac_addrs;
1176 ether_addr_copy(mac_addr, dst_mac_addr);
1181 mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
1183 struct ether_addr *mac_addr;
1185 if (eth_dev == NULL) {
1186 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1190 if (new_mac_addr == NULL) {
1191 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1195 mac_addr = eth_dev->data->mac_addrs;
1197 /* If new MAC is different from the current MAC then update it */
1198 if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1199 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1205 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1207 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1210 /* Update slave devices MAC addresses */
1211 if (internals->slave_count < 1)
1214 switch (internals->mode) {
1215 case BONDING_MODE_ROUND_ROBIN:
1216 case BONDING_MODE_BALANCE:
1217 case BONDING_MODE_BROADCAST:
1218 for (i = 0; i < internals->slave_count; i++) {
1219 if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
1220 bonded_eth_dev->data->mac_addrs)) {
1221 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1222 internals->slaves[i].port_id);
1227 case BONDING_MODE_8023AD:
1228 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1230 case BONDING_MODE_ACTIVE_BACKUP:
1231 case BONDING_MODE_TLB:
1232 case BONDING_MODE_ALB:
1234 for (i = 0; i < internals->slave_count; i++) {
1235 if (internals->slaves[i].port_id ==
1236 internals->current_primary_port) {
1237 if (mac_address_set(&rte_eth_devices[internals->primary_port],
1238 bonded_eth_dev->data->mac_addrs)) {
1239 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1240 internals->current_primary_port);
1244 if (mac_address_set(
1245 &rte_eth_devices[internals->slaves[i].port_id],
1246 &internals->slaves[i].persisted_mac_addr)) {
1247 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1248 internals->slaves[i].port_id);
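/*
 * Install the RX/TX burst handlers that implement the requested bonding
 * mode; 802.3ad and ALB additionally enable their per-mode state first.
 */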
1259 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
1261 struct bond_dev_private *internals;
1263 internals = eth_dev->data->dev_private;
1266 case BONDING_MODE_ROUND_ROBIN:
1267 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1268 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1270 case BONDING_MODE_ACTIVE_BACKUP:
1271 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1272 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1274 case BONDING_MODE_BALANCE:
1275 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1276 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1278 case BONDING_MODE_BROADCAST:
1279 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1280 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1282 case BONDING_MODE_8023AD:
1283 if (bond_mode_8023ad_enable(eth_dev) != 0)
1286 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1287 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1288 RTE_LOG(WARNING, PMD,
1289 "Using mode 4, it is necessary to invoke TX and RX bursts "
1290 "at least every 100ms.\n");
1292 case BONDING_MODE_TLB:
1293 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1294 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1296 case BONDING_MODE_ALB:
1297 if (bond_mode_alb_enable(eth_dev) != 0)
1300 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1301 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1307 internals->mode = mode;
1313 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1314 struct rte_eth_dev *slave_eth_dev)
1316 struct bond_rx_queue *bd_rx_q;
1317 struct bond_tx_queue *bd_tx_q;
1323 rte_eth_dev_stop(slave_eth_dev->data->port_id);
1325 /* Enable interrupts on slave device if supported */
1326 if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1327 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1329 /* If RSS is enabled for bonding, try to enable it for slaves */
1330 if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1331 if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
1333 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1334 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
1335 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1336 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
1338 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1341 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1342 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1343 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1344 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1347 slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
1348 bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;
1350 /* Configure device */
1351 errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1352 bonded_eth_dev->data->nb_rx_queues,
1353 bonded_eth_dev->data->nb_tx_queues,
1354 &(slave_eth_dev->data->dev_conf));
1356 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
1357 slave_eth_dev->data->port_id, errval);
1361 /* Setup Rx Queues */
1362 for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1363 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1365 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1366 bd_rx_q->nb_rx_desc,
1367 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1368 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1371 "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1372 slave_eth_dev->data->port_id, q_id, errval);
1377 /* Setup Tx Queues */
1378 for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1379 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1381 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1382 bd_tx_q->nb_tx_desc,
1383 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1387 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1388 slave_eth_dev->data->port_id, q_id, errval);
1394 errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1396 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1397 slave_eth_dev->data->port_id, errval);
1401 /* If RSS is enabled for bonding, synchronize RETA */
1402 if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1404 struct bond_dev_private *internals;
1406 internals = bonded_eth_dev->data->dev_private;
1408 for (i = 0; i < internals->slave_count; i++) {
1409 if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1410 errval = rte_eth_dev_rss_reta_update(
1411 slave_eth_dev->data->port_id,
1412 &internals->reta_conf[0],
1413 internals->slaves[i].reta_size);
1415 RTE_LOG(WARNING, PMD,
1416 "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
1417 " RSS Configuration for bonding may be inconsistent.\n",
1418 slave_eth_dev->data->port_id, errval);
1425 /* If lsc interrupt is set, check initial slave's link status */
1426 if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1427 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1428 RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
1434 slave_remove(struct bond_dev_private *internals,
1435 struct rte_eth_dev *slave_eth_dev)
1439 for (i = 0; i < internals->slave_count; i++)
1440 if (internals->slaves[i].port_id ==
1441 slave_eth_dev->data->port_id)
1444 if (i < (internals->slave_count - 1))
1445 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1446 sizeof(internals->slaves[0]) *
1447 (internals->slave_count - i - 1));
1449 internals->slave_count--;
1451 /* force reconfiguration of slave interfaces */
1452 _rte_eth_dev_reset(slave_eth_dev);
1456 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1459 slave_add(struct bond_dev_private *internals,
1460 struct rte_eth_dev *slave_eth_dev)
1462 struct bond_slave_details *slave_details =
1463 &internals->slaves[internals->slave_count];
1465 slave_details->port_id = slave_eth_dev->data->port_id;
1466 slave_details->last_link_status = 0;
1468 /* Mark slave devices that don't support interrupts so we can
1469 * compensate when we start the bond */
1471 if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1472 slave_details->link_status_poll_enabled = 1;
1475 slave_details->link_status_wait_to_complete = 0;
1476 /* clean tlb_last_obytets when adding port for bonding device */
1477 memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1478 sizeof(struct ether_addr));
1482 bond_ethdev_primary_set(struct bond_dev_private *internals,
1483 uint8_t slave_port_id)
1487 if (internals->active_slave_count < 1)
1488 internals->current_primary_port = slave_port_id;
1490 /* Search bonded device slave ports for new proposed primary port */
1491 for (i = 0; i < internals->active_slave_count; i++) {
1492 if (internals->active_slaves[i] == slave_port_id)
1493 internals->current_primary_port = slave_port_id;
1498 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1501 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1503 struct bond_dev_private *internals;
1506 /* slave eth dev will be started by bonded device */
1507 if (check_for_bonded_ethdev(eth_dev)) {
1508 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1509 eth_dev->data->port_id);
1513 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1514 eth_dev->data->dev_started = 1;
1516 internals = eth_dev->data->dev_private;
1518 if (internals->slave_count == 0) {
1519 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1523 if (internals->user_defined_mac == 0) {
1524 struct ether_addr *new_mac_addr = NULL;
1526 for (i = 0; i < internals->slave_count; i++)
1527 if (internals->slaves[i].port_id == internals->primary_port)
1528 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1530 if (new_mac_addr == NULL)
1533 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1534 RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1535 eth_dev->data->port_id);
1540 /* Update all slave devices' MACs */
1541 if (mac_address_slaves_update(eth_dev) != 0)
1544 /* If bonded device is configured in promiscuous mode then re-apply config */
1545 if (internals->promiscuous_en)
1546 bond_ethdev_promiscuous_enable(eth_dev);
1548 /* Reconfigure each slave device if starting bonded device */
1549 for (i = 0; i < internals->slave_count; i++) {
1550 if (slave_configure(eth_dev,
1551 &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
1553 "bonded port (%d) failed to reconfigure slave device (%d)",
1554 eth_dev->data->port_id, internals->slaves[i].port_id);
1557 /* We will need to poll for link status if any slave doesn't
1558 * support interrupts */
1560 if (internals->slaves[i].link_status_poll_enabled)
1561 internals->link_status_polling_enabled = 1;
1563 /* start polling if needed */
1564 if (internals->link_status_polling_enabled) {
1566 internals->link_status_polling_interval_ms * 1000,
1567 bond_ethdev_slave_link_status_change_monitor,
1568 (void *)&rte_eth_devices[internals->port_id]);
1571 if (internals->user_defined_primary_port)
1572 bond_ethdev_primary_set(internals, internals->primary_port);
1574 if (internals->mode == BONDING_MODE_8023AD)
1575 bond_mode_8023ad_start(eth_dev);
1577 if (internals->mode == BONDING_MODE_TLB ||
1578 internals->mode == BONDING_MODE_ALB)
1579 bond_tlb_enable(internals);
1585 bond_ethdev_free_queues(struct rte_eth_dev *dev)
1589 if (dev->data->rx_queues != NULL) {
1590 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1591 rte_free(dev->data->rx_queues[i]);
1592 dev->data->rx_queues[i] = NULL;
1594 dev->data->nb_rx_queues = 0;
1597 if (dev->data->tx_queues != NULL) {
1598 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1599 rte_free(dev->data->tx_queues[i]);
1600 dev->data->tx_queues[i] = NULL;
1602 dev->data->nb_tx_queues = 0;
1607 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
1609 struct bond_dev_private *internals = eth_dev->data->dev_private;
1612 if (internals->mode == BONDING_MODE_8023AD) {
1616 bond_mode_8023ad_stop(eth_dev);
1618 /* Discard all messages to/from mode 4 state machines */
1619 for (i = 0; i < internals->active_slave_count; i++) {
1620 port = &mode_8023ad_ports[internals->active_slaves[i]];
1622 RTE_ASSERT(port->rx_ring != NULL);
1623 while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
1624 rte_pktmbuf_free(pkt);
1626 RTE_ASSERT(port->tx_ring != NULL);
1627 while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
1628 rte_pktmbuf_free(pkt);
1632 if (internals->mode == BONDING_MODE_TLB ||
1633 internals->mode == BONDING_MODE_ALB) {
1634 bond_tlb_disable(internals);
1635 for (i = 0; i < internals->active_slave_count; i++)
1636 tlb_last_obytets[internals->active_slaves[i]] = 0;
1639 internals->active_slave_count = 0;
1640 internals->link_status_polling_enabled = 0;
1641 for (i = 0; i < internals->slave_count; i++)
1642 internals->slaves[i].last_link_status = 0;
1644 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1645 eth_dev->data->dev_started = 0;
1649 bond_ethdev_close(struct rte_eth_dev *dev)
1651 struct bond_dev_private *internals = dev->data->dev_private;
1653 bond_ethdev_free_queues(dev);
1654 rte_bitmap_reset(internals->vlan_filter_bmp);
1657 /* forward declaration */
1658 static int bond_ethdev_configure(struct rte_eth_dev *dev);
1661 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1663 struct bond_dev_private *internals = dev->data->dev_private;
1665 dev_info->max_mac_addrs = 1;
1667 dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
1668 internals->candidate_max_rx_pktlen : 2048;
1670 dev_info->max_rx_queues = (uint16_t)128;
1671 dev_info->max_tx_queues = (uint16_t)512;
1673 dev_info->min_rx_bufsize = 0;
1675 dev_info->rx_offload_capa = internals->rx_offload_capa;
1676 dev_info->tx_offload_capa = internals->tx_offload_capa;
1677 dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
1679 dev_info->reta_size = internals->reta_size;
1683 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1687 struct bond_dev_private *internals = dev->data->dev_private;
1689 /* don't do this while a slave is being added */
1690 rte_spinlock_lock(&internals->lock);
1693 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
1695 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
1697 for (i = 0; i < internals->slave_count; i++) {
1698 uint8_t port_id = internals->slaves[i].port_id;
1700 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1702 RTE_LOG(WARNING, PMD,
1703 "Setting VLAN filter on slave port %u not supported.\n",
1707 rte_spinlock_unlock(&internals->lock);
1712 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1713 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
1714 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
1716 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
1717 rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
1718 0, dev->data->numa_node);
1719 if (bd_rx_q == NULL)
1722 bd_rx_q->queue_id = rx_queue_id;
1723 bd_rx_q->dev_private = dev->data->dev_private;
1725 bd_rx_q->nb_rx_desc = nb_rx_desc;
1727 memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
1728 bd_rx_q->mb_pool = mb_pool;
1730 dev->data->rx_queues[rx_queue_id] = bd_rx_q;
1736 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1737 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
1738 const struct rte_eth_txconf *tx_conf)
1740 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
1741 rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
1742 0, dev->data->numa_node);
1744 if (bd_tx_q == NULL)
1747 bd_tx_q->queue_id = tx_queue_id;
1748 bd_tx_q->dev_private = dev->data->dev_private;
1750 bd_tx_q->nb_tx_desc = nb_tx_desc;
1751 memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
1753 dev->data->tx_queues[tx_queue_id] = bd_tx_q;
1759 bond_ethdev_rx_queue_release(void *queue)
1768 bond_ethdev_tx_queue_release(void *queue)
1777 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
1779 struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
1780 struct bond_dev_private *internals;
1782 /* Default value for polling slave found is true as we don't want to
1783 * disable the polling thread if we cannot get the lock */
1784 int i, polling_slave_found = 1;
1789 bonded_ethdev = (struct rte_eth_dev *)cb_arg;
1790 internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
1792 if (!bonded_ethdev->data->dev_started ||
1793 !internals->link_status_polling_enabled)
1796 /* If device is currently being configured then don't check the slaves'
1797 * link status; wait until the next period */
1798 if (rte_spinlock_trylock(&internals->lock)) {
1799 if (internals->slave_count > 0)
1800 polling_slave_found = 0;
1802 for (i = 0; i < internals->slave_count; i++) {
1803 if (!internals->slaves[i].link_status_poll_enabled)
1806 slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
1807 polling_slave_found = 1;
1809 /* Update slave link status */
1810 (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
1811 internals->slaves[i].link_status_wait_to_complete);
1813 /* if link status has changed since last checked then call the lsc event callback */
1815 if (slave_ethdev->data->dev_link.link_status !=
1816 internals->slaves[i].last_link_status) {
1817 internals->slaves[i].last_link_status =
1818 slave_ethdev->data->dev_link.link_status;
1820 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
1821 RTE_ETH_EVENT_INTR_LSC,
1822 &bonded_ethdev->data->port_id);
1825 rte_spinlock_unlock(&internals->lock);
1828 if (polling_slave_found)
1829 /* Set alarm to continue monitoring link status of slave ethdev's */
1830 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
1831 bond_ethdev_slave_link_status_change_monitor, cb_arg);
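/* The bonded port reports link up as soon as any active slave is up. */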
1835 bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
1836 int wait_to_complete)
1838 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1840 if (!bonded_eth_dev->data->dev_started ||
1841 internals->active_slave_count == 0) {
1842 bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1845 struct rte_eth_dev *slave_eth_dev;
1848 for (i = 0; i < internals->active_slave_count; i++) {
1849 slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];
1851 (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
1853 if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
1859 bonded_eth_dev->data->dev_link.link_status = link_up;
1866 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1868 struct bond_dev_private *internals = dev->data->dev_private;
1869 struct rte_eth_stats slave_stats;
1872 for (i = 0; i < internals->slave_count; i++) {
1873 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
1875 stats->ipackets += slave_stats.ipackets;
1876 stats->opackets += slave_stats.opackets;
1877 stats->ibytes += slave_stats.ibytes;
1878 stats->obytes += slave_stats.obytes;
1879 stats->imissed += slave_stats.imissed;
1880 stats->ierrors += slave_stats.ierrors;
1881 stats->oerrors += slave_stats.oerrors;
1882 stats->rx_nombuf += slave_stats.rx_nombuf;
1884 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
1885 stats->q_ipackets[j] += slave_stats.q_ipackets[j];
1886 stats->q_opackets[j] += slave_stats.q_opackets[j];
1887 stats->q_ibytes[j] += slave_stats.q_ibytes[j];
1888 stats->q_obytes[j] += slave_stats.q_obytes[j];
1889 stats->q_errors[j] += slave_stats.q_errors[j];
1896 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
1898 struct bond_dev_private *internals = dev->data->dev_private;
1901 for (i = 0; i < internals->slave_count; i++)
1902 rte_eth_stats_reset(internals->slaves[i].port_id);
1906 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1908 struct bond_dev_private *internals = eth_dev->data->dev_private;
1911 internals->promiscuous_en = 1;
1913 switch (internals->mode) {
1914 /* Promiscuous mode is propagated to all slaves */
1915 case BONDING_MODE_ROUND_ROBIN:
1916 case BONDING_MODE_BALANCE:
1917 case BONDING_MODE_BROADCAST:
1918 for (i = 0; i < internals->slave_count; i++)
1919 rte_eth_promiscuous_enable(internals->slaves[i].port_id);
1921 /* In mode 4, promiscuous mode is managed when slaves are added/removed */
1922 case BONDING_MODE_8023AD:
1924 /* Promiscuous mode is propagated only to primary slave */
1925 case BONDING_MODE_ACTIVE_BACKUP:
1926 case BONDING_MODE_TLB:
1927 case BONDING_MODE_ALB:
1929 rte_eth_promiscuous_enable(internals->current_primary_port);
1934 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
1936 struct bond_dev_private *internals = dev->data->dev_private;
1939 internals->promiscuous_en = 0;
1941 switch (internals->mode) {
1942 /* Promiscuous mode is propagated to all slaves */
1943 case BONDING_MODE_ROUND_ROBIN:
1944 case BONDING_MODE_BALANCE:
1945 case BONDING_MODE_BROADCAST:
1946 for (i = 0; i < internals->slave_count; i++)
1947 rte_eth_promiscuous_disable(internals->slaves[i].port_id);
1949 /* In mode 4, promiscuous mode is managed when slaves are added/removed */
1950 case BONDING_MODE_8023AD:
1952 /* Promiscuous mode is propagated only to primary slave */
1953 case BONDING_MODE_ACTIVE_BACKUP:
1954 case BONDING_MODE_TLB:
1955 case BONDING_MODE_ALB:
1957 rte_eth_promiscuous_disable(internals->current_primary_port);
1962 bond_ethdev_delayed_lsc_propagation(void *arg)
1967 _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
1968 RTE_ETH_EVENT_INTR_LSC, NULL);
1972 bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
1975 struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
1976 struct bond_dev_private *internals;
1977 struct rte_eth_link link;
1979 int i, valid_slave = 0;
1981 uint8_t lsc_flag = 0;
1983 if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
1986 bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
1987 slave_eth_dev = &rte_eth_devices[port_id];
1989 if (check_for_bonded_ethdev(bonded_eth_dev))
1992 internals = bonded_eth_dev->data->dev_private;
1994 /* If the device isn't started don't handle interrupts */
1995 if (!bonded_eth_dev->data->dev_started)
1998 /* verify that port_id is a valid slave of bonded port */
1999 for (i = 0; i < internals->slave_count; i++) {
2000 if (internals->slaves[i].port_id == port_id) {
2009 /* Search for port in active port list */
2010 active_pos = find_slave_by_id(internals->active_slaves,
2011 internals->active_slave_count, port_id);
2013 rte_eth_link_get_nowait(port_id, &link);
2014 if (link.link_status) {
2015 if (active_pos < internals->active_slave_count)
2018 /* if no active slave ports then set this port to be primary port */
2019 if (internals->active_slave_count < 1) {
2020 /* If first active slave, then change link status */
2021 bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
2022 internals->current_primary_port = port_id;
2025 mac_address_slaves_update(bonded_eth_dev);
2027 /* Inherit eth dev link properties from first active slave */
2028 link_properties_set(bonded_eth_dev,
2029 &(slave_eth_dev->data->dev_link));
2031 if (link_properties_valid(
2032 &bonded_eth_dev->data->dev_link, &link) != 0) {
2033 slave_eth_dev->data->dev_flags &=
2034 (~RTE_ETH_DEV_BONDED_SLAVE);
2036 "port %u invalid speed/duplex\n",
2042 activate_slave(bonded_eth_dev, port_id);
2044 /* If user has defined the primary port then default to using it */
2045 if (internals->user_defined_primary_port &&
2046 internals->primary_port == port_id)
2047 bond_ethdev_primary_set(internals, port_id);
2049 if (active_pos == internals->active_slave_count)
2052 /* Remove from active slave list */
2053 deactivate_slave(bonded_eth_dev, port_id);
2055 /* No active slaves, change link status to down and reset other
2056 * link properties */
2057 if (internals->active_slave_count < 1) {
2059 bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2061 link_properties_reset(bonded_eth_dev);
2064 /* Update primary id: take the first active slave from the list, or if
2065 * none is available fall back to the configured primary port */
2066 if (port_id == internals->current_primary_port) {
2067 if (internals->active_slave_count > 0)
2068 bond_ethdev_primary_set(internals,
2069 internals->active_slaves[0]);
2071 internals->current_primary_port = internals->primary_port;
2076 /* Cancel any possible outstanding interrupts if delays are enabled */
2077 if (internals->link_up_delay_ms > 0 ||
2078 internals->link_down_delay_ms > 0)
2079 rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2082 if (bonded_eth_dev->data->dev_link.link_status) {
2083 if (internals->link_up_delay_ms > 0)
2084 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2085 bond_ethdev_delayed_lsc_propagation,
2086 (void *)bonded_eth_dev);
2088 _rte_eth_dev_callback_process(bonded_eth_dev,
2089 RTE_ETH_EVENT_INTR_LSC, NULL);
2092 if (internals->link_down_delay_ms > 0)
2093 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2094 bond_ethdev_delayed_lsc_propagation,
2095 (void *)bonded_eth_dev);
2097 _rte_eth_dev_callback_process(bonded_eth_dev,
2098 RTE_ETH_EVENT_INTR_LSC, NULL);
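
/* RETA update: reta_size must match the bonded device's table size. Only
 * the first reta_size / RTE_RETA_GROUP_SIZE groups are copied from the
 * caller; the rest of the internal table is tiled with copies of that
 * prefix, so a slave with a larger table (e.g. 512 entries behind a
 * 128-entry bonded table) sees the same repeating pattern. */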
static int
bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	unsigned i, j;
	int result = 0;
	int slave_reta_size;
	unsigned reta_count;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	reta_count = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < reta_count; i++) {
		internals->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	/* Fill rest of array */
	for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
		memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
				sizeof(internals->reta_conf[0]) * reta_count);

	/* Propagate RETA over slaves */
	for (i = 0; i < internals->slave_count; i++) {
		slave_reta_size = internals->slaves[i].reta_size;
		result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
				&internals->reta_conf[0], slave_reta_size);
		if (result < 0)
			return result;
	}

	return 0;
}

static int
bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];

	return 0;
}
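
/* RSS hash update: the requested hash functions are masked down to
 * flow_type_rss_offloads (the subset supported by every slave) and the
 * optional key is stored, then both are pushed to each slave so that all
 * members hash identically. */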
static int
bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	int i, result = 0;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_rss_conf bond_rss_conf;

	memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));

	bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;

	if (bond_rss_conf.rss_hf != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;

	if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
			sizeof(internals->rss_key)) {
		/* A zero key length selects the default 40-byte key */
		if (bond_rss_conf.rss_key_len == 0)
			bond_rss_conf.rss_key_len = 40;
		internals->rss_key_len = bond_rss_conf.rss_key_len;
		memcpy(internals->rss_key, bond_rss_conf.rss_key,
				internals->rss_key_len);
	}

	for (i = 0; i < internals->slave_count; i++) {
		result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
				&bond_rss_conf);
		if (result < 0)
			return result;
	}

	return 0;
}
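
/* Report the currently configured RSS hash functions and key back to the
 * caller; the key is copied only if the caller supplied a buffer. */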
static int
bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_conf->rss_key_len = internals->rss_key_len;
	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, internals->rss_key,
				internals->rss_key_len);

	return 0;
}
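
/* Ethdev operations exposed by the bonding PMD; each handler applies the
 * request to the bonded device and fans it out to the slaves as required
 * by the current mode. */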
const struct eth_dev_ops default_dev_ops = {
	.dev_start = bond_ethdev_start,
	.dev_stop = bond_ethdev_stop,
	.dev_close = bond_ethdev_close,
	.dev_configure = bond_ethdev_configure,
	.dev_infos_get = bond_ethdev_info,
	.vlan_filter_set = bond_ethdev_vlan_filter_set,
	.rx_queue_setup = bond_ethdev_rx_queue_setup,
	.tx_queue_setup = bond_ethdev_tx_queue_setup,
	.rx_queue_release = bond_ethdev_rx_queue_release,
	.tx_queue_release = bond_ethdev_tx_queue_release,
	.link_update = bond_ethdev_link_update,
	.stats_get = bond_ethdev_stats_get,
	.stats_reset = bond_ethdev_stats_reset,
	.promiscuous_enable = bond_ethdev_promiscuous_enable,
	.promiscuous_disable = bond_ethdev_promiscuous_disable,
	.reta_update = bond_ethdev_rss_reta_update,
	.reta_query = bond_ethdev_rss_reta_query,
	.rss_hash_update = bond_ethdev_rss_hash_update,
	.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get
};
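
/* Probe entry point, invoked when the EAL instantiates the virtual
 * device. For example (illustrative PCI addresses), a mode 4 bond over
 * two slaves can be requested with:
 *
 *   --vdev 'net_bonding0,mode=4,slave=0000:00:04.0,slave=0000:00:05.0'
 *
 * The accepted keys are listed in RTE_PMD_REGISTER_PARAM_STRING() at the
 * end of this file. */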
static int
bond_probe(const char *name, const char *params)
{
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
					name);
			goto parse_error;
		}
	} else {
		RTE_LOG(ERR, EAL, "Mode must be specified exactly once for bonded "
				"device %s\n", name);
		goto parse_error;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_LOG(ERR, EAL, "Invalid socket id specified for "
					"bonded device %s\n", name);
			goto parse_error;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL, "Socket id can be specified only once for "
				"bonded device %s\n", name);
		goto parse_error;
	} else {
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
				"socket %u.\n", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL, "Created bonded device %s on port %d in mode %u on "
			"socket %u.\n", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);
	return -1;
}

static int
bond_remove(const char *name)
{
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);

	/* Free link bonding eth device */
	ret = rte_eth_bond_free(name);
	if (ret < 0)
		RTE_LOG(ERR, EAL, "Failed to free %s\n", name);

	return ret;
}

/* This runs at configure time, after all other physical and virtual devices
 * have been probed, so slave port ids given as devargs can be resolved */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count;
	uint8_t port_id = dev - rte_eth_devices;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	unsigned i, j;

	/* If RSS is enabled, fill table and key with default values */
	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
		memcpy(internals->rss_key, default_rss_key, 40);

		/* Default RETA spreads the rx queues round-robin over the table */
		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
			internals->reta_conf[i].mask = ~0LL;
			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
				internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
		}
	}

	/* Set the max_rx_pktlen */
	internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;

	/*
	 * If there is no kvlist, this bonded device was created through the
	 * bonding API rather than from devargs, so there is nothing to parse.
	 */
	if (kvlist == NULL)
		return 0;
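
	/* Each devargs key below follows the same pattern: count the
	 * occurrences, reject duplicates, parse the value with its kvarg
	 * handler, then apply it through the public rte_eth_bond_* API. */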
	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
						0) {
			RTE_LOG(INFO, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;
		unsigned i;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}

		/* Set primary slave port id */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}

		/* Set link status monitor polling interval */
		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}
static struct rte_vdev_driver pmd_bond_drv = {
	.probe = bond_probe,
	.remove = bond_remove,
};

RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);

RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
	"slave=<ifc> "
	"primary=<ifc> "
	"mode=[0-6] "
	"xmit_policy=[l2 | l23 | l34] "
	"socket_id=<int> "
	"mac=<mac addr> "
	"lsc_poll_period_ms=<int> "
	"up_delay=<int> "
	"down_delay=<int>");