/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <string.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
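/* Note on HASH_L4_PORTS: struct tcp_hdr and struct udp_hdr both begin with
 * src_port followed by dst_port, so the macro can be applied to either
 * header type when the layer 3+4 transmit hash is computed below. */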
/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
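/* Return the byte offset of the L3 header relative to the end of the
 * Ethernet header, skipping up to two stacked VLAN tags (QinQ), and rewrite
 * *proto to the EtherType found after the last VLAN header. */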
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}
	return vlan_offset;
}
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset into *bufs increases as packets are received
		 * from the other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
	}

	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
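/* Mode 4 (802.3ad) receive path: packets are pulled from every active slave;
 * LACP/marker frames (EtherType ETHER_TYPE_SLOW) are diverted to the mode 4
 * state machines, and frames a real LACP bond would not have accepted
 * (slave not collecting, or unicast not addressed to the bond while
 * promiscuous mode is off) are dropped and compacted out of the burst. */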
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct ether_addr bond_mac;

	struct ether_hdr *hdr;

	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint8_t slaves[RTE_MAX_ETHPORTS];
	uint8_t slave_count;

	uint8_t collecting;	/* current slave collecting status */
	const uint8_t promisc = internals->promiscuous_en;
	uint8_t i, j, k;

	rte_eth_macaddr_get(internals->port_id, &bond_mac);
	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			/* Remove packet from array if it is a slow packet, or the slave
			 * is not in collecting state, or the bonding interface is not in
			 * promiscuous mode and the packet address does not match. */
			if (unlikely(hdr->ether_type == ether_type_slow_be ||
				!collecting || (!promisc &&
					!is_multicast_ether_addr(&hdr->d_addr) &&
					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
							bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
							(num_rx_total - j));
				}
			} else
				j++;
		}
	}

	return num_rx_total;
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
static void
arp_op_name(uint16_t arp_op, char *buf)
{
	switch (arp_op) {
	case ARP_OP_REQUEST:
		snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
		return;
	case ARP_OP_REPLY:
		snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
		return;
	case ARP_OP_REVREQUEST:
		snprintf(buf, sizeof("Reverse ARP Request"), "%s",
				"Reverse ARP Request");
		return;
	case ARP_OP_REVREPLY:
		snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
				"Reverse ARP Reply");
		return;
	case ARP_OP_INVREQUEST:
		snprintf(buf, sizeof("Peer Identify Request"), "%s",
				"Peer Identify Request");
		return;
	case ARP_OP_INVREPLY:
		snprintf(buf, sizeof("Peer Identify Reply"), "%s",
				"Peer Identify Reply");
		return;
	default:
		break;
	}

	snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
	return;
}
#endif
#define MaxIPv4String	16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}
#define MAX_CLIENTS_NUMBER	128
uint8_t active_clients;
struct client_stats_t {
	uint8_t port;
	uint32_t ipv4_addr;
	uint32_t ipv4_rx_packets;
	uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
static void
update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
{
	int i = 0;

	for (; i < MAX_CLIENTS_NUMBER; i++) {
		if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
			/* Just update RX packets number for this client */
			if (TXorRXindicator == &burstnumberRX)
				client_stats[i].ipv4_rx_packets++;
			else
				client_stats[i].ipv4_tx_packets++;
			return;
		}
	}
	/* We have a new client. Insert it into the table and update the stats */
	if (TXorRXindicator == &burstnumberRX)
		client_stats[active_clients].ipv4_rx_packets++;
	else
		client_stats[active_clients].ipv4_tx_packets++;
	client_stats[active_clients].ipv4_addr = addr;
	client_stats[active_clients].port = port;
	active_clients++;
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
	RTE_LOG(DEBUG, PMD, \
		"%s " \
		"port:%d " \
		"SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"SrcIP:%s " \
		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"DstIP:%s " \
		"%s " \
		"%d\n", \
		info, \
		port, \
		eth_h->s_addr.addr_bytes[0], \
		eth_h->s_addr.addr_bytes[1], \
		eth_h->s_addr.addr_bytes[2], \
		eth_h->s_addr.addr_bytes[3], \
		eth_h->s_addr.addr_bytes[4], \
		eth_h->s_addr.addr_bytes[5], \
		src_ip, \
		eth_h->d_addr.addr_bytes[0], \
		eth_h->d_addr.addr_bytes[1], \
		eth_h->d_addr.addr_bytes[2], \
		eth_h->d_addr.addr_bytes[3], \
		eth_h->d_addr.addr_bytes[4], \
		eth_h->d_addr.addr_bytes[5], \
		dst_ip, \
		arp_op, \
		++burstnumber)
#endif
static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
		uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
{
	struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	struct arp_hdr *arp_h;
	char dst_ip[16];
	char ArpOp[24];
	char buf[16];
#endif
	char src_ip[16];

	uint16_t ether_type = eth_h->ether_type;
	uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	snprintf(buf, 16, "%s", info);
#endif

	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
		update_client_stats(ipv4_h->src_addr, port, burstnumber);
	}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
		arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
		arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
	}
#endif
}
#endif
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;
	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;
	int i;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
			bond_mode_alb_arp_recv(eth_h, offset, internals);
		}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
		else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
	}

	return nb_recv_pkts;
}
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Distribute the packets among the slave buffers in round-robin order */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on
	 * the next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	unaligned_uint16_t *word_src_addr =
		(unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
	unaligned_uint16_t *word_dst_addr =
		(unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}
static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	unaligned_uint32_t *word_src_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
	unaligned_uint32_t *word_dst_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
static uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

	uint32_t hash = ether_hash(eth_hdr);

	return (hash ^= hash >> 8) % slave_count;
}
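/* Layer 2+3 transmit policy: combine the Ethernet MAC hash with an
 * IPv4/IPv6 source/destination address hash, then fold the result before
 * taking it modulo the slave count so all hash bytes influence selection. */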
static uint16_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
	uint32_t hash, l3hash = 0;

	hash = ether_hash(eth_hdr);

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv4_hash(ipv4_hdr);

	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);
	}

	hash = hash ^ l3hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
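/* Layer 3+4 transmit policy: hash the IP source/destination addresses and,
 * when the packet is not an IPv4 fragment, the TCP/UDP port pair as well,
 * so distinct flows between the same hosts can be spread across slaves. */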
static uint16_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	struct udp_hdr *udp_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	uint32_t hash, l3hash = 0, l4hash = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		size_t ip_hdr_offset;

		l3hash = ipv4_hash(ipv4_hdr);

		/* there is no L4 header in fragmented packet */
		if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
			ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
					IPV4_IHL_MULTIPLIER;

			if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
				tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
						ip_hdr_offset);
				l4hash = HASH_L4_PORTS(tcp_hdr);
			} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
						ip_hdr_offset);
				l4hash = HASH_L4_PORTS(udp_hdr);
			}
		}
	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

		if (ipv6_hdr->proto == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	}

	hash = l3hash ^ l4hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
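/* Adaptive TLB (mode 5) bookkeeping: each active slave's remaining
 * bandwidth is tracked as an integer part plus a remainder so that slaves
 * can be ordered by how much spare transmit capacity they have. */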
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint8_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals)
{
	int i;

	for (i = 0; i < internals->active_slave_count; i++)
		tlb_last_obytets[internals->active_slaves[i]] = 0;
}
static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;

	if (diff > 0)
		return 1;
	else if (diff < 0)
		return -1;
	else if (diff2 > 0)
		return 1;
	else if (diff2 < 0)
		return -1;
	else
		return 0;
}
static void
bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;

	rte_eth_link_get(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;

	if (link_bwg == 0)
		return;

	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
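/* Periodic TLB reordering callback: sample each active slave's transmitted
 * byte count, compute its remaining bandwidth, sort the slaves by spare
 * capacity, and re-arm itself via rte_eal_alarm_set() so it runs again
 * after REORDER_PERIOD_MS milliseconds. */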
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint8_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint8_t i, slave_id;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats)
			tlb_last_obytets[slave_id] = slave_stats.obytes;
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint8_t i, j;

	uint8_t num_of_slaves = internals->active_slave_count;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	struct ether_hdr *ether_hdr;
	struct ether_addr primary_slave_addr;
	struct ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
static void
bond_tlb_disable(struct bond_dev_private *internals)
{
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

static void
bond_tlb_enable(struct bond_dev_private *internals)
{
	bond_ethdev_update_tlb_slave_cb(internals);
}
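/* Mode 6 (ALB) transmit path: ARP packets are rewritten and pinned to the
 * slave chosen by the ALB client table, pending ARP update packets are
 * flushed when the table is marked ntt ("need to transmit"), and all
 * non-ARP traffic falls back to the TLB transmit policy above. */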
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In the worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;
	size_t pkt_size;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;
	uint8_t slave_idx;

	int i, j;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;
		} else {
			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
		}
	}

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
					continue;
				}
				pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
						+ client_info->vlan_count * sizeof(struct vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;
			}
		}
		internals->mode6.ntt = 0;
	}

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];
			}

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			/* Print TX stats including update packets */
			for (j = 0; j < slave_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);
			}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			for (j = 0; j < update_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
		}

		num_tx_total += num_send;
		num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate the per-slave mbuf arrays with the packets to send on each slave */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += slave_tx_fail_count;
				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						slave_tx_fail_count * sizeof(bufs[0]));
			}

			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];
	/* positions in slaves, not ID */
	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
	uint8_t distributing_count;

	uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
	uint16_t i, j, op_slave_idx;
	const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

	/* Allocate additional packets in case of 8023AD mode. */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
	void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

	/* Total amount of packets in slave_bufs */
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
	/* Slow packets placed in each slave */
	uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

	distributing_count = 0;
	for (i = 0; i < num_of_slaves; i++) {
		struct port *port = &mode_8023ad_ports[slaves[i]];

		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
		slave_nb_pkts[i] = slave_slow_nb_pkts[i];

		for (j = 0; j < slave_slow_nb_pkts[i]; j++)
			slave_bufs[i][j] = slow_pkts[j];

		if (ACTOR_STATE(port, DISTRIBUTING))
			distributing_offsets[distributing_count++] = i;
	}

	if (likely(distributing_count > 0)) {
		/* Populate the per-slave mbuf arrays with the packets to send */
		for (i = 0; i < nb_pkts; i++) {
			/* Select output slave using hash based on xmit policy */
			op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

			/* Populate slave mbuf arrays with mbufs for that slave. Use only
			 * slaves that are currently distributing. */
			uint8_t slave_offset = distributing_offsets[op_slave_idx];
			slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
			slave_nb_pkts[slave_offset]++;
		}
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] == 0)
			continue;

		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				slave_bufs[i], slave_nb_pkts[i]);

		/* If tx burst fails drop slow packets */
		for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
			rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

		num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
		num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
			uint16_t j = nb_pkts - num_tx_fail_total;
			for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
				bufs[j] = slave_bufs[i][num_tx_slave];
		}
	}

	return num_tx_total;
}
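/* Broadcast (mode 3) transmit: every packet is sent on every active slave.
 * Each mbuf's reference count is raised by num_of_slaves - 1 beforehand so
 * each slave's transmit consumes one reference; on partial failure only the
 * most successful slave's count is reported, and the surplus references
 * held for the other slaves are freed here, since the caller is unaware of
 * the extra references. */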
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t tx_failed_flag = 0, num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
		bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;
	}
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
		bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}
int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different to current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (mac_address_set(&rte_eth_devices[internals->primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					return -1;
				}
			} else {
				if (mac_address_set(
						&rte_eth_devices[internals->slaves[i].port_id],
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
					return -1;
				}
			}
		}
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
		RTE_LOG(WARNING, PMD,
				"Using mode 4, the TX and RX burst functions must be "
				"invoked at least once every 100ms.\n");
		break;
	case BONDING_MODE_TLB:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	int errval;
	uint16_t q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* If RSS is enabled for bonding, try to enable it for slaves */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
				!= 0) {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
		} else {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		}

		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
	}

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	/* If RSS is enabled for bonding, synchronize RETA */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int i;
		struct bond_dev_private *internals;

		internals = bonded_eth_dev->data->dev_private;

		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
				errval = rte_eth_dev_rss_reta_update(
						slave_eth_dev->data->port_id,
						&internals->reta_conf[0],
						internals->slaves[i].reta_size);
				if (errval != 0) {
					RTE_LOG(WARNING, PMD,
							"rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
							" RSS Configuration for bonding may be inconsistent.\n",
							slave_eth_dev->data->port_id, errval);
				}
				break;
			}
		}
	}

	/* If lsc interrupt is set, check initial slave's link status */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
				RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);

	return 0;
}
void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	uint8_t i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1))
		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));

	internals->slave_count--;
}

static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* If slave device doesn't support interrupts then we need to enable
	 * polling to monitor link status */
	if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;

		if (!internals->link_status_polling_enabled) {
			internals->link_status_polling_enabled = 1;

			rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
					bond_ethdev_slave_link_status_change_monitor,
					(void *)&rte_eth_devices[internals->port_id]);
		}
	}

	slave_details->link_status_wait_to_complete = 0;
	/* Save the slave's current MAC address so it can be restored when the
	 * slave is later removed from the bonded device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);

static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (check_for_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		return -1;
	}

	if (internals->user_defined_mac == 0) {
		struct ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			return -1;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			return -1;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev,
				&(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
			RTE_BOND_LOG(ERR,
					"bonded port (%d) failed to reconfigure slave device (%d)",
					eth_dev->data->port_id, internals->slaves[i].port_id);
			return -1;
		}
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);

	return 0;
}
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
	uint8_t i;

	if (dev->data->rx_queues != NULL) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rte_free(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->nb_rx_queues = 0;
	}

	if (dev->data->tx_queues != NULL) {
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			rte_free(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->nb_tx_queues = 0;
	}
}
void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t i;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->active_slave_count; i++) {
			port = &mode_8023ad_ports[internals->active_slaves[i]];

			RTE_VERIFY(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_VERIFY(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;
	}

	internals->active_slave_count = 0;
	internals->link_status_polling_enabled = 0;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
}
static void
bond_ethdev_close(struct rte_eth_dev *dev)
{
	bond_ethdev_free_queues(dev);
}

/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	dev_info->reta_size = internals->reta_size;
}
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->data->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}

static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->data->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}
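/* Periodic link-status poll for slaves whose PMDs cannot raise LSC
 * interrupts: each slave marked for polling has its link_update op invoked,
 * and any status change is fed into the same LSC callback used by
 * interrupt-capable slaves; the alarm then re-arms itself. */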
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until the next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdev's */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
		internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = 0;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == 1) {
				link_up = 1;
				break;
			}
		}
		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;
	int i, j;

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->imissed += slave_stats.imissed;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->imcasts += slave_stats.imcasts;
		stats->rx_nombuf += slave_stats.rx_nombuf;

		for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
			stats->q_ipackets[j] += slave_stats.q_ipackets[j];
			stats->q_opackets[j] += slave_stats.q_opackets[j];
			stats->q_ibytes[j] += slave_stats.q_ibytes[j];
			stats->q_obytes[j] += slave_stats.q_obytes[j];
			stats->q_errors[j] += slave_stats.q_errors[j];
		}
	}
}

static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i].port_id);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
		break;
	/* In mode4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
	}
}

static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
		break;
	/* In mode4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
	}
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC);
}
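/* Slave link-status change handler: promotes/demotes slaves in the active
 * list, updates the bonded device's own link state and primary port, and
 * either propagates the LSC event to the application immediately or defers
 * it through an EAL alarm when up/down delays are configured. */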
void
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0;
	uint8_t active_pos;
	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (check_for_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos < internals->active_slave_count)
			return;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = 1;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);

			/* Inherit eth dev link properties from first active slave */
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			return;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = 0;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or if none
		 * available set to -1 */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		}
	}
}
static int
bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	unsigned i, j;
	int result = 0;
	int slave_reta_size;
	unsigned reta_count;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	reta_count = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < reta_count; i++) {
		internals->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	/* Fill rest of array */
	for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
		memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
				sizeof(internals->reta_conf[0]) * reta_count);

	/* Propagate RETA over slaves */
	for (i = 0; i < internals->slave_count; i++) {
		slave_reta_size = internals->slaves[i].reta_size;
		result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
				&internals->reta_conf[0], slave_reta_size);
		if (result < 0)
			return result;
	}

	return 0;
}
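/*
 * Report the RETA entries currently stored for the bonded device; only the
 * entries selected by each group mask are copied out.
 */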
static int
bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];

	return 0;
}
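/*
 * Update the RSS hash configuration of the bonded device: mask the requested
 * hash functions against what the slaves support, store an optional new hash
 * key, then propagate the configuration to all slaves.
 */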
static int
bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	int i, result = 0;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_rss_conf bond_rss_conf;

	memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));

	bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;

	if (bond_rss_conf.rss_hf != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;

	if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
			sizeof(internals->rss_key)) {
		if (bond_rss_conf.rss_key_len == 0)
			bond_rss_conf.rss_key_len = 40;
		internals->rss_key_len = bond_rss_conf.rss_key_len;
		memcpy(internals->rss_key, bond_rss_conf.rss_key,
				internals->rss_key_len);
	}

	for (i = 0; i < internals->slave_count; i++) {
		result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
				&bond_rss_conf);
		if (result < 0)
			return result;
	}

	return 0;
}
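/* Report the RSS hash configuration currently applied to the bonded device. */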
static int
bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_conf->rss_key_len = internals->rss_key_len;
	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);

	return 0;
}
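/*
 * Generic ethdev ops for the bonded device. Note that the rx/tx burst
 * handlers are not part of this table; they are assigned per bonding mode
 * elsewhere in this driver.
 */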
const struct eth_dev_ops default_dev_ops = {
	.dev_start = bond_ethdev_start,
	.dev_stop = bond_ethdev_stop,
	.dev_close = bond_ethdev_close,
	.dev_configure = bond_ethdev_configure,
	.dev_infos_get = bond_ethdev_info,
	.rx_queue_setup = bond_ethdev_rx_queue_setup,
	.tx_queue_setup = bond_ethdev_tx_queue_setup,
	.rx_queue_release = bond_ethdev_rx_queue_release,
	.tx_queue_release = bond_ethdev_tx_queue_release,
	.link_update = bond_ethdev_link_update,
	.stats_get = bond_ethdev_stats_get,
	.stats_reset = bond_ethdev_stats_reset,
	.promiscuous_enable = bond_ethdev_promiscuous_enable,
	.promiscuous_disable = bond_ethdev_promiscuous_disable,
	.reta_update = bond_ethdev_rss_reta_update,
	.reta_query = bond_ethdev_rss_reta_query,
	.rss_hash_update = bond_ethdev_rss_hash_update,
	.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get
};
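/*
 * Driver init entry point: parse the --vdev devargs, create the bonded
 * ethdev and stash the remaining kvargs for bond_ethdev_configure() to
 * consume once all slave ports exist. An illustrative invocation (device
 * name and PCI addresses below are placeholders, not taken from this file):
 *
 *   --vdev 'eth_bond0,mode=1,slave=0000:02:00.0,slave=0000:02:00.1'
 */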
static int
bond_init(const char *name, const char *params)
{
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
					name);
			goto parse_error;
		}
	} else {
		RTE_LOG(ERR, EAL, "Mode must be specified exactly once for bonded "
				"device %s\n", name);
		goto parse_error;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
					"bonded device %s\n", name);
			goto parse_error;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
				"bonded device %s\n", name);
		goto parse_error;
	} else {
		/* Default to the socket of the initializing thread */
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
				"socket %u.\n", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
			"socket %u.\n", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);
	return -1;
}
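/* Driver uninit entry point: free the bonded ethdev created by bond_init(). */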
static int
bond_uninit(const char *name)
{
	int ret;

	RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
	/* free link bonding eth device */
	ret = rte_eth_bond_free(name);
	if (ret < 0)
		RTE_LOG(ERR, EAL, "Failed to free %s\n", name);
	return ret;
}
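/*
 * Kvargs handled below: PMD_BOND_MAC_ADDR_KVARG, PMD_BOND_XMIT_POLICY_KVARG,
 * PMD_BOND_SLAVE_PORT_KVARG, PMD_BOND_PRIMARY_SLAVE_KVARG,
 * PMD_BOND_LSC_POLL_PERIOD_KVARG and the link up/down propagation delays.
 * At least one slave port is required; everything else is optional.
 */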
/* this part will resolve the slave portids after all the other pdev and vdev
 * have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count;
	uint8_t port_id = dev - rte_eth_devices;

	/* 40-byte default RSS hash key (the widely used Microsoft Toeplitz key) */
	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	unsigned i, j;

	/* If RSS is enabled, fill table and key with default values */
	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
		memcpy(internals->rss_key, default_rss_key, 40);

		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
			internals->reta_conf[i].mask = ~0LL;
			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
				internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
		}
	}
	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}
		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n", name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg,
				&xmit_policy) != 0) {
			RTE_LOG(INFO, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}
		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;
		unsigned i;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}
		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0)
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}
	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}
		/* Set primary slave port id */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}
		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}
	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}
		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}
		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}
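/* Register the bonding PMD with the EAL as a virtual device driver. */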
static struct rte_driver bond_drv = {
	.name = "eth_bond",
	.type = PMD_VDEV,
	.init = bond_init,
	.uninit = bond_uninit,
};

PMD_REGISTER_DRIVER(bond_drv);