/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}
	return vlan_offset;
}
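/*
 * Illustrative sketch (comment only, not driver code): for a double-tagged
 * (QinQ) frame laid out as [ether_hdr][vlan_hdr][vlan_hdr][ipv4_hdr], a
 * caller of the helper above would see:
 *
 *	uint16_t proto = eth_hdr->ether_type;          // VLAN tag, big endian
 *	size_t off = get_vlan_offset(eth_hdr, &proto);
 *	// off == 2 * sizeof(struct vlan_hdr) and proto == the inner
 *	// ETHER_TYPE_IPv4, so the L3 header starts at
 *	// (char *)(eth_hdr + 1) + off.
 */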
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
		if (num_rx_slave) {
			num_rx_total += num_rx_slave;
			nb_pkts -= num_rx_slave;
		}
	}

	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct ether_addr bond_mac;

	struct ether_hdr *hdr;

	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint8_t slaves[RTE_MAX_ETHPORTS];
	uint8_t slave_count;

	uint8_t collecting;	/* current slave collecting status */
	const uint8_t promisc = internals->promiscuous_en;
	uint8_t i, j, k;

	rte_eth_macaddr_get(internals->port_id, &bond_mac);
	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			/* Remove packet from array if it is a slow packet or the slave
			 * is not in collecting state, or the bonding interface is not in
			 * promiscuous mode and the packet address does not match. */
			if (unlikely(hdr->ether_type == ether_type_slow_be ||
				!collecting || (!promisc &&
					!is_multicast_ether_addr(&hdr->d_addr) &&
					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
						bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
						(num_rx_total - j));
				}
			} else
				j++;
		}
	}

	return num_rx_total;
}
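/*
 * Illustrative note (comment only): with promiscuous mode off, the filter
 * above keeps a received frame only when the slave is COLLECTING and the
 * destination MAC is multicast or matches the bond, i.e. roughly:
 *
 *	keep = !slow && collecting &&
 *		(promisc || is_multicast || d_addr == bond_mac);
 *
 * LACPDUs (ETHER_TYPE_SLOW) are never returned to the application; they are
 * consumed by bond_mode_8023ad_handle_slow_pkt() instead.
 */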
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
static void
arp_op_name(uint16_t arp_op, char *buf)
{
	switch (arp_op) {
	case ARP_OP_REQUEST:
		snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
		return;
	case ARP_OP_REPLY:
		snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
		return;
	case ARP_OP_REVREQUEST:
		snprintf(buf, sizeof("Reverse ARP Request"), "%s",
				"Reverse ARP Request");
		return;
	case ARP_OP_REVREPLY:
		snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
				"Reverse ARP Reply");
		return;
	case ARP_OP_INVREQUEST:
		snprintf(buf, sizeof("Peer Identify Request"), "%s",
				"Peer Identify Request");
		return;
	case ARP_OP_INVREPLY:
		snprintf(buf, sizeof("Peer Identify Reply"), "%s",
				"Peer Identify Reply");
		return;
	default:
		break;
	}

	snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
	return;
}
#endif
#define MaxIPv4String	16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}
#define MAX_CLIENTS_NUMBER	128
uint8_t active_clients;
struct client_stats_t {
	uint8_t port;
	uint32_t ipv4_addr;
	uint32_t ipv4_rx_packets;
	uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

static void
update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
{
	int i = 0;

	for (; i < MAX_CLIENTS_NUMBER; i++) {
		if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
			/* Just update RX packets number for this client */
			if (TXorRXindicator == &burstnumberRX)
				client_stats[i].ipv4_rx_packets++;
			else
				client_stats[i].ipv4_tx_packets++;
			return;
		}
	}
	/* We have a new client. Insert it into the table and update the stats */
	if (TXorRXindicator == &burstnumberRX)
		client_stats[active_clients].ipv4_rx_packets++;
	else
		client_stats[active_clients].ipv4_tx_packets++;
	client_stats[active_clients].ipv4_addr = addr;
	client_stats[active_clients].port = port;
	active_clients++;
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber)	\
	RTE_LOG(DEBUG, PMD, \
		"%s " \
		"port:%d " \
		"SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"SrcIP:%s " \
		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"DstIP:%s " \
		"%s " \
		"%d\n", \
		info, \
		port, \
		eth_h->s_addr.addr_bytes[0], \
		eth_h->s_addr.addr_bytes[1], \
		eth_h->s_addr.addr_bytes[2], \
		eth_h->s_addr.addr_bytes[3], \
		eth_h->s_addr.addr_bytes[4], \
		eth_h->s_addr.addr_bytes[5], \
		src_ip, \
		eth_h->d_addr.addr_bytes[0], \
		eth_h->d_addr.addr_bytes[1], \
		eth_h->d_addr.addr_bytes[2], \
		eth_h->d_addr.addr_bytes[3], \
		eth_h->d_addr.addr_bytes[4], \
		eth_h->d_addr.addr_bytes[5], \
		dst_ip, \
		arp_op, \
		++burstnumber)
#endif
static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
		uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
{
	struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	struct arp_hdr *arp_h;
	char dst_ip[16];
	char ArpOp[24];
	char buf[16];
#endif
	char src_ip[16];

	uint16_t ether_type = eth_h->ether_type;
	uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	snprintf(buf, 16, "%s", info);
#endif

	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
		update_client_stats(ipv4_h->src_addr, port, burstnumber);
	}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
		arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
		arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
	}
#endif
}
#endif
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;
	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;
	int i;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
			bond_mode_alb_arp_recv(eth_h, offset, internals);
		}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
		else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
	}

	return nb_recv_pkts;
}
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate slave mbuf arrays with the packets to be sent on each slave */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on
	 * the next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
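/*
 * Illustrative note (comment only): on partial TX failure the burst
 * functions above compact every unsent mbuf to the tail of the caller's
 * bufs[] array. E.g. with nb_pkts == 8 and 3 failures, the return value is
 * 5 and bufs[5..7] still hold the unsent packets, so a caller may retry or
 * free them:
 *
 *	uint16_t sent = rte_eth_tx_burst(bond_port, 0, bufs, 8);
 *	for (uint16_t k = sent; k < 8; k++)
 *		rte_pktmbuf_free(bufs[k]);
 */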
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	unaligned_uint16_t *word_src_addr =
		(unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
	unaligned_uint16_t *word_dst_addr =
		(unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	unaligned_uint32_t *word_src_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
	unaligned_uint32_t *word_dst_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
static uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

	uint32_t hash = ether_hash(eth_hdr);

	return (hash ^= hash >> 8) % slave_count;
}

static uint16_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
	uint32_t hash, l3hash = 0;

	hash = ether_hash(eth_hdr);

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv4_hash(ipv4_hdr);

	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);
	}

	hash = hash ^ l3hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
static uint16_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	struct udp_hdr *udp_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	uint32_t hash, l3hash = 0, l4hash = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		size_t ip_hdr_offset;

		l3hash = ipv4_hash(ipv4_hdr);

		/* there is no L4 header in fragmented packet */
		if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
			ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
					IPV4_IHL_MULTIPLIER;

			if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
				tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
						ip_hdr_offset);
				l4hash = HASH_L4_PORTS(tcp_hdr);
			} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
						ip_hdr_offset);
				l4hash = HASH_L4_PORTS(udp_hdr);
			}
		}
	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

		if (ipv6_hdr->proto == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	}

	hash = l3hash ^ l4hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
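/*
 * Illustrative sketch of balance-mode slave selection (comment only):
 * internals->xmit_hash points at one of the three policies above. E.g. for
 * a layer 2+3 policy on a hypothetical 3-slave bond:
 *
 *	uint16_t slave = xmit_l23_hash(pkt, 3);
 *	// slave = ((ether_hash ^ l3hash) folded by ">> 16" and ">> 8") % 3,
 *	// so every packet of a given L2/L3 flow maps to the same slave.
 */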
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint8_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
	int i;

	for (i = 0; i < internals->active_slave_count; i++) {
		tlb_last_obytets[internals->active_slaves[i]] = 0;
	}
}
static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;

	if (diff > 0)
		return 1;
	else if (diff < 0)
		return -1;
	else if (diff2 > 0)
		return 1;
	else if (diff2 < 0)
		return -1;
	else
		return 0;
}
static void
bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;

	rte_eth_link_get(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;

	if (link_bwg == 0)
		return;

	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
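/*
 * Worked example (comment only): for a 10G slave, link_speed is 10000
 * (Mbps), so link_bwg starts as 10000 * 1000000 / 8 = 1.25e9 bytes/s and is
 * then scaled by (update_idx + 1) * REORDER_PERIOD_MS to a per-window byte
 * budget. The slave that has transmitted the fewest bytes ("load") keeps
 * the largest bwg_left_int/bwg_left_remainder pair, so bandwidth_cmp()
 * sorts it first in tlb_slaves_order.
 */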
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint8_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint8_t i, slave_id;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats)
			tlb_last_obytets[slave_id] = slave_stats.obytes;
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint8_t i, j;

	uint8_t num_of_slaves = internals->active_slave_count;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	struct ether_hdr *ether_hdr;
	struct ether_addr primary_slave_addr;
	struct ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
void
bond_tlb_disable(struct bond_dev_private *internals)
{
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
	bond_ethdev_update_tlb_slave_cb(internals);
}
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In the worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;
	size_t pkt_size;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;
	uint8_t slave_idx;

	int i, j;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;
		} else {
			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
		}
	}

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
					continue;
				}
				pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
						+ client_info->vlan_count * sizeof(struct vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;
			}
		}
		internals->mode6.ntt = 0;
	}

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];
			}

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			/* Print TX stats including update packets */
			for (j = 0; j < slave_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);
			}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			for (j = 0; j < update_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
		}

		num_tx_total += num_send;
		num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate slave mbuf arrays with the packets to be sent on each slave */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += slave_tx_fail_count;
				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						slave_tx_fail_count * sizeof(bufs[0]));
			}

			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];
	/* positions in slaves, not ID */
	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
	uint8_t distributing_count;

	uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
	uint16_t i, j, op_slave_idx;
	const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

	/* Allocate additional packets in case 8023AD mode. */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
	void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

	/* Total amount of packets in slave_bufs */
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
	/* Slow packets placed in each slave */
	uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

	distributing_count = 0;
	for (i = 0; i < num_of_slaves; i++) {
		struct port *port = &mode_8023ad_ports[slaves[i]];

		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
		slave_nb_pkts[i] = slave_slow_nb_pkts[i];

		for (j = 0; j < slave_slow_nb_pkts[i]; j++)
			slave_bufs[i][j] = slow_pkts[j];

		if (ACTOR_STATE(port, DISTRIBUTING))
			distributing_offsets[distributing_count++] = i;
	}

	if (likely(distributing_count > 0)) {
		/* Populate slave mbuf arrays with the packets to be sent on them */
		for (i = 0; i < nb_pkts; i++) {
			/* Select output slave using hash based on xmit policy */
			op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

			/* Populate slave mbuf arrays with mbufs for that slave. Use only
			 * slaves that are currently distributing. */
			uint8_t slave_offset = distributing_offsets[op_slave_idx];
			slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
			slave_nb_pkts[slave_offset]++;
		}
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] == 0)
			continue;

		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				slave_bufs[i], slave_nb_pkts[i]);

		/* If tx burst fails drop slow packets */
		for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
			rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

		num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
		num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
			uint16_t j = nb_pkts - num_tx_fail_total;
			for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
				bufs[j] = slave_bufs[i][num_tx_slave];
		}
	}

	return num_tx_total;
}
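/*
 * Illustrative note (comment only): each mode 4 slave owns a tx_ring that
 * the 802.3ad state machine fills with LACPDUs. The burst above always
 * drains up to BOND_MODE_8023AX_SLAVE_TX_PKTS of them before any data
 * packets, which is why the caller must invoke the TX burst function
 * periodically even when idle (see the warning in bond_ethdev_mode_set()
 * below), e.g. by issuing a zero-length burst:
 *
 *	rte_eth_tx_burst(bonded_port_id, 0, NULL, 0);  // still flushes LACPDUs
 */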
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t tx_failed_flag = 0, num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
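/*
 * Illustrative note (comment only): broadcast mode clones by reference
 * counting, not by copying. For a bond with N active slaves each mbuf's
 * refcnt is bumped by N - 1 before the N transmits, so a packet accepted
 * by every slave is freed only after the last TX completion releases it.
 * E.g. with 3 slaves:
 *
 *	rte_mbuf_refcnt_update(pkt, 2);   // refcnt: 1 -> 3
 *	// three successful transmissions each decrement it back to 0.
 */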
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
		bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;
	}
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
		bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}
int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different to current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (mac_address_set(&rte_eth_devices[internals->primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					return -1;
				}
			} else {
				if (mac_address_set(
						&rte_eth_devices[internals->slaves[i].port_id],
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
					return -1;
				}
			}
		}
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
		RTE_LOG(WARNING, PMD,
				"Using mode 4, it is necessary to do TX burst and RX burst "
				"at least every 100ms.\n");
		break;
	case BONDING_MODE_TLB:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
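/*
 * Illustrative usage sketch (application side, comment only), based on the
 * public API in rte_eth_bond.h; the port numbers are hypothetical:
 *
 *	int bond_port = rte_eth_bond_create("bond0", BONDING_MODE_BALANCE, 0);
 *	rte_eth_bond_slave_add(bond_port, 0);
 *	rte_eth_bond_slave_add(bond_port, 1);
 *	rte_eth_bond_xmit_policy_set(bond_port, BALANCE_XMIT_POLICY_LAYER23);
 *	// rte_eth_bond_mode_set() ends up in bond_ethdev_mode_set() above.
 */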
static int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
	uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues;

	int errval;
	uint16_t q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* If RSS is enabled for bonding, try to enable it for slaves */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
				!= 0) {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
		} else {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		}

		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
	}

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	/* Use existing queues, if any */
	for (q_id = old_nb_rx_queues;
			q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	/* Use existing queues, if any */
	for (q_id = old_nb_tx_queues;
			q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	/* If RSS is enabled for bonding, synchronize RETA */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int i;
		struct bond_dev_private *internals;

		internals = bonded_eth_dev->data->dev_private;

		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
				errval = rte_eth_dev_rss_reta_update(
						slave_eth_dev->data->port_id,
						&internals->reta_conf[0],
						internals->slaves[i].reta_size);
				if (errval != 0) {
					RTE_LOG(WARNING, PMD,
							"rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
							" RSS Configuration for bonding may be inconsistent.\n",
							slave_eth_dev->data->port_id, errval);
				}
				break;
			}
		}
	}

	/* If lsc interrupt is set, check initial slave's link status */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
				RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);

	return 0;
}
static void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	uint8_t i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1))
		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));

	internals->slave_count--;
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);

static void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* If slave device doesn't support interrupts then we need to enable
	 * polling to monitor link status */
	if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;

		if (!internals->link_status_polling_enabled) {
			internals->link_status_polling_enabled = 1;

			rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
					bond_ethdev_slave_link_status_change_monitor,
					(void *)&rte_eth_devices[internals->port_id]);
		}
	}

	slave_details->link_status_wait_to_complete = 0;
	/* clean tlb_last_obytes when adding port for bonding device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);

static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (check_for_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		return -1;
	}

	if (internals->user_defined_mac == 0) {
		struct ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			return -1;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			return -1;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev,
				&(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
			RTE_BOND_LOG(ERR,
					"bonded port (%d) failed to reconfigure slave device (%d)",
					eth_dev->data->port_id, internals->slaves[i].port_id);
			return -1;
		}
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);

	return 0;
}
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
	uint8_t i;

	if (dev->data->rx_queues != NULL) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rte_free(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->nb_rx_queues = 0;
	}

	if (dev->data->tx_queues != NULL) {
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			rte_free(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->nb_tx_queues = 0;
	}
}
void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t i;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->active_slave_count; i++) {
			port = &mode_8023ad_ports[internals->active_slaves[i]];

			RTE_VERIFY(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_VERIFY(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;
	}

	internals->active_slave_count = 0;
	internals->link_status_polling_enabled = 0;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
}
void
bond_ethdev_close(struct rte_eth_dev *dev)
{
	bond_ethdev_free_queues(dev);
}

/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	dev_info->reta_size = internals->reta_size;
}
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->data->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}

static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->data->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdev's */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
		internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = 0;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == 1) {
				link_up = 1;
				break;
			}
		}

		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;
	int i, j;

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->imissed += slave_stats.imissed;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->imcasts += slave_stats.imcasts;
		stats->rx_nombuf += slave_stats.rx_nombuf;

		for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
			stats->q_ipackets[j] += slave_stats.q_ipackets[j];
			stats->q_opackets[j] += slave_stats.q_opackets[j];
			stats->q_ibytes[j] += slave_stats.q_ibytes[j];
			stats->q_obytes[j] += slave_stats.q_obytes[j];
			stats->q_errors[j] += slave_stats.q_errors[j];
		}
	}
}
static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i].port_id);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
		break;
	/* In mode4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
	}
}
static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
		break;
	/* In mode4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
	}
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC);
}
void
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0;
	uint8_t active_pos;
	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (check_for_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos < internals->active_slave_count)
			return;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = 1;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);

			/* Inherit eth dev link properties from first active slave */
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			return;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = 0;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or if none
		 * available set to -1 */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		}
	}
}
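/*
 * Illustrative note (comment only): link_up_delay_ms/link_down_delay_ms
 * debounce flapping links. With, say, link_down_delay_ms == 500 (a
 * hypothetical value set via rte_eth_bond_link_down_prop_delay_set()), a
 * DOWN event only reaches the application's LSC callback if the link is
 * still down 500 ms later; a subsequent event within that window cancels
 * the pending alarm above and re-arms it.
 */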
static int
bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	unsigned i, j;
	int result = 0;
	int slave_reta_size;
	unsigned reta_count;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	reta_count = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < reta_count; i++) {
		internals->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	/* Fill rest of array */
	for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
		memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
				sizeof(internals->reta_conf[0]) * reta_count);

	/* Propagate RETA over slaves */
	for (i = 0; i < internals->slave_count; i++) {
		slave_reta_size = internals->slaves[i].reta_size;
		result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
				&internals->reta_conf[0], slave_reta_size);
		if (result < 0)
			return result;
	}

	return 0;
}
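/*
 * Illustrative sketch (application side, assumes a 128-entry RETA and
 * nb_rx_queues configured RX queues): updates reach the code above through
 * the generic ethdev call:
 *
 *	struct rte_eth_rss_reta_entry64 conf[128 / RTE_RETA_GROUP_SIZE];
 *	unsigned idx, j;
 *
 *	for (idx = 0; idx < RTE_DIM(conf); idx++) {
 *		conf[idx].mask = ~0ULL;
 *		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
 *			conf[idx].reta[j] =
 *				(idx * RTE_RETA_GROUP_SIZE + j) % nb_rx_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(bonded_port_id, conf, 128);
 */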
static int
bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];

	return 0;
}
static int
bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	unsigned i;
	int result = 0;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_rss_conf bond_rss_conf;

	memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));

	/* Only hash functions supported by every slave can be enabled */
	bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;

	if (bond_rss_conf.rss_hf != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;

	if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
			sizeof(internals->rss_key)) {
		if (bond_rss_conf.rss_key_len == 0)
			bond_rss_conf.rss_key_len = 40;
		internals->rss_key_len = bond_rss_conf.rss_key_len;
		memcpy(internals->rss_key, bond_rss_conf.rss_key,
				internals->rss_key_len);
	}

	/* Propagate the (masked) configuration to every slave */
	for (i = 0; i < internals->slave_count; i++) {
		result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
				&bond_rss_conf);
		if (result < 0)
			return result;
	}

	return 0;
}
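/*
 * Illustrative sketch (application side): restricting the bonded port's RSS
 * hash to IP flows at runtime; the call fans out to every slave through
 * bond_ethdev_rss_hash_update() above:
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP,
 *	};
 *	rte_eth_dev_rss_hash_update(bonded_port_id, &conf);
 *
 * A NULL rss_key keeps whatever key the code above has already programmed.
 */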
static int
bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_conf->rss_key_len = internals->rss_key_len;
	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);

	return 0;
}
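/*
 * Illustrative sketch (application side): reading back the effective RSS
 * configuration through the query op above:
 *
 *	uint8_t key[40];
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *	};
 *	rte_eth_dev_rss_hash_conf_get(bonded_port_id, &conf);
 */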
const struct eth_dev_ops default_dev_ops = {
	.dev_start = bond_ethdev_start,
	.dev_stop = bond_ethdev_stop,
	.dev_close = bond_ethdev_close,
	.dev_configure = bond_ethdev_configure,
	.dev_infos_get = bond_ethdev_info,
	.rx_queue_setup = bond_ethdev_rx_queue_setup,
	.tx_queue_setup = bond_ethdev_tx_queue_setup,
	.rx_queue_release = bond_ethdev_rx_queue_release,
	.tx_queue_release = bond_ethdev_tx_queue_release,
	.link_update = bond_ethdev_link_update,
	.stats_get = bond_ethdev_stats_get,
	.stats_reset = bond_ethdev_stats_reset,
	.promiscuous_enable = bond_ethdev_promiscuous_enable,
	.promiscuous_disable = bond_ethdev_promiscuous_disable,
	.reta_update = bond_ethdev_rss_reta_update,
	.reta_query = bond_ethdev_rss_reta_query,
	.rss_hash_update = bond_ethdev_rss_hash_update,
	.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get
};
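/*
 * Note (illustrative): every generic ethdev call on the bonded port resolves
 * through this table, so e.g. rte_eth_stats_get(bonded_port_id, &stats) lands
 * in bond_ethdev_stats_get(), which aggregates the slaves' counters.
 */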
static int
bond_init(const char *name, const char *params)
{
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
					name);
			goto parse_error;
		}
	} else {
		RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
				"device %s\n", name);
		goto parse_error;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
					"bonded device %s\n", name);
			goto parse_error;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
				"bonded device %s\n", name);
		goto parse_error;
	} else {
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
				"socket %u.\n", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
			"socket %u.\n", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);
	return -1;
}

static int
bond_uninit(const char *name)
{
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);

	/* free link bonding eth device */
	ret = rte_eth_bond_free(name);
	if (ret < 0)
		RTE_LOG(ERR, EAL, "Failed to free %s\n", name);

	return ret;
}
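/*
 * Illustrative usage (EAL command line): bond_init() above services vdev
 * arguments of the following form, e.g. a mode-2 (balance) device with two
 * slaves and an l34 transmit policy:
 *
 *	./app -c 0xf -n 4 --vdev \
 *	'eth_bond0,mode=2,slave=0000:04:00.0,slave=0000:04:00.1,xmit_policy=l34'
 *
 * The PCI addresses are placeholders; the accepted keys are those listed in
 * pmd_bond_init_valid_arguments.
 */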
/* this part will resolve the slave portids after all the other pdev and vdev
 * have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count;
	uint8_t port_id = dev - rte_eth_devices;

	/* Default 40-byte RSS hash key (the widely used default Toeplitz key) */
	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	unsigned i, j;

	/* If RSS is enabled, fill table and key with default values */
	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
		memcpy(internals->rss_key, default_rss_key, 40);

		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
			internals->reta_conf[i].mask = ~0LL;
			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
				internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
		}
	}
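	/*
	 * Worked example of the default fill above (not from the original
	 * source): with nb_rx_queues == 4, every RETA group becomes the
	 * repeating pattern 0,1,2,3,0,1,2,3,... so flows are spread evenly
	 * across the four RX queues until the application installs its own
	 * table via bond_ethdev_rss_reta_update().
	 */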
	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}
	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg,
				&xmit_policy) != 0) {
			RTE_LOG(INFO, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}
	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}

		/* Set the primary slave port */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}
	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}

		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}
	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}
	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}

static struct rte_driver bond_drv = {
	.name = "eth_bond",
	.type = PMD_VDEV,
	.init = bond_init,
	.uninit = bond_uninit,
};

PMD_REGISTER_DRIVER(bond_drv);
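/*
 * Illustrative alternative (application side): the same device can be created
 * programmatically, without kvargs, through the public bonding API; in that
 * case bond_ethdev_configure() sees internals->kvlist == NULL and skips the
 * argument parsing above:
 *
 *	int bport = rte_eth_bond_create("eth_bond0", BONDING_MODE_ACTIVE_BACKUP,
 *			rte_socket_id());
 *	if (bport >= 0) {
 *		rte_eth_bond_slave_add(bport, slave_port_id);
 *		rte_eth_bond_primary_set(bport, slave_port_id);
 *	}
 */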