/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>

#include "rte_eth_bond.h"
#include "eth_bond_private.h"
#include "eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
#define BOND_MAX_MAC_ADDRS 16

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
static inline size_t
get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
		rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
		struct rte_vlan_hdr *vlan_hdr =
			(struct rte_vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct rte_vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		/* A QinQ outer tag may be followed by an inner VLAN tag */
		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct rte_vlan_hdr);
		}
	}
	return vlan_offset;
}
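/*
 * Illustrative sketch, not part of the driver: how a caller typically
 * combines get_vlan_offset() with the mtod macro to find the L3 header
 * behind up to two VLAN tags. The helper name and the guard macro
 * BOND_PMD_EXAMPLES are hypothetical and exist only for illustration.
 */
#ifdef BOND_PMD_EXAMPLES
static struct rte_ipv4_hdr *
example_locate_ipv4_hdr(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;	/* big-endian on the wire */
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	if (proto != rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		return NULL;	/* not IPv4 after skipping VLAN/QinQ tags */
	return (struct rte_ipv4_hdr *)((char *)(eth_hdr + 1) + vlan_offset);
}
#endif /* BOND_PMD_EXAMPLES */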
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_total = 0;
	uint16_t slave_count;
	uint16_t active_slave;
	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	internals = bd_rx_q->dev_private;
	slave_count = internals->active_slave_count;
	active_slave = bd_rx_q->active_slave;

	for (i = 0; i < slave_count && nb_pkts; i++) {
		uint16_t num_rx_slave;

		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave =
			rte_eth_rx_burst(internals->active_slaves[active_slave],
					bd_rx_q->queue_id,
					bufs + num_rx_total, nb_pkts);
		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
		if (++active_slave == slave_count)
			active_slave = 0;
	}

	if (++bd_rx_q->active_slave >= slave_count)
		bd_rx_q->active_slave = 0;

	return num_rx_total;
}
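/*
 * Illustrative sketch, not part of the driver: an application polls the
 * bonded port like any other port; the round-robin walk over the active
 * slaves above happens behind rte_eth_rx_burst(). The helper and the
 * BOND_PMD_EXAMPLES guard are hypothetical.
 */
#ifdef BOND_PMD_EXAMPLES
static uint16_t
example_poll_bonded_port(uint16_t bonded_port_id, struct rte_mbuf **pkts,
		uint16_t burst_size)
{
	/* Queue 0 of the bonded port fans out to one queue per slave */
	return rte_eth_rx_burst(bonded_port_id, 0, pkts, burst_size);
}
#endif /* BOND_PMD_EXAMPLES */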
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
	const uint16_t ether_type_slow_be =
		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);

	return !((mbuf->ol_flags & RTE_MBUF_F_RX_VLAN) ? mbuf->vlan_tci : 0) &&
		(ethertype == ether_type_slow_be &&
		(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
/*****************************************************************************
 * Flow director's setup for mode 4 optimization
 */

static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
	.dst.addr_bytes = { 0 },
	.src.addr_bytes = { 0 },
	.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};

static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
	.dst.addr_bytes = { 0 },
	.src.addr_bytes = { 0 },
	.type = 0xFFFF,
};

static struct rte_flow_item flow_item_8023ad[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &flow_item_eth_type_8023ad,
		.last = NULL,
		.mask = &flow_item_eth_mask_type_8023ad,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_END,
	}
};

const struct rte_flow_attr flow_attr_8023ad = {
	.ingress = 1,
};
int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
		uint16_t slave_port) {
	struct rte_eth_dev_info slave_info;
	struct rte_flow_error error;
	struct bond_dev_private *internals = bond_dev->data->dev_private;

	const struct rte_flow_action_queue lacp_queue_conf = {
		.index = 0,
	};

	const struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &lacp_queue_conf
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		}
	};

	int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
			flow_item_8023ad, actions, &error);
	if (ret != 0) {
		RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
				__func__, error.message, slave_port,
				internals->mode4.dedicated_queues.rx_qid);
		return -1;
	}

	ret = rte_eth_dev_info_get(slave_port, &slave_info);
	if (ret != 0) {
		RTE_BOND_LOG(ERR,
			"%s: Error during getting device (port %u) info: %s",
			__func__, slave_port, strerror(-ret));
		return ret;
	}

	if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
			slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
		RTE_BOND_LOG(ERR,
			"%s: Slave %d capabilities don't allow allocating additional queues",
			__func__, slave_port);
		return -1;
	}

	return 0;
}
int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
	struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct rte_eth_dev_info bond_info;
	uint16_t idx;
	int ret;

	/* Verify that every slave in the bond can support the dedicated-queue
	 * flow rules */
	if (internals->slave_count > 0) {
		ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
		if (ret != 0) {
			RTE_BOND_LOG(ERR,
				"%s: Error during getting device (port %u) info: %s",
				__func__, bond_dev->data->port_id,
				strerror(-ret));
			return ret;
		}

		internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
		internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;

		for (idx = 0; idx < internals->slave_count; idx++) {
			if (bond_ethdev_8023ad_flow_verify(bond_dev,
					internals->slaves[idx].port_id) != 0)
				return -1;
		}
	}

	return 0;
}
int
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {

	struct rte_flow_error error;
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct rte_flow_action_queue lacp_queue_conf = {
		.index = internals->mode4.dedicated_queues.rx_qid,
	};

	const struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &lacp_queue_conf
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		}
	};

	internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
			&flow_attr_8023ad, flow_item_8023ad, actions, &error);
	if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
		RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
				"(slave_port=%d queue_id=%d)",
				error.message, slave_port,
				internals->mode4.dedicated_queues.rx_qid);
		return -1;
	}

	return 0;
}
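/*
 * Illustrative sketch, not part of the driver: the flow rules above are
 * only installed once an application opts in to dedicated LACP queues
 * through the public API, while the bonded port is stopped. Minimal
 * hedged example; BOND_PMD_EXAMPLES is a hypothetical guard macro.
 */
#ifdef BOND_PMD_EXAMPLES
#include <rte_eth_bond_8023ad.h>

static int
example_enable_dedicated_lacp_queues(uint16_t bonded_port_id)
{
	/*
	 * After this call each slave is configured with one extra Rx and
	 * Tx queue, and bond_ethdev_8023ad_flow_set() steers slow frames
	 * into it, so the data path no longer has to inspect for LACP.
	 */
	return rte_eth_bond_8023ad_dedicated_queues_enable(bonded_port_id);
}
#endif /* BOND_PMD_EXAMPLES */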
static inline uint16_t
rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
		bool dedicated_rxq)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct rte_eth_dev *bonded_eth_dev =
					&rte_eth_devices[internals->port_id];
	struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
	struct rte_ether_hdr *hdr;

	const uint16_t ether_type_slow_be =
		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint16_t slaves[RTE_MAX_ETHPORTS];
	uint16_t slave_count, idx;

	uint8_t collecting;	/* current slave collecting status */
	const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
	const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
	uint8_t subtype;
	uint16_t i;
	uint16_t j;
	uint16_t k;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	idx = bd_rx_q->active_slave;
	if (idx >= slave_count) {
		bd_rx_q->active_slave = 0;
		idx = 0;
	}
	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
					 COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
			subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

			/* Remove packet from array if:
			 * - it is slow packet but no dedicated rxq is present,
			 * - slave is not in collecting state,
			 * - bonding interface is not in promiscuous mode:
			 *   - packet is unicast and address does not match,
			 *   - packet is multicast and bonding interface
			 *     is not in allmulti,
			 */
			if (unlikely(
				(!dedicated_rxq &&
				 is_lacp_packets(hdr->ether_type, subtype,
						 bufs[j])) ||
				!collecting ||
				(!promisc &&
				 ((rte_is_unicast_ether_addr(&hdr->dst_addr) &&
				   !rte_is_same_ether_addr(bond_mac,
							   &hdr->dst_addr)) ||
				  (!allmulti &&
				   rte_is_multicast_ether_addr(&hdr->dst_addr)))))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(
						internals, slaves[idx], bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
						(num_rx_total - j));
				}
			} else
				j++;
		}
		if (unlikely(++idx == slave_count))
			idx = 0;
	}

	if (++bd_rx_q->active_slave >= slave_count)
		bd_rx_q->active_slave = 0;

	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	return rx_burst_8023ad(queue, bufs, nb_pkts, false);
}

static uint16_t
bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	return rx_burst_8023ad(queue, bufs, nb_pkts, true);
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
	switch (arp_op) {
	case RTE_ARP_OP_REQUEST:
		strlcpy(buf, "ARP Request", buf_len);
		return;
	case RTE_ARP_OP_REPLY:
		strlcpy(buf, "ARP Reply", buf_len);
		return;
	case RTE_ARP_OP_REVREQUEST:
		strlcpy(buf, "Reverse ARP Request", buf_len);
		return;
	case RTE_ARP_OP_REVREPLY:
		strlcpy(buf, "Reverse ARP Reply", buf_len);
		return;
	case RTE_ARP_OP_INVREQUEST:
		strlcpy(buf, "Peer Identify Request", buf_len);
		return;
	case RTE_ARP_OP_INVREPLY:
		strlcpy(buf, "Peer Identify Reply", buf_len);
		return;
	default:
		break;
	}
	strlcpy(buf, "Unknown", buf_len);
}
#endif
#define MaxIPv4String 16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}

#define MAX_CLIENTS_NUMBER 128
uint8_t active_clients;
struct client_stats_t {
	uint16_t port;
	uint32_t ipv4_addr;
	uint32_t ipv4_rx_packets;
	uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
static void
update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
{
	int i = 0;

	for (; i < MAX_CLIENTS_NUMBER; i++) {
		if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
			/* Just update RX packets number for this client */
			if (TXorRXindicator == &burstnumberRX)
				client_stats[i].ipv4_rx_packets++;
			else
				client_stats[i].ipv4_tx_packets++;
			return;
		}
	}
	/* We have a new client. Insert it into the table and update its stats */
	if (TXorRXindicator == &burstnumberRX)
		client_stats[active_clients].ipv4_rx_packets++;
	else
		client_stats[active_clients].ipv4_tx_packets++;
	client_stats[active_clients].ipv4_addr = addr;
	client_stats[active_clients].port = port;
	active_clients++;
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
	rte_log(RTE_LOG_DEBUG, bond_logtype,				\
		"%s port:%d SrcMAC:" RTE_ETHER_ADDR_PRT_FMT " SrcIP:%s " \
		"DstMAC:" RTE_ETHER_ADDR_PRT_FMT " DstIP:%s %s %d\n",	\
		info,							\
		port,							\
		RTE_ETHER_ADDR_BYTES(&eth_h->src_addr),			\
		src_ip,							\
		RTE_ETHER_ADDR_BYTES(&eth_h->dst_addr),			\
		dst_ip,							\
		arp_op, ++burstnumber)
#endif
static void
mode6_debug(const char __rte_unused *info,
	struct rte_ether_hdr *eth_h, uint16_t port,
	uint32_t __rte_unused *burstnumber)
{
	struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	struct rte_arp_hdr *arp_h;
	char dst_ip[16];
	char ArpOp[24];
	char buf[16];
#endif
	char src_ip[16];

	uint16_t ether_type = eth_h->ether_type;
	uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	strlcpy(buf, info, 16);
#endif

	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
		update_client_stats(ipv4_h->src_addr, port, burstnumber);
	}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
		arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
		arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
				ArpOp, sizeof(ArpOp));
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
	}
#endif
}
#endif
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct rte_ether_hdr *eth_h;
	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;
	int i;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
			bond_mode_alb_arp_recv(eth_h, offset, internals);
		}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
	}

	return nb_recv_pkts;
}
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint16_t num_of_slaves;
	uint16_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate each slave's mbuf array with the packets to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
				       &slave_bufs[i][num_tx_slave],
				       tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
static inline uint16_t
ether_hash(struct rte_ether_hdr *eth_hdr)
{
	unaligned_uint16_t *word_src_addr =
		(unaligned_uint16_t *)eth_hdr->src_addr.addr_bytes;
	unaligned_uint16_t *word_dst_addr =
		(unaligned_uint16_t *)eth_hdr->dst_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
	unaligned_uint32_t *word_src_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
	unaligned_uint32_t *word_dst_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
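/*
 * Illustrative sketch, not part of the driver: the xmit policies below
 * reduce each packet to one 32-bit hash and fold it before the modulo,
 * exactly as in burst_xmit_l23_hash()/burst_xmit_l34_hash(). Hedged
 * example for a single IPv4/TCP packet; the helper name and the
 * BOND_PMD_EXAMPLES guard are hypothetical.
 */
#ifdef BOND_PMD_EXAMPLES
static uint16_t
example_pick_slave_l34(struct rte_ipv4_hdr *ipv4_hdr,
		struct rte_tcp_hdr *tcp_hdr, uint16_t slave_count)
{
	uint32_t hash = ipv4_hash(ipv4_hdr) ^ HASH_L4_PORTS(tcp_hdr);

	/* Fold the high bits down so every byte influences the result */
	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash % slave_count;
}
#endif /* BOND_PMD_EXAMPLES */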
void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
		uint16_t slave_count, uint16_t *slaves)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t hash;
	int i;

	for (i = 0; i < nb_pkts; i++) {
		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);

		hash = ether_hash(eth_hdr);

		slaves[i] = (hash ^= hash >> 8) % slave_count;
	}
}
void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
		uint16_t slave_count, uint16_t *slaves)
{
	uint16_t i;
	struct rte_ether_hdr *eth_hdr;
	uint16_t proto;
	size_t vlan_offset;
	uint32_t hash, l3hash;

	for (i = 0; i < nb_pkts; i++) {
		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
		l3hash = 0;

		proto = eth_hdr->ether_type;
		hash = ether_hash(eth_hdr);

		vlan_offset = get_vlan_offset(eth_hdr, &proto);

		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			l3hash = ipv4_hash(ipv4_hdr);

		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			l3hash = ipv6_hash(ipv6_hdr);
		}

		hash = hash ^ l3hash;
		hash ^= hash >> 16;
		hash ^= hash >> 8;

		slaves[i] = hash % slave_count;
	}
}
void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
		uint16_t slave_count, uint16_t *slaves)
{
	struct rte_ether_hdr *eth_hdr;
	uint16_t proto;
	size_t vlan_offset;
	int i;

	struct rte_udp_hdr *udp_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	uint32_t hash, l3hash, l4hash;

	for (i = 0; i < nb_pkts; i++) {
		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
		size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
		proto = eth_hdr->ether_type;
		vlan_offset = get_vlan_offset(eth_hdr, &proto);
		l3hash = 0;
		l4hash = 0;

		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			size_t ip_hdr_offset;

			l3hash = ipv4_hash(ipv4_hdr);

			/* there is no L4 header in fragmented packet */
			if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
								== 0)) {
				ip_hdr_offset = (ipv4_hdr->version_ihl
					& RTE_IPV4_HDR_IHL_MASK) *
					RTE_IPV4_IHL_MULTIPLIER;

				if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
					tcp_hdr = (struct rte_tcp_hdr *)
						((char *)ipv4_hdr +
							ip_hdr_offset);
					if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
							<= pkt_end)
						l4hash = HASH_L4_PORTS(tcp_hdr);
				} else if (ipv4_hdr->next_proto_id ==
								IPPROTO_UDP) {
					udp_hdr = (struct rte_udp_hdr *)
						((char *)ipv4_hdr +
							ip_hdr_offset);
					if ((size_t)udp_hdr + sizeof(*udp_hdr)
							<= pkt_end)
						l4hash = HASH_L4_PORTS(udp_hdr);
				}
			}
		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			l3hash = ipv6_hash(ipv6_hdr);

			if (ipv6_hdr->proto == IPPROTO_TCP) {
				tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
				l4hash = HASH_L4_PORTS(tcp_hdr);
			} else if (ipv6_hdr->proto == IPPROTO_UDP) {
				udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
				l4hash = HASH_L4_PORTS(udp_hdr);
			}
		}

		hash = l3hash ^ l4hash;
		hash ^= hash >> 16;
		hash ^= hash >> 8;

		slaves[i] = hash % slave_count;
	}
}
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint16_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
	int i;

	for (i = 0; i < internals->active_slave_count; i++) {
		tlb_last_obytets[internals->active_slaves[i]] = 0;
	}
}
static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;

	/* Sort in descending order of bandwidth left, using the remainder
	 * as the tie-breaker */
	if (diff != 0)
		return diff > 0 ? 1 : -1;
	if (diff2 != 0)
		return diff2 > 0 ? 1 : -1;
	return 0;
}
static void
bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;
	int ret;

	ret = rte_eth_link_get_nowait(port_id, &link_status);
	if (ret < 0) {
		RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
			     port_id, rte_strerror(-ret));
		return;
	}
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
	if (link_bwg == 0)
		return;
	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
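/*
 * Worked example (illustrative): for a 10G slave, link_speed is 10000
 * (Mb/s), so link_bwg = 10000 * 1000000 / 8 = 1.25e9 bytes/s. With
 * update_idx = 0 the window factor is 1 * REORDER_PERIOD_MS = 10, giving
 * link_bwg = 1.25e10. A slave that transmitted load = 1e6 bytes in that
 * window yields bwg_left_int = (1.25e10 - 1e9) / 1.25e10 = 0 in integer
 * arithmetic, so the ordering between similarly loaded slaves is decided
 * by bwg_left_remainder (here 1.15e10) in bandwidth_cmp() above.
 */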
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint16_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint16_t slave_id;
	uint16_t i;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats) {
			tlb_last_obytets[slave_id] = slave_stats.obytes;
		}
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint16_t i, j;

	uint16_t num_of_slaves = internals->active_slave_count;
	uint16_t slaves[RTE_MAX_ETHPORTS];

	struct rte_ether_hdr *ether_hdr;
	struct rte_ether_addr primary_slave_addr;
	struct rte_ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j],
					struct rte_ether_hdr *);
			if (rte_is_same_ether_addr(&ether_hdr->src_addr,
					&primary_slave_addr))
				rte_ether_addr_copy(&active_slave_addr,
						&ether_hdr->src_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
void
bond_tlb_disable(struct bond_dev_private *internals)
{
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
	bond_ethdev_update_tlb_slave_cb(internals);
}
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In the worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;
	size_t pkt_size;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;
	uint16_t slave_idx;

	int i, j;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->src_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;
		} else {
			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
		}
	}

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_BOND_LOG(ERR,
						     "Failed to allocate ARP packet from pool");
					continue;
				}
				pkt_size = sizeof(struct rte_ether_hdr) +
					sizeof(struct rte_arp_hdr) +
					client_info->vlan_count *
					sizeof(struct rte_vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;
			}
		}
		internals->mode6.ntt = 0;
	}

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];
			}

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
	/* Print TX stats including update packets */
			for (j = 0; j < slave_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
							struct rte_ether_hdr *);
				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);
			}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			for (j = 0; j < update_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
							struct rte_ether_hdr *);
				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
		}

		num_tx_total += num_send;
	}

	return num_tx_total;
}
static inline uint16_t
tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
		uint16_t *slave_port_ids, uint16_t slave_count)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	/* Array to sort mbufs for transmission on each slave into */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
	/* Number of mbufs for transmission on each slave */
	uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
	/* Mapping array generated by hash function to map mbufs to slaves */
	uint16_t bufs_slave_port_idxs[nb_bufs];

	uint16_t slave_tx_count;
	uint16_t total_tx_count = 0, total_tx_fail_count = 0;

	uint16_t i;

	/*
	 * Populate each slave's mbuf array with the packets to be sent on it,
	 * selecting the output slave using a hash based on the xmit policy
	 */
	internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
			bufs_slave_port_idxs);

	for (i = 0; i < nb_bufs; i++) {
		/* Populate slave mbuf arrays with mbufs for that slave. */
		uint16_t slave_idx = bufs_slave_port_idxs[i];

		slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < slave_count; i++) {
		if (slave_nb_bufs[i] == 0)
			continue;

		slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
				bd_tx_q->queue_id, slave_bufs[i],
				slave_nb_bufs[i]);

		total_tx_count += slave_tx_count;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
			int slave_tx_fail_count = slave_nb_bufs[i] -
					slave_tx_count;
			total_tx_fail_count += slave_tx_fail_count;
			memcpy(&bufs[nb_bufs - total_tx_fail_count],
			       &slave_bufs[i][slave_tx_count],
			       slave_tx_fail_count * sizeof(bufs[0]));
		}
	}

	return total_tx_count;
}
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_bufs)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
	uint16_t slave_count;

	if (unlikely(nb_bufs == 0))
		return 0;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting
	 */
	slave_count = internals->active_slave_count;
	if (unlikely(slave_count < 1))
		return 0;

	memcpy(slave_port_ids, internals->active_slaves,
			sizeof(slave_port_ids[0]) * slave_count);
	return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
			slave_count);
}
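/*
 * Illustrative sketch, not part of the driver: the hash used by
 * tx_burst_balance() is chosen per bond through the public API. Hedged
 * example; the helper name and BOND_PMD_EXAMPLES are hypothetical.
 */
#ifdef BOND_PMD_EXAMPLES
static int
example_select_l34_policy(uint16_t bonded_port_id)
{
	/* Maps internals->burst_xmit_hash to burst_xmit_l34_hash() */
	return rte_eth_bond_xmit_policy_set(bonded_port_id,
			BALANCE_XMIT_POLICY_LAYER34);
}
#endif /* BOND_PMD_EXAMPLES */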
static inline uint16_t
tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
		bool dedicated_txq)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
	uint16_t slave_count;

	uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
	uint16_t dist_slave_count;

	uint16_t slave_tx_count;

	uint16_t i;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	if (unlikely(slave_count < 1))
		return 0;

	memcpy(slave_port_ids, internals->active_slaves,
			sizeof(slave_port_ids[0]) * slave_count);

	if (dedicated_txq)
		goto skip_tx_ring;

	/* Check for LACP control packets and send if available */
	for (i = 0; i < slave_count; i++) {
		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
		struct rte_mbuf *ctrl_pkt = NULL;

		if (likely(rte_ring_empty(port->tx_ring)))
			continue;

		if (rte_ring_dequeue(port->tx_ring,
				     (void **)&ctrl_pkt) != -ENOENT) {
			slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
					bd_tx_q->queue_id, &ctrl_pkt, 1);
			/*
			 * re-enqueue LAG control plane packets to buffering
			 * ring if transmission fails so the packet isn't lost.
			 */
			if (slave_tx_count != 1)
				rte_ring_enqueue(port->tx_ring, ctrl_pkt);
		}
	}

skip_tx_ring:
	if (unlikely(nb_bufs == 0))
		return 0;

	dist_slave_count = 0;
	for (i = 0; i < slave_count; i++) {
		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];

		if (ACTOR_STATE(port, DISTRIBUTING))
			dist_slave_port_ids[dist_slave_count++] =
					slave_port_ids[i];
	}

	if (unlikely(dist_slave_count < 1))
		return 0;

	return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
			dist_slave_count);
}

static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_bufs)
{
	return tx_burst_8023ad(queue, bufs, nb_bufs, false);
}

static uint16_t
bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_bufs)
{
	return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint16_t slaves[RTE_MAX_ETHPORTS];
	uint8_t tx_failed_flag = 0;
	uint16_t num_of_slaves;

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
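/*
 * Illustrative sketch, not part of the driver: broadcast mode works
 * because an mbuf can be referenced by several Tx queues at once. A
 * minimal demonstration of the refcount contract used above; the helper
 * name and BOND_PMD_EXAMPLES are hypothetical.
 */
#ifdef BOND_PMD_EXAMPLES
static void
example_share_mbuf_between_ports(struct rte_mbuf *m, uint16_t num_ports)
{
	/* One extra reference per additional transmitter... */
	rte_mbuf_refcnt_update(m, num_ports - 1);

	/* ...so each rte_pktmbuf_free() only decrements the count; the
	 * buffer returns to its mempool when the last owner frees it. */
	rte_pktmbuf_free(m);
}
#endif /* BOND_PMD_EXAMPLES */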
static void
link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
{
	struct bond_dev_private *bond_ctx = ethdev->data->dev_private;

	if (bond_ctx->mode == BONDING_MODE_8023AD) {
		/**
		 * If in mode 4 then save the link properties of the first
		 * slave, all subsequent slaves must match these properties
		 */
		struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;

		bond_link->link_autoneg = slave_link->link_autoneg;
		bond_link->link_duplex = slave_link->link_duplex;
		bond_link->link_speed = slave_link->link_speed;
	} else {
		/**
		 * In any other mode the link properties are set to default
		 * values of AUTONEG/DUPLEX
		 */
		ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
		ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	}
}

static int
link_properties_valid(struct rte_eth_dev *ethdev,
		struct rte_eth_link *slave_link)
{
	struct bond_dev_private *bond_ctx = ethdev->data->dev_private;

	if (bond_ctx->mode == BONDING_MODE_8023AD) {
		struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;

		if (bond_link->link_duplex != slave_link->link_duplex ||
			bond_link->link_autoneg != slave_link->link_autoneg ||
			bond_link->link_speed != slave_link->link_speed)
			return -1;
	}

	return 0;
}
int
mac_address_get(struct rte_eth_dev *eth_dev,
		struct rte_ether_addr *dst_mac_addr)
{
	struct rte_ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	rte_ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev,
		struct rte_ether_addr *new_mac_addr)
{
	struct rte_ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different to current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
static const struct rte_ether_addr null_mac_addr;

/*
 * Add additional MAC addresses to the slave
 */
int
slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
		uint16_t slave_port_id)
{
	int i, ret;
	struct rte_ether_addr *mac_addr;

	for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
		mac_addr = &bonded_eth_dev->data->mac_addrs[i];
		if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
			break;

		ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
		if (ret < 0) {
			/* roll back the addresses already programmed */
			for (i--; i > 0; i--)
				rte_eth_dev_mac_addr_remove(slave_port_id,
					&bonded_eth_dev->data->mac_addrs[i]);
			return ret;
		}
	}

	return 0;
}

/*
 * Remove additional MAC addresses from the slave
 */
int
slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
		uint16_t slave_port_id)
{
	int i, rc, ret;
	struct rte_ether_addr *mac_addr;

	rc = 0;
	for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
		mac_addr = &bonded_eth_dev->data->mac_addrs[i];
		if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
			break;

		ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
		/* save only the first error */
		if (ret < 0 && rc == 0)
			rc = ret;
	}

	return rc;
}
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	bool set;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++) {
			if (rte_eth_dev_default_mac_addr_set(
					internals->slaves[i].port_id,
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		set = true;
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (rte_eth_dev_default_mac_addr_set(
						internals->current_primary_port,
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					set = false;
				}
			} else {
				if (rte_eth_dev_default_mac_addr_set(
						internals->slaves[i].port_id,
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
				}
			}
		}
		if (!set)
			return -1;
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, uint8_t mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		if (internals->mode4.dedicated_queues.enabled == 0) {
			eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
			eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
			RTE_BOND_LOG(WARNING,
				"Using mode 4, it is necessary to do TX burst "
				"and RX burst at least every 100ms.");
		} else {
			/* Use flow director's optimization */
			eth_dev->rx_pkt_burst =
					bond_ethdev_rx_burst_8023ad_fast_queue;
			eth_dev->tx_pkt_burst =
					bond_ethdev_tx_burst_8023ad_fast_queue;
		}
		break;
	case BONDING_MODE_TLB:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
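/*
 * Illustrative sketch, not part of the driver: applications reach
 * bond_ethdev_mode_set() through the public bonding API, either at
 * creation time or later while the port is stopped. Hedged example;
 * only the rte_eth_bond_* calls are real, the helper name and the
 * BOND_PMD_EXAMPLES guard are made up.
 */
#ifdef BOND_PMD_EXAMPLES
static int
example_create_bonded_port(const char *name, uint8_t socket_id)
{
	int port_id = rte_eth_bond_create(name, BONDING_MODE_ROUND_ROBIN,
			socket_id);

	if (port_id < 0)
		return port_id;

	/* Re-binds rx_pkt_burst/tx_pkt_burst to the balance handlers */
	return rte_eth_bond_mode_set(port_id, BONDING_MODE_BALANCE);
}
#endif /* BOND_PMD_EXAMPLES */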
static int
slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	int errval = 0;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];

	if (port->slow_pool == NULL) {
		char mem_name[256];
		int slave_id = slave_eth_dev->data->port_id;

		snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
				slave_id);
		port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
			250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			slave_eth_dev->data->numa_node);

		/* Any memory allocation failure in initialization is critical because
		 * resources can't be freed, so reinitialization is impossible. */
		if (port->slow_pool == NULL) {
			rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
				slave_id, mem_name, rte_strerror(rte_errno));
		}
	}

	if (internals->mode4.dedicated_queues.enabled == 1) {
		/* Configure slow Rx queue */

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.rx_qid, 128,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				NULL, port->slow_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id,
					internals->mode4.dedicated_queues.rx_qid,
					errval);
			return errval;
		}

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.tx_qid, 512,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				NULL);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.tx_qid,
				errval);
			return errval;
		}
	}
	return 0;
}
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;

	int errval;
	uint16_t q_id;
	struct rte_flow_error flow_error;

	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	/* Stop slave */
	errval = rte_eth_dev_stop(slave_eth_dev->data->port_id);
	if (errval != 0)
		RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)",
			     slave_eth_dev->data->port_id, errval);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* If RSS is enabled for bonding, try to enable it for slaves */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
		/* rss_key won't be empty if RSS is configured in bonded dev */
		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
					internals->rss_key_len;
		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
					internals->rss_key;

		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
	}

	slave_eth_dev->data->dev_conf.rxmode.mtu =
			bonded_eth_dev->data->dev_conf.rxmode.mtu;

	slave_eth_dev->data->dev_conf.txmode.offloads |=
		bonded_eth_dev->data->dev_conf.txmode.offloads;

	slave_eth_dev->data->dev_conf.txmode.offloads &=
		(bonded_eth_dev->data->dev_conf.txmode.offloads |
		~internals->tx_offload_capa);

	slave_eth_dev->data->dev_conf.rxmode.offloads |=
		bonded_eth_dev->data->dev_conf.rxmode.offloads;

	slave_eth_dev->data->dev_conf.rxmode.offloads &=
		(bonded_eth_dev->data->dev_conf.rxmode.offloads |
		~internals->rx_offload_capa);

	nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
	nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;

	if (internals->mode == BONDING_MODE_8023AD) {
		if (internals->mode4.dedicated_queues.enabled == 1) {
			nb_rx_queues++;
			nb_tx_queues++;
		}
	}

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			nb_rx_queues, nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
				     bonded_eth_dev->data->mtu);
	if (errval != 0 && errval != -ENOTSUP) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	if (internals->mode == BONDING_MODE_8023AD &&
			internals->mode4.dedicated_queues.enabled == 1) {
		if (slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev)
				!= 0)
			return errval;

		errval = bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
				slave_eth_dev->data->port_id);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"bond_ethdev_8023ad_flow_verify: port=%d, err (%d)",
				slave_eth_dev->data->port_id, errval);
			return errval;
		}

		if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
			rte_flow_destroy(slave_eth_dev->data->port_id,
					internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
					&flow_error);

		errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
				slave_eth_dev->data->port_id);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"bond_ethdev_8023ad_flow_set: port=%d, err (%d)",
				slave_eth_dev->data->port_id, errval);
			return errval;
		}
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	/* If RSS is enabled for bonding, synchronize RETA */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
		int i;
		struct bond_dev_private *internals;

		internals = bonded_eth_dev->data->dev_private;

		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
				errval = rte_eth_dev_rss_reta_update(
						slave_eth_dev->data->port_id,
						&internals->reta_conf[0],
						internals->slaves[i].reta_size);
				if (errval != 0) {
					RTE_BOND_LOG(WARNING,
						     "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
						     " RSS Configuration for bonding may be inconsistent.",
						     slave_eth_dev->data->port_id, errval);
				}
				break;
			}
		}
	}

	/* If lsc interrupt is set, check initial slave's link status */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
		bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
			RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
			NULL);
	}

	return 0;
}
static void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	uint16_t i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1)) {
		struct rte_flow *flow;

		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));
		TAILQ_FOREACH(flow, &internals->flow_list, next) {
			memmove(&flow->flows[i], &flow->flows[i + 1],
				sizeof(flow->flows[0]) *
				(internals->slave_count - i - 1));
			flow->flows[internals->slave_count - 1] = NULL;
		}
	}

	internals->slave_count--;

	/* force reconfiguration of slave interfaces */
	rte_eth_dev_internal_reset(slave_eth_dev);
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);

static void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* Mark slave devices that don't support interrupts so we can
	 * compensate when we start the bond
	 */
	if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;
	}

	slave_details->link_status_wait_to_complete = 0;
	/* save the slave's current MAC so it can be restored if it leaves
	 * the bond */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct rte_ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint16_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
static int
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);

static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (check_for_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		goto out_err;
	}

	if (internals->user_defined_mac == 0) {
		struct rte_ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			goto out_err;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			goto out_err;
		}
	}

	if (internals->mode == BONDING_MODE_8023AD) {
		if (internals->mode4.dedicated_queues.enabled == 1) {
			internals->mode4.dedicated_queues.rx_qid =
					eth_dev->data->nb_rx_queues;
			internals->mode4.dedicated_queues.tx_qid =
					eth_dev->data->nb_tx_queues;
		}
	}

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		struct rte_eth_dev *slave_ethdev =
				&(rte_eth_devices[internals->slaves[i].port_id]);
		if (slave_configure(eth_dev, slave_ethdev) != 0) {
			RTE_BOND_LOG(ERR,
				"bonded port (%d) failed to reconfigure slave device (%d)",
				eth_dev->data->port_id,
				internals->slaves[i].port_id);
			goto out_err;
		}
		/* We will need to poll for link status if any slave doesn't
		 * support interrupts
		 */
		if (internals->slaves[i].link_status_poll_enabled)
			internals->link_status_polling_enabled = 1;
	}

	/* start polling if needed */
	if (internals->link_status_polling_enabled) {
		rte_eal_alarm_set(
			internals->link_status_polling_interval_ms * 1000,
			bond_ethdev_slave_link_status_change_monitor,
			(void *)&rte_eth_devices[internals->port_id]);
	}

	/* Update all slave devices' MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		goto out_err;

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);

	return 0;

out_err:
	eth_dev->data->dev_started = 0;
	return -1;
}
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	if (dev->data->rx_queues != NULL) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rte_free(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->nb_rx_queues = 0;
	}

	if (dev->data->tx_queues != NULL) {
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			rte_free(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->nb_tx_queues = 0;
	}
}
int
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint16_t i;
	int ret;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->active_slave_count; i++) {
			port = &bond_mode_8023ad_ports[internals->active_slaves[i]];

			RTE_ASSERT(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_ASSERT(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;
	}

	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	internals->link_status_polling_enabled = 0;
	for (i = 0; i < internals->slave_count; i++) {
		uint16_t slave_id = internals->slaves[i].port_id;
		if (find_slave_by_id(internals->active_slaves,
				internals->active_slave_count, slave_id) !=
						internals->active_slave_count) {
			internals->slaves[i].last_link_status = 0;
			ret = rte_eth_dev_stop(slave_id);
			if (ret != 0) {
				RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
					     slave_id);
				return ret;
			}
			deactivate_slave(eth_dev, slave_id);
		}
	}

	return 0;
}
int
bond_ethdev_close(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	uint16_t bond_port_id = internals->port_id;
	int skipped = 0;
	struct rte_flow_error ferror;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
	while (internals->slave_count != skipped) {
		uint16_t port_id = internals->slaves[skipped].port_id;

		if (rte_eth_dev_stop(port_id) != 0) {
			RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
				     port_id);
			skipped++;
		}

		if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to remove port %d from bonded device %s",
				     port_id, dev->device->name);
			skipped++;
		}
	}
	bond_flow_ops.flush(dev, &ferror);
	bond_ethdev_free_queues(dev);
	rte_bitmap_reset(internals->vlan_filter_bmp);
	rte_bitmap_free(internals->vlan_filter_bmp);
	rte_free(internals->vlan_filter_bmpmem);

	/* Try to release the mempool used in mode 6. If the bond device is
	 * not in mode 6, the pointer is NULL and freeing it is harmless.
	 */
	rte_mempool_free(internals->mode6.mempool);

	if (internals->kvlist != NULL)
		rte_kvargs_free(internals->kvlist);

	return 0;
}
/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static int
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct bond_slave_details slave;
	int ret;

	uint16_t max_nb_rx_queues = UINT16_MAX;
	uint16_t max_nb_tx_queues = UINT16_MAX;
	uint16_t max_rx_desc_lim = UINT16_MAX;
	uint16_t max_tx_desc_lim = UINT16_MAX;

	dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;

	dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
			internals->candidate_max_rx_pktlen :
			RTE_ETHER_MAX_JUMBO_FRAME_LEN;

	/* Max number of tx/rx queues that the bonded device can support is the
	 * minimum values of the bonded slaves, as all slaves must be capable
	 * of supporting the same number of tx/rx queues.
	 */
	if (internals->slave_count > 0) {
		struct rte_eth_dev_info slave_info;
		uint16_t idx;

		for (idx = 0; idx < internals->slave_count; idx++) {
			slave = internals->slaves[idx];
			ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
			if (ret != 0) {
				RTE_BOND_LOG(ERR,
					"%s: Error during getting device (port %u) info: %s\n",
					__func__,
					slave.port_id,
					strerror(-ret));

				return ret;
			}

			if (slave_info.max_rx_queues < max_nb_rx_queues)
				max_nb_rx_queues = slave_info.max_rx_queues;

			if (slave_info.max_tx_queues < max_nb_tx_queues)
				max_nb_tx_queues = slave_info.max_tx_queues;

			if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
				max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;

			if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
				max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
		}
	}

	dev_info->max_rx_queues = max_nb_rx_queues;
	dev_info->max_tx_queues = max_nb_tx_queues;

	memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
	       sizeof(dev_info->default_rxconf));
	memcpy(&dev_info->default_txconf, &internals->default_txconf,
	       sizeof(dev_info->default_txconf));

	dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
	dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;

	/**
	 * If dedicated hw queues enabled for link bonding device in LACP mode
	 * then we need to reduce the maximum number of data path queues by 1.
	 */
	if (internals->mode == BONDING_MODE_8023AD &&
		internals->mode4.dedicated_queues.enabled == 1) {
		dev_info->max_rx_queues--;
		dev_info->max_tx_queues--;
	}

	dev_info->min_rx_bufsize = 0;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
	dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	dev_info->reta_size = internals->reta_size;
	dev_info->hash_key_size = internals->rss_key_len;

	return 0;
}
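/*
 * Usage sketch (assumption: "bond_port_id" is the application's bonded port
 * id): querying the bonded port reports the aggregated limits computed above,
 * i.e. max_rx_queues/max_tx_queues are the minima across all slaves, minus
 * one each in mode 4 when dedicated queues are enabled.
 *
 *	struct rte_eth_dev_info dev_info;
 *	int ret = rte_eth_dev_info_get(bond_port_id, &dev_info);
 */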
static int
bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int res;
	uint16_t i;
	struct bond_dev_private *internals = dev->data->dev_private;

	/* don't do this while a slave is being added */
	rte_spinlock_lock(&internals->lock);

	if (on)
		rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
	else
		rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);

	for (i = 0; i < internals->slave_count; i++) {
		uint16_t port_id = internals->slaves[i].port_id;

		res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
		if (res == -ENOTSUP)
			RTE_BOND_LOG(WARNING,
				     "Setting VLAN filter on slave port %u not supported.",
				     port_id);
	}

	rte_spinlock_unlock(&internals->lock);
	return 0;
}
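/*
 * Usage sketch: a VLAN filter set on the bonded port is recorded in the
 * bitmap and fanned out to every slave by the function above. Hypothetical
 * call enabling VLAN 100 on a bonded port "bond_port_id":
 *
 *	rte_eth_dev_vlan_filter(bond_port_id, 100, 1);
 */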
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->data->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}
static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->data->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
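/*
 * Usage sketch: queue setup on the bonded port only records the requested
 * configuration; the bonding driver replays it on each slave when the slave
 * is (re)configured. Hypothetical application call (1024 descriptors,
 * default queue conf):
 *
 *	rte_eth_tx_queue_setup(bond_port_id, 0, 1024, rte_socket_id(), NULL);
 */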
static void
bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
	void *queue = dev->data->rx_queues[queue_id];

	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
	void *queue = dev->data->tx_queues[queue_id];

	if (queue == NULL)
		return;

	rte_free(queue);
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = cb_arg;
	internals = bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id,
						NULL);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdev's */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
{
	int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);

	struct bond_dev_private *bond_ctx;
	struct rte_eth_link slave_link;

	bool one_link_update_succeeded;
	uint32_t idx;
	int ret;

	bond_ctx = ethdev->data->dev_private;

	ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;

	if (ethdev->data->dev_started == 0 ||
			bond_ctx->active_slave_count == 0) {
		ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
		return 0;
	}

	ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	if (wait_to_complete)
		link_update = rte_eth_link_get;
	else
		link_update = rte_eth_link_get_nowait;

	switch (bond_ctx->mode) {
	case BONDING_MODE_BROADCAST:
		/**
		 * Setting link speed to UINT32_MAX to ensure we pick up the
		 * value of the first active slave
		 */
		ethdev->data->dev_link.link_speed = UINT32_MAX;

		/**
		 * link speed is minimum value of all the slaves link speed as
		 * packet loss will occur on this slave if transmission at rates
		 * greater than this are attempted
		 */
		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
			ret = link_update(bond_ctx->active_slaves[idx],
					  &slave_link);
			if (ret < 0) {
				ethdev->data->dev_link.link_speed =
					RTE_ETH_SPEED_NUM_NONE;
				RTE_BOND_LOG(ERR,
					"Slave (port %u) link get failed: %s",
					bond_ctx->active_slaves[idx],
					rte_strerror(-ret));
				return 0;
			}

			if (slave_link.link_speed <
					ethdev->data->dev_link.link_speed)
				ethdev->data->dev_link.link_speed =
						slave_link.link_speed;
		}
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		/* Current primary slave */
		ret = link_update(bond_ctx->current_primary_port, &slave_link);
		if (ret < 0) {
			RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
				bond_ctx->current_primary_port,
				rte_strerror(-ret));
			return 0;
		}

		ethdev->data->dev_link.link_speed = slave_link.link_speed;
		break;
	case BONDING_MODE_8023AD:
		ethdev->data->dev_link.link_autoneg =
				bond_ctx->mode4.slave_link.link_autoneg;
		ethdev->data->dev_link.link_duplex =
				bond_ctx->mode4.slave_link.link_duplex;
		/* fall through */
		/* to update link speed */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/**
		 * In these modes the maximum theoretical link speed is the sum
		 * of all the slaves' speeds
		 */
		ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		one_link_update_succeeded = false;

		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
			ret = link_update(bond_ctx->active_slaves[idx],
					&slave_link);
			if (ret < 0) {
				RTE_BOND_LOG(ERR,
					"Slave (port %u) link get failed: %s",
					bond_ctx->active_slaves[idx],
					rte_strerror(-ret));
				continue;
			}

			one_link_update_succeeded = true;
			ethdev->data->dev_link.link_speed +=
					slave_link.link_speed;
		}

		if (!one_link_update_succeeded) {
			RTE_BOND_LOG(ERR, "All slaves link get failed");
			return 0;
		}
	}

	return 0;
}
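/*
 * Summary of the per-mode link speed reported above:
 *  - broadcast: minimum of the active slaves' speeds (every packet is
 *    replicated, so the slowest slave bounds the usable rate);
 *  - active backup: speed of the current primary slave only;
 *  - all other modes: sum of the active slaves' speeds, i.e. the
 *    theoretical aggregate bandwidth.
 */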
static int
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;
	int i, j;

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->imissed += slave_stats.imissed;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->rx_nombuf += slave_stats.rx_nombuf;

		for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
			stats->q_ipackets[j] += slave_stats.q_ipackets[j];
			stats->q_opackets[j] += slave_stats.q_opackets[j];
			stats->q_ibytes[j] += slave_stats.q_ibytes[j];
			stats->q_obytes[j] += slave_stats.q_obytes[j];
			stats->q_errors[j] += slave_stats.q_errors[j];
		}
	}

	return 0;
}
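/*
 * Usage sketch (hypothetical "bond_port_id"): reading stats on the bonded
 * port returns the per-slave counters summed above.
 *
 *	struct rte_eth_stats stats;
 *	rte_eth_stats_get(bond_port_id, &stats);
 */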
static int
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;
	int err;
	int ret;

	for (i = 0, err = 0; i < internals->slave_count; i++) {
		ret = rte_eth_stats_reset(internals->slaves[i].port_id);
		if (ret != 0)
			err = ret;
	}

	return err;
}
static int
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;
	int ret = 0;
	uint16_t port_id;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD: {
		unsigned int slave_ok = 0;

		for (i = 0; i < internals->slave_count; i++) {
			port_id = internals->slaves[i].port_id;

			ret = rte_eth_promiscuous_enable(port_id);
			if (ret != 0)
				RTE_BOND_LOG(ERR,
					"Failed to enable promiscuous mode for port %u: %s",
					port_id, rte_strerror(-ret));
			else
				slave_ok++;
		}
		/*
		 * Report success if operation is successful on at least
		 * one slave. Otherwise return last error code.
		 */
		if (slave_ok > 0)
			ret = 0;
		break;
	}
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Do not touch promisc when there cannot be primary ports */
		if (internals->slave_count == 0)
			break;
		port_id = internals->current_primary_port;
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"Failed to enable promiscuous mode for port %u: %s",
				port_id, rte_strerror(-ret));
	}

	return ret;
}
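/*
 * Usage sketch: enabling promiscuous mode on the bonded port propagates per
 * the mode rules above, i.e. to all slaves or only to the current primary
 * (hypothetical "bond_port_id"):
 *
 *	rte_eth_promiscuous_enable(bond_port_id);
 */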
static int
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;
	int ret = 0;
	uint16_t port_id;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD: {
		unsigned int slave_ok = 0;

		for (i = 0; i < internals->slave_count; i++) {
			port_id = internals->slaves[i].port_id;

			if (internals->mode == BONDING_MODE_8023AD &&
			    bond_mode_8023ad_ports[port_id].forced_rx_flags ==
					BOND_8023AD_FORCED_PROMISC) {
				slave_ok++;
				continue;
			}
			ret = rte_eth_promiscuous_disable(port_id);
			if (ret != 0)
				RTE_BOND_LOG(ERR,
					"Failed to disable promiscuous mode for port %u: %s",
					port_id, rte_strerror(-ret));
			else
				slave_ok++;
		}
		/*
		 * Report success if operation is successful on at least
		 * one slave. Otherwise return last error code.
		 */
		if (slave_ok > 0)
			ret = 0;
		break;
	}
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Do not touch promisc when there cannot be primary ports */
		if (internals->slave_count == 0)
			break;
		port_id = internals->current_primary_port;
		ret = rte_eth_promiscuous_disable(port_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"Failed to disable promiscuous mode for port %u: %s",
				port_id, rte_strerror(-ret));
	}

	return ret;
}
static void
bond_ethdev_promiscuous_update(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	uint16_t port_id = internals->current_primary_port;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD:
		/* As promiscuous mode is propagated to all slaves for these
		 * modes, there is no need to update it for the bonding device.
		 */
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Promiscuous mode is propagated only to the primary slave
		 * for these modes, so on an active/standby switchover the
		 * bonding device's setting must be applied to the new
		 * primary slave.
		 */
		if (rte_eth_promiscuous_get(internals->port_id) == 1)
			rte_eth_promiscuous_enable(port_id);
		else
			rte_eth_promiscuous_disable(port_id);
	}
}
static int
bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;
	int ret = 0;
	uint16_t port_id;

	switch (internals->mode) {
	/* allmulti mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD: {
		unsigned int slave_ok = 0;

		for (i = 0; i < internals->slave_count; i++) {
			port_id = internals->slaves[i].port_id;

			ret = rte_eth_allmulticast_enable(port_id);
			if (ret != 0)
				RTE_BOND_LOG(ERR,
					"Failed to enable allmulti mode for port %u: %s",
					port_id, rte_strerror(-ret));
			else
				slave_ok++;
		}
		/*
		 * Report success if operation is successful on at least
		 * one slave. Otherwise return last error code.
		 */
		if (slave_ok > 0)
			ret = 0;
		break;
	}
	/* allmulti mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Do not touch allmulti when there cannot be primary ports */
		if (internals->slave_count == 0)
			break;
		port_id = internals->current_primary_port;
		ret = rte_eth_allmulticast_enable(port_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"Failed to enable allmulti mode for port %u: %s",
				port_id, rte_strerror(-ret));
	}

	return ret;
}
static int
bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;
	int ret = 0;
	uint16_t port_id;

	switch (internals->mode) {
	/* allmulti mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD: {
		unsigned int slave_ok = 0;

		for (i = 0; i < internals->slave_count; i++) {
			uint16_t port_id = internals->slaves[i].port_id;

			if (internals->mode == BONDING_MODE_8023AD &&
			    bond_mode_8023ad_ports[port_id].forced_rx_flags ==
					BOND_8023AD_FORCED_ALLMULTI)
				continue;

			ret = rte_eth_allmulticast_disable(port_id);
			if (ret != 0)
				RTE_BOND_LOG(ERR,
					"Failed to disable allmulti mode for port %u: %s",
					port_id, rte_strerror(-ret));
			else
				slave_ok++;
		}
		/*
		 * Report success if operation is successful on at least
		 * one slave. Otherwise return last error code.
		 */
		if (slave_ok > 0)
			ret = 0;
		break;
	}
	/* allmulti mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Do not touch allmulti when there cannot be primary ports */
		if (internals->slave_count == 0)
			break;
		port_id = internals->current_primary_port;
		ret = rte_eth_allmulticast_disable(port_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"Failed to disable allmulti mode for port %u: %s",
				port_id, rte_strerror(-ret));
	}

	return ret;
}
static void
bond_ethdev_allmulticast_update(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	uint16_t port_id = internals->current_primary_port;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD:
		/* As allmulticast mode is propagated to all slaves for these
		 * modes, there is no need to update it for the bonding device.
		 */
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Allmulticast mode is propagated only to the primary slave
		 * for these modes, so on an active/standby switchover the
		 * bonding device's setting must be applied to the new
		 * primary slave.
		 */
		if (rte_eth_allmulticast_get(internals->port_id) == 1)
			rte_eth_allmulticast_enable(port_id);
		else
			rte_eth_allmulticast_disable(port_id);
	}
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC, NULL);
}
int
bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
		void *param, void *ret_param __rte_unused)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;
	int rc = -1;
	int ret;

	uint8_t lsc_flag = 0;
	int valid_slave = 0;
	uint16_t active_pos;
	uint16_t i;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return rc;

	bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];

	if (check_for_bonded_ethdev(bonded_eth_dev))
		return rc;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return rc;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return rc;

	/* Synchronize lsc callback parallel calls either by real link event
	 * from the slaves PMDs or by the bonding PMD itself.
	 */
	rte_spinlock_lock(&internals->lsc_lock);

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id);

	if (ret == 0 && link.link_status) {
		if (active_pos < internals->active_slave_count)
			goto link_update;

		/* check link state properties if bonded link is up */
		if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
			if (link_properties_valid(bonded_eth_dev, &link) != 0)
				RTE_BOND_LOG(ERR, "Invalid link properties "
					     "for slave %d in bonding mode %d",
					     port_id, internals->mode);
		} else {
			/* inherit slave link properties */
			link_properties_set(bonded_eth_dev, &link);
		}

		/* If no active slave ports then set this port to be
		 * the primary port.
		 */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status =
								RTE_ETH_LINK_UP;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);
			bond_ethdev_promiscuous_update(bonded_eth_dev);
			bond_ethdev_allmulticast_update(bonded_eth_dev);
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If the user has defined the primary port then default to
		 * using it.
		 */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			goto link_update;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		if (internals->active_slave_count < 1)
			lsc_flag = 1;

		/* Update primary id: take the first active slave from the
		 * list or, if none is available, fall back to the configured
		 * primary port. */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
			mac_address_slaves_update(bonded_eth_dev);
			bond_ethdev_promiscuous_update(bonded_eth_dev);
			bond_ethdev_allmulticast_update(bonded_eth_dev);
		}
	}

link_update:
	/**
	 * Update bonded device link properties after any change to active
	 * slaves
	 */
	bond_ethdev_link_update(bonded_eth_dev, 0);

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC,
						NULL);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC,
						NULL);
		}
	}

	rte_spinlock_unlock(&internals->lsc_lock);

	return rc;
}
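/*
 * Usage sketch: an application can observe the (possibly delayed) bonded
 * link state changes raised above by registering its own LSC callback on
 * the bonded port. "app_lsc_cb" is a hypothetical callback of type
 * rte_eth_dev_cb_fn.
 *
 *	rte_eth_dev_callback_register(bond_port_id, RTE_ETH_EVENT_INTR_LSC,
 *			app_lsc_cb, NULL);
 */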
static int
bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	unsigned i, j;
	int result = 0;
	int slave_reta_size;
	unsigned reta_count;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
			RTE_ETH_RETA_GROUP_SIZE;

	for (i = 0; i < reta_count; i++) {
		internals->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	/* Fill rest of array */
	for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
		memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
				sizeof(internals->reta_conf[0]) * reta_count);

	/* Propagate RETA over slaves */
	for (i = 0; i < internals->slave_count; i++) {
		slave_reta_size = internals->slaves[i].reta_size;
		result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
				&internals->reta_conf[0], slave_reta_size);
		if (result < 0)
			return result;
	}

	return 0;
}
static int
bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];

	return 0;
}
static int
bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	unsigned i;
	int result = 0;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_rss_conf bond_rss_conf;

	memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));

	bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;

	if (bond_rss_conf.rss_hf != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;

	if (bond_rss_conf.rss_key) {
		if (bond_rss_conf.rss_key_len < internals->rss_key_len)
			return -EINVAL;
		else if (bond_rss_conf.rss_key_len > internals->rss_key_len)
			RTE_BOND_LOG(WARNING, "rss_key will be truncated");

		memcpy(internals->rss_key, bond_rss_conf.rss_key,
				internals->rss_key_len);
		bond_rss_conf.rss_key_len = internals->rss_key_len;
	}

	for (i = 0; i < internals->slave_count; i++) {
		result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
				&bond_rss_conf);
		if (result < 0)
			return result;
	}

	return 0;
}
3126 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
3136 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
3137 struct rte_eth_rss_conf *rss_conf)
3139 struct bond_dev_private *internals = dev->data->dev_private;
3141 rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
3142 rss_conf->rss_key_len = internals->rss_key_len;
3143 if (rss_conf->rss_key)
3144 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
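/*
 * Usage sketch: updating the RSS hash on the bonded port propagates the
 * (masked) configuration to every slave via bond_ethdev_rss_hash_update()
 * above. Hypothetical call with a 40-byte key "key":
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = 40,
 *		.rss_hf = RTE_ETH_RSS_IP,
 *	};
 *	rte_eth_dev_rss_hash_update(bond_port_id, &conf);
 */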
static int
bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev *slave_eth_dev;
	struct bond_dev_private *internals = dev->data->dev_private;
	int ret, i;

	rte_spinlock_lock(&internals->lock);

	for (i = 0; i < internals->slave_count; i++) {
		slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
		if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
			rte_spinlock_unlock(&internals->lock);
			return -ENOTSUP;
		}
	}
	for (i = 0; i < internals->slave_count; i++) {
		ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
		if (ret < 0) {
			rte_spinlock_unlock(&internals->lock);
			return ret;
		}
	}

	rte_spinlock_unlock(&internals->lock);
	return 0;
}
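/*
 * Usage sketch: an MTU set on the bonded port is applied to all slaves, or
 * rejected wholesale if any slave lacks an mtu_set op (hypothetical values):
 *
 *	rte_eth_dev_set_mtu(bond_port_id, 9000);
 */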
static int
bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *addr)
{
	if (mac_address_set(dev, addr)) {
		RTE_BOND_LOG(ERR, "Failed to update MAC address");
		return -EINVAL;
	}

	return 0;
}
static int
bond_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		  const struct rte_flow_ops **ops)
{
	*ops = &bond_flow_ops;
	return 0;
}
static int
bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr,
			__rte_unused uint32_t index, uint32_t vmdq)
{
	struct rte_eth_dev *slave_eth_dev;
	struct bond_dev_private *internals = dev->data->dev_private;
	int ret, i;

	rte_spinlock_lock(&internals->lock);

	for (i = 0; i < internals->slave_count; i++) {
		slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
		if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
			 *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
			ret = -ENOTSUP;
			goto end;
		}
	}

	for (i = 0; i < internals->slave_count; i++) {
		ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
				mac_addr, vmdq);
		if (ret < 0) {
			/* roll back the address on the slaves already updated */
			for (i--; i >= 0; i--)
				rte_eth_dev_mac_addr_remove(
					internals->slaves[i].port_id, mac_addr);
			goto end;
		}
	}

	ret = 0;
end:
	rte_spinlock_unlock(&internals->lock);
	return ret;
}
static void
bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct rte_eth_dev *slave_eth_dev;
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	rte_spinlock_lock(&internals->lock);

	for (i = 0; i < internals->slave_count; i++) {
		slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
		if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
			goto end;
	}

	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
				mac_addr);

end:
	rte_spinlock_unlock(&internals->lock);
}
const struct eth_dev_ops default_dev_ops = {
	.dev_start            = bond_ethdev_start,
	.dev_stop             = bond_ethdev_stop,
	.dev_close            = bond_ethdev_close,
	.dev_configure        = bond_ethdev_configure,
	.dev_infos_get        = bond_ethdev_info,
	.vlan_filter_set      = bond_ethdev_vlan_filter_set,
	.rx_queue_setup       = bond_ethdev_rx_queue_setup,
	.tx_queue_setup       = bond_ethdev_tx_queue_setup,
	.rx_queue_release     = bond_ethdev_rx_queue_release,
	.tx_queue_release     = bond_ethdev_tx_queue_release,
	.link_update          = bond_ethdev_link_update,
	.stats_get            = bond_ethdev_stats_get,
	.stats_reset          = bond_ethdev_stats_reset,
	.promiscuous_enable   = bond_ethdev_promiscuous_enable,
	.promiscuous_disable  = bond_ethdev_promiscuous_disable,
	.allmulticast_enable  = bond_ethdev_allmulticast_enable,
	.allmulticast_disable = bond_ethdev_allmulticast_disable,
	.reta_update          = bond_ethdev_rss_reta_update,
	.reta_query           = bond_ethdev_rss_reta_query,
	.rss_hash_update      = bond_ethdev_rss_hash_update,
	.rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get,
	.mtu_set              = bond_ethdev_mtu_set,
	.mac_addr_set         = bond_ethdev_mac_address_set,
	.mac_addr_add         = bond_ethdev_mac_addr_add,
	.mac_addr_remove      = bond_ethdev_mac_addr_remove,
	.flow_ops_get         = bond_flow_ops_get
};
static int
bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
{
	const char *name = rte_vdev_device_name(dev);
	uint8_t socket_id = dev->device.numa_node;
	struct bond_dev_private *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	uint32_t vlan_filter_bmp_size;

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */

	/* reserve an ethdev entry */
	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
		goto err;
	}

	internals = eth_dev->data->dev_private;
	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
			BOND_MAX_MAC_ADDRS, 0, socket_id);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_BOND_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
		goto err;
	}

	eth_dev->dev_ops = &default_dev_ops;
	eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
					RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	rte_spinlock_init(&internals->lock);
	rte_spinlock_init(&internals->lsc_lock);

	internals->port_id = eth_dev->data->port_id;
	internals->mode = BONDING_MODE_INVALID;
	internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
	internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
	internals->burst_xmit_hash = burst_xmit_l2_hash;
	internals->user_defined_mac = 0;

	internals->link_status_polling_enabled = 0;

	internals->link_status_polling_interval_ms =
		DEFAULT_POLLING_INTERVAL_10_MS;
	internals->link_down_delay_ms = 0;
	internals->link_up_delay_ms = 0;

	internals->slave_count = 0;
	internals->active_slave_count = 0;
	internals->rx_offload_capa = 0;
	internals->tx_offload_capa = 0;
	internals->rx_queue_offload_capa = 0;
	internals->tx_queue_offload_capa = 0;
	internals->candidate_max_rx_pktlen = 0;
	internals->max_rx_pktlen = 0;

	/* Initially allow to choose any offload type */
	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;

	memset(&internals->default_rxconf, 0,
	       sizeof(internals->default_rxconf));
	memset(&internals->default_txconf, 0,
	       sizeof(internals->default_txconf));

	memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
	memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));

	memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
	memset(internals->slaves, 0, sizeof(internals->slaves));

	TAILQ_INIT(&internals->flow_list);
	internals->flow_isolated_valid = 0;

	/* Set mode 4 default configuration */
	bond_mode_8023ad_setup(eth_dev, NULL);
	if (bond_ethdev_mode_set(eth_dev, mode)) {
		RTE_BOND_LOG(ERR, "Failed to set bonded device %u mode to %u",
				 eth_dev->data->port_id, mode);
		goto err;
	}

	vlan_filter_bmp_size =
		rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
	internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
						   RTE_CACHE_LINE_SIZE);
	if (internals->vlan_filter_bmpmem == NULL) {
		RTE_BOND_LOG(ERR,
			     "Failed to allocate vlan bitmap for bonded device %u",
			     eth_dev->data->port_id);
		goto err;
	}

	internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
			internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
	if (internals->vlan_filter_bmp == NULL) {
		RTE_BOND_LOG(ERR,
			     "Failed to init vlan bitmap for bonded device %u",
			     eth_dev->data->port_id);
		rte_free(internals->vlan_filter_bmpmem);
		goto err;
	}

	return eth_dev->data->port_id;

err:
	rte_free(internals);
	if (eth_dev != NULL)
		eth_dev->data->dev_private = NULL;
	rte_eth_dev_release_port(eth_dev);
	return -1;
}
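/*
 * Sketch of the API path that ends up here (names as declared in
 * rte_eth_bond.h): besides devargs, applications can create the same device
 * programmatically; the returned value is the new bonded port id.
 *
 *	int port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 */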
static int
bond_probe(struct rte_vdev_device *dev)
{
	const char *name;
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode;
	int arg_count, port_id;
	int socket_id;
	uint8_t agg_mode;
	struct rte_eth_dev *eth_dev;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			RTE_BOND_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &default_dev_ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
		pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
					name);
			goto parse_error;
		}
	} else {
		RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded "
				"device %s", name);
		goto parse_error;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
					"bonded device %s", name);
			goto parse_error;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
				"bonded device %s", name);
		goto parse_error;
	} else {
		socket_id = rte_socket_id();
	}

	dev->device.numa_node = socket_id;

	/* Create link bonding eth device */
	port_id = bond_alloc(dev, bonding_mode);
	if (port_id < 0) {
		RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u on "
				"socket %u.", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist,
				PMD_BOND_AGG_MODE_KVARG,
				&bond_ethdev_parse_slave_agg_mode_kvarg,
				&agg_mode) != 0) {
			RTE_BOND_LOG(ERR,
					"Failed to parse agg selection mode for bonded device %s",
					name);
			goto parse_error;
		}

		if (internals->mode == BONDING_MODE_8023AD)
			internals->mode4.agg_selection = agg_mode;
	} else {
		internals->mode4.agg_selection = AGG_STABLE;
	}

	rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
	RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
			"socket %u.", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);
	return -1;
}
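/*
 * Devargs sketch matching the kvargs parsed above (hypothetical PCI ids):
 * creating a mode 4 (802.3ad) bond over two slaves from the EAL command
 * line:
 *
 *	--vdev 'net_bonding0,mode=4,slave=0000:01:00.0,slave=0000:01:00.1,agg_mode=stable'
 */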
static int
bond_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev;
	struct bond_dev_private *internals;
	const char *name;
	int ret = 0;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port already released */

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	RTE_ASSERT(eth_dev->device == &dev->device);

	internals = eth_dev->data->dev_private;
	if (internals->slave_count != 0)
		return -EBUSY;

	if (eth_dev->data->dev_started == 1) {
		ret = bond_ethdev_stop(eth_dev);
		bond_ethdev_close(eth_dev);
	}
	rte_eth_dev_release_port(eth_dev);

	return ret;
}
/* this part will resolve the slave portids after all the other pdev and vdev
 * have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	const char *name = dev->device->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count;
	uint16_t port_id = dev - rte_eth_devices;
	uint8_t agg_mode;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	unsigned i, j;

	/*
	 * If RSS is enabled, fill table with default values and
	 * set key to the value specified in port RSS configuration.
	 * Fall back to default RSS key if the key is not specified
	 */
	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
		struct rte_eth_rss_conf *rss_conf =
			&dev->data->dev_conf.rx_adv_conf.rss_conf;

		if (internals->rss_key_len == 0) {
			internals->rss_key_len = sizeof(default_rss_key);
		}

		if (rss_conf->rss_key != NULL) {
			if (internals->rss_key_len > rss_conf->rss_key_len) {
				RTE_BOND_LOG(ERR, "Invalid rss key length(%u)",
						rss_conf->rss_key_len);
				return -EINVAL;
			}

			memcpy(internals->rss_key, rss_conf->rss_key,
					internals->rss_key_len);
		} else {
			if (internals->rss_key_len > sizeof(default_rss_key)) {
				RTE_BOND_LOG(ERR,
				       "There is no suitable default hash key");
				return -EINVAL;
			}

			memcpy(internals->rss_key, default_rss_key,
					internals->rss_key_len);
		}

		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
			internals->reta_conf[i].mask = ~0LL;
			for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
				internals->reta_conf[i].reta[j] =
						(i * RTE_ETH_RETA_GROUP_SIZE + j) %
						dev->data->nb_rx_queues;
		}
	}

	/* set the max_rx_pktlen */
	internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct rte_ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				       &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
				     name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set mac address on bonded device %s",
				     name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(ERR,
			     "MAC address can be specified only once for bonded device %s",
			     name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				       &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
		    0) {
			RTE_BOND_LOG(INFO,
				     "Invalid xmit policy specified for bonded device %s",
				     name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set balance xmit policy on bonded device %s",
				     name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(ERR,
			     "Transmit policy can be specified only once for bonded device %s",
			     name);
		return -1;
	}

	if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist,
				       PMD_BOND_AGG_MODE_KVARG,
				       &bond_ethdev_parse_slave_agg_mode_kvarg,
				       &agg_mode) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to parse agg selection mode for bonded device %s",
				     name);
			return -1;
		}
		if (internals->mode == BONDING_MODE_8023AD) {
			int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
									agg_mode);
			if (ret < 0) {
				RTE_BOND_LOG(ERR,
					"Invalid args for agg selection set for bonded device %s",
					name);
				return -1;
			}
		}
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;
		unsigned i;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				       &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to parse slave ports for bonded device %s",
				     name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_BOND_LOG(ERR,
					     "Failed to add port %d as slave to bonded device %s",
					     slave_ports.slaves[i], name);
			}
		}

	} else {
		RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint16_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				       PMD_BOND_PRIMARY_SLAVE_KVARG,
				       &bond_ethdev_parse_primary_slave_port_id_kvarg,
				       &primary_slave_port_id) < 0) {
			RTE_BOND_LOG(INFO,
				     "Invalid primary slave port id specified for bonded device %s",
				     name);
			return -1;
		}

		/* Set the primary slave port */
		if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
		    != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set primary slave port %d on bonded device %s",
				     primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(INFO,
			     "Primary slave can be specified only once for bonded device %s",
			     name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				       PMD_BOND_LSC_POLL_PERIOD_KVARG,
				       &bond_ethdev_parse_time_ms_kvarg,
				       &lsc_poll_interval_ms) < 0) {
			RTE_BOND_LOG(INFO,
				     "Invalid lsc polling interval value specified for bonded"
				     " device %s", name);
			return -1;
		}

		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
		    != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
				     lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(INFO,
			     "LSC polling interval can be specified only once for bonded"
			     " device %s", name);
		return -1;
	}

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				       PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				       &bond_ethdev_parse_time_ms_kvarg,
				       &link_up_delay_ms) < 0) {
			RTE_BOND_LOG(INFO,
				     "Invalid link up propagation delay value specified for"
				     " bonded device %s", name);
			return -1;
		}

		/* Set the link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
		    != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set link up propagation delay (%u ms) on bonded"
				     " device %s", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(INFO,
			     "Link up propagation delay can be specified only once for"
			     " bonded device %s", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				       PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				       &bond_ethdev_parse_time_ms_kvarg,
				       &link_down_delay_ms) < 0) {
			RTE_BOND_LOG(INFO,
				     "Invalid link down propagation delay value specified for"
				     " bonded device %s", name);
			return -1;
		}

		/* Set the link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
		    != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set link down propagation delay (%u ms) on bonded device %s",
				     link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(ERR,
			     "Link down propagation delay can be specified only once for bonded device %s",
			     name);
		return -1;
	}

	return 0;
}
static struct rte_vdev_driver pmd_bond_drv = {
	.probe = bond_probe,
	.remove = bond_remove,
};

RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);

RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
	"slave=<ifc> "
	"primary=<ifc> "
	"mode=[0-6] "
	"xmit_policy=[l2 | l23 | l34] "
	"agg_mode=[count | stable | bandwidth] "
	"socket_id=<int> "
	"mac=<mac addr> "
	"lsc_poll_period_ms=<int> "
	"up_delay=<int> "
	"down_delay=<int>");

/* We can't use RTE_LOG_REGISTER_DEFAULT because of the forced name for
 * this library, see meson.build.
 */
RTE_LOG_REGISTER(bond_logtype, pmd.net.bonding, NOTICE);