1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
6 #include <netinet/in.h>
9 #include <rte_malloc.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_vdev.h>
15 #include <rte_ip_frag.h>
16 #include <rte_devargs.h>
17 #include <rte_kvargs.h>
18 #include <rte_bus_vdev.h>
19 #include <rte_alarm.h>
20 #include <rte_cycles.h>
21 #include <rte_string_fns.h>
23 #include "rte_eth_bond.h"
24 #include "eth_bond_private.h"
25 #include "eth_bond_8023ad_private.h"
27 #define REORDER_PERIOD_MS 10
28 #define DEFAULT_POLLING_INTERVAL_10_MS (10)
29 #define BOND_MAX_MAC_ADDRS 16
31 #define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
33 /* Table for statistics in mode 5 TLB */
34 static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
37 get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
39 size_t vlan_offset = 0;
41 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
42 rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
43 struct rte_vlan_hdr *vlan_hdr =
44 (struct rte_vlan_hdr *)(eth_hdr + 1);
46 vlan_offset = sizeof(struct rte_vlan_hdr);
47 *proto = vlan_hdr->eth_proto;
49 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
50 vlan_hdr = vlan_hdr + 1;
51 *proto = vlan_hdr->eth_proto;
52 vlan_offset += sizeof(struct rte_vlan_hdr);
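/*
 * A minimal usage sketch (added for clarity, not part of the original
 * driver): how callers elsewhere in this file typically use
 * get_vlan_offset() to locate the start of the L3 header. The helper
 * name example_l3_header() is hypothetical.
 */
static inline void *
example_l3_header(struct rte_ether_hdr *eth_hdr)
{
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	/* proto now holds the inner ethertype; the L3 header follows the
	 * Ethernet header plus any VLAN/QinQ tags that were present. */
	return (char *)(eth_hdr + 1) + vlan_offset;
}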
59 bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
61 struct bond_dev_private *internals;
63 uint16_t num_rx_total = 0;
65 uint16_t active_slave;
69 /* Cast to structure containing the bonded device's port id and queue id */
69 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
70 internals = bd_rx_q->dev_private;
71 slave_count = internals->active_slave_count;
72 active_slave = internals->active_slave;
74 for (i = 0; i < slave_count && nb_pkts; i++) {
75 uint16_t num_rx_slave;
77 /* Offset of pointer to *bufs increases as packets are received
78 * from other slaves */
80 rte_eth_rx_burst(internals->active_slaves[active_slave],
82 bufs + num_rx_total, nb_pkts);
83 num_rx_total += num_rx_slave;
84 nb_pkts -= num_rx_slave;
85 if (++active_slave == slave_count)
89 if (++internals->active_slave >= slave_count)
90 internals->active_slave = 0;
95 bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
98 struct bond_dev_private *internals;
101 /* Cast to structure containing the bonded device's port id and queue id */
101 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
103 internals = bd_rx_q->dev_private;
105 return rte_eth_rx_burst(internals->current_primary_port,
106 bd_rx_q->queue_id, bufs, nb_pkts);
109 static inline uint8_t
110 is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
112 const uint16_t ether_type_slow_be =
113 rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
115 return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
116 (ethertype == ether_type_slow_be &&
117 (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
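/*
 * A minimal sketch (added for clarity, not part of the original driver)
 * showing how the mode 4 RX path derives the arguments passed to
 * is_lacp_packets() from a received frame; the helper name
 * example_mbuf_is_lacp() is hypothetical.
 */
static inline uint8_t
example_mbuf_is_lacp(struct rte_mbuf *mbuf)
{
	struct rte_ether_hdr *hdr =
		rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
	uint8_t subtype =
		((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

	/* True only for an untagged slow-protocol frame carrying an LACP
	 * or marker PDU. */
	return is_lacp_packets(hdr->ether_type, subtype, mbuf);
}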
120 /*****************************************************************************
121 * Flow director's setup for mode 4 optimization
124 static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
125 .dst.addr_bytes = { 0 },
126 .src.addr_bytes = { 0 },
127 .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
130 static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
131 .dst.addr_bytes = { 0 },
132 .src.addr_bytes = { 0 },
136 static struct rte_flow_item flow_item_8023ad[] = {
138 .type = RTE_FLOW_ITEM_TYPE_ETH,
139 .spec = &flow_item_eth_type_8023ad,
141 .mask = &flow_item_eth_mask_type_8023ad,
144 .type = RTE_FLOW_ITEM_TYPE_END,
151 const struct rte_flow_attr flow_attr_8023ad = {
160 bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
161 uint16_t slave_port) {
162 struct rte_eth_dev_info slave_info;
163 struct rte_flow_error error;
164 struct bond_dev_private *internals = bond_dev->data->dev_private;
166 const struct rte_flow_action_queue lacp_queue_conf = {
170 const struct rte_flow_action actions[] = {
172 .type = RTE_FLOW_ACTION_TYPE_QUEUE,
173 .conf = &lacp_queue_conf
176 .type = RTE_FLOW_ACTION_TYPE_END,
180 int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
181 flow_item_8023ad, actions, &error);
183 RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
184 __func__, error.message, slave_port,
185 internals->mode4.dedicated_queues.rx_qid);
189 ret = rte_eth_dev_info_get(slave_port, &slave_info);
192 "%s: Error during getting device (port %u) info: %s\n",
193 __func__, slave_port, strerror(-ret));
198 if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
199 slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
201 "%s: Slave %d capabilities doesn't allow to allocate additional queues",
202 __func__, slave_port);
210 bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
211 struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
212 struct bond_dev_private *internals = bond_dev->data->dev_private;
213 struct rte_eth_dev_info bond_info;
218 /* Verify that all slaves in the bond support flow director */
218 if (internals->slave_count > 0) {
219 ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
222 "%s: Error during getting device (port %u) info: %s\n",
223 __func__, bond_dev->data->port_id,
229 internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
230 internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
232 for (idx = 0; idx < internals->slave_count; idx++) {
233 if (bond_ethdev_8023ad_flow_verify(bond_dev,
234 internals->slaves[idx].port_id) != 0)
243 bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
245 struct rte_flow_error error;
246 struct bond_dev_private *internals = bond_dev->data->dev_private;
247 struct rte_flow_action_queue lacp_queue_conf = {
248 .index = internals->mode4.dedicated_queues.rx_qid,
251 const struct rte_flow_action actions[] = {
253 .type = RTE_FLOW_ACTION_TYPE_QUEUE,
254 .conf = &lacp_queue_conf
257 .type = RTE_FLOW_ACTION_TYPE_END,
261 internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
262 &flow_attr_8023ad, flow_item_8023ad, actions, &error);
263 if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
264 RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
265 "(slave_port=%d queue_id=%d)",
266 error.message, slave_port,
267 internals->mode4.dedicated_queues.rx_qid);
274 static inline uint16_t
275 rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
278 /* Cast to structure containing the bonded device's port id and queue id */
279 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
280 struct bond_dev_private *internals = bd_rx_q->dev_private;
281 struct rte_eth_dev *bonded_eth_dev =
282 &rte_eth_devices[internals->port_id];
283 struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
284 struct rte_ether_hdr *hdr;
286 const uint16_t ether_type_slow_be =
287 rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
288 uint16_t num_rx_total = 0; /* Total number of received packets */
289 uint16_t slaves[RTE_MAX_ETHPORTS];
290 uint16_t slave_count, idx;
292 uint8_t collecting; /* current slave collecting status */
293 const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
294 const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
300 /* Copy slave list to protect against slave up/down changes during tx
302 slave_count = internals->active_slave_count;
303 memcpy(slaves, internals->active_slaves,
304 sizeof(internals->active_slaves[0]) * slave_count);
306 idx = internals->active_slave;
307 if (idx >= slave_count) {
308 internals->active_slave = 0;
311 for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
313 collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
316 /* Read packets from this slave */
317 num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
318 &bufs[num_rx_total], nb_pkts - num_rx_total);
320 for (k = j; k < 2 && k < num_rx_total; k++)
321 rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
323 /* Handle slow protocol packets. */
324 while (j < num_rx_total) {
325 if (j + 3 < num_rx_total)
326 rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
328 hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
329 subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
331 /* Remove packet from array if:
332 * - it is a slow packet but no dedicated rxq is present,
333 * - slave is not in collecting state,
334 * - bonding interface is not in promiscuous mode:
335 * - packet is unicast and address does not match,
336 * - packet is multicast and bonding interface
337 * is not in allmulti,
341 is_lacp_packets(hdr->ether_type, subtype,
345 ((rte_is_unicast_ether_addr(&hdr->d_addr) &&
346 !rte_is_same_ether_addr(bond_mac,
349 rte_is_multicast_ether_addr(&hdr->d_addr)))))) {
351 if (hdr->ether_type == ether_type_slow_be) {
352 bond_mode_8023ad_handle_slow_pkt(
353 internals, slaves[idx], bufs[j]);
355 rte_pktmbuf_free(bufs[j]);
357 /* Packet is managed by mode 4 or dropped, shift the array */
359 if (j < num_rx_total) {
360 memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
366 if (unlikely(++idx == slave_count))
370 if (++internals->active_slave >= slave_count)
371 internals->active_slave = 0;
377 bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
380 return rx_burst_8023ad(queue, bufs, nb_pkts, false);
384 bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
387 return rx_burst_8023ad(queue, bufs, nb_pkts, true);
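/*
 * A minimal sketch (assumption, not part of this file) of how an
 * application opts into the fast-queue paths above. The dedicated slow
 * queue must be enabled while the bonded port is stopped; the extra
 * queue is then set up on each slave when the port is started (see
 * slave_configure_slow_queue() below).
 */
static int
example_enable_dedicated_queues(uint16_t bonded_port_id)
{
	int ret = rte_eth_bond_8023ad_dedicated_queues_enable(bonded_port_id);

	if (ret != 0)
		return ret;

	return rte_eth_dev_start(bonded_port_id);
}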
390 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
391 uint32_t burstnumberRX;
392 uint32_t burstnumberTX;
394 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
397 arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
400 case RTE_ARP_OP_REQUEST:
401 strlcpy(buf, "ARP Request", buf_len);
403 case RTE_ARP_OP_REPLY:
404 strlcpy(buf, "ARP Reply", buf_len);
406 case RTE_ARP_OP_REVREQUEST:
407 strlcpy(buf, "Reverse ARP Request", buf_len);
409 case RTE_ARP_OP_REVREPLY:
410 strlcpy(buf, "Reverse ARP Reply", buf_len);
412 case RTE_ARP_OP_INVREQUEST:
413 strlcpy(buf, "Peer Identify Request", buf_len);
415 case RTE_ARP_OP_INVREPLY:
416 strlcpy(buf, "Peer Identify Reply", buf_len);
421 strlcpy(buf, "Unknown", buf_len);
425 #define MaxIPv4String 16
427 ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
431 ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
432 snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
433 (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
437 #define MAX_CLIENTS_NUMBER 128
438 uint8_t active_clients;
439 struct client_stats_t {
442 uint32_t ipv4_rx_packets;
443 uint32_t ipv4_tx_packets;
445 struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
448 update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
452 for (; i < MAX_CLIENTS_NUMBER; i++) {
453 if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
454 /* Just update RX packets number for this client */
455 if (TXorRXindicator == &burstnumberRX)
456 client_stats[i].ipv4_rx_packets++;
458 client_stats[i].ipv4_tx_packets++;
462 /* We have a new client. Insert it into the table and increment its stats */
463 if (TXorRXindicator == &burstnumberRX)
464 client_stats[active_clients].ipv4_rx_packets++;
466 client_stats[active_clients].ipv4_tx_packets++;
467 client_stats[active_clients].ipv4_addr = addr;
468 client_stats[active_clients].port = port;
473 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
474 #define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
475 rte_log(RTE_LOG_DEBUG, bond_logtype, \
476 "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
477 "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
480 eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
481 eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
482 eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
484 eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
485 eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
486 eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
488 arp_op, ++burstnumber)
492 mode6_debug(const char __rte_unused *info,
493 struct rte_ether_hdr *eth_h, uint16_t port,
494 uint32_t __rte_unused *burstnumber)
496 struct rte_ipv4_hdr *ipv4_h;
497 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
498 struct rte_arp_hdr *arp_h;
505 uint16_t ether_type = eth_h->ether_type;
506 uint16_t offset = get_vlan_offset(eth_h, &ether_type);
508 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
509 strlcpy(buf, info, 16);
512 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
513 ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
514 ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
515 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
516 ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
517 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
519 update_client_stats(ipv4_h->src_addr, port, burstnumber);
521 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
522 else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
523 arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
524 ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
525 ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
526 arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
527 ArpOp, sizeof(ArpOp));
528 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
535 bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
537 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
538 struct bond_dev_private *internals = bd_tx_q->dev_private;
539 struct rte_ether_hdr *eth_h;
540 uint16_t ether_type, offset;
541 uint16_t nb_recv_pkts;
544 nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
546 for (i = 0; i < nb_recv_pkts; i++) {
547 eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
548 ether_type = eth_h->ether_type;
549 offset = get_vlan_offset(eth_h, &ether_type);
551 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
552 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
553 mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
555 bond_mode_alb_arp_recv(eth_h, offset, internals);
557 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
558 else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
559 mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
567 bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
570 struct bond_dev_private *internals;
571 struct bond_tx_queue *bd_tx_q;
573 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
574 uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
576 uint16_t num_of_slaves;
577 uint16_t slaves[RTE_MAX_ETHPORTS];
579 uint16_t num_tx_total = 0, num_tx_slave;
581 static int slave_idx = 0;
582 int i, cslave_idx = 0, tx_fail_total = 0;
584 bd_tx_q = (struct bond_tx_queue *)queue;
585 internals = bd_tx_q->dev_private;
587 /* Copy slave list to protect against slave up/down changes during tx
589 num_of_slaves = internals->active_slave_count;
590 memcpy(slaves, internals->active_slaves,
591 sizeof(internals->active_slaves[0]) * num_of_slaves);
593 if (num_of_slaves < 1)
596 /* Populate each slave's mbuf array with the packets to be sent on it */
597 for (i = 0; i < nb_pkts; i++) {
598 cslave_idx = (slave_idx + i) % num_of_slaves;
599 slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
602 /* increment current slave index so the next call to tx burst starts on the
604 slave_idx = ++cslave_idx;
606 /* Send packet burst on each slave device */
607 for (i = 0; i < num_of_slaves; i++) {
608 if (slave_nb_pkts[i] > 0) {
609 num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
610 slave_bufs[i], slave_nb_pkts[i]);
612 /* if tx burst fails move packets to end of bufs */
613 if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
614 int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
616 tx_fail_total += tx_fail_slave;
618 memcpy(&bufs[nb_pkts - tx_fail_total],
619 &slave_bufs[i][num_tx_slave],
620 tx_fail_slave * sizeof(bufs[0]));
622 num_tx_total += num_tx_slave;
630 bond_ethdev_tx_burst_active_backup(void *queue,
631 struct rte_mbuf **bufs, uint16_t nb_pkts)
633 struct bond_dev_private *internals;
634 struct bond_tx_queue *bd_tx_q;
636 bd_tx_q = (struct bond_tx_queue *)queue;
637 internals = bd_tx_q->dev_private;
639 if (internals->active_slave_count < 1)
642 return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
646 static inline uint16_t
647 ether_hash(struct rte_ether_hdr *eth_hdr)
649 unaligned_uint16_t *word_src_addr =
650 (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
651 unaligned_uint16_t *word_dst_addr =
652 (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;
654 return (word_src_addr[0] ^ word_dst_addr[0]) ^
655 (word_src_addr[1] ^ word_dst_addr[1]) ^
656 (word_src_addr[2] ^ word_dst_addr[2]);
659 static inline uint32_t
660 ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
662 return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
665 static inline uint32_t
666 ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
668 unaligned_uint32_t *word_src_addr =
669 (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
670 unaligned_uint32_t *word_dst_addr =
671 (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
673 return (word_src_addr[0] ^ word_dst_addr[0]) ^
674 (word_src_addr[1] ^ word_dst_addr[1]) ^
675 (word_src_addr[2] ^ word_dst_addr[2]) ^
676 (word_src_addr[3] ^ word_dst_addr[3]);
681 burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
682 uint16_t slave_count, uint16_t *slaves)
684 struct rte_ether_hdr *eth_hdr;
688 for (i = 0; i < nb_pkts; i++) {
689 eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
691 hash = ether_hash(eth_hdr);
693 slaves[i] = (hash ^= hash >> 8) % slave_count;
698 burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
699 uint16_t slave_count, uint16_t *slaves)
702 struct rte_ether_hdr *eth_hdr;
705 uint32_t hash, l3hash;
707 for (i = 0; i < nb_pkts; i++) {
708 eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
711 proto = eth_hdr->ether_type;
712 hash = ether_hash(eth_hdr);
714 vlan_offset = get_vlan_offset(eth_hdr, &proto);
716 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
717 struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
718 ((char *)(eth_hdr + 1) + vlan_offset);
719 l3hash = ipv4_hash(ipv4_hdr);
721 } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
722 struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
723 ((char *)(eth_hdr + 1) + vlan_offset);
724 l3hash = ipv6_hash(ipv6_hdr);
727 hash = hash ^ l3hash;
731 slaves[i] = hash % slave_count;
736 burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
737 uint16_t slave_count, uint16_t *slaves)
739 struct rte_ether_hdr *eth_hdr;
744 struct rte_udp_hdr *udp_hdr;
745 struct rte_tcp_hdr *tcp_hdr;
746 uint32_t hash, l3hash, l4hash;
748 for (i = 0; i < nb_pkts; i++) {
749 eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
750 size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
751 proto = eth_hdr->ether_type;
752 vlan_offset = get_vlan_offset(eth_hdr, &proto);
756 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
757 struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
758 ((char *)(eth_hdr + 1) + vlan_offset);
759 size_t ip_hdr_offset;
761 l3hash = ipv4_hash(ipv4_hdr);
763 /* there is no L4 header in fragmented packet */
764 if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
766 ip_hdr_offset = (ipv4_hdr->version_ihl
767 & RTE_IPV4_HDR_IHL_MASK) *
768 RTE_IPV4_IHL_MULTIPLIER;
770 if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
771 tcp_hdr = (struct rte_tcp_hdr *)
774 if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
776 l4hash = HASH_L4_PORTS(tcp_hdr);
777 } else if (ipv4_hdr->next_proto_id ==
779 udp_hdr = (struct rte_udp_hdr *)
782 if ((size_t)udp_hdr + sizeof(*udp_hdr)
784 l4hash = HASH_L4_PORTS(udp_hdr);
787 } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
788 struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
789 ((char *)(eth_hdr + 1) + vlan_offset);
790 l3hash = ipv6_hash(ipv6_hdr);
792 if (ipv6_hdr->proto == IPPROTO_TCP) {
793 tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
794 l4hash = HASH_L4_PORTS(tcp_hdr);
795 } else if (ipv6_hdr->proto == IPPROTO_UDP) {
796 udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
797 l4hash = HASH_L4_PORTS(udp_hdr);
801 hash = l3hash ^ l4hash;
805 slaves[i] = hash % slave_count;
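/*
 * A minimal sketch (assumption, not part of this file) of how the
 * transmit hash used by balance mode is chosen through the public API;
 * internals->burst_xmit_hash then points at one of the three
 * burst_xmit_*_hash() functions above.
 */
static int
example_select_l34_policy(uint16_t bonded_port_id)
{
	/* Hash on IP addresses and TCP/UDP ports (burst_xmit_l34_hash). */
	return rte_eth_bond_xmit_policy_set(bonded_port_id,
			BALANCE_XMIT_POLICY_LAYER34);
}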
810 uint64_t bwg_left_int;
811 uint64_t bwg_left_remainder;
816 bond_tlb_activate_slave(struct bond_dev_private *internals) {
819 for (i = 0; i < internals->active_slave_count; i++) {
820 tlb_last_obytets[internals->active_slaves[i]] = 0;
825 bandwidth_cmp(const void *a, const void *b)
827 const struct bwg_slave *bwg_a = a;
828 const struct bwg_slave *bwg_b = b;
829 int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
830 int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
831 (int64_t)bwg_a->bwg_left_remainder;
845 bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
846 struct bwg_slave *bwg_slave)
848 struct rte_eth_link link_status;
851 ret = rte_eth_link_get_nowait(port_id, &link_status);
853 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
854 port_id, rte_strerror(-ret));
857 uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
860 link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
861 bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
862 bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
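/*
 * bwg_left_int/bwg_left_remainder approximate the quotient and remainder
 * of the slave's unused bandwidth relative to its link capacity over the
 * last update window; bandwidth_cmp() above sorts slaves in descending
 * order of this headroom so that bond_ethdev_update_tlb_slave_cb() below
 * can place the least loaded slave first in tlb_slaves_order[].
 */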
866 bond_ethdev_update_tlb_slave_cb(void *arg)
868 struct bond_dev_private *internals = arg;
869 struct rte_eth_stats slave_stats;
870 struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
871 uint16_t slave_count;
874 uint8_t update_stats = 0;
878 internals->slave_update_idx++;
881 if (internals->slave_update_idx >= REORDER_PERIOD_MS)
884 for (i = 0; i < internals->active_slave_count; i++) {
885 slave_id = internals->active_slaves[i];
886 rte_eth_stats_get(slave_id, &slave_stats);
887 tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
888 bandwidth_left(slave_id, tx_bytes,
889 internals->slave_update_idx, &bwg_array[i]);
890 bwg_array[i].slave = slave_id;
893 tlb_last_obytets[slave_id] = slave_stats.obytes;
897 if (update_stats == 1)
898 internals->slave_update_idx = 0;
901 qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
902 for (i = 0; i < slave_count; i++)
903 internals->tlb_slaves_order[i] = bwg_array[i].slave;
905 rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
906 (struct bond_dev_private *)internals);
910 bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
912 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
913 struct bond_dev_private *internals = bd_tx_q->dev_private;
915 struct rte_eth_dev *primary_port =
916 &rte_eth_devices[internals->primary_port];
917 uint16_t num_tx_total = 0;
920 uint16_t num_of_slaves = internals->active_slave_count;
921 uint16_t slaves[RTE_MAX_ETHPORTS];
923 struct rte_ether_hdr *ether_hdr;
924 struct rte_ether_addr primary_slave_addr;
925 struct rte_ether_addr active_slave_addr;
927 if (num_of_slaves < 1)
930 memcpy(slaves, internals->tlb_slaves_order,
931 sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
934 rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
937 for (i = 0; i < 3; i++)
938 rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
941 for (i = 0; i < num_of_slaves; i++) {
942 rte_eth_macaddr_get(slaves[i], &active_slave_addr);
943 for (j = num_tx_total; j < nb_pkts; j++) {
945 rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
947 ether_hdr = rte_pktmbuf_mtod(bufs[j],
948 struct rte_ether_hdr *);
949 if (rte_is_same_ether_addr(&ether_hdr->s_addr,
950 &primary_slave_addr))
951 rte_ether_addr_copy(&active_slave_addr,
953 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
954 mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
958 num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
959 bufs + num_tx_total, nb_pkts - num_tx_total);
961 if (num_tx_total == nb_pkts)
969 bond_tlb_disable(struct bond_dev_private *internals)
971 rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
975 bond_tlb_enable(struct bond_dev_private *internals)
977 bond_ethdev_update_tlb_slave_cb(internals);
981 bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
983 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
984 struct bond_dev_private *internals = bd_tx_q->dev_private;
986 struct rte_ether_hdr *eth_h;
987 uint16_t ether_type, offset;
989 struct client_data *client_info;
992 * We create transmit buffers for every slave and one additional to send
993 * through TLB. In the worst case every packet will be sent on one port.
995 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
996 uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
999 * We create separate transmit buffers for update packets as they won't
1000 * be counted in num_tx_total.
1002 struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
1003 uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
1005 struct rte_mbuf *upd_pkt;
1008 uint16_t num_send, num_not_send = 0;
1009 uint16_t num_tx_total = 0;
1014 /* Search tx buffer for ARP packets and forward them to alb */
1015 for (i = 0; i < nb_pkts; i++) {
1016 eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
1017 ether_type = eth_h->ether_type;
1018 offset = get_vlan_offset(eth_h, &ether_type);
1020 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
1021 slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
1023 /* Change src mac in eth header */
1024 rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);
1026 /* Add packet to slave tx buffer */
1027 slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
1028 slave_bufs_pkts[slave_idx]++;
1030 /* If packet is not ARP, send it with TLB policy */
1031 slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
1033 slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
1037 /* Update connected client ARP tables */
1038 if (internals->mode6.ntt) {
1039 for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
1040 client_info = &internals->mode6.client_table[i];
1042 if (client_info->in_use) {
1043 /* Allocate new packet to send ARP update on current slave */
1044 upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
1045 if (upd_pkt == NULL) {
1047 "Failed to allocate ARP packet from pool");
1050 pkt_size = sizeof(struct rte_ether_hdr) +
1051 sizeof(struct rte_arp_hdr) +
1052 client_info->vlan_count *
1053 sizeof(struct rte_vlan_hdr);
1054 upd_pkt->data_len = pkt_size;
1055 upd_pkt->pkt_len = pkt_size;
1057 slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
1060 /* Add packet to update tx buffer */
1061 update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
1062 update_bufs_pkts[slave_idx]++;
1065 internals->mode6.ntt = 0;
1068 /* Send ARP packets on proper slaves */
1069 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
1070 if (slave_bufs_pkts[i] > 0) {
1071 num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
1072 slave_bufs[i], slave_bufs_pkts[i]);
1073 for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
1074 bufs[nb_pkts - 1 - num_not_send - j] =
1075 slave_bufs[i][nb_pkts - 1 - j];
1078 num_tx_total += num_send;
1079 num_not_send += slave_bufs_pkts[i] - num_send;
1081 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
1082 /* Print TX stats including update packets */
1083 for (j = 0; j < slave_bufs_pkts[i]; j++) {
1084 eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
1085 struct rte_ether_hdr *);
1086 mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
1092 /* Send update packets on proper slaves */
1093 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
1094 if (update_bufs_pkts[i] > 0) {
1095 num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
1096 update_bufs_pkts[i]);
1097 for (j = num_send; j < update_bufs_pkts[i]; j++) {
1098 rte_pktmbuf_free(update_bufs[i][j]);
1100 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
1101 for (j = 0; j < update_bufs_pkts[i]; j++) {
1102 eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
1103 struct rte_ether_hdr *);
1104 mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
1110 /* Send non-ARP packets using tlb policy */
1111 if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
1112 num_send = bond_ethdev_tx_burst_tlb(queue,
1113 slave_bufs[RTE_MAX_ETHPORTS],
1114 slave_bufs_pkts[RTE_MAX_ETHPORTS]);
1116 for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
1117 bufs[nb_pkts - 1 - num_not_send - j] =
1118 slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
1121 num_tx_total += num_send;
1124 return num_tx_total;
1127 static inline uint16_t
1128 tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
1129 uint16_t *slave_port_ids, uint16_t slave_count)
1131 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
1132 struct bond_dev_private *internals = bd_tx_q->dev_private;
1134 /* Arrays into which mbufs are sorted for transmission on each slave */
1135 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
1136 /* Number of mbufs for transmission on each slave */
1137 uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
1138 /* Mapping array generated by hash function to map mbufs to slaves */
1139 uint16_t bufs_slave_port_idxs[nb_bufs];
1141 uint16_t slave_tx_count;
1142 uint16_t total_tx_count = 0, total_tx_fail_count = 0;
1147 * Populate each slave's mbuf array with the packets to be sent on it,
1148 * selecting the output slave with a hash based on the xmit policy
1150 internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
1151 bufs_slave_port_idxs);
1153 for (i = 0; i < nb_bufs; i++) {
1154 /* Populate slave mbuf arrays with mbufs for that slave. */
1155 uint16_t slave_idx = bufs_slave_port_idxs[i];
1157 slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
1160 /* Send packet burst on each slave device */
1161 for (i = 0; i < slave_count; i++) {
1162 if (slave_nb_bufs[i] == 0)
1165 slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
1166 bd_tx_q->queue_id, slave_bufs[i],
1169 total_tx_count += slave_tx_count;
1171 /* If tx burst fails move packets to end of bufs */
1172 if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
1173 int slave_tx_fail_count = slave_nb_bufs[i] -
1175 total_tx_fail_count += slave_tx_fail_count;
1176 memcpy(&bufs[nb_bufs - total_tx_fail_count],
1177 &slave_bufs[i][slave_tx_count],
1178 slave_tx_fail_count * sizeof(bufs[0]));
1182 return total_tx_count;
1186 bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
1189 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
1190 struct bond_dev_private *internals = bd_tx_q->dev_private;
1192 uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
1193 uint16_t slave_count;
1195 if (unlikely(nb_bufs == 0))
1198 /* Copy slave list to protect against slave up/down changes during tx
1201 slave_count = internals->active_slave_count;
1202 if (unlikely(slave_count < 1))
1205 memcpy(slave_port_ids, internals->active_slaves,
1206 sizeof(slave_port_ids[0]) * slave_count);
1207 return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
1211 static inline uint16_t
1212 tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
1215 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
1216 struct bond_dev_private *internals = bd_tx_q->dev_private;
1218 uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
1219 uint16_t slave_count;
1221 uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
1222 uint16_t dist_slave_count;
1224 uint16_t slave_tx_count;
1228 /* Copy slave list to protect against slave up/down changes during tx
1230 slave_count = internals->active_slave_count;
1231 if (unlikely(slave_count < 1))
1234 memcpy(slave_port_ids, internals->active_slaves,
1235 sizeof(slave_port_ids[0]) * slave_count);
1240 /* Check for LACP control packets and send if available */
1241 for (i = 0; i < slave_count; i++) {
1242 struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
1243 struct rte_mbuf *ctrl_pkt = NULL;
1245 if (likely(rte_ring_empty(port->tx_ring)))
1248 if (rte_ring_dequeue(port->tx_ring,
1249 (void **)&ctrl_pkt) != -ENOENT) {
1250 slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
1251 bd_tx_q->queue_id, &ctrl_pkt, 1);
1253 * re-enqueue LAG control plane packets to buffering
1254 * ring if transmission fails so the packet isn't lost.
1256 if (slave_tx_count != 1)
1257 rte_ring_enqueue(port->tx_ring, ctrl_pkt);
1262 if (unlikely(nb_bufs == 0))
1265 dist_slave_count = 0;
1266 for (i = 0; i < slave_count; i++) {
1267 struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
1269 if (ACTOR_STATE(port, DISTRIBUTING))
1270 dist_slave_port_ids[dist_slave_count++] =
1274 if (unlikely(dist_slave_count < 1))
1277 return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
1282 bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
1285 return tx_burst_8023ad(queue, bufs, nb_bufs, false);
1289 bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
1292 return tx_burst_8023ad(queue, bufs, nb_bufs, true);
1296 bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
1299 struct bond_dev_private *internals;
1300 struct bond_tx_queue *bd_tx_q;
1302 uint16_t slaves[RTE_MAX_ETHPORTS];
1303 uint8_t tx_failed_flag = 0;
1304 uint16_t num_of_slaves;
1306 uint16_t max_nb_of_tx_pkts = 0;
1308 int slave_tx_total[RTE_MAX_ETHPORTS];
1309 int i, most_successful_tx_slave = -1;
1311 bd_tx_q = (struct bond_tx_queue *)queue;
1312 internals = bd_tx_q->dev_private;
1314 /* Copy slave list to protect against slave up/down changes during tx
1316 num_of_slaves = internals->active_slave_count;
1317 memcpy(slaves, internals->active_slaves,
1318 sizeof(internals->active_slaves[0]) * num_of_slaves);
1320 if (num_of_slaves < 1)
1323 /* Increment reference count on mbufs */
1324 for (i = 0; i < nb_pkts; i++)
1325 rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
1327 /* Transmit burst on each active slave */
1328 for (i = 0; i < num_of_slaves; i++) {
1329 slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
1332 if (unlikely(slave_tx_total[i] < nb_pkts))
1335 /* record the value and slave index for the slave which transmits the
1336 * maximum number of packets */
1337 if (slave_tx_total[i] > max_nb_of_tx_pkts) {
1338 max_nb_of_tx_pkts = slave_tx_total[i];
1339 most_successful_tx_slave = i;
1343 /* if slaves fail to transmit packets from burst, the calling application
1344 * is not expected to know about multiple references to packets so we must
1345 * handle failures of all packets except those of the most successful slave
1347 if (unlikely(tx_failed_flag))
1348 for (i = 0; i < num_of_slaves; i++)
1349 if (i != most_successful_tx_slave)
1350 while (slave_tx_total[i] < nb_pkts)
1351 rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
1353 return max_nb_of_tx_pkts;
1357 link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
1359 struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1361 if (bond_ctx->mode == BONDING_MODE_8023AD) {
1363 * If in mode 4 then save the link properties of the first
1364 * slave; all subsequent slaves must match these properties
1366 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1368 bond_link->link_autoneg = slave_link->link_autoneg;
1369 bond_link->link_duplex = slave_link->link_duplex;
1370 bond_link->link_speed = slave_link->link_speed;
1373 * In any other mode the link properties are set to default
1374 * values of AUTONEG/DUPLEX
1376 ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
1377 ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1382 link_properties_valid(struct rte_eth_dev *ethdev,
1383 struct rte_eth_link *slave_link)
1385 struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1387 if (bond_ctx->mode == BONDING_MODE_8023AD) {
1388 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1390 if (bond_link->link_duplex != slave_link->link_duplex ||
1391 bond_link->link_autoneg != slave_link->link_autoneg ||
1392 bond_link->link_speed != slave_link->link_speed)
1400 mac_address_get(struct rte_eth_dev *eth_dev,
1401 struct rte_ether_addr *dst_mac_addr)
1403 struct rte_ether_addr *mac_addr;
1405 if (eth_dev == NULL) {
1406 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1410 if (dst_mac_addr == NULL) {
1411 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1415 mac_addr = eth_dev->data->mac_addrs;
1417 rte_ether_addr_copy(mac_addr, dst_mac_addr);
1422 mac_address_set(struct rte_eth_dev *eth_dev,
1423 struct rte_ether_addr *new_mac_addr)
1425 struct rte_ether_addr *mac_addr;
1427 if (eth_dev == NULL) {
1428 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1432 if (new_mac_addr == NULL) {
1433 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1437 mac_addr = eth_dev->data->mac_addrs;
1439 /* If new MAC is different to current MAC then update */
1440 if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1441 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1446 static const struct rte_ether_addr null_mac_addr;
1449 * Add additional MAC addresses to the slave
1452 slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1453 uint16_t slave_port_id)
1456 struct rte_ether_addr *mac_addr;
1458 for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1459 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1460 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1463 ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
1466 for (i--; i > 0; i--)
1467 rte_eth_dev_mac_addr_remove(slave_port_id,
1468 &bonded_eth_dev->data->mac_addrs[i]);
1477 * Remove additional MAC addresses from the slave
1480 slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1481 uint16_t slave_port_id)
1484 struct rte_ether_addr *mac_addr;
1487 for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1488 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1489 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1492 ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
1493 /* save only the first error */
1494 if (ret < 0 && rc == 0)
1502 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1504 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1507 /* Update slave devices MAC addresses */
1508 if (internals->slave_count < 1)
1511 switch (internals->mode) {
1512 case BONDING_MODE_ROUND_ROBIN:
1513 case BONDING_MODE_BALANCE:
1514 case BONDING_MODE_BROADCAST:
1515 for (i = 0; i < internals->slave_count; i++) {
1516 if (rte_eth_dev_default_mac_addr_set(
1517 internals->slaves[i].port_id,
1518 bonded_eth_dev->data->mac_addrs)) {
1519 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1520 internals->slaves[i].port_id);
1525 case BONDING_MODE_8023AD:
1526 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1528 case BONDING_MODE_ACTIVE_BACKUP:
1529 case BONDING_MODE_TLB:
1530 case BONDING_MODE_ALB:
1532 for (i = 0; i < internals->slave_count; i++) {
1533 if (internals->slaves[i].port_id ==
1534 internals->current_primary_port) {
1535 if (rte_eth_dev_default_mac_addr_set(
1536 internals->current_primary_port,
1537 bonded_eth_dev->data->mac_addrs)) {
1538 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1539 internals->current_primary_port);
1543 if (rte_eth_dev_default_mac_addr_set(
1544 internals->slaves[i].port_id,
1545 &internals->slaves[i].persisted_mac_addr)) {
1546 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1547 internals->slaves[i].port_id);
1558 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
1560 struct bond_dev_private *internals;
1562 internals = eth_dev->data->dev_private;
1565 case BONDING_MODE_ROUND_ROBIN:
1566 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1567 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1569 case BONDING_MODE_ACTIVE_BACKUP:
1570 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1571 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1573 case BONDING_MODE_BALANCE:
1574 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1575 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1577 case BONDING_MODE_BROADCAST:
1578 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1579 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1581 case BONDING_MODE_8023AD:
1582 if (bond_mode_8023ad_enable(eth_dev) != 0)
1585 if (internals->mode4.dedicated_queues.enabled == 0) {
1586 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1587 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1588 RTE_BOND_LOG(WARNING,
1589 "Using mode 4, it is necessary to do TX burst "
1590 "and RX burst at least every 100ms.");
1592 /* Use flow director's optimization */
1593 eth_dev->rx_pkt_burst =
1594 bond_ethdev_rx_burst_8023ad_fast_queue;
1595 eth_dev->tx_pkt_burst =
1596 bond_ethdev_tx_burst_8023ad_fast_queue;
1599 case BONDING_MODE_TLB:
1600 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1601 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1603 case BONDING_MODE_ALB:
1604 if (bond_mode_alb_enable(eth_dev) != 0)
1607 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1608 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1614 internals->mode = mode;
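/*
 * A minimal usage sketch (assumption, not part of this file): creating a
 * bonded device and adding a slave from an application. The mode passed
 * to rte_eth_bond_create() ends up here in bond_ethdev_mode_set(), which
 * installs the matching rx/tx burst functions. The device name
 * "net_bonding0" is only an example.
 */
static int
example_create_lacp_bond(uint16_t slave_port_id)
{
	int bond_port_id;

	bond_port_id = rte_eth_bond_create("net_bonding0",
			BONDING_MODE_8023AD, rte_socket_id());
	if (bond_port_id < 0)
		return bond_port_id;

	if (rte_eth_bond_slave_add(bond_port_id, slave_port_id) != 0)
		return -1;

	return bond_port_id;
}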
1621 slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
1622 struct rte_eth_dev *slave_eth_dev)
1625 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1626 struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
1628 if (port->slow_pool == NULL) {
1630 int slave_id = slave_eth_dev->data->port_id;
1632 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
1634 port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
1635 250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
1636 slave_eth_dev->data->numa_node);
1638 /* Any memory allocation failure in initialization is critical because
1639 * resources can't be freed, so reinitialization is impossible. */
1640 if (port->slow_pool == NULL) {
1641 rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
1642 slave_id, mem_name, rte_strerror(rte_errno));
1646 if (internals->mode4.dedicated_queues.enabled == 1) {
1647 /* Configure slow Rx queue */
1649 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
1650 internals->mode4.dedicated_queues.rx_qid, 128,
1651 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1652 NULL, port->slow_pool);
1655 "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1656 slave_eth_dev->data->port_id,
1657 internals->mode4.dedicated_queues.rx_qid,
1662 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
1663 internals->mode4.dedicated_queues.tx_qid, 512,
1664 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1668 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1669 slave_eth_dev->data->port_id,
1670 internals->mode4.dedicated_queues.tx_qid,
1679 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1680 struct rte_eth_dev *slave_eth_dev)
1682 struct bond_rx_queue *bd_rx_q;
1683 struct bond_tx_queue *bd_tx_q;
1684 uint16_t nb_rx_queues;
1685 uint16_t nb_tx_queues;
1689 struct rte_flow_error flow_error;
1691 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1694 rte_eth_dev_stop(slave_eth_dev->data->port_id);
1696 /* Enable interrupts on slave device if supported */
1697 if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1698 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1700 /* If RSS is enabled for bonding, try to enable it for slaves */
1701 if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1702 if (internals->rss_key_len != 0) {
1703 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1704 internals->rss_key_len;
1705 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1708 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1711 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1712 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1713 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1714 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1717 if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
1718 DEV_RX_OFFLOAD_VLAN_FILTER)
1719 slave_eth_dev->data->dev_conf.rxmode.offloads |=
1720 DEV_RX_OFFLOAD_VLAN_FILTER;
1722 slave_eth_dev->data->dev_conf.rxmode.offloads &=
1723 ~DEV_RX_OFFLOAD_VLAN_FILTER;
1725 nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
1726 nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
1728 if (internals->mode == BONDING_MODE_8023AD) {
1729 if (internals->mode4.dedicated_queues.enabled == 1) {
1735 errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
1736 bonded_eth_dev->data->mtu);
1737 if (errval != 0 && errval != -ENOTSUP) {
1738 RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
1739 slave_eth_dev->data->port_id, errval);
1743 /* Configure device */
1744 errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1745 nb_rx_queues, nb_tx_queues,
1746 &(slave_eth_dev->data->dev_conf));
1748 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1749 slave_eth_dev->data->port_id, errval);
1753 /* Setup Rx Queues */
1754 for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1755 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1757 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1758 bd_rx_q->nb_rx_desc,
1759 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1760 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1763 "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1764 slave_eth_dev->data->port_id, q_id, errval);
1769 /* Setup Tx Queues */
1770 for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1771 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1773 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1774 bd_tx_q->nb_tx_desc,
1775 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1779 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1780 slave_eth_dev->data->port_id, q_id, errval);
1785 if (internals->mode == BONDING_MODE_8023AD &&
1786 internals->mode4.dedicated_queues.enabled == 1) {
1787 if (slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev)
1791 if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
1792 slave_eth_dev->data->port_id) != 0) {
1794 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1795 slave_eth_dev->data->port_id, q_id, errval);
1799 if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
1800 rte_flow_destroy(slave_eth_dev->data->port_id,
1801 internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
1804 bond_ethdev_8023ad_flow_set(bonded_eth_dev,
1805 slave_eth_dev->data->port_id);
1809 errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1811 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1812 slave_eth_dev->data->port_id, errval);
1816 /* If RSS is enabled for bonding, synchronize RETA */
1817 if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
1819 struct bond_dev_private *internals;
1821 internals = bonded_eth_dev->data->dev_private;
1823 for (i = 0; i < internals->slave_count; i++) {
1824 if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1825 errval = rte_eth_dev_rss_reta_update(
1826 slave_eth_dev->data->port_id,
1827 &internals->reta_conf[0],
1828 internals->slaves[i].reta_size);
1830 RTE_BOND_LOG(WARNING,
1831 "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
1832 " RSS Configuration for bonding may be inconsistent.",
1833 slave_eth_dev->data->port_id, errval);
1840 /* If lsc interrupt is set, check initial slave's link status */
1841 if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1842 slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
1843 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1844 RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
1852 slave_remove(struct bond_dev_private *internals,
1853 struct rte_eth_dev *slave_eth_dev)
1857 for (i = 0; i < internals->slave_count; i++)
1858 if (internals->slaves[i].port_id ==
1859 slave_eth_dev->data->port_id)
1862 if (i < (internals->slave_count - 1)) {
1863 struct rte_flow *flow;
1865 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1866 sizeof(internals->slaves[0]) *
1867 (internals->slave_count - i - 1));
1868 TAILQ_FOREACH(flow, &internals->flow_list, next) {
1869 memmove(&flow->flows[i], &flow->flows[i + 1],
1870 sizeof(flow->flows[0]) *
1871 (internals->slave_count - i - 1));
1872 flow->flows[internals->slave_count - 1] = NULL;
1876 internals->slave_count--;
1878 /* force reconfiguration of slave interfaces */
1879 _rte_eth_dev_reset(slave_eth_dev);
1883 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1886 slave_add(struct bond_dev_private *internals,
1887 struct rte_eth_dev *slave_eth_dev)
1889 struct bond_slave_details *slave_details =
1890 &internals->slaves[internals->slave_count];
1892 slave_details->port_id = slave_eth_dev->data->port_id;
1893 slave_details->last_link_status = 0;
1895 /* Mark slave devices that don't support interrupts so we can
1896 * compensate when we start the bond
1898 if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1899 slave_details->link_status_poll_enabled = 1;
1902 slave_details->link_status_wait_to_complete = 0;
1903 /* clean tlb_last_obytes when adding port for bonding device */
1904 memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1905 sizeof(struct rte_ether_addr));
1909 bond_ethdev_primary_set(struct bond_dev_private *internals,
1910 uint16_t slave_port_id)
1914 if (internals->active_slave_count < 1)
1915 internals->current_primary_port = slave_port_id;
1917 /* Search bonded device slave ports for new proposed primary port */
1918 for (i = 0; i < internals->active_slave_count; i++) {
1919 if (internals->active_slaves[i] == slave_port_id)
1920 internals->current_primary_port = slave_port_id;
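/*
 * A minimal sketch (assumption, not part of this file) of the public API
 * that drives bond_ethdev_primary_set(): the application nominates the
 * slave to be used as primary by active-backup and TLB modes.
 */
static int
example_set_primary(uint16_t bonded_port_id, uint16_t slave_port_id)
{
	return rte_eth_bond_primary_set(bonded_port_id, slave_port_id);
}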
1925 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1928 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1930 struct bond_dev_private *internals;
1933 /* slave eth dev will be started by bonded device */
1934 if (check_for_bonded_ethdev(eth_dev)) {
1935 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1936 eth_dev->data->port_id);
1940 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1941 eth_dev->data->dev_started = 1;
1943 internals = eth_dev->data->dev_private;
1945 if (internals->slave_count == 0) {
1946 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1950 if (internals->user_defined_mac == 0) {
1951 struct rte_ether_addr *new_mac_addr = NULL;
1953 for (i = 0; i < internals->slave_count; i++)
1954 if (internals->slaves[i].port_id == internals->primary_port)
1955 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1957 if (new_mac_addr == NULL)
1960 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1961 RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1962 eth_dev->data->port_id);
1967 if (internals->mode == BONDING_MODE_8023AD) {
1968 if (internals->mode4.dedicated_queues.enabled == 1) {
1969 internals->mode4.dedicated_queues.rx_qid =
1970 eth_dev->data->nb_rx_queues;
1971 internals->mode4.dedicated_queues.tx_qid =
1972 eth_dev->data->nb_tx_queues;
1977 /* Reconfigure each slave device if starting bonded device */
1978 for (i = 0; i < internals->slave_count; i++) {
1979 struct rte_eth_dev *slave_ethdev =
1980 &(rte_eth_devices[internals->slaves[i].port_id]);
1981 if (slave_configure(eth_dev, slave_ethdev) != 0) {
1983 "bonded port (%d) failed to reconfigure slave device (%d)",
1984 eth_dev->data->port_id,
1985 internals->slaves[i].port_id);
1988 /* We will need to poll for link status if any slave doesn't
1989 * support interrupts
1991 if (internals->slaves[i].link_status_poll_enabled)
1992 internals->link_status_polling_enabled = 1;
1995 /* start polling if needed */
1996 if (internals->link_status_polling_enabled) {
1998 internals->link_status_polling_interval_ms * 1000,
1999 bond_ethdev_slave_link_status_change_monitor,
2000 (void *)&rte_eth_devices[internals->port_id]);
2003 /* Update all slave devices' MACs */
2004 if (mac_address_slaves_update(eth_dev) != 0)
2007 if (internals->user_defined_primary_port)
2008 bond_ethdev_primary_set(internals, internals->primary_port);
2010 if (internals->mode == BONDING_MODE_8023AD)
2011 bond_mode_8023ad_start(eth_dev);
2013 if (internals->mode == BONDING_MODE_TLB ||
2014 internals->mode == BONDING_MODE_ALB)
2015 bond_tlb_enable(internals);
2020 eth_dev->data->dev_started = 0;
2025 bond_ethdev_free_queues(struct rte_eth_dev *dev)
2029 if (dev->data->rx_queues != NULL) {
2030 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2031 rte_free(dev->data->rx_queues[i]);
2032 dev->data->rx_queues[i] = NULL;
2034 dev->data->nb_rx_queues = 0;
2037 if (dev->data->tx_queues != NULL) {
2038 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2039 rte_free(dev->data->tx_queues[i]);
2040 dev->data->tx_queues[i] = NULL;
2042 dev->data->nb_tx_queues = 0;
2047 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
2049 struct bond_dev_private *internals = eth_dev->data->dev_private;
2052 if (internals->mode == BONDING_MODE_8023AD) {
2056 bond_mode_8023ad_stop(eth_dev);
2058 /* Discard all messages to/from mode 4 state machines */
2059 for (i = 0; i < internals->active_slave_count; i++) {
2060 port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
2062 RTE_ASSERT(port->rx_ring != NULL);
2063 while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
2064 rte_pktmbuf_free(pkt);
2066 RTE_ASSERT(port->tx_ring != NULL);
2067 while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
2068 rte_pktmbuf_free(pkt);
2072 if (internals->mode == BONDING_MODE_TLB ||
2073 internals->mode == BONDING_MODE_ALB) {
2074 bond_tlb_disable(internals);
2075 for (i = 0; i < internals->active_slave_count; i++)
2076 tlb_last_obytets[internals->active_slaves[i]] = 0;
2079 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
2080 eth_dev->data->dev_started = 0;
2082 internals->link_status_polling_enabled = 0;
2083 for (i = 0; i < internals->slave_count; i++) {
2084 uint16_t slave_id = internals->slaves[i].port_id;
2085 if (find_slave_by_id(internals->active_slaves,
2086 internals->active_slave_count, slave_id) !=
2087 internals->active_slave_count) {
2088 internals->slaves[i].last_link_status = 0;
2089 rte_eth_dev_stop(slave_id);
2090 deactivate_slave(eth_dev, slave_id);
2096 bond_ethdev_close(struct rte_eth_dev *dev)
2098 struct bond_dev_private *internals = dev->data->dev_private;
2099 uint16_t bond_port_id = internals->port_id;
2101 struct rte_flow_error ferror;
2103 RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
2104 while (internals->slave_count != skipped) {
2105 uint16_t port_id = internals->slaves[skipped].port_id;
2107 rte_eth_dev_stop(port_id);
2109 if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
2111 "Failed to remove port %d from bonded device %s",
2112 port_id, dev->device->name);
2116 bond_flow_ops.flush(dev, &ferror);
2117 bond_ethdev_free_queues(dev);
2118 rte_bitmap_reset(internals->vlan_filter_bmp);
2121 /* forward declaration */
2122 static int bond_ethdev_configure(struct rte_eth_dev *dev);
2125 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2127 struct bond_dev_private *internals = dev->data->dev_private;
2128 struct bond_slave_details slave;
2131 uint16_t max_nb_rx_queues = UINT16_MAX;
2132 uint16_t max_nb_tx_queues = UINT16_MAX;
2133 uint16_t max_rx_desc_lim = UINT16_MAX;
2134 uint16_t max_tx_desc_lim = UINT16_MAX;
2136 dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
2138 dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
2139 internals->candidate_max_rx_pktlen :
2140 RTE_ETHER_MAX_JUMBO_FRAME_LEN;
2142 /* The max number of tx/rx queues that the bonded device can support is
2143 * the minimum across the bonded slaves, as all slaves must be capable
2144 * of supporting the same number of tx/rx queues.
2146 if (internals->slave_count > 0) {
2147 struct rte_eth_dev_info slave_info;
2150 for (idx = 0; idx < internals->slave_count; idx++) {
2151 slave = internals->slaves[idx];
2152 ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
2155 "%s: Error during getting device (port %u) info: %s\n",
2163 if (slave_info.max_rx_queues < max_nb_rx_queues)
2164 max_nb_rx_queues = slave_info.max_rx_queues;
2166 if (slave_info.max_tx_queues < max_nb_tx_queues)
2167 max_nb_tx_queues = slave_info.max_tx_queues;
2169 if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
2170 max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
2172 if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
2173 max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
2177 dev_info->max_rx_queues = max_nb_rx_queues;
2178 dev_info->max_tx_queues = max_nb_tx_queues;
2180 memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
2181 sizeof(dev_info->default_rxconf));
2182 memcpy(&dev_info->default_txconf, &internals->default_txconf,
2183 sizeof(dev_info->default_txconf));
2185 dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
2186 dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
2189 * If dedicated HW queues are enabled for the link bonding device in LACP
2190 * mode, then reduce the maximum number of data path queues by 1.
2192 if (internals->mode == BONDING_MODE_8023AD &&
2193 internals->mode4.dedicated_queues.enabled == 1) {
2194 dev_info->max_rx_queues--;
2195 dev_info->max_tx_queues--;
2198 dev_info->min_rx_bufsize = 0;
2200 dev_info->rx_offload_capa = internals->rx_offload_capa;
2201 dev_info->tx_offload_capa = internals->tx_offload_capa;
2202 dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
2203 dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
2204 dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
2206 dev_info->reta_size = internals->reta_size;
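/*
 * Usage sketch (illustrative, not part of the driver): an application can
 * query the aggregated limits exactly as for any other ethdev port:
 *
 *     struct rte_eth_dev_info info;
 *
 *     if (rte_eth_dev_info_get(bonded_port_id, &info) == 0)
 *         printf("bond supports %u RX / %u TX queues\n",
 *                info.max_rx_queues, info.max_tx_queues);
 */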
2212 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2216 struct bond_dev_private *internals = dev->data->dev_private;
2218 /* don't do this while a slave is being added */
2219 rte_spinlock_lock(&internals->lock);
2222 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
2224 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
2226 for (i = 0; i < internals->slave_count; i++) {
2227 uint16_t port_id = internals->slaves[i].port_id;
2229 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2231 RTE_BOND_LOG(WARNING,
2232 "Setting VLAN filter on slave port %u not supported.",
2236 rte_spinlock_unlock(&internals->lock);
2241 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
2242 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
2243 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
2245 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
2246 rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
2247 0, dev->data->numa_node);
2248 if (bd_rx_q == NULL)
2251 bd_rx_q->queue_id = rx_queue_id;
2252 bd_rx_q->dev_private = dev->data->dev_private;
2254 bd_rx_q->nb_rx_desc = nb_rx_desc;
2256 memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
2257 bd_rx_q->mb_pool = mb_pool;
2259 dev->data->rx_queues[rx_queue_id] = bd_rx_q;
2265 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
2266 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
2267 const struct rte_eth_txconf *tx_conf)
2269 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
2270 rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
2271 0, dev->data->numa_node);
2273 if (bd_tx_q == NULL)
2276 bd_tx_q->queue_id = tx_queue_id;
2277 bd_tx_q->dev_private = dev->data->dev_private;
2279 bd_tx_q->nb_tx_desc = nb_tx_desc;
2280 memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
2282 dev->data->tx_queues[tx_queue_id] = bd_tx_q;
2288 bond_ethdev_rx_queue_release(void *queue)
2297 bond_ethdev_tx_queue_release(void *queue)
2306 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
2308 struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
2309 struct bond_dev_private *internals;
2311 /* 'Polling slave found' defaults to true, as we don't want to
2312 * disable the polling thread if we cannot get the lock */
2313 int i, polling_slave_found = 1;
2318 bonded_ethdev = cb_arg;
2319 internals = bonded_ethdev->data->dev_private;
2321 if (!bonded_ethdev->data->dev_started ||
2322 !internals->link_status_polling_enabled)
2325 /* If the device is currently being configured then don't check slave link
2326 * status; wait until the next period */
2327 if (rte_spinlock_trylock(&internals->lock)) {
2328 if (internals->slave_count > 0)
2329 polling_slave_found = 0;
2331 for (i = 0; i < internals->slave_count; i++) {
2332 if (!internals->slaves[i].link_status_poll_enabled)
2335 slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
2336 polling_slave_found = 1;
2338 /* Update slave link status */
2339 (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
2340 internals->slaves[i].link_status_wait_to_complete);
2342 /* if link status has changed since last checked then call the lsc callback */
2344 if (slave_ethdev->data->dev_link.link_status !=
2345 internals->slaves[i].last_link_status) {
2346 internals->slaves[i].last_link_status =
2347 slave_ethdev->data->dev_link.link_status;
2349 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
2350 RTE_ETH_EVENT_INTR_LSC,
2351 &bonded_ethdev->data->port_id,
2355 rte_spinlock_unlock(&internals->lock);
2358 if (polling_slave_found)
2359 /* Set alarm to continue monitoring link status of slave ethdevs */
2360 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
2361 bond_ethdev_slave_link_status_change_monitor, cb_arg);
2365 bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
2367 int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
2369 struct bond_dev_private *bond_ctx;
2370 struct rte_eth_link slave_link;
2372 bool one_link_update_succeeded;
2376 bond_ctx = ethdev->data->dev_private;
2378 ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2380 if (ethdev->data->dev_started == 0 ||
2381 bond_ctx->active_slave_count == 0) {
2382 ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
2386 ethdev->data->dev_link.link_status = ETH_LINK_UP;
2388 if (wait_to_complete)
2389 link_update = rte_eth_link_get;
2391 link_update = rte_eth_link_get_nowait;
2393 switch (bond_ctx->mode) {
2394 case BONDING_MODE_BROADCAST:
2396 * Setting link speed to UINT32_MAX to ensure we pick up the
2397 * value of the first active slave
2399 ethdev->data->dev_link.link_speed = UINT32_MAX;
2402 * link speed is the minimum value of all the slaves' link speeds, as
2403 * packet loss will occur on a slave if transmission at rates
2404 * greater than its link speed is attempted
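/* Illustrative example: with active slaves linked at 10 Gbps and 1 Gbps,
 * broadcast mode reports a bonded link speed of 1 Gbps.
 */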
2406 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2407 ret = link_update(bond_ctx->active_slaves[idx],
2410 ethdev->data->dev_link.link_speed =
2413 "Slave (port %u) link get failed: %s",
2414 bond_ctx->active_slaves[idx],
2415 rte_strerror(-ret));
2419 if (slave_link.link_speed <
2420 ethdev->data->dev_link.link_speed)
2421 ethdev->data->dev_link.link_speed =
2422 slave_link.link_speed;
2425 case BONDING_MODE_ACTIVE_BACKUP:
2426 /* Current primary slave */
2427 ret = link_update(bond_ctx->current_primary_port, &slave_link);
2429 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
2430 bond_ctx->current_primary_port,
2431 rte_strerror(-ret));
2435 ethdev->data->dev_link.link_speed = slave_link.link_speed;
2437 case BONDING_MODE_8023AD:
2438 ethdev->data->dev_link.link_autoneg =
2439 bond_ctx->mode4.slave_link.link_autoneg;
2440 ethdev->data->dev_link.link_duplex =
2441 bond_ctx->mode4.slave_link.link_duplex;
2443 /* fall through to update link speed */
2444 case BONDING_MODE_ROUND_ROBIN:
2445 case BONDING_MODE_BALANCE:
2446 case BONDING_MODE_TLB:
2447 case BONDING_MODE_ALB:
2450 * In these modes the maximum theoretical link speed is the sum of the
 * active slaves' link speeds.
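/* Illustrative example: two active 10 Gbps slaves report a 20 Gbps bonded
 * link speed in these modes.
 */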
2453 ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2454 one_link_update_succeeded = false;
2456 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2457 ret = link_update(bond_ctx->active_slaves[idx],
2461 "Slave (port %u) link get failed: %s",
2462 bond_ctx->active_slaves[idx],
2463 rte_strerror(-ret));
2467 one_link_update_succeeded = true;
2468 ethdev->data->dev_link.link_speed +=
2469 slave_link.link_speed;
2472 if (!one_link_update_succeeded) {
2473 RTE_BOND_LOG(ERR, "All slaves link get failed");
2484 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2486 struct bond_dev_private *internals = dev->data->dev_private;
2487 struct rte_eth_stats slave_stats;
2490 for (i = 0; i < internals->slave_count; i++) {
2491 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
2493 stats->ipackets += slave_stats.ipackets;
2494 stats->opackets += slave_stats.opackets;
2495 stats->ibytes += slave_stats.ibytes;
2496 stats->obytes += slave_stats.obytes;
2497 stats->imissed += slave_stats.imissed;
2498 stats->ierrors += slave_stats.ierrors;
2499 stats->oerrors += slave_stats.oerrors;
2500 stats->rx_nombuf += slave_stats.rx_nombuf;
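/* Per-queue counters are accumulated index by index, i.e. queue j of every
 * slave contributes to queue j of the bonded device's statistics.
 */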
2502 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
2503 stats->q_ipackets[j] += slave_stats.q_ipackets[j];
2504 stats->q_opackets[j] += slave_stats.q_opackets[j];
2505 stats->q_ibytes[j] += slave_stats.q_ibytes[j];
2506 stats->q_obytes[j] += slave_stats.q_obytes[j];
2507 stats->q_errors[j] += slave_stats.q_errors[j];
2516 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
2518 struct bond_dev_private *internals = dev->data->dev_private;
2523 for (i = 0, err = 0; i < internals->slave_count; i++) {
2524 ret = rte_eth_stats_reset(internals->slaves[i].port_id);
2533 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2535 struct bond_dev_private *internals = eth_dev->data->dev_private;
2540 switch (internals->mode) {
2541 /* Promiscuous mode is propagated to all slaves */
2542 case BONDING_MODE_ROUND_ROBIN:
2543 case BONDING_MODE_BALANCE:
2544 case BONDING_MODE_BROADCAST:
2545 case BONDING_MODE_8023AD: {
2546 unsigned int slave_ok = 0;
2548 for (i = 0; i < internals->slave_count; i++) {
2549 port_id = internals->slaves[i].port_id;
2551 ret = rte_eth_promiscuous_enable(port_id);
2554 "Failed to enable promiscuous mode for port %u: %s",
2555 port_id, rte_strerror(-ret));
2560 * Report success if the operation succeeded on at least
2561 * one slave. Otherwise return the last error code.
2567 /* Promiscuous mode is propagated only to primary slave */
2568 case BONDING_MODE_ACTIVE_BACKUP:
2569 case BONDING_MODE_TLB:
2570 case BONDING_MODE_ALB:
2572 /* Do not touch promiscuous mode when there are no slave ports (and thus no primary port) */
2573 if (internals->slave_count == 0)
2575 port_id = internals->current_primary_port;
2576 ret = rte_eth_promiscuous_enable(port_id);
2579 "Failed to enable promiscuous mode for port %u: %s",
2580 port_id, rte_strerror(-ret));
2587 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
2589 struct bond_dev_private *internals = dev->data->dev_private;
2594 switch (internals->mode) {
2595 /* Promiscuous mode is propagated to all slaves */
2596 case BONDING_MODE_ROUND_ROBIN:
2597 case BONDING_MODE_BALANCE:
2598 case BONDING_MODE_BROADCAST:
2599 case BONDING_MODE_8023AD: {
2600 unsigned int slave_ok = 0;
2602 for (i = 0; i < internals->slave_count; i++) {
2603 port_id = internals->slaves[i].port_id;
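/* Skip slaves that the 802.3ad state machine has forced into promiscuous
 * mode (forced_rx_flags); disabling promisc here could, as far as this
 * check suggests, break reception of mode 4 control traffic.
 */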
2605 if (internals->mode == BONDING_MODE_8023AD &&
2606 bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2607 BOND_8023AD_FORCED_PROMISC) {
2611 ret = rte_eth_promiscuous_disable(port_id);
2614 "Failed to disable promiscuous mode for port %u: %s",
2615 port_id, rte_strerror(-ret));
2620 * Report success if the operation succeeded on at least
2621 * one slave. Otherwise return the last error code.
2627 /* Promiscuous mode is propagated only to primary slave */
2628 case BONDING_MODE_ACTIVE_BACKUP:
2629 case BONDING_MODE_TLB:
2630 case BONDING_MODE_ALB:
2632 /* Do not touch promiscuous mode when there are no slave ports (and thus no primary port) */
2633 if (internals->slave_count == 0)
2635 port_id = internals->current_primary_port;
2636 ret = rte_eth_promiscuous_disable(port_id);
2639 "Failed to disable promiscuous mode for port %u: %s",
2640 port_id, rte_strerror(-ret));
2647 bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
2649 struct bond_dev_private *internals = eth_dev->data->dev_private;
2654 switch (internals->mode) {
2655 /* allmulti mode is propagated to all slaves */
2656 case BONDING_MODE_ROUND_ROBIN:
2657 case BONDING_MODE_BALANCE:
2658 case BONDING_MODE_BROADCAST:
2659 case BONDING_MODE_8023AD: {
2660 unsigned int slave_ok = 0;
2662 for (i = 0; i < internals->slave_count; i++) {
2663 port_id = internals->slaves[i].port_id;
2665 ret = rte_eth_allmulticast_enable(port_id);
2668 "Failed to enable allmulti mode for port %u: %s",
2669 port_id, rte_strerror(-ret));
2674 * Report success if the operation succeeded on at least
2675 * one slave. Otherwise return the last error code.
2681 /* allmulti mode is propagated only to primary slave */
2682 case BONDING_MODE_ACTIVE_BACKUP:
2683 case BONDING_MODE_TLB:
2684 case BONDING_MODE_ALB:
2686 /* Do not touch allmulti mode when there are no slave ports (and thus no primary port) */
2687 if (internals->slave_count == 0)
2689 port_id = internals->current_primary_port;
2690 ret = rte_eth_allmulticast_enable(port_id);
2693 "Failed to enable allmulti mode for port %u: %s",
2694 port_id, rte_strerror(-ret));
2701 bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
2703 struct bond_dev_private *internals = eth_dev->data->dev_private;
2708 switch (internals->mode) {
2709 /* allmulti mode is propagated to all slaves */
2710 case BONDING_MODE_ROUND_ROBIN:
2711 case BONDING_MODE_BALANCE:
2712 case BONDING_MODE_BROADCAST:
2713 case BONDING_MODE_8023AD: {
2714 unsigned int slave_ok = 0;
2716 for (i = 0; i < internals->slave_count; i++) {
2717 uint16_t port_id = internals->slaves[i].port_id;
2719 if (internals->mode == BONDING_MODE_8023AD &&
2720 bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2721 BOND_8023AD_FORCED_ALLMULTI)
2724 ret = rte_eth_allmulticast_disable(port_id);
2727 "Failed to disable allmulti mode for port %u: %s",
2728 port_id, rte_strerror(-ret));
2733 * Report success if the operation succeeded on at least
2734 * one slave. Otherwise return the last error code.
2740 /* allmulti mode is propagated only to primary slave */
2741 case BONDING_MODE_ACTIVE_BACKUP:
2742 case BONDING_MODE_TLB:
2743 case BONDING_MODE_ALB:
2745 /* Do not touch allmulti mode when there are no slave ports (and thus no primary port) */
2746 if (internals->slave_count == 0)
2748 port_id = internals->current_primary_port;
2749 ret = rte_eth_allmulticast_disable(port_id);
2752 "Failed to disable allmulti mode for port %u: %s",
2753 port_id, rte_strerror(-ret));
2760 bond_ethdev_delayed_lsc_propagation(void *arg)
2765 _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2766 RTE_ETH_EVENT_INTR_LSC, NULL);
2770 bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2771 void *param, void *ret_param __rte_unused)
2773 struct rte_eth_dev *bonded_eth_dev;
2774 struct bond_dev_private *internals;
2775 struct rte_eth_link link;
2779 uint8_t lsc_flag = 0;
2780 int valid_slave = 0;
2781 uint16_t active_pos;
2784 if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2787 bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
2789 if (check_for_bonded_ethdev(bonded_eth_dev))
2792 internals = bonded_eth_dev->data->dev_private;
2794 /* If the device isn't started don't handle interrupts */
2795 if (!bonded_eth_dev->data->dev_started)
2798 /* verify that port_id is a valid slave of bonded port */
2799 for (i = 0; i < internals->slave_count; i++) {
2800 if (internals->slaves[i].port_id == port_id) {
2809 /* Synchronize parallel lsc callback calls, triggered either by a real link
2810 * event from the slave PMDs or by the bonding PMD itself.
2812 rte_spinlock_lock(&internals->lsc_lock);
2814 /* Search for port in active port list */
2815 active_pos = find_slave_by_id(internals->active_slaves,
2816 internals->active_slave_count, port_id);
2818 ret = rte_eth_link_get_nowait(port_id, &link);
2820 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id);
2822 if (ret == 0 && link.link_status) {
2823 if (active_pos < internals->active_slave_count)
2826 /* check link state properties if bonded link is up */
2827 if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
2828 if (link_properties_valid(bonded_eth_dev, &link) != 0)
2829 RTE_BOND_LOG(ERR, "Invalid link properties "
2830 "for slave %d in bonding mode %d",
2831 port_id, internals->mode);
2833 /* inherit slave link properties */
2834 link_properties_set(bonded_eth_dev, &link);
2837 /* If there are no active slave ports then set this port to be the primary port */
2840 if (internals->active_slave_count < 1) {
2841 /* If first active slave, then change link status */
2842 bonded_eth_dev->data->dev_link.link_status =
2844 internals->current_primary_port = port_id;
2847 mac_address_slaves_update(bonded_eth_dev);
2850 activate_slave(bonded_eth_dev, port_id);
2852 /* If the user has defined the primary port then default to using it */
2855 if (internals->user_defined_primary_port &&
2856 internals->primary_port == port_id)
2857 bond_ethdev_primary_set(internals, port_id);
2859 if (active_pos == internals->active_slave_count)
2862 /* Remove from active slave list */
2863 deactivate_slave(bonded_eth_dev, port_id);
2865 if (internals->active_slave_count < 1)
2868 /* Update primary id: take the first active slave from the list or, if none
2869 * is available, fall back to the configured primary port */
2870 if (port_id == internals->current_primary_port) {
2871 if (internals->active_slave_count > 0)
2872 bond_ethdev_primary_set(internals,
2873 internals->active_slaves[0]);
2875 internals->current_primary_port = internals->primary_port;
2876 mac_address_slaves_update(bonded_eth_dev);
2882 * Update bonded device link properties after any change to the active slave set.
2885 bond_ethdev_link_update(bonded_eth_dev, 0);
2888 /* Cancel any possible outstanding interrupts if delays are enabled */
2889 if (internals->link_up_delay_ms > 0 ||
2890 internals->link_down_delay_ms > 0)
2891 rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2894 if (bonded_eth_dev->data->dev_link.link_status) {
2895 if (internals->link_up_delay_ms > 0)
2896 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2897 bond_ethdev_delayed_lsc_propagation,
2898 (void *)bonded_eth_dev);
2900 _rte_eth_dev_callback_process(bonded_eth_dev,
2901 RTE_ETH_EVENT_INTR_LSC,
2905 if (internals->link_down_delay_ms > 0)
2906 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2907 bond_ethdev_delayed_lsc_propagation,
2908 (void *)bonded_eth_dev);
2910 _rte_eth_dev_callback_process(bonded_eth_dev,
2911 RTE_ETH_EVENT_INTR_LSC,
2916 rte_spinlock_unlock(&internals->lsc_lock);
2922 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2923 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2927 int slave_reta_size;
2928 unsigned reta_count;
2929 struct bond_dev_private *internals = dev->data->dev_private;
2931 if (reta_size != internals->reta_size)
2934 /* Copy RETA table */
2935 reta_count = reta_size / RTE_RETA_GROUP_SIZE;
2937 for (i = 0; i < reta_count; i++) {
2938 internals->reta_conf[i].mask = reta_conf[i].mask;
2939 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2940 if ((reta_conf[i].mask >> j) & 0x01)
2941 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2944 /* Fill rest of array */
2945 for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2946 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2947 sizeof(internals->reta_conf[0]) * reta_count);
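/* Illustrative example: with reta_size = 128 and RTE_RETA_GROUP_SIZE = 64,
 * reta_count = 2, so groups 0..1 carry the caller's entries and each
 * following pair of groups in reta_conf[] is a copy of them.
 */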
2949 /* Propagate RETA over slaves */
2950 for (i = 0; i < internals->slave_count; i++) {
2951 slave_reta_size = internals->slaves[i].reta_size;
2952 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2953 &internals->reta_conf[0], slave_reta_size);
2962 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2963 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2966 struct bond_dev_private *internals = dev->data->dev_private;
2968 if (reta_size != internals->reta_size)
2971 /* Copy RETA table */
2972 for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2973 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2974 if ((reta_conf[i].mask >> j) & 0x01)
2975 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2981 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
2982 struct rte_eth_rss_conf *rss_conf)
2985 struct bond_dev_private *internals = dev->data->dev_private;
2986 struct rte_eth_rss_conf bond_rss_conf;
2988 memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
2990 bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
2992 if (bond_rss_conf.rss_hf != 0)
2993 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
2995 if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
2996 sizeof(internals->rss_key)) {
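/* A key length of 0 is treated as "use the default length"; the driver
 * assumes the conventional 40-byte Toeplitz key size here.
 */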
2997 if (bond_rss_conf.rss_key_len == 0)
2998 bond_rss_conf.rss_key_len = 40;
2999 internals->rss_key_len = bond_rss_conf.rss_key_len;
3000 memcpy(internals->rss_key, bond_rss_conf.rss_key,
3001 internals->rss_key_len);
3004 for (i = 0; i < internals->slave_count; i++) {
3005 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
3015 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
3016 struct rte_eth_rss_conf *rss_conf)
3018 struct bond_dev_private *internals = dev->data->dev_private;
3020 rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
3021 rss_conf->rss_key_len = internals->rss_key_len;
3022 if (rss_conf->rss_key)
3023 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
3029 bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3031 struct rte_eth_dev *slave_eth_dev;
3032 struct bond_dev_private *internals = dev->data->dev_private;
3035 rte_spinlock_lock(&internals->lock);
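/* Two passes: first verify that every slave implements mtu_set, then apply
 * the new MTU to each slave, aborting on the first failure.
 */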
3037 for (i = 0; i < internals->slave_count; i++) {
3038 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3039 if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
3040 rte_spinlock_unlock(&internals->lock);
3044 for (i = 0; i < internals->slave_count; i++) {
3045 ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
3047 rte_spinlock_unlock(&internals->lock);
3052 rte_spinlock_unlock(&internals->lock);
3057 bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
3058 struct rte_ether_addr *addr)
3060 if (mac_address_set(dev, addr)) {
3061 RTE_BOND_LOG(ERR, "Failed to update MAC address");
3069 bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
3070 enum rte_filter_type type, enum rte_filter_op op, void *arg)
3072 if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
3073 *(const void **)arg = &bond_flow_ops;
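/* Handing back bond_flow_ops here is the legacy filter_ctrl path
 * (RTE_ETH_FILTER_GENERIC / RTE_ETH_FILTER_GET) through which the
 * rte_flow layer obtains this PMD's flow operations.
 */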
3080 bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
3081 struct rte_ether_addr *mac_addr,
3082 __rte_unused uint32_t index, uint32_t vmdq)
3084 struct rte_eth_dev *slave_eth_dev;
3085 struct bond_dev_private *internals = dev->data->dev_private;
3088 rte_spinlock_lock(&internals->lock);
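/* Verify that every slave supports MAC add/remove before programming the
 * address; on failure the address is rolled back from the slaves already
 * programmed.
 */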
3090 for (i = 0; i < internals->slave_count; i++) {
3091 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3092 if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
3093 *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
3099 for (i = 0; i < internals->slave_count; i++) {
3100 ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
3104 for (i--; i >= 0; i--)
3105 rte_eth_dev_mac_addr_remove(
3106 internals->slaves[i].port_id, mac_addr);
3113 rte_spinlock_unlock(&internals->lock);
3118 bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
3120 struct rte_eth_dev *slave_eth_dev;
3121 struct bond_dev_private *internals = dev->data->dev_private;
3124 rte_spinlock_lock(&internals->lock);
3126 for (i = 0; i < internals->slave_count; i++) {
3127 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3128 if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
3132 struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
3134 for (i = 0; i < internals->slave_count; i++)
3135 rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
3139 rte_spinlock_unlock(&internals->lock);
3142 const struct eth_dev_ops default_dev_ops = {
3143 .dev_start = bond_ethdev_start,
3144 .dev_stop = bond_ethdev_stop,
3145 .dev_close = bond_ethdev_close,
3146 .dev_configure = bond_ethdev_configure,
3147 .dev_infos_get = bond_ethdev_info,
3148 .vlan_filter_set = bond_ethdev_vlan_filter_set,
3149 .rx_queue_setup = bond_ethdev_rx_queue_setup,
3150 .tx_queue_setup = bond_ethdev_tx_queue_setup,
3151 .rx_queue_release = bond_ethdev_rx_queue_release,
3152 .tx_queue_release = bond_ethdev_tx_queue_release,
3153 .link_update = bond_ethdev_link_update,
3154 .stats_get = bond_ethdev_stats_get,
3155 .stats_reset = bond_ethdev_stats_reset,
3156 .promiscuous_enable = bond_ethdev_promiscuous_enable,
3157 .promiscuous_disable = bond_ethdev_promiscuous_disable,
3158 .allmulticast_enable = bond_ethdev_allmulticast_enable,
3159 .allmulticast_disable = bond_ethdev_allmulticast_disable,
3160 .reta_update = bond_ethdev_rss_reta_update,
3161 .reta_query = bond_ethdev_rss_reta_query,
3162 .rss_hash_update = bond_ethdev_rss_hash_update,
3163 .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get,
3164 .mtu_set = bond_ethdev_mtu_set,
3165 .mac_addr_set = bond_ethdev_mac_address_set,
3166 .mac_addr_add = bond_ethdev_mac_addr_add,
3167 .mac_addr_remove = bond_ethdev_mac_addr_remove,
3168 .filter_ctrl = bond_filter_ctrl
3172 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
3174 const char *name = rte_vdev_device_name(dev);
3175 uint8_t socket_id = dev->device.numa_node;
3176 struct bond_dev_private *internals = NULL;
3177 struct rte_eth_dev *eth_dev = NULL;
3178 uint32_t vlan_filter_bmp_size;
3180 /* now do all data allocation - for eth_dev structure, dummy pci driver
3181 * and internal (private) data
3184 /* reserve an ethdev entry */
3185 eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
3186 if (eth_dev == NULL) {
3187 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
3191 internals = eth_dev->data->dev_private;
3192 eth_dev->data->nb_rx_queues = (uint16_t)1;
3193 eth_dev->data->nb_tx_queues = (uint16_t)1;
3195 /* Allocate memory for storing MAC addresses */
3196 eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
3197 BOND_MAX_MAC_ADDRS, 0, socket_id);
3198 if (eth_dev->data->mac_addrs == NULL) {
3200 "Failed to allocate %u bytes needed to store MAC addresses",
3201 RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
3205 eth_dev->dev_ops = &default_dev_ops;
3206 eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
3208 rte_spinlock_init(&internals->lock);
3209 rte_spinlock_init(&internals->lsc_lock);
3211 internals->port_id = eth_dev->data->port_id;
3212 internals->mode = BONDING_MODE_INVALID;
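/* RTE_MAX_ETHPORTS + 1 below is an out-of-range sentinel meaning that no
 * primary port has been selected yet.
 */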
3213 internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
3214 internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
3215 internals->burst_xmit_hash = burst_xmit_l2_hash;
3216 internals->user_defined_mac = 0;
3218 internals->link_status_polling_enabled = 0;
3220 internals->link_status_polling_interval_ms =
3221 DEFAULT_POLLING_INTERVAL_10_MS;
3222 internals->link_down_delay_ms = 0;
3223 internals->link_up_delay_ms = 0;
3225 internals->slave_count = 0;
3226 internals->active_slave_count = 0;
3227 internals->rx_offload_capa = 0;
3228 internals->tx_offload_capa = 0;
3229 internals->rx_queue_offload_capa = 0;
3230 internals->tx_queue_offload_capa = 0;
3231 internals->candidate_max_rx_pktlen = 0;
3232 internals->max_rx_pktlen = 0;
3234 /* Initially allow any RSS offload type to be chosen */
3235 internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
3237 memset(&internals->default_rxconf, 0,
3238 sizeof(internals->default_rxconf));
3239 memset(&internals->default_txconf, 0,
3240 sizeof(internals->default_txconf));
3242 memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
3243 memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
3245 memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
3246 memset(internals->slaves, 0, sizeof(internals->slaves));
3248 TAILQ_INIT(&internals->flow_list);
3249 internals->flow_isolated_valid = 0;
3251 /* Set mode 4 default configuration */
3252 bond_mode_8023ad_setup(eth_dev, NULL);
3253 if (bond_ethdev_mode_set(eth_dev, mode)) {
3254 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
3255 eth_dev->data->port_id, mode);
3259 vlan_filter_bmp_size =
3260 rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
3261 internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
3262 RTE_CACHE_LINE_SIZE);
3263 if (internals->vlan_filter_bmpmem == NULL) {
3265 "Failed to allocate vlan bitmap for bonded device %u",
3266 eth_dev->data->port_id);
3270 internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
3271 internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
3272 if (internals->vlan_filter_bmp == NULL) {
3274 "Failed to init vlan bitmap for bonded device %u",
3275 eth_dev->data->port_id);
3276 rte_free(internals->vlan_filter_bmpmem);
3280 return eth_dev->data->port_id;
3283 rte_free(internals);
3284 if (eth_dev != NULL)
3285 eth_dev->data->dev_private = NULL;
3286 rte_eth_dev_release_port(eth_dev);
3291 bond_probe(struct rte_vdev_device *dev)
3294 struct bond_dev_private *internals;
3295 struct rte_kvargs *kvlist;
3296 uint8_t bonding_mode, socket_id/*, agg_mode*/;
3297 int arg_count, port_id;
3299 struct rte_eth_dev *eth_dev;
3304 name = rte_vdev_device_name(dev);
3305 RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
3307 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
3308 eth_dev = rte_eth_dev_attach_secondary(name);
3310 RTE_BOND_LOG(ERR, "Failed to probe %s", name);
3313 /* TODO: request info from primary to set up Rx and Tx */
3314 eth_dev->dev_ops = &default_dev_ops;
3315 eth_dev->device = &dev->device;
3316 rte_eth_dev_probing_finish(eth_dev);
3320 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
3321 pmd_bond_init_valid_arguments);
3325 /* Parse link bonding mode */
3326 if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
3327 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
3328 &bond_ethdev_parse_slave_mode_kvarg,
3329 &bonding_mode) != 0) {
3330 RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
3335 RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded "
3340 /* Parse socket id to create bonding device on */
3341 arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
3342 if (arg_count == 1) {
3343 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
3344 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
3346 RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
3347 "bonded device %s", name);
3350 } else if (arg_count > 1) {
3351 RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
3352 "bonded device %s", name);
3355 socket_id = rte_socket_id();
3358 dev->device.numa_node = socket_id;
3360 /* Create link bonding eth device */
3361 port_id = bond_alloc(dev, bonding_mode);
3363 RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u on "
3364 "socket %u.", name, bonding_mode, socket_id);
3367 internals = rte_eth_devices[port_id].data->dev_private;
3368 internals->kvlist = kvlist;
3370 if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3371 if (rte_kvargs_process(kvlist,
3372 PMD_BOND_AGG_MODE_KVARG,
3373 &bond_ethdev_parse_slave_agg_mode_kvarg,
3376 "Failed to parse agg selection mode for bonded device %s",
3381 if (internals->mode == BONDING_MODE_8023AD)
3382 internals->mode4.agg_selection = agg_mode;
3384 internals->mode4.agg_selection = AGG_STABLE;
3387 rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
3388 RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
3389 "socket %u.", name, port_id, bonding_mode, socket_id);
3393 rte_kvargs_free(kvlist);
3399 bond_remove(struct rte_vdev_device *dev)
3401 struct rte_eth_dev *eth_dev;
3402 struct bond_dev_private *internals;
3408 name = rte_vdev_device_name(dev);
3409 RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
3411 /* now free all data allocation - for eth_dev structure,
3412 * dummy pci driver and internal (private) data
3415 /* find an ethdev entry */
3416 eth_dev = rte_eth_dev_allocated(name);
3417 if (eth_dev == NULL)
3420 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3421 return rte_eth_dev_release_port(eth_dev);
3423 RTE_ASSERT(eth_dev->device == &dev->device);
3425 internals = eth_dev->data->dev_private;
3426 if (internals->slave_count != 0)
3429 if (eth_dev->data->dev_started == 1) {
3430 bond_ethdev_stop(eth_dev);
3431 bond_ethdev_close(eth_dev);
3434 eth_dev->dev_ops = NULL;
3435 eth_dev->rx_pkt_burst = NULL;
3436 eth_dev->tx_pkt_burst = NULL;
3438 internals = eth_dev->data->dev_private;
3439 /* Try to release the mempool used in mode 6 (ALB). If the bond
3440 * device is not in mode 6, freeing a NULL pointer is not a problem.
3442 rte_mempool_free(internals->mode6.mempool);
3443 rte_bitmap_free(internals->vlan_filter_bmp);
3444 rte_free(internals->vlan_filter_bmpmem);
3446 rte_eth_dev_release_port(eth_dev);
3451 /* This part resolves the slave port ids after all the other pdevs and vdevs
3452 * have been allocated */
3454 bond_ethdev_configure(struct rte_eth_dev *dev)
3456 const char *name = dev->device->name;
3457 struct bond_dev_private *internals = dev->data->dev_private;
3458 struct rte_kvargs *kvlist = internals->kvlist;
3460 uint16_t port_id = dev - rte_eth_devices;
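/* Fallback RSS hash key used when the application does not supply one;
 * this appears to be the commonly used 40-byte default Toeplitz key.
 */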
3463 static const uint8_t default_rss_key[40] = {
3464 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
3465 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3466 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
3467 0xBE, 0xAC, 0x01, 0xFA
3473 * If RSS is enabled, fill table with default values and
3474 * set key to the value specified in port RSS configuration.
3475 * Fall back to default RSS key if the key is not specified
3477 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
3478 if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
3479 internals->rss_key_len =
3480 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
3481 memcpy(internals->rss_key,
3482 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
3483 internals->rss_key_len);
3485 internals->rss_key_len = sizeof(default_rss_key);
3486 memcpy(internals->rss_key, default_rss_key,
3487 internals->rss_key_len);
3490 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
3491 internals->reta_conf[i].mask = ~0LL;
3492 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
3493 internals->reta_conf[i].reta[j] =
3494 (i * RTE_RETA_GROUP_SIZE + j) %
3495 dev->data->nb_rx_queues;
3499 /* set the max_rx_pktlen */
3500 internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
3503 * If no kvlist is attached, this bonded device has been created
3504 * through the bonding API.
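/*
 * Usage sketch (illustrative): the API path that reaches this point with no
 * kvlist could look like the following, where slave_port_id is a placeholder:
 *
 *     int bond_port = rte_eth_bond_create("net_bonding0",
 *                                         BONDING_MODE_BALANCE,
 *                                         rte_socket_id());
 *     if (bond_port >= 0)
 *         rte_eth_bond_slave_add(bond_port, slave_port_id);
 */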
3509 /* Parse MAC address for bonded device */
3510 arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
3511 if (arg_count == 1) {
3512 struct rte_ether_addr bond_mac;
3514 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
3515 &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
3516 RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
3521 /* Set MAC address */
3522 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
3524 "Failed to set mac address on bonded device %s",
3528 } else if (arg_count > 1) {
3530 "MAC address can be specified only once for bonded device %s",
3535 /* Parse/set balance mode transmit policy */
3536 arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
3537 if (arg_count == 1) {
3538 uint8_t xmit_policy;
3540 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
3541 &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
3544 "Invalid xmit policy specified for bonded device %s",
3549 /* Set balance mode transmit policy */
3550 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
3552 "Failed to set balance xmit policy on bonded device %s",
3556 } else if (arg_count > 1) {
3558 "Transmit policy can be specified only once for bonded device %s",
3563 if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3564 if (rte_kvargs_process(kvlist,
3565 PMD_BOND_AGG_MODE_KVARG,
3566 &bond_ethdev_parse_slave_agg_mode_kvarg,
3569 "Failed to parse agg selection mode for bonded device %s",
3572 if (internals->mode == BONDING_MODE_8023AD) {
3573 int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
3577 "Invalid args for agg selection set for bonded device %s",
3584 /* Parse/add slave ports to bonded device */
3585 if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
3586 struct bond_ethdev_slave_ports slave_ports;
3589 memset(&slave_ports, 0, sizeof(slave_ports));
3591 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
3592 &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
3594 "Failed to parse slave ports for bonded device %s",
3599 for (i = 0; i < slave_ports.slave_count; i++) {
3600 if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
3602 "Failed to add port %d as slave to bonded device %s",
3603 slave_ports.slaves[i], name);
3608 RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
3612 /* Parse/set primary slave port id*/
3613 arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
3614 if (arg_count == 1) {
3615 uint16_t primary_slave_port_id;
3617 if (rte_kvargs_process(kvlist,
3618 PMD_BOND_PRIMARY_SLAVE_KVARG,
3619 &bond_ethdev_parse_primary_slave_port_id_kvarg,
3620 &primary_slave_port_id) < 0) {
3622 "Invalid primary slave port id specified for bonded device %s",
3627 /* Set primary slave port id */
3628 if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
3631 "Failed to set primary slave port %d on bonded device %s",
3632 primary_slave_port_id, name);
3635 } else if (arg_count > 1) {
3637 "Primary slave can be specified only once for bonded device %s",
3642 /* Parse link status monitor polling interval */
3643 arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
3644 if (arg_count == 1) {
3645 uint32_t lsc_poll_interval_ms;
3647 if (rte_kvargs_process(kvlist,
3648 PMD_BOND_LSC_POLL_PERIOD_KVARG,
3649 &bond_ethdev_parse_time_ms_kvarg,
3650 &lsc_poll_interval_ms) < 0) {
3652 "Invalid lsc polling interval value specified for bonded"
3653 " device %s", name);
3657 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
3660 "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
3661 lsc_poll_interval_ms, name);
3664 } else if (arg_count > 1) {
3666 "LSC polling interval can be specified only once for bonded"
3667 " device %s", name);
3671 /* Parse link up interrupt propagation delay */
3672 arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
3673 if (arg_count == 1) {
3674 uint32_t link_up_delay_ms;
3676 if (rte_kvargs_process(kvlist,
3677 PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
3678 &bond_ethdev_parse_time_ms_kvarg,
3679 &link_up_delay_ms) < 0) {
3681 "Invalid link up propagation delay value specified for"
3682 " bonded device %s", name);
3686 /* Set link up propagation delay */
3687 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
3690 "Failed to set link up propagation delay (%u ms) on bonded"
3691 " device %s", link_up_delay_ms, name);
3694 } else if (arg_count > 1) {
3696 "Link up propagation delay can be specified only once for"
3697 " bonded device %s", name);
3701 /* Parse link down interrupt propagation delay */
3702 arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
3703 if (arg_count == 1) {
3704 uint32_t link_down_delay_ms;
3706 if (rte_kvargs_process(kvlist,
3707 PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
3708 &bond_ethdev_parse_time_ms_kvarg,
3709 &link_down_delay_ms) < 0) {
3711 "Invalid link down propagation delay value specified for"
3712 " bonded device %s", name);
3716 /* Set link down propagation delay */
3717 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
3720 "Failed to set link down propagation delay (%u ms) on bonded device %s",
3721 link_down_delay_ms, name);
3724 } else if (arg_count > 1) {
3726 "Link down propagation delay can be specified only once for bonded device %s",
3734 struct rte_vdev_driver pmd_bond_drv = {
3735 .probe = bond_probe,
3736 .remove = bond_remove,
3739 RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
3740 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
3742 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
3743 "slave=<ifc> "
3744 "primary=<ifc> "
3745 "mode=[0-6] "
3746 "xmit_policy=[l2 | l23 | l34] "
3747 "agg_mode=[count | stable | bandwidth] "
3748 "socket_id=<int> "
3749 "mac=<mac addr> "
3750 "lsc_poll_period_ms=<int> "
3751 "up_delay=<int> "
3752 "down_delay=<int>");
3754 RTE_LOG_REGISTER(bond_logtype, pmd.net.bond, NOTICE);