/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>
#include <rte_flow.h>

#include "rte_eth_bond.h"
#include "eth_bond_private.h"
#include "eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
#define BOND_MAX_MAC_ADDRS 16

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
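
/*
 * Return the offset (in bytes) past any VLAN headers following the
 * Ethernet header, updating *proto to the encapsulated EtherType. Both
 * single-tagged (VLAN) and double-tagged (QinQ) frames are handled.
 */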
static inline size_t
get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
		rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
		struct rte_vlan_hdr *vlan_hdr =
			(struct rte_vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct rte_vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct rte_vlan_hdr);
		}
	}
	return vlan_offset;
}
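
/*
 * Mode 0 (round-robin) receive: poll each active slave in turn, starting
 * from the queue's last position, until the caller's burst is filled or
 * every slave has been polled once.
 */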
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_total = 0;
	uint16_t slave_count;
	uint16_t active_slave;
	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	internals = bd_rx_q->dev_private;
	slave_count = internals->active_slave_count;
	active_slave = bd_rx_q->active_slave;

	for (i = 0; i < slave_count && nb_pkts; i++) {
		uint16_t num_rx_slave;

		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave =
			rte_eth_rx_burst(internals->active_slaves[active_slave],
					 bd_rx_q->queue_id,
					 bufs + num_rx_total, nb_pkts);
		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
		if (++active_slave == slave_count)
			active_slave = 0;
	}

	if (++bd_rx_q->active_slave >= slave_count)
		bd_rx_q->active_slave = 0;
	return num_rx_total;
}
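
/* Mode 1 (active-backup) receive: only the current primary slave is polled. */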
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
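
/*
 * Identify untagged slow-protocol frames (LACPDUs and marker PDUs) that
 * must be diverted to the mode 4 state machines rather than delivered to
 * the application.
 */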
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
	const uint16_t ether_type_slow_be =
		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);

	return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
		(ethertype == ether_type_slow_be &&
		(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
/*****************************************************************************
 * Flow director's setup for mode 4 optimization
 */

static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
	.dst.addr_bytes = { 0 },
	.src.addr_bytes = { 0 },
	.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};

static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
	.dst.addr_bytes = { 0 },
	.src.addr_bytes = { 0 },
	.type = 0xFFFF,
};

static struct rte_flow_item flow_item_8023ad[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &flow_item_eth_type_8023ad,
		.last = NULL,
		.mask = &flow_item_eth_mask_type_8023ad,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_END,
		.spec = NULL,
		.last = NULL,
		.mask = NULL,
	}
};

const struct rte_flow_attr flow_attr_8023ad = {
	.group = 0,
	.priority = 0,
	.ingress = 1,
	.egress = 0,
	.reserved = 0,
};
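
/*
 * Check that a slave can steer slow-protocol frames to a dedicated Rx
 * queue: the flow rule must validate on the slave, and the slave must
 * have room for one extra Rx/Tx queue pair beyond those of the bonded
 * device.
 */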
int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
		uint16_t slave_port) {
	struct rte_eth_dev_info slave_info;
	struct rte_flow_error error;
	struct bond_dev_private *internals = bond_dev->data->dev_private;

	const struct rte_flow_action_queue lacp_queue_conf = {
		.index = 0,
	};

	const struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &lacp_queue_conf
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		}
	};

	int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
			flow_item_8023ad, actions, &error);
	if (ret != 0) {
		RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
				__func__, error.message, slave_port,
				internals->mode4.dedicated_queues.rx_qid);
		return -1;
	}

	ret = rte_eth_dev_info_get(slave_port, &slave_info);
	if (ret != 0) {
		RTE_BOND_LOG(ERR,
			"%s: Error during getting device (port %u) info: %s\n",
			__func__, slave_port, strerror(-ret));

		return ret;
	}

	if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
			slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
		RTE_BOND_LOG(ERR,
			"%s: Slave %d capabilities don't allow allocating additional queues",
			__func__, slave_port);
		return -1;
	}

	return 0;
}
int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
	struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct rte_eth_dev_info bond_info;
	uint16_t idx;
	int ret;

	/* Verify that all slaves in the bonding support steering slow-protocol
	 * frames to a dedicated queue */
	if (internals->slave_count > 0) {
		ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
		if (ret != 0) {
			RTE_BOND_LOG(ERR,
				"%s: Error during getting device (port %u) info: %s\n",
				__func__, bond_dev->data->port_id,
				strerror(-ret));

			return ret;
		}

		internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
		internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;

		for (idx = 0; idx < internals->slave_count; idx++) {
			if (bond_ethdev_8023ad_flow_verify(bond_dev,
					internals->slaves[idx].port_id) != 0)
				return -1;
		}
	}

	return 0;
}
int
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {

	struct rte_flow_error error;
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct rte_flow_action_queue lacp_queue_conf = {
		.index = internals->mode4.dedicated_queues.rx_qid,
	};

	const struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &lacp_queue_conf
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		}
	};

	internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
			&flow_attr_8023ad, flow_item_8023ad, actions, &error);
	if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
		RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
				"(slave_port=%d queue_id=%d)",
				error.message, slave_port,
				internals->mode4.dedicated_queues.rx_qid);
		return -1;
	}

	return 0;
}
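
/*
 * Mode 4 (802.3AD) receive path: poll the active slaves in turn and
 * filter what a hardware MAC would have dropped. Slow-protocol frames
 * are handed to the mode 4 state machines (unless a dedicated Rx queue
 * already steers them away in hardware), frames from slaves that are
 * not in COLLECTING state are discarded, and unicast/multicast frames
 * that do not match the bond's address are dropped unless
 * promiscuous/allmulti is enabled.
 */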
static inline uint16_t
rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
		bool dedicated_rxq)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct rte_eth_dev *bonded_eth_dev =
			&rte_eth_devices[internals->port_id];
	struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
	struct rte_ether_hdr *hdr;

	const uint16_t ether_type_slow_be =
		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint16_t slaves[RTE_MAX_ETHPORTS];
	uint16_t slave_count, idx;

	uint8_t collecting;	/* current slave collecting status */
	const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
	const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
	uint8_t subtype;
	uint16_t i;
	uint16_t j;
	uint16_t k;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	idx = bd_rx_q->active_slave;
	if (idx >= slave_count) {
		bd_rx_q->active_slave = 0;
		idx = 0;
	}
	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
				COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
			subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

			/* Remove packet from array if:
			 * - it is a slow packet but no dedicated rxq is present,
			 * - slave is not in collecting state,
			 * - bonding interface is not in promiscuous mode and
			 *   - packet is unicast and address does not match, or
			 *   - packet is multicast and bonding interface
			 *     is not in allmulti.
			 */
			if (unlikely(
				(!dedicated_rxq &&
				 is_lacp_packets(hdr->ether_type, subtype,
						bufs[j])) ||
				!collecting ||
				(!promisc &&
				 ((rte_is_unicast_ether_addr(&hdr->d_addr) &&
				   !rte_is_same_ether_addr(bond_mac,
						&hdr->d_addr)) ||
				  (!allmulti &&
				   rte_is_multicast_ether_addr(&hdr->d_addr)))))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(
						internals, slaves[idx], bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
						(num_rx_total - j));
				}
			} else
				j++;
		}
		if (unlikely(++idx == slave_count))
			idx = 0;
	}

	if (++bd_rx_q->active_slave >= slave_count)
		bd_rx_q->active_slave = 0;

	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	return rx_burst_8023ad(queue, bufs, nb_pkts, false);
}

static uint16_t
bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	return rx_burst_8023ad(queue, bufs, nb_pkts, true);
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
	switch (arp_op) {
	case RTE_ARP_OP_REQUEST:
		strlcpy(buf, "ARP Request", buf_len);
		return;
	case RTE_ARP_OP_REPLY:
		strlcpy(buf, "ARP Reply", buf_len);
		return;
	case RTE_ARP_OP_REVREQUEST:
		strlcpy(buf, "Reverse ARP Request", buf_len);
		return;
	case RTE_ARP_OP_REVREPLY:
		strlcpy(buf, "Reverse ARP Reply", buf_len);
		return;
	case RTE_ARP_OP_INVREQUEST:
		strlcpy(buf, "Peer Identify Request", buf_len);
		return;
	case RTE_ARP_OP_INVREPLY:
		strlcpy(buf, "Peer Identify Reply", buf_len);
		return;
	default:
		break;
	}
	strlcpy(buf, "Unknown", buf_len);
}
#endif
#define MaxIPv4String	16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}
#define MAX_CLIENTS_NUMBER	128
uint8_t active_clients;
struct client_stats_t {
	uint16_t port;
	uint32_t ipv4_addr;
	uint32_t ipv4_rx_packets;
	uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

static void
update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
{
	int i = 0;

	for (; i < MAX_CLIENTS_NUMBER; i++) {
		if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
			/* Just update the RX or TX packet count for this client */
			if (TXorRXindicator == &burstnumberRX)
				client_stats[i].ipv4_rx_packets++;
			else
				client_stats[i].ipv4_tx_packets++;
			return;
		}
	}
	/* We have a new client. Insert it into the table and initialise its stats */
	if (TXorRXindicator == &burstnumberRX)
		client_stats[active_clients].ipv4_rx_packets++;
	else
		client_stats[active_clients].ipv4_tx_packets++;
	client_stats[active_clients].ipv4_addr = addr;
	client_stats[active_clients].port = port;
	active_clients++;
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
	rte_log(RTE_LOG_DEBUG, bond_logtype,				\
		"%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
		info,							\
		port,							\
		eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
		eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
		eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
		src_ip,							\
		eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
		eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
		eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
		dst_ip,							\
		arp_op, ++burstnumber)
#endif
static void
mode6_debug(const char __rte_unused *info,
	struct rte_ether_hdr *eth_h, uint16_t port,
	uint32_t __rte_unused *burstnumber)
{
	struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	struct rte_arp_hdr *arp_h;
	char dst_ip[16];
	char ArpOp[24];
	char buf[16];
#endif
	char src_ip[16];

	uint16_t ether_type = eth_h->ether_type;
	uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	strlcpy(buf, info, 16);
#endif

	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
		update_client_stats(ipv4_h->src_addr, port, burstnumber);
	}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
		arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
		arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
				ArpOp, sizeof(ArpOp));
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
	}
#endif
}
#endif
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct rte_ether_hdr *eth_h;
	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;
	int i;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
			bond_mode_alb_arp_recv(eth_h, offset, internals);
		}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
	}

	return nb_recv_pkts;
}
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint16_t num_of_slaves;
	uint16_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate each slave's mbuf array with the packets to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
				       &slave_bufs[i][num_tx_slave],
				       tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
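
/*
 * Transmit hash helpers for the balance and 802.3AD policies: an L2 hash
 * over the Ethernet addresses, L3 hashes over IPv4/IPv6 addresses, and
 * L4 hashes over TCP/UDP ports, combined and reduced modulo the slave
 * count to pick an output slave per packet.
 */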
static inline uint16_t
ether_hash(struct rte_ether_hdr *eth_hdr)
{
	unaligned_uint16_t *word_src_addr =
		(unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
	unaligned_uint16_t *word_dst_addr =
		(unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}
static inline uint32_t
ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
	unaligned_uint32_t *word_src_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
	unaligned_uint32_t *word_dst_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
		uint16_t slave_count, uint16_t *slaves)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t hash;
	int i;

	for (i = 0; i < nb_pkts; i++) {
		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);

		hash = ether_hash(eth_hdr);

		slaves[i] = (hash ^= hash >> 8) % slave_count;
	}
}
void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
		uint16_t slave_count, uint16_t *slaves)
{
	uint16_t i;
	struct rte_ether_hdr *eth_hdr;
	uint16_t proto;
	size_t vlan_offset;
	uint32_t hash, l3hash;

	for (i = 0; i < nb_pkts; i++) {
		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
		l3hash = 0;

		proto = eth_hdr->ether_type;
		hash = ether_hash(eth_hdr);

		vlan_offset = get_vlan_offset(eth_hdr, &proto);

		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			l3hash = ipv4_hash(ipv4_hdr);

		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			l3hash = ipv6_hash(ipv6_hdr);
		}

		hash = hash ^ l3hash;
		hash ^= hash >> 16;
		hash ^= hash >> 8;

		slaves[i] = hash % slave_count;
	}
}
void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
		uint16_t slave_count, uint16_t *slaves)
{
	struct rte_ether_hdr *eth_hdr;
	uint16_t proto;
	size_t vlan_offset;
	int i;

	struct rte_udp_hdr *udp_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	uint32_t hash, l3hash, l4hash;

	for (i = 0; i < nb_pkts; i++) {
		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
		size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
		proto = eth_hdr->ether_type;
		vlan_offset = get_vlan_offset(eth_hdr, &proto);
		l3hash = 0;
		l4hash = 0;

		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			size_t ip_hdr_offset;

			l3hash = ipv4_hash(ipv4_hdr);

			/* there is no L4 header in fragmented packet */
			if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
								== 0)) {
				ip_hdr_offset = (ipv4_hdr->version_ihl
					& RTE_IPV4_HDR_IHL_MASK) *
					RTE_IPV4_IHL_MULTIPLIER;

				if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
					tcp_hdr = (struct rte_tcp_hdr *)
						((char *)ipv4_hdr +
							ip_hdr_offset);
					if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
							< pkt_end)
						l4hash = HASH_L4_PORTS(tcp_hdr);
				} else if (ipv4_hdr->next_proto_id ==
								IPPROTO_UDP) {
					udp_hdr = (struct rte_udp_hdr *)
						((char *)ipv4_hdr +
							ip_hdr_offset);
					if ((size_t)udp_hdr + sizeof(*udp_hdr)
							< pkt_end)
						l4hash = HASH_L4_PORTS(udp_hdr);
				}
			}
		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			l3hash = ipv6_hash(ipv6_hdr);

			if (ipv6_hdr->proto == IPPROTO_TCP) {
				tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
				l4hash = HASH_L4_PORTS(tcp_hdr);
			} else if (ipv6_hdr->proto == IPPROTO_UDP) {
				udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
				l4hash = HASH_L4_PORTS(udp_hdr);
			}
		}

		hash = l3hash ^ l4hash;
		hash ^= hash >> 16;
		hash ^= hash >> 8;

		slaves[i] = hash % slave_count;
	}
}
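
/*
 * Mode 5 (TLB) bookkeeping: slaves are periodically re-ordered by the
 * transmit headroom they have left (estimated from link speed and
 * recent tx byte counts by bandwidth_left()), so the TLB transmit path
 * can fill the least-loaded slave first. bwg_left_int and
 * bwg_left_remainder hold the integer and remainder parts of that
 * estimate.
 */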
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint16_t slave;
};
void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
	int i;

	for (i = 0; i < internals->active_slave_count; i++) {
		tlb_last_obytets[internals->active_slaves[i]] = 0;
	}
}
static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;
	if (diff > 0)
		return 1;
	else if (diff < 0)
		return -1;
	else if (diff2 > 0)
		return 1;
	else if (diff2 < 0)
		return -1;
	else
		return 0;
}
static void
bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;
	int ret;

	ret = rte_eth_link_get_nowait(port_id, &link_status);
	if (ret < 0) {
		RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
			     port_id, rte_strerror(-ret));
		return;
	}
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
	if (link_bwg == 0)
		return;
	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint16_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint16_t slave_id;
	uint16_t i;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats)
			tlb_last_obytets[slave_id] = slave_stats.obytes;
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint16_t i, j;

	uint16_t num_of_slaves = internals->active_slave_count;
	uint16_t slaves[RTE_MAX_ETHPORTS];

	struct rte_ether_hdr *ether_hdr;
	struct rte_ether_addr primary_slave_addr;
	struct rte_ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j],
					struct rte_ether_hdr *);
			if (rte_is_same_ether_addr(&ether_hdr->s_addr,
					&primary_slave_addr))
				rte_ether_addr_copy(&active_slave_addr,
						&ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
void
bond_tlb_disable(struct bond_dev_private *internals)
{
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
	bond_ethdev_update_tlb_slave_cb(internals);
}
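
/*
 * Mode 6 (ALB) transmit: ARP packets are steered per-client through the
 * ALB hash table (with the source MAC rewritten to the chosen slave),
 * pending ARP update packets are generated for known clients when the
 * table is marked as needing transmission, and all non-ARP traffic
 * falls back to the TLB policy.
 */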
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;
	size_t pkt_size;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;
	uint16_t slave_idx;

	int i, j;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;
		} else {
			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
		}
	}

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_BOND_LOG(ERR,
						     "Failed to allocate ARP packet from pool");
					continue;
				}
				pkt_size = sizeof(struct rte_ether_hdr) +
					sizeof(struct rte_arp_hdr) +
					client_info->vlan_count *
					sizeof(struct rte_vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;
			}
		}
		internals->mode6.ntt = 0;
	}

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];
			}

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
	/* Print TX stats including update packets */
			for (j = 0; j < slave_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
						struct rte_ether_hdr *);
				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);
			}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			for (j = 0; j < update_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
						struct rte_ether_hdr *);
				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
		}

		num_tx_total += num_send;
	}

	return num_tx_total;
}
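
/*
 * Shared transmit path for the balance and 802.3AD policies: hash each
 * packet to one of the supplied slaves, burst per slave, and compact any
 * untransmitted mbufs to the tail of bufs.
 */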
static inline uint16_t
tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
		uint16_t *slave_port_ids, uint16_t slave_count)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	/* Array to sort mbufs for transmission on each slave into */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
	/* Number of mbufs for transmission on each slave */
	uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
	/* Mapping array generated by hash function to map mbufs to slaves */
	uint16_t bufs_slave_port_idxs[nb_bufs];

	uint16_t slave_tx_count;
	uint16_t total_tx_count = 0, total_tx_fail_count = 0;

	uint16_t i;

	/*
	 * Map each mbuf to its output slave, using the hash for the
	 * configured xmit policy
	 */
	internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
			bufs_slave_port_idxs);

	for (i = 0; i < nb_bufs; i++) {
		/* Populate slave mbuf arrays with mbufs for that slave. */
		uint16_t slave_idx = bufs_slave_port_idxs[i];

		slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < slave_count; i++) {
		if (slave_nb_bufs[i] == 0)
			continue;

		slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
				bd_tx_q->queue_id, slave_bufs[i],
				slave_nb_bufs[i]);

		total_tx_count += slave_tx_count;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
			int slave_tx_fail_count = slave_nb_bufs[i] -
					slave_tx_count;
			total_tx_fail_count += slave_tx_fail_count;
			memcpy(&bufs[nb_bufs - total_tx_fail_count],
			       &slave_bufs[i][slave_tx_count],
			       slave_tx_fail_count * sizeof(bufs[0]));
		}
	}

	return total_tx_count;
}
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_bufs)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
	uint16_t slave_count;

	if (unlikely(nb_bufs == 0))
		return 0;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting
	 */
	slave_count = internals->active_slave_count;
	if (unlikely(slave_count < 1))
		return 0;

	memcpy(slave_port_ids, internals->active_slaves,
			sizeof(slave_port_ids[0]) * slave_count);
	return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
			slave_count);
}
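
/*
 * Mode 4 (802.3AD) transmit: first drain any LACP control frames queued
 * by the state machines (skipped when a dedicated control Tx queue is in
 * use), then balance the data burst across the slaves that are currently
 * in the DISTRIBUTING state.
 */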
static inline uint16_t
tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
		bool dedicated_txq)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
	uint16_t slave_count;

	uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
	uint16_t dist_slave_count;

	uint16_t slave_tx_count;

	uint16_t i;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	if (unlikely(slave_count < 1))
		return 0;

	memcpy(slave_port_ids, internals->active_slaves,
			sizeof(slave_port_ids[0]) * slave_count);

	if (dedicated_txq)
		goto skip_tx_ring;

	/* Check for LACP control packets and send if available */
	for (i = 0; i < slave_count; i++) {
		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
		struct rte_mbuf *ctrl_pkt = NULL;

		if (likely(rte_ring_empty(port->tx_ring)))
			continue;

		if (rte_ring_dequeue(port->tx_ring,
				(void **)&ctrl_pkt) != -ENOENT) {
			slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
					bd_tx_q->queue_id, &ctrl_pkt, 1);
			/*
			 * re-enqueue LAG control plane packets to buffering
			 * ring if transmission fails so the packet isn't lost.
			 */
			if (slave_tx_count != 1)
				rte_ring_enqueue(port->tx_ring, ctrl_pkt);
		}
	}

skip_tx_ring:
	if (unlikely(nb_bufs == 0))
		return 0;

	dist_slave_count = 0;
	for (i = 0; i < slave_count; i++) {
		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];

		if (ACTOR_STATE(port, DISTRIBUTING))
			dist_slave_port_ids[dist_slave_count++] =
					slave_port_ids[i];
	}

	if (unlikely(dist_slave_count < 1))
		return 0;

	return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
			dist_slave_count);
}
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_bufs)
{
	return tx_burst_8023ad(queue, bufs, nb_bufs, false);
}

static uint16_t
bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_bufs)
{
	return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}
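
/*
 * Mode 3 (broadcast) transmit: every packet is sent on every active
 * slave, so each mbuf's reference count is bumped by (slave count - 1)
 * before bursting. On partial failure, only the most successful slave's
 * count is reported and the surplus references are freed here.
 */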
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint16_t slaves[RTE_MAX_ETHPORTS];
	uint8_t tx_failed_flag = 0;
	uint16_t num_of_slaves;

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
static void
link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
{
	struct bond_dev_private *bond_ctx = ethdev->data->dev_private;

	if (bond_ctx->mode == BONDING_MODE_8023AD) {
		/**
		 * If in mode 4 then save the link properties of the first
		 * slave, all subsequent slaves must match these properties
		 */
		struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;

		bond_link->link_autoneg = slave_link->link_autoneg;
		bond_link->link_duplex = slave_link->link_duplex;
		bond_link->link_speed = slave_link->link_speed;
	} else {
		/**
		 * In any other mode the link properties are set to default
		 * values of AUTONEG/DUPLEX
		 */
		ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
		ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	}
}
static int
link_properties_valid(struct rte_eth_dev *ethdev,
		struct rte_eth_link *slave_link)
{
	struct bond_dev_private *bond_ctx = ethdev->data->dev_private;

	if (bond_ctx->mode == BONDING_MODE_8023AD) {
		struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;

		if (bond_link->link_duplex != slave_link->link_duplex ||
			bond_link->link_autoneg != slave_link->link_autoneg ||
			bond_link->link_speed != slave_link->link_speed)
			return -1;
	}

	return 0;
}
int
mac_address_get(struct rte_eth_dev *eth_dev,
		struct rte_ether_addr *dst_mac_addr)
{
	struct rte_ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	rte_ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}
int
mac_address_set(struct rte_eth_dev *eth_dev,
		struct rte_ether_addr *new_mac_addr)
{
	struct rte_ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different to current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
static const struct rte_ether_addr null_mac_addr;

/*
 * Add additional MAC addresses to the slave
 */
int
slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
		uint16_t slave_port_id)
{
	int i, ret;
	struct rte_ether_addr *mac_addr;

	for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
		mac_addr = &bonded_eth_dev->data->mac_addrs[i];
		if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
			break;

		ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
		if (ret < 0) {
			/* rollback */
			for (i--; i > 0; i--)
				rte_eth_dev_mac_addr_remove(slave_port_id,
					&bonded_eth_dev->data->mac_addrs[i]);
			return ret;
		}
	}

	return 0;
}
/*
 * Remove additional MAC addresses from the slave
 */
int
slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
		uint16_t slave_port_id)
{
	int i, rc, ret;
	struct rte_ether_addr *mac_addr;

	rc = 0;
	for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
		mac_addr = &bonded_eth_dev->data->mac_addrs[i];
		if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
			break;

		ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
		/* save only the first error */
		if (ret < 0 && rc == 0)
			rc = ret;
	}

	return rc;
}
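
/*
 * Propagate the bonded device's MAC address to the slaves according to
 * the mode: modes that transmit on all slaves share the bond MAC, mode 4
 * delegates to the 802.3AD code, and the remaining modes give the bond
 * MAC to the current primary while the other slaves keep their own
 * persisted addresses.
 */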
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	bool set;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++) {
			if (rte_eth_dev_default_mac_addr_set(
					internals->slaves[i].port_id,
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		set = true;
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (rte_eth_dev_default_mac_addr_set(
						internals->current_primary_port,
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					set = false;
				}
			} else {
				if (rte_eth_dev_default_mac_addr_set(
						internals->slaves[i].port_id,
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
				}
			}
		}
		if (!set)
			return -1;
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		if (internals->mode4.dedicated_queues.enabled == 0) {
			eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
			eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
			RTE_BOND_LOG(WARNING,
				"Using mode 4, it is necessary to do TX burst "
				"and RX burst at least every 100ms.");
		} else {
			/* Use flow director's optimization */
			eth_dev->rx_pkt_burst =
					bond_ethdev_rx_burst_8023ad_fast_queue;
			eth_dev->tx_pkt_burst =
					bond_ethdev_tx_burst_8023ad_fast_queue;
		}
		break;
	case BONDING_MODE_TLB:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
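
/*
 * Illustrative application-side usage (a sketch, not part of this
 * driver): a bonded port is normally created and its mode chosen via the
 * public API from rte_eth_bond.h, which ends up calling
 * bond_ethdev_mode_set() above to install the matching burst handlers:
 *
 *	int port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_8023AD, rte_socket_id());
 *	if (port >= 0 &&
 *	    rte_eth_bond_slave_add(port, slave_port_id) == 0)
 *		rte_eth_bond_mode_set(port, BONDING_MODE_8023AD);
 *
 * slave_port_id above is a placeholder for an already-probed port.
 */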
static int
slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	int errval = 0;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];

	if (port->slow_pool == NULL) {
		char mem_name[256];
		int slave_id = slave_eth_dev->data->port_id;

		snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
				slave_id);
		port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
			250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			slave_eth_dev->data->numa_node);

		/* Any memory allocation failure in initialization is critical because
		 * resources can't be freed, so reinitialization is impossible. */
		if (port->slow_pool == NULL) {
			rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
				slave_id, mem_name, rte_strerror(rte_errno));
		}
	}

	if (internals->mode4.dedicated_queues.enabled == 1) {
		/* Configure slow Rx queue */
		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.rx_qid, 128,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				NULL, port->slow_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.rx_qid,
				errval);
			return errval;
		}

		/* Configure slow Tx queue */
		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.tx_qid, 512,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				NULL);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.tx_qid,
				errval);
			return errval;
		}
	}
	return 0;
}
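
/*
 * Bring a slave in line with the bonded device: stop it, mirror the
 * bond's RSS/VLAN-filter/MTU configuration, create its Rx/Tx queues
 * (plus the dedicated slow queues and flow rule in mode 4), restart it,
 * and finally synchronize RETA and the initial link state.
 */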
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;

	int errval;
	uint16_t q_id;
	struct rte_flow_error flow_error;

	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	/* Stop slave */
	errval = rte_eth_dev_stop(slave_eth_dev->data->port_id);
	if (errval != 0)
		RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)",
			     slave_eth_dev->data->port_id, errval);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* If RSS is enabled for bonding, try to enable it for slaves */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (internals->rss_key_len != 0) {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
					internals->rss_key_len;
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
					internals->rss_key;
		} else {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		}

		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
	}

	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
		slave_eth_dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_FILTER;
	else
		slave_eth_dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_FILTER;

	nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
	nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;

	if (internals->mode == BONDING_MODE_8023AD) {
		if (internals->mode4.dedicated_queues.enabled == 1) {
			nb_rx_queues++;
			nb_tx_queues++;
		}
	}

	errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
				     bonded_eth_dev->data->mtu);
	if (errval != 0 && errval != -ENOTSUP) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			nb_rx_queues, nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	if (internals->mode == BONDING_MODE_8023AD &&
			internals->mode4.dedicated_queues.enabled == 1) {
		errval = slave_configure_slow_queue(bonded_eth_dev,
				slave_eth_dev);
		if (errval != 0)
			return errval;

		if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
				slave_eth_dev->data->port_id) != 0) {
			RTE_BOND_LOG(ERR,
				"bond_ethdev_8023ad_flow_verify: port=%d",
				slave_eth_dev->data->port_id);
			return -1;
		}

		if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
			rte_flow_destroy(slave_eth_dev->data->port_id,
					internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
					&flow_error);

		bond_ethdev_8023ad_flow_set(bonded_eth_dev,
				slave_eth_dev->data->port_id);
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	/* If RSS is enabled for bonding, synchronize RETA */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int i;
		struct bond_dev_private *internals;

		internals = bonded_eth_dev->data->dev_private;

		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
				errval = rte_eth_dev_rss_reta_update(
						slave_eth_dev->data->port_id,
						&internals->reta_conf[0],
						internals->slaves[i].reta_size);
				if (errval != 0)
					RTE_BOND_LOG(WARNING,
						"rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
						" RSS Configuration for bonding may be inconsistent.",
						slave_eth_dev->data->port_id, errval);
				break;
			}
		}
	}

	/* If lsc interrupt is set, check initial slave's link status */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
		bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
			RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
			NULL);
	}

	return 0;
}
static void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	uint16_t i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1)) {
		struct rte_flow *flow;

		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));
		TAILQ_FOREACH(flow, &internals->flow_list, next) {
			memmove(&flow->flows[i], &flow->flows[i + 1],
				sizeof(flow->flows[0]) *
				(internals->slave_count - i - 1));
			flow->flows[internals->slave_count - 1] = NULL;
		}
	}

	internals->slave_count--;

	/* force reconfiguration of slave interfaces */
	rte_eth_dev_internal_reset(slave_eth_dev);
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);

void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* Mark slave devices that don't support interrupts so we can
	 * compensate when we start the bond
	 */
	if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))
		slave_details->link_status_poll_enabled = 1;

	slave_details->link_status_wait_to_complete = 0;
	/* clean tlb_last_obytes when adding port for bonding device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct rte_ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint16_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
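
/*
 * Start the bonded device: pick up the primary slave's MAC if the user
 * did not set one, reserve the dedicated mode 4 queue ids, reconfigure
 * and start every slave, then kick off link polling, MAC propagation
 * and the mode-specific state machines.
 */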
static int
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);

static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (check_for_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		goto out_err;
	}

	if (internals->user_defined_mac == 0) {
		struct rte_ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			goto out_err;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			goto out_err;
		}
	}

	if (internals->mode == BONDING_MODE_8023AD) {
		if (internals->mode4.dedicated_queues.enabled == 1) {
			internals->mode4.dedicated_queues.rx_qid =
					eth_dev->data->nb_rx_queues;
			internals->mode4.dedicated_queues.tx_qid =
					eth_dev->data->nb_tx_queues;
		}
	}

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		struct rte_eth_dev *slave_ethdev =
				&(rte_eth_devices[internals->slaves[i].port_id]);
		if (slave_configure(eth_dev, slave_ethdev) != 0) {
			RTE_BOND_LOG(ERR,
				"bonded port (%d) failed to reconfigure slave device (%d)",
				eth_dev->data->port_id,
				internals->slaves[i].port_id);
			goto out_err;
		}
		/* We will need to poll for link status if any slave doesn't
		 * support interrupts
		 */
		if (internals->slaves[i].link_status_poll_enabled)
			internals->link_status_polling_enabled = 1;
	}

	/* start polling if needed */
	if (internals->link_status_polling_enabled) {
		rte_eal_alarm_set(
			internals->link_status_polling_interval_ms * 1000,
			bond_ethdev_slave_link_status_change_monitor,
			(void *)&rte_eth_devices[internals->port_id]);
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		goto out_err;

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);

	return 0;

out_err:
	eth_dev->data->dev_started = 0;
	return -1;
}
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	if (dev->data->rx_queues != NULL) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rte_free(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->nb_rx_queues = 0;
	}

	if (dev->data->tx_queues != NULL) {
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			rte_free(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->nb_tx_queues = 0;
	}
}
int
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint16_t i;
	int ret;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->active_slave_count; i++) {
			port = &bond_mode_8023ad_ports[internals->active_slaves[i]];

			RTE_ASSERT(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_ASSERT(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	internals->link_status_polling_enabled = 0;
	for (i = 0; i < internals->slave_count; i++) {
		uint16_t slave_id = internals->slaves[i].port_id;
		if (find_slave_by_id(internals->active_slaves,
				internals->active_slave_count, slave_id) !=
						internals->active_slave_count) {
			internals->slaves[i].last_link_status = 0;
			ret = rte_eth_dev_stop(slave_id);
			if (ret != 0) {
				RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
					     slave_id);
				return ret;
			}
			deactivate_slave(eth_dev, slave_id);
		}
	}

	return 0;
}
static int
bond_ethdev_close(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	uint16_t bond_port_id = internals->port_id;
	int skipped = 0;
	struct rte_flow_error ferror;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
	while (internals->slave_count != skipped) {
		uint16_t port_id = internals->slaves[skipped].port_id;

		if (rte_eth_dev_stop(port_id) != 0) {
			RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
				     port_id);
			skipped++;
		}

		if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to remove port %d from bonded device %s",
				     port_id, dev->device->name);
			skipped++;
		}
	}
	bond_flow_ops.flush(dev, &ferror);
	bond_ethdev_free_queues(dev);
	rte_bitmap_reset(internals->vlan_filter_bmp);
	rte_bitmap_free(internals->vlan_filter_bmp);
	rte_free(internals->vlan_filter_bmpmem);

	/* Try to release the mempool used in mode 6. If the bonded device
	 * is not in mode 6, freeing a NULL pointer is not a problem.
	 */
	rte_mempool_free(internals->mode6.mempool);

	return 0;
}
/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static int
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct bond_slave_details slave;
	int ret;

	uint16_t max_nb_rx_queues = UINT16_MAX;
	uint16_t max_nb_tx_queues = UINT16_MAX;
	uint16_t max_rx_desc_lim = UINT16_MAX;
	uint16_t max_tx_desc_lim = UINT16_MAX;

	dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;

	dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
			internals->candidate_max_rx_pktlen :
			RTE_ETHER_MAX_JUMBO_FRAME_LEN;

	/* Max number of tx/rx queues that the bonded device can support is the
	 * minimum values of the bonded slaves, as all slaves must be capable
	 * of supporting the same number of tx/rx queues.
	 */
	if (internals->slave_count > 0) {
		struct rte_eth_dev_info slave_info;
		uint16_t idx;

		for (idx = 0; idx < internals->slave_count; idx++) {
			slave = internals->slaves[idx];
			ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
			if (ret != 0) {
				RTE_BOND_LOG(ERR,
					"%s: Error during getting device (port %u) info: %s\n",
					__func__,
					slave.port_id,
					strerror(-ret));

				return ret;
			}

			if (slave_info.max_rx_queues < max_nb_rx_queues)
				max_nb_rx_queues = slave_info.max_rx_queues;

			if (slave_info.max_tx_queues < max_nb_tx_queues)
				max_nb_tx_queues = slave_info.max_tx_queues;

			if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
				max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;

			if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
				max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
		}
	}

	dev_info->max_rx_queues = max_nb_rx_queues;
	dev_info->max_tx_queues = max_nb_tx_queues;

	memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
	       sizeof(dev_info->default_rxconf));
	memcpy(&dev_info->default_txconf, &internals->default_txconf,
	       sizeof(dev_info->default_txconf));

	dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
	dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;

	/*
	 * If dedicated hw queues enabled for link bonding device in LACP mode
	 * then we need to reduce the maximum number of data path queues by 1.
	 */
	if (internals->mode == BONDING_MODE_8023AD &&
		internals->mode4.dedicated_queues.enabled == 1) {
		dev_info->max_rx_queues--;
		dev_info->max_tx_queues--;
	}

	dev_info->min_rx_bufsize = 0;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
	dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;

	dev_info->reta_size = internals->reta_size;

	return 0;
}
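/*
 * Illustrative sketch (not part of the driver): because bond_ethdev_info()
 * reports the minimum of the slaves' limits, an application can size its
 * queue configuration directly from the bonded port's dev_info. The helper
 * name and output parameter are hypothetical.
 */
static __rte_unused int
example_bond_query_limits(uint16_t bond_port_id, uint16_t *nb_queues)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(bond_port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* never ask for more queues than the most limited slave supports */
	*nb_queues = RTE_MIN(dev_info.max_rx_queues, dev_info.max_tx_queues);
	return 0;
}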
static int
bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int res;
	uint16_t i;
	struct bond_dev_private *internals = dev->data->dev_private;

	/* don't do this while a slave is being added */
	rte_spinlock_lock(&internals->lock);

	if (on)
		rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
	else
		rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);

	for (i = 0; i < internals->slave_count; i++) {
		uint16_t port_id = internals->slaves[i].port_id;

		res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
		if (res == -ENOTSUP)
			RTE_BOND_LOG(WARNING,
				     "Setting VLAN filter on slave port %u not supported.",
				     port_id);
	}

	rte_spinlock_unlock(&internals->lock);
	return 0;
}
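/*
 * Illustrative sketch (not part of the driver): a VLAN filter set on the
 * bonded port is fanned out to every slave by
 * bond_ethdev_vlan_filter_set() above; slaves without filtering support
 * only trigger a warning. The port id and VLAN id are hypothetical.
 */
static __rte_unused int
example_bond_admit_vlan(uint16_t bond_port_id)
{
	/* admit VLAN 100 on the bonded port (and thus on all slaves) */
	return rte_eth_dev_vlan_filter(bond_port_id, 100, 1);
}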
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->data->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}
static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->data->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}
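/*
 * Illustrative sketch (not part of the driver): queue setup on the bonded
 * port only records the configuration in bond_rx_queue/bond_tx_queue; it is
 * replayed on each slave when the device starts. A minimal application-side
 * setup could look like this; the descriptor counts are hypothetical and the
 * mempool is assumed to be created by the caller.
 */
static __rte_unused int
example_bond_queue_setup(uint16_t bond_port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));

	ret = rte_eth_dev_configure(bond_port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* stored in a bond_rx_queue until the slaves are configured */
	ret = rte_eth_rx_queue_setup(bond_port_id, 0, 128, rte_socket_id(),
			NULL, mb_pool);
	if (ret != 0)
		return ret;

	return rte_eth_tx_queue_setup(bond_port_id, 0, 512, rte_socket_id(),
			NULL);
}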
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = cb_arg;
	internals = bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
			!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id,
						NULL);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdev's */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
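/*
 * Illustrative sketch (not part of the driver): the self-rearming alarm
 * pattern used by the monitor above. rte_eal_alarm_set() fires once, so the
 * callback must re-arm itself for as long as monitoring should continue.
 * The 10 ms interval and the stop flag are hypothetical.
 */
static volatile int example_monitor_enabled;

static __rte_unused void
example_periodic_monitor(void *cb_arg)
{
	if (!example_monitor_enabled)
		return;

	/* ... inspect state here, as the slave link monitor does ... */

	/* re-arm: the interval is given in microseconds */
	rte_eal_alarm_set(10 * 1000, example_periodic_monitor, cb_arg);
}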
static int
bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
{
	int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);

	struct bond_dev_private *bond_ctx;
	struct rte_eth_link slave_link;

	bool one_link_update_succeeded;
	uint32_t idx;
	int ret;

	bond_ctx = ethdev->data->dev_private;

	ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;

	if (ethdev->data->dev_started == 0 ||
			bond_ctx->active_slave_count == 0) {
		ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
		return 0;
	}

	ethdev->data->dev_link.link_status = ETH_LINK_UP;

	if (wait_to_complete)
		link_update = rte_eth_link_get;
	else
		link_update = rte_eth_link_get_nowait;

	switch (bond_ctx->mode) {
	case BONDING_MODE_BROADCAST:
		/*
		 * Setting link speed to UINT32_MAX to ensure we pick up the
		 * value of the first active slave
		 */
		ethdev->data->dev_link.link_speed = UINT32_MAX;

		/*
		 * link speed is minimum value of all the slaves link speed as
		 * packet loss will occur on this slave if transmission at rates
		 * greater than this are attempted
		 */
		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
			ret = link_update(bond_ctx->active_slaves[idx],
					&slave_link);
			if (ret < 0) {
				ethdev->data->dev_link.link_speed =
						ETH_SPEED_NUM_NONE;
				RTE_BOND_LOG(ERR,
					"Slave (port %u) link get failed: %s",
					bond_ctx->active_slaves[idx],
					rte_strerror(-ret));
				return 0;
			}

			if (slave_link.link_speed <
					ethdev->data->dev_link.link_speed)
				ethdev->data->dev_link.link_speed =
						slave_link.link_speed;
		}
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		/* Current primary slave */
		ret = link_update(bond_ctx->current_primary_port, &slave_link);
		if (ret < 0) {
			RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
				bond_ctx->current_primary_port,
				rte_strerror(-ret));
			return 0;
		}

		ethdev->data->dev_link.link_speed = slave_link.link_speed;
		break;
	case BONDING_MODE_8023AD:
		ethdev->data->dev_link.link_autoneg =
				bond_ctx->mode4.slave_link.link_autoneg;
		ethdev->data->dev_link.link_duplex =
				bond_ctx->mode4.slave_link.link_duplex;
		/* fall through to update link speed */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/*
		 * In these modes the maximum theoretical link speed is the sum
		 * of all the slaves
		 */
		ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
		one_link_update_succeeded = false;

		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
			ret = link_update(bond_ctx->active_slaves[idx],
					&slave_link);
			if (ret < 0) {
				RTE_BOND_LOG(ERR,
					"Slave (port %u) link get failed: %s",
					bond_ctx->active_slaves[idx],
					rte_strerror(-ret));
				continue;
			}

			one_link_update_succeeded = true;
			ethdev->data->dev_link.link_speed +=
					slave_link.link_speed;
		}

		if (!one_link_update_succeeded) {
			RTE_BOND_LOG(ERR, "All slaves link get failed");
			return 0;
		}
	}

	return 0;
}
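/*
 * Illustrative sketch (not part of the driver): reading the aggregate link
 * reported by bond_ethdev_link_update(). In the balance/round-robin modes
 * the speed is the sum over the active slaves, e.g. two active 10G slaves
 * report 20000 Mbps; in broadcast mode it is the minimum. The helper name
 * is hypothetical.
 */
static __rte_unused uint32_t
example_bond_link_speed(uint16_t bond_port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(bond_port_id, &link) < 0)
		return ETH_SPEED_NUM_NONE;

	return link.link_status == ETH_LINK_UP ? link.link_speed
					       : ETH_SPEED_NUM_NONE;
}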
static int
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;
	int i, j;

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->imissed += slave_stats.imissed;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->rx_nombuf += slave_stats.rx_nombuf;

		for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
			stats->q_ipackets[j] += slave_stats.q_ipackets[j];
			stats->q_opackets[j] += slave_stats.q_opackets[j];
			stats->q_ibytes[j] += slave_stats.q_ibytes[j];
			stats->q_obytes[j] += slave_stats.q_obytes[j];
			stats->q_errors[j] += slave_stats.q_errors[j];
		}
	}

	return 0;
}
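/*
 * Illustrative sketch (not part of the driver): the bonded port's counters
 * are the per-slave sums built by bond_ethdev_stats_get() above, so a single
 * call on the bonded port covers all slaves. The helper name is
 * hypothetical.
 */
static __rte_unused void
example_bond_log_stats(uint16_t bond_port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(bond_port_id, &stats) == 0)
		RTE_BOND_LOG(INFO, "bond port %u: rx=%llu tx=%llu missed=%llu",
			     bond_port_id,
			     (unsigned long long)stats.ipackets,
			     (unsigned long long)stats.opackets,
			     (unsigned long long)stats.imissed);
}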
static int
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;
	int err;
	int ret;

	/* reset every slave; remember the last failure, if any */
	for (i = 0, err = 0; i < internals->slave_count; i++) {
		ret = rte_eth_stats_reset(internals->slaves[i].port_id);
		if (ret != 0)
			err = ret;
	}

	return err;
}
static int
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;
	int ret = 0;
	uint16_t port_id;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD: {
		unsigned int slave_ok = 0;

		for (i = 0; i < internals->slave_count; i++) {
			port_id = internals->slaves[i].port_id;

			ret = rte_eth_promiscuous_enable(port_id);
			if (ret != 0)
				RTE_BOND_LOG(ERR,
					"Failed to enable promiscuous mode for port %u: %s",
					port_id, rte_strerror(-ret));
			else
				slave_ok++;
		}
		/*
		 * Report success if operation is successful on at least
		 * one slave. Otherwise return last error code.
		 */
		if (slave_ok > 0)
			ret = 0;
		break;
	}
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Do not touch promisc when there cannot be primary ports */
		if (internals->slave_count == 0)
			break;
		port_id = internals->current_primary_port;
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"Failed to enable promiscuous mode for port %u: %s",
				port_id, rte_strerror(-ret));
	}

	return ret;
}
static int
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;
	int ret = 0;
	uint16_t port_id;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD: {
		unsigned int slave_ok = 0;

		for (i = 0; i < internals->slave_count; i++) {
			port_id = internals->slaves[i].port_id;

			if (internals->mode == BONDING_MODE_8023AD &&
			    bond_mode_8023ad_ports[port_id].forced_rx_flags ==
					BOND_8023AD_FORCED_PROMISC) {
				slave_ok++;
				continue;
			}
			ret = rte_eth_promiscuous_disable(port_id);
			if (ret != 0)
				RTE_BOND_LOG(ERR,
					"Failed to disable promiscuous mode for port %u: %s",
					port_id, rte_strerror(-ret));
			else
				slave_ok++;
		}
		/*
		 * Report success if operation is successful on at least
		 * one slave. Otherwise return last error code.
		 */
		if (slave_ok > 0)
			ret = 0;
		break;
	}
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Do not touch promisc when there cannot be primary ports */
		if (internals->slave_count == 0)
			break;
		port_id = internals->current_primary_port;
		ret = rte_eth_promiscuous_disable(port_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"Failed to disable promiscuous mode for port %u: %s",
				port_id, rte_strerror(-ret));
	}

	return ret;
}
static int
bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;
	int ret = 0;
	uint16_t port_id;

	switch (internals->mode) {
	/* allmulti mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD: {
		unsigned int slave_ok = 0;

		for (i = 0; i < internals->slave_count; i++) {
			port_id = internals->slaves[i].port_id;

			ret = rte_eth_allmulticast_enable(port_id);
			if (ret != 0)
				RTE_BOND_LOG(ERR,
					"Failed to enable allmulti mode for port %u: %s",
					port_id, rte_strerror(-ret));
			else
				slave_ok++;
		}
		/*
		 * Report success if operation is successful on at least
		 * one slave. Otherwise return last error code.
		 */
		if (slave_ok > 0)
			ret = 0;
		break;
	}
	/* allmulti mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Do not touch allmulti when there cannot be primary ports */
		if (internals->slave_count == 0)
			break;
		port_id = internals->current_primary_port;
		ret = rte_eth_allmulticast_enable(port_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"Failed to enable allmulti mode for port %u: %s",
				port_id, rte_strerror(-ret));
	}

	return ret;
}
static int
bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;
	int ret = 0;
	uint16_t port_id;

	switch (internals->mode) {
	/* allmulti mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_8023AD: {
		unsigned int slave_ok = 0;

		for (i = 0; i < internals->slave_count; i++) {
			uint16_t port_id = internals->slaves[i].port_id;

			if (internals->mode == BONDING_MODE_8023AD &&
			    bond_mode_8023ad_ports[port_id].forced_rx_flags ==
					BOND_8023AD_FORCED_ALLMULTI)
				continue;

			ret = rte_eth_allmulticast_disable(port_id);
			if (ret != 0)
				RTE_BOND_LOG(ERR,
					"Failed to disable allmulti mode for port %u: %s",
					port_id, rte_strerror(-ret));
			else
				slave_ok++;
		}
		/*
		 * Report success if operation is successful on at least
		 * one slave. Otherwise return last error code.
		 */
		if (slave_ok > 0)
			ret = 0;
		break;
	}
	/* allmulti mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		/* Do not touch allmulti when there cannot be primary ports */
		if (internals->slave_count == 0)
			break;
		port_id = internals->current_primary_port;
		ret = rte_eth_allmulticast_disable(port_id);
		if (ret != 0)
			RTE_BOND_LOG(ERR,
				"Failed to disable allmulti mode for port %u: %s",
				port_id, rte_strerror(-ret));
	}

	return ret;
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC, NULL);
}
int
bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
		void *param, void *ret_param __rte_unused)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;
	int ret;

	uint8_t lsc_flag = 0;
	int valid_slave = 0;
	uint16_t active_pos;
	uint16_t i;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return -1;

	bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];

	if (check_for_bonded_ethdev(bonded_eth_dev))
		return -1;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return -1;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return -1;

	/* Synchronize lsc callback parallel calls either by real link event
	 * from the slaves PMDs or by the bonding PMD itself.
	 */
	rte_spinlock_lock(&internals->lsc_lock);

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id);

	if (ret == 0 && link.link_status) {
		if (active_pos < internals->active_slave_count)
			goto link_update;

		/* check link state properties if bonded link is up */
		if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
			if (link_properties_valid(bonded_eth_dev, &link) != 0)
				RTE_BOND_LOG(ERR, "Invalid link properties "
						"for slave %d in bonding mode %d",
						port_id, internals->mode);
		} else {
			/* inherit slave link properties */
			link_properties_set(bonded_eth_dev, &link);
		}

		/* If no active slave ports then set this port to be
		 * the primary port.
		 */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status =
					ETH_LINK_UP;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If the user has defined the primary port then default to
		 * using it.
		 */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			goto link_update;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		if (internals->active_slave_count < 1)
			lsc_flag = 1;

		/* Update primary id, take first active slave from list or if
		 * none available fall back to the configured primary port */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port =
						internals->primary_port;
			mac_address_slaves_update(bonded_eth_dev);
		}
	}

link_update:
	/*
	 * Update bonded device link properties after any change to active
	 * slaves
	 */
	bond_ethdev_link_update(bonded_eth_dev, 0);

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC,
						NULL);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC,
						NULL);
		}
	}

	rte_spinlock_unlock(&internals->lsc_lock);

	return 0;
}
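/*
 * Illustrative sketch (not part of the driver): an application can observe
 * the (possibly delayed) LSC events that bond_ethdev_lsc_event_callback()
 * propagates by registering its own callback on the bonded port. The
 * function names are hypothetical.
 */
static int
example_app_lsc_cb(uint16_t port_id, enum rte_eth_event_type type,
		void *cb_arg __rte_unused, void *ret_param __rte_unused)
{
	struct rte_eth_link link;

	if (type == RTE_ETH_EVENT_INTR_LSC &&
			rte_eth_link_get_nowait(port_id, &link) == 0)
		RTE_BOND_LOG(INFO, "bonded port %u link is now %s", port_id,
			     link.link_status == ETH_LINK_UP ? "up" : "down");

	return 0;
}

static __rte_unused int
example_register_lsc(uint16_t bond_port_id)
{
	return rte_eth_dev_callback_register(bond_port_id,
			RTE_ETH_EVENT_INTR_LSC, example_app_lsc_cb, NULL);
}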
static int
bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	unsigned int i, j;
	int result = 0;
	int slave_reta_size;
	unsigned int reta_count;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
			RTE_RETA_GROUP_SIZE;

	for (i = 0; i < reta_count; i++) {
		internals->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	/* Fill rest of array */
	for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
		memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
				sizeof(internals->reta_conf[0]) * reta_count);

	/* Propagate RETA over slaves */
	for (i = 0; i < internals->slave_count; i++) {
		slave_reta_size = internals->slaves[i].reta_size;
		result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
				&internals->reta_conf[0], slave_reta_size);
		if (result < 0)
			return result;
	}

	return 0;
}
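/*
 * Illustrative sketch (not part of the driver): building a RETA update for
 * the bonded port. reta_size must match what bond_ethdev_info() reports,
 * and entries are grouped in blocks of RTE_RETA_GROUP_SIZE (64). The
 * two-queue spread and the 512-entry cap are hypothetical.
 */
static __rte_unused int
example_bond_reta_update(uint16_t bond_port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64
			reta_conf[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE];
	uint16_t i;

	if (reta_size > ETH_RSS_RETA_SIZE_512 ||
			reta_size % RTE_RETA_GROUP_SIZE != 0)
		return -EINVAL;

	for (i = 0; i < reta_size; i++) {
		/* update every entry in each 64-entry group */
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
		/* alternate flows between queue 0 and queue 1 */
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
				i % 2;
	}

	return rte_eth_dev_rss_reta_update(bond_port_id, reta_conf, reta_size);
}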
static int
bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct bond_dev_private *internals = dev->data->dev_private;

	if (reta_size != internals->reta_size)
		return -EINVAL;

	/* Copy RETA table */
	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];

	return 0;
}
static int
bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	int i, result = 0;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_rss_conf bond_rss_conf;

	memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));

	bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;

	if (bond_rss_conf.rss_hf != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;

	if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
			sizeof(internals->rss_key)) {
		/* a zero key length means: keep the default 40-byte key size */
		if (bond_rss_conf.rss_key_len == 0)
			bond_rss_conf.rss_key_len = 40;
		internals->rss_key_len = bond_rss_conf.rss_key_len;
		memcpy(internals->rss_key, bond_rss_conf.rss_key,
				internals->rss_key_len);
	}

	for (i = 0; i < internals->slave_count; i++) {
		result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
				&bond_rss_conf);
		if (result < 0)
			return result;
	}

	return 0;
}
static int
bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_conf->rss_key_len = internals->rss_key_len;
	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, internals->rss_key,
				internals->rss_key_len);

	return 0;
}
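/*
 * Illustrative sketch (not part of the driver): updating the RSS hash
 * functions on the bonded port. bond_ethdev_rss_hash_update() masks the
 * requested hash types against what every slave supports and fans the
 * configuration out to all slaves. The hash selection here is hypothetical.
 */
static __rte_unused int
example_bond_rss_hash(uint16_t bond_port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the current (or default) key */
		.rss_key_len = 0,
		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
	};

	return rte_eth_dev_rss_hash_update(bond_port_id, &rss_conf);
}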
static int
bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev *slave_eth_dev;
	struct bond_dev_private *internals = dev->data->dev_private;
	int ret, i;

	rte_spinlock_lock(&internals->lock);

	/* refuse unless every slave can change its MTU */
	for (i = 0; i < internals->slave_count; i++) {
		slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
		if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
			rte_spinlock_unlock(&internals->lock);
			return -ENOTSUP;
		}
	}
	for (i = 0; i < internals->slave_count; i++) {
		ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
		if (ret < 0) {
			rte_spinlock_unlock(&internals->lock);
			return ret;
		}
	}

	rte_spinlock_unlock(&internals->lock);
	return 0;
}
static int
bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *addr)
{
	if (mac_address_set(dev, addr)) {
		RTE_BOND_LOG(ERR, "Failed to update MAC address");
		return -EINVAL;
	}

	return 0;
}
static int
bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		enum rte_filter_type type, enum rte_filter_op op, void *arg)
{
	if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
		*(const void **)arg = &bond_flow_ops;
		return 0;
	}

	return -ENOTSUP;
}
static int
bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr,
			__rte_unused uint32_t index, uint32_t vmdq)
{
	struct rte_eth_dev *slave_eth_dev;
	struct bond_dev_private *internals = dev->data->dev_private;
	int ret, i;

	rte_spinlock_lock(&internals->lock);

	/* refuse unless every slave supports add and remove */
	for (i = 0; i < internals->slave_count; i++) {
		slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
		if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
				*slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
			ret = -ENOTSUP;
			goto end;
		}
	}

	for (i = 0; i < internals->slave_count; i++) {
		ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
				mac_addr, vmdq);
		if (ret < 0) {
			/* rollback the address on the slaves already updated */
			for (i--; i >= 0; i--)
				rte_eth_dev_mac_addr_remove(
					internals->slaves[i].port_id, mac_addr);
			goto end;
		}
	}

	ret = 0;
end:
	rte_spinlock_unlock(&internals->lock);
	return ret;
}
static void
bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct rte_eth_dev *slave_eth_dev;
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	rte_spinlock_lock(&internals->lock);

	for (i = 0; i < internals->slave_count; i++) {
		slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
		if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
			goto end;
	}

	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
				mac_addr);

end:
	rte_spinlock_unlock(&internals->lock);
}
const struct eth_dev_ops default_dev_ops = {
	.dev_start            = bond_ethdev_start,
	.dev_stop             = bond_ethdev_stop,
	.dev_close            = bond_ethdev_close,
	.dev_configure        = bond_ethdev_configure,
	.dev_infos_get        = bond_ethdev_info,
	.vlan_filter_set      = bond_ethdev_vlan_filter_set,
	.rx_queue_setup       = bond_ethdev_rx_queue_setup,
	.tx_queue_setup       = bond_ethdev_tx_queue_setup,
	.rx_queue_release     = bond_ethdev_rx_queue_release,
	.tx_queue_release     = bond_ethdev_tx_queue_release,
	.link_update          = bond_ethdev_link_update,
	.stats_get            = bond_ethdev_stats_get,
	.stats_reset          = bond_ethdev_stats_reset,
	.promiscuous_enable   = bond_ethdev_promiscuous_enable,
	.promiscuous_disable  = bond_ethdev_promiscuous_disable,
	.allmulticast_enable  = bond_ethdev_allmulticast_enable,
	.allmulticast_disable = bond_ethdev_allmulticast_disable,
	.reta_update          = bond_ethdev_rss_reta_update,
	.reta_query           = bond_ethdev_rss_reta_query,
	.rss_hash_update      = bond_ethdev_rss_hash_update,
	.rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get,
	.mtu_set              = bond_ethdev_mtu_set,
	.mac_addr_set         = bond_ethdev_mac_address_set,
	.mac_addr_add         = bond_ethdev_mac_addr_add,
	.mac_addr_remove      = bond_ethdev_mac_addr_remove,
	.filter_ctrl          = bond_filter_ctrl
};
static int
bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
{
	const char *name = rte_vdev_device_name(dev);
	uint8_t socket_id = dev->device.numa_node;
	struct bond_dev_private *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	uint32_t vlan_filter_bmp_size;

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */

	/* reserve an ethdev entry */
	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
		goto err;
	}

	internals = eth_dev->data->dev_private;
	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
			BOND_MAX_MAC_ADDRS, 0, socket_id);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_BOND_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
		goto err;
	}

	eth_dev->dev_ops = &default_dev_ops;
	eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;

	rte_spinlock_init(&internals->lock);
	rte_spinlock_init(&internals->lsc_lock);

	internals->port_id = eth_dev->data->port_id;
	internals->mode = BONDING_MODE_INVALID;
	internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
	internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
	internals->burst_xmit_hash = burst_xmit_l2_hash;
	internals->user_defined_mac = 0;

	internals->link_status_polling_enabled = 0;

	internals->link_status_polling_interval_ms =
		DEFAULT_POLLING_INTERVAL_10_MS;
	internals->link_down_delay_ms = 0;
	internals->link_up_delay_ms = 0;

	internals->slave_count = 0;
	internals->active_slave_count = 0;
	internals->rx_offload_capa = 0;
	internals->tx_offload_capa = 0;
	internals->rx_queue_offload_capa = 0;
	internals->tx_queue_offload_capa = 0;
	internals->candidate_max_rx_pktlen = 0;
	internals->max_rx_pktlen = 0;

	/* Initially allow to choose any offload type */
	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;

	memset(&internals->default_rxconf, 0,
	       sizeof(internals->default_rxconf));
	memset(&internals->default_txconf, 0,
	       sizeof(internals->default_txconf));

	memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
	memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));

	memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
	memset(internals->slaves, 0, sizeof(internals->slaves));

	TAILQ_INIT(&internals->flow_list);
	internals->flow_isolated_valid = 0;

	/* Set mode 4 default configuration */
	bond_mode_8023ad_setup(eth_dev, NULL);
	if (bond_ethdev_mode_set(eth_dev, mode)) {
		RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
				 eth_dev->data->port_id, mode);
		goto err;
	}

	vlan_filter_bmp_size =
		rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
	internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
						   RTE_CACHE_LINE_SIZE);
	if (internals->vlan_filter_bmpmem == NULL) {
		RTE_BOND_LOG(ERR,
			     "Failed to allocate vlan bitmap for bonded device %u",
			     eth_dev->data->port_id);
		goto err;
	}

	internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
			internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
	if (internals->vlan_filter_bmp == NULL) {
		RTE_BOND_LOG(ERR,
			     "Failed to init vlan bitmap for bonded device %u",
			     eth_dev->data->port_id);
		rte_free(internals->vlan_filter_bmpmem);
		goto err;
	}

	return eth_dev->data->port_id;

err:
	rte_free(internals);
	if (eth_dev != NULL)
		eth_dev->data->dev_private = NULL;
	rte_eth_dev_release_port(eth_dev);

	return -1;
}
static int
bond_probe(struct rte_vdev_device *dev)
{
	const char *name;
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id, agg_mode;
	int arg_count, port_id;
	struct rte_eth_dev *eth_dev;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			RTE_BOND_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &default_dev_ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
		pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
					name);
			goto parse_error;
		}
	} else {
		RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded "
				"device %s", name);
		goto parse_error;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
					"bonded device %s", name);
			goto parse_error;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
				"bonded device %s", name);
		goto parse_error;
	} else {
		socket_id = rte_socket_id();
	}

	dev->device.numa_node = socket_id;

	/* Create link bonding eth device */
	port_id = bond_alloc(dev, bonding_mode);
	if (port_id < 0) {
		RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u on "
				"socket %u.", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist,
				PMD_BOND_AGG_MODE_KVARG,
				&bond_ethdev_parse_slave_agg_mode_kvarg,
				&agg_mode) != 0) {
			RTE_BOND_LOG(ERR,
					"Failed to parse agg selection mode for bonded device %s",
					name);
			goto parse_error;
		}

		if (internals->mode == BONDING_MODE_8023AD)
			internals->mode4.agg_selection = agg_mode;
	} else {
		internals->mode4.agg_selection = AGG_STABLE;
	}

	rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
	RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
			"socket %u.", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);

	return -1;
}
static int
bond_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev;
	struct bond_dev_private *internals;
	const char *name;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port already released */

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	RTE_ASSERT(eth_dev->device == &dev->device);

	internals = eth_dev->data->dev_private;
	if (internals->slave_count != 0)
		return -EBUSY;

	if (eth_dev->data->dev_started == 1) {
		bond_ethdev_stop(eth_dev);
		bond_ethdev_close(eth_dev);
	}
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
/* this part will resolve the slave port ids after all the other pdevs and
 * vdevs have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	const char *name = dev->device->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count;
	uint16_t port_id = dev - rte_eth_devices;
	uint8_t agg_mode;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	unsigned int i, j;

	/*
	 * If RSS is enabled, fill table with default values and
	 * set key to the value specified in port RSS configuration.
	 * Fall back to default RSS key if the key is not specified.
	 */
	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
			internals->rss_key_len =
				dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
			memcpy(internals->rss_key,
			       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
			       internals->rss_key_len);
		} else {
			internals->rss_key_len = sizeof(default_rss_key);
			memcpy(internals->rss_key, default_rss_key,
			       internals->rss_key_len);
		}

		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
			internals->reta_conf[i].mask = ~0LL;
			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
				internals->reta_conf[i].reta[j] =
					(i * RTE_RETA_GROUP_SIZE + j) %
					dev->data->nb_rx_queues;
		}
	}

	/* set the max_rx_pktlen */
	internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct rte_ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg,
				&bond_mac) < 0) {
			RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
				     name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set mac address on bonded device %s",
				     name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(ERR,
			     "MAC address can be specified only once for bonded device %s",
			     name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg,
				&xmit_policy) != 0) {
			RTE_BOND_LOG(INFO,
				     "Invalid xmit policy specified for bonded device %s",
				     name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set balance xmit policy on bonded device %s",
				     name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(ERR,
			     "Transmit policy can be specified only once for bonded device %s",
			     name);
		return -1;
	}

	if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist,
				       PMD_BOND_AGG_MODE_KVARG,
				       &bond_ethdev_parse_slave_agg_mode_kvarg,
				       &agg_mode) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to parse agg selection mode for bonded device %s",
				     name);
			return -1;
		}
		if (internals->mode == BONDING_MODE_8023AD) {
			int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
					agg_mode);
			if (ret < 0) {
				RTE_BOND_LOG(ERR,
					"Invalid args for agg selection set for bonded device %s",
					name);
				return -1;
			}
		}
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg,
				&slave_ports) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to parse slave ports for bonded device %s",
				     name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_BOND_LOG(ERR,
					     "Failed to add port %d as slave to bonded device %s",
					     slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint16_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				       PMD_BOND_PRIMARY_SLAVE_KVARG,
				       &bond_ethdev_parse_primary_slave_port_id_kvarg,
				       &primary_slave_port_id) < 0) {
			RTE_BOND_LOG(INFO,
				     "Invalid primary slave port id specified for bonded device %s",
				     name);
			return -1;
		}

		/* Set primary slave port */
		if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
				!= 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set primary slave port %d on bonded device %s",
				     primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(INFO,
			     "Primary slave can be specified only once for bonded device %s",
			     name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				       PMD_BOND_LSC_POLL_PERIOD_KVARG,
				       &bond_ethdev_parse_time_ms_kvarg,
				       &lsc_poll_interval_ms) < 0) {
			RTE_BOND_LOG(INFO,
				     "Invalid lsc polling interval value specified for bonded"
				     " device %s", name);
			return -1;
		}

		/* Set link status monitor polling interval */
		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
				     lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(INFO,
			     "LSC polling interval can be specified only once for bonded"
			     " device %s", name);
		return -1;
	}

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				       PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				       &bond_ethdev_parse_time_ms_kvarg,
				       &link_up_delay_ms) < 0) {
			RTE_BOND_LOG(INFO,
				     "Invalid link up propagation delay value specified for"
				     " bonded device %s", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set link up propagation delay (%u ms) on bonded"
				     " device %s", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(INFO,
			     "Link up propagation delay can be specified only once for"
			     " bonded device %s", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				       PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				       &bond_ethdev_parse_time_ms_kvarg,
				       &link_down_delay_ms) < 0) {
			RTE_BOND_LOG(INFO,
				     "Invalid link down propagation delay value specified for"
				     " bonded device %s", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to set link down propagation delay (%u ms) on bonded device %s",
				     link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_BOND_LOG(ERR,
			     "Link down propagation delay can be specified only once for bonded device %s",
			     name);
		return -1;
	}

	return 0;
}
static struct rte_vdev_driver pmd_bond_drv = {
	.probe = bond_probe,
	.remove = bond_remove,
};

RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);

RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
	"slave=<ifc> "
	"primary=<ifc> "
	"mode=[0-6] "
	"xmit_policy=[l2 | l23 | l34] "
	"agg_mode=[count | stable | bandwidth] "
	"socket_id=<int> "
	"mac=<mac addr> "
	"lsc_poll_period_ms=<int> "
	"up_delay=<int> "
	"down_delay=<int>");

RTE_LOG_REGISTER(bond_logtype, pmd.net.bond, NOTICE);
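/*
 * Illustrative usage (not part of the driver): the parameters registered
 * above map onto an EAL --vdev string. A hypothetical invocation creating
 * an active-backup bond over two PCI ports could look like:
 *
 *   --vdev 'net_bonding0,mode=1,slave=0000:01:00.0,slave=0000:01:00.1,primary=0000:01:00.0,up_delay=500,down_delay=500'
 *
 * bond_probe() parses these via rte_kvargs; the slave/primary values are
 * resolved later in bond_ethdev_configure(), once all ports exist.
 */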