1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
6 #include <netinet/in.h>
9 #include <rte_malloc.h>
10 #include <ethdev_driver.h>
11 #include <ethdev_vdev.h>
15 #include <rte_ip_frag.h>
16 #include <rte_devargs.h>
17 #include <rte_kvargs.h>
18 #include <rte_bus_vdev.h>
19 #include <rte_alarm.h>
20 #include <rte_cycles.h>
21 #include <rte_string_fns.h>
23 #include "rte_eth_bond.h"
24 #include "eth_bond_private.h"
25 #include "eth_bond_8023ad_private.h"
27 #define REORDER_PERIOD_MS 10
28 #define DEFAULT_POLLING_INTERVAL_10_MS (10)
29 #define BOND_MAX_MAC_ADDRS 16
31 #define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
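/*
 * HASH_L4_PORTS XORs the L4 source and destination ports of a TCP/UDP
 * header; the result is folded into the transmit hash by the L34
 * balancing policy below.
 */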
33 /* Table for statistics in mode 5 TLB */
34 static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
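/*
 * Return the number of bytes of VLAN headers to skip when the frame is
 * tagged (single VLAN or QinQ); *proto is updated to the inner EtherType.
 * Untagged frames yield an offset of 0.
 */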
37 get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
39 size_t vlan_offset = 0;
41 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
42 rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
43 struct rte_vlan_hdr *vlan_hdr =
44 (struct rte_vlan_hdr *)(eth_hdr + 1);
46 vlan_offset = sizeof(struct rte_vlan_hdr);
47 *proto = vlan_hdr->eth_proto;
49 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
50 vlan_hdr = vlan_hdr + 1;
51 *proto = vlan_hdr->eth_proto;
52 vlan_offset += sizeof(struct rte_vlan_hdr);
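/*
 * Rx burst used by the round-robin, balance and broadcast modes: poll each
 * active slave in turn, starting from the slave remembered in
 * bd_rx_q->active_slave, until nb_pkts mbufs are gathered or every slave
 * has been polled once.
 */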
59 bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
61 struct bond_dev_private *internals;
63 uint16_t num_rx_total = 0;
65 uint16_t active_slave;
68 /* Cast to structure containing the bonded device's port id and queue id */
69 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
70 internals = bd_rx_q->dev_private;
71 slave_count = internals->active_slave_count;
72 active_slave = bd_rx_q->active_slave;
74 for (i = 0; i < slave_count && nb_pkts; i++) {
75 uint16_t num_rx_slave;
77 /* Offset of pointer to *bufs increases as packets are received
78 * from other slaves */
80 rte_eth_rx_burst(internals->active_slaves[active_slave],
82 bufs + num_rx_total, nb_pkts);
83 num_rx_total += num_rx_slave;
84 nb_pkts -= num_rx_slave;
85 if (++active_slave == slave_count)
89 if (++bd_rx_q->active_slave >= slave_count)
90 bd_rx_q->active_slave = 0;
95 bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
98 struct bond_dev_private *internals;
100 /* Cast to structure containing the bonded device's port id and queue id */
101 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
103 internals = bd_rx_q->dev_private;
105 return rte_eth_rx_burst(internals->current_primary_port,
106 bd_rx_q->queue_id, bufs, nb_pkts);
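/*
 * A frame is treated as a LACP/marker slow-protocol packet only if it is
 * untagged and carries the slow-protocols EtherType with a LACP or marker
 * subtype; such frames are diverted to the mode 4 state machine instead of
 * being delivered to the application.
 */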
109 static inline uint8_t
110 is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
112 const uint16_t ether_type_slow_be =
113 rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
115 return !((mbuf->ol_flags & RTE_MBUF_F_RX_VLAN) ? mbuf->vlan_tci : 0) &&
116 (ethertype == ether_type_slow_be &&
117 (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
120 /*****************************************************************************
121 * Flow director's setup for mode 4 optimization
124 static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
125 .dst.addr_bytes = { 0 },
126 .src.addr_bytes = { 0 },
127 .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
130 static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
131 .dst.addr_bytes = { 0 },
132 .src.addr_bytes = { 0 },
136 static struct rte_flow_item flow_item_8023ad[] = {
138 .type = RTE_FLOW_ITEM_TYPE_ETH,
139 .spec = &flow_item_eth_type_8023ad,
141 .mask = &flow_item_eth_mask_type_8023ad,
144 .type = RTE_FLOW_ITEM_TYPE_END,
151 const struct rte_flow_attr flow_attr_8023ad = {
160 bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
161 uint16_t slave_port) {
162 struct rte_eth_dev_info slave_info;
163 struct rte_flow_error error;
164 struct bond_dev_private *internals = bond_dev->data->dev_private;
166 const struct rte_flow_action_queue lacp_queue_conf = {
170 const struct rte_flow_action actions[] = {
172 .type = RTE_FLOW_ACTION_TYPE_QUEUE,
173 .conf = &lacp_queue_conf
176 .type = RTE_FLOW_ACTION_TYPE_END,
180 int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
181 flow_item_8023ad, actions, &error);
183 RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
184 __func__, error.message, slave_port,
185 internals->mode4.dedicated_queues.rx_qid);
189 ret = rte_eth_dev_info_get(slave_port, &slave_info);
192 "%s: Error during getting device (port %u) info: %s\n",
193 __func__, slave_port, strerror(-ret));
198 if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
199 slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
201 "%s: Slave %d capabilities doesn't allow to allocate additional queues",
202 __func__, slave_port);
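/*
 * Check whether every current slave can accept the flow rule that steers
 * slow (LACP) frames to a dedicated Rx queue; the dedicated queue ids are
 * placed right after the data-path queues of the bonded device.
 */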
210 bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
211 struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
212 struct bond_dev_private *internals = bond_dev->data->dev_private;
213 struct rte_eth_dev_info bond_info;
217 /* Verify that all slaves in the bonding device support flow director */
218 if (internals->slave_count > 0) {
219 ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
222 "%s: Error during getting device (port %u) info: %s\n",
223 __func__, bond_dev->data->port_id,
229 internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
230 internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
232 for (idx = 0; idx < internals->slave_count; idx++) {
233 if (bond_ethdev_8023ad_flow_verify(bond_dev,
234 internals->slaves[idx].port_id) != 0)
243 bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
245 struct rte_flow_error error;
246 struct bond_dev_private *internals = bond_dev->data->dev_private;
247 struct rte_flow_action_queue lacp_queue_conf = {
248 .index = internals->mode4.dedicated_queues.rx_qid,
251 const struct rte_flow_action actions[] = {
253 .type = RTE_FLOW_ACTION_TYPE_QUEUE,
254 .conf = &lacp_queue_conf
257 .type = RTE_FLOW_ACTION_TYPE_END,
261 internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
262 &flow_attr_8023ad, flow_item_8023ad, actions, &error);
263 if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
264 RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
265 "(slave_port=%d queue_id=%d)",
266 error.message, slave_port,
267 internals->mode4.dedicated_queues.rx_qid);
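/*
 * Common Rx path for mode 4 (802.3ad): read packets round-robin from the
 * active slaves, drop frames that the bonding MAC/promiscuous rules reject,
 * and hand slow-protocol frames to the mode 4 state machine when no
 * dedicated Rx queue is configured.
 */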
274 static inline uint16_t
275 rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
278 /* Cast to structure containing the bonded device's port id and queue id */
279 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
280 struct bond_dev_private *internals = bd_rx_q->dev_private;
281 struct rte_eth_dev *bonded_eth_dev =
282 &rte_eth_devices[internals->port_id];
283 struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
284 struct rte_ether_hdr *hdr;
286 const uint16_t ether_type_slow_be =
287 rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
288 uint16_t num_rx_total = 0; /* Total number of received packets */
289 uint16_t slaves[RTE_MAX_ETHPORTS];
290 uint16_t slave_count, idx;
292 uint8_t collecting; /* current slave collecting status */
293 const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
294 const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
300 /* Copy slave list to protect against slave up/down changes during rx
302 slave_count = internals->active_slave_count;
303 memcpy(slaves, internals->active_slaves,
304 sizeof(internals->active_slaves[0]) * slave_count);
306 idx = bd_rx_q->active_slave;
307 if (idx >= slave_count) {
308 bd_rx_q->active_slave = 0;
311 for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
313 collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
316 /* Read packets from this slave */
317 num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
318 &bufs[num_rx_total], nb_pkts - num_rx_total);
320 for (k = j; k < 2 && k < num_rx_total; k++)
321 rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));
323 /* Handle slow protocol packets. */
324 while (j < num_rx_total) {
325 if (j + 3 < num_rx_total)
326 rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
328 hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
329 subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
331 /* Remove packet from array if:
332 * - it is a slow packet but no dedicated Rx queue is present,
333 * - slave is not in collecting state,
334 * - bonding interface is not in promiscuous mode:
335 * - packet is unicast and address does not match,
336 * - packet is multicast and bonding interface
337 * is not in allmulti,
341 is_lacp_packets(hdr->ether_type, subtype,
345 ((rte_is_unicast_ether_addr(&hdr->dst_addr) &&
346 !rte_is_same_ether_addr(bond_mac,
349 rte_is_multicast_ether_addr(&hdr->dst_addr)))))) {
351 if (hdr->ether_type == ether_type_slow_be) {
352 bond_mode_8023ad_handle_slow_pkt(
353 internals, slaves[idx], bufs[j]);
355 rte_pktmbuf_free(bufs[j]);
357 /* Packet is managed by mode 4 or dropped, shift the array */
359 if (j < num_rx_total) {
360 memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
366 if (unlikely(++idx == slave_count))
370 if (++bd_rx_q->active_slave >= slave_count)
371 bd_rx_q->active_slave = 0;
377 bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
380 return rx_burst_8023ad(queue, bufs, nb_pkts, false);
384 bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
387 return rx_burst_8023ad(queue, bufs, nb_pkts, true);
390 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
391 uint32_t burstnumberRX;
392 uint32_t burstnumberTX;
394 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
397 arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
400 case RTE_ARP_OP_REQUEST:
401 strlcpy(buf, "ARP Request", buf_len);
403 case RTE_ARP_OP_REPLY:
404 strlcpy(buf, "ARP Reply", buf_len);
406 case RTE_ARP_OP_REVREQUEST:
407 strlcpy(buf, "Reverse ARP Request", buf_len);
409 case RTE_ARP_OP_REVREPLY:
410 strlcpy(buf, "Reverse ARP Reply", buf_len);
412 case RTE_ARP_OP_INVREQUEST:
413 strlcpy(buf, "Peer Identify Request", buf_len);
415 case RTE_ARP_OP_INVREPLY:
416 strlcpy(buf, "Peer Identify Reply", buf_len);
421 strlcpy(buf, "Unknown", buf_len);
425 #define MaxIPv4String 16
427 ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
431 ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
432 snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
433 (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
437 #define MAX_CLIENTS_NUMBER 128
438 uint8_t active_clients;
439 struct client_stats_t {
442 uint32_t ipv4_rx_packets;
443 uint32_t ipv4_tx_packets;
445 struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
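/*
 * Debug-only per-client accounting for mode 6: clients are keyed by IPv4
 * address and slave port, and the TXorRXindicator pointer selects whether
 * the Rx or the Tx packet counter is incremented.
 */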
448 update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
452 for (; i < MAX_CLIENTS_NUMBER; i++) {
453 if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
454 /* Existing client: just update its Rx or Tx packet count */
455 if (TXorRXindicator == &burstnumberRX)
456 client_stats[i].ipv4_rx_packets++;
458 client_stats[i].ipv4_tx_packets++;
462 /* We have a new client. Insert it into the table and update its stats */
463 if (TXorRXindicator == &burstnumberRX)
464 client_stats[active_clients].ipv4_rx_packets++;
466 client_stats[active_clients].ipv4_tx_packets++;
467 client_stats[active_clients].ipv4_addr = addr;
468 client_stats[active_clients].port = port;
473 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
474 #define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
475 rte_log(RTE_LOG_DEBUG, bond_logtype, \
476 "%s port:%d SrcMAC:" RTE_ETHER_ADDR_PRT_FMT " SrcIP:%s " \
477 "DstMAC:" RTE_ETHER_ADDR_PRT_FMT " DstIP:%s %s %d\n", \
480 RTE_ETHER_ADDR_BYTES(&eth_h->src_addr), \
482 RTE_ETHER_ADDR_BYTES(&eth_h->dst_addr), \
484 arp_op, ++burstnumber)
488 mode6_debug(const char __rte_unused *info,
489 struct rte_ether_hdr *eth_h, uint16_t port,
490 uint32_t __rte_unused *burstnumber)
492 struct rte_ipv4_hdr *ipv4_h;
493 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
494 struct rte_arp_hdr *arp_h;
501 uint16_t ether_type = eth_h->ether_type;
502 uint16_t offset = get_vlan_offset(eth_h, &ether_type);
504 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
505 strlcpy(buf, info, 16);
508 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
509 ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
510 ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
511 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
512 ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
513 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
515 update_client_stats(ipv4_h->src_addr, port, burstnumber);
517 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
518 else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
519 arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
520 ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
521 ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
522 arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
523 ArpOp, sizeof(ArpOp));
524 MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
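/*
 * Mode 6 (ALB) Rx: receive as in round-robin mode, then pass any ARP
 * packets to the ALB logic so the client table stays up to date.
 */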
531 bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
533 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
534 struct bond_dev_private *internals = bd_rx_q->dev_private;
535 struct rte_ether_hdr *eth_h;
536 uint16_t ether_type, offset;
537 uint16_t nb_recv_pkts;
540 nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
542 for (i = 0; i < nb_recv_pkts; i++) {
543 eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
544 ether_type = eth_h->ether_type;
545 offset = get_vlan_offset(eth_h, &ether_type);
547 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
548 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
549 mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
551 bond_mode_alb_arp_recv(eth_h, offset, internals);
553 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
554 else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
555 mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
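/*
 * Mode 0 (round-robin) Tx: deal packets out to the active slaves in turn;
 * mbufs a slave fails to send are moved to the tail of bufs so the caller
 * can retry or free them.
 */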
563 bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
566 struct bond_dev_private *internals;
567 struct bond_tx_queue *bd_tx_q;
569 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
570 uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
572 uint16_t num_of_slaves;
573 uint16_t slaves[RTE_MAX_ETHPORTS];
575 uint16_t num_tx_total = 0, num_tx_slave;
577 static int slave_idx = 0;
578 int i, cslave_idx = 0, tx_fail_total = 0;
580 bd_tx_q = (struct bond_tx_queue *)queue;
581 internals = bd_tx_q->dev_private;
583 /* Copy slave list to protect against slave up/down changes during tx
585 num_of_slaves = internals->active_slave_count;
586 memcpy(slaves, internals->active_slaves,
587 sizeof(internals->active_slaves[0]) * num_of_slaves);
589 if (num_of_slaves < 1)
592 /* Populate each slave's mbuf array with the packets to be sent on it */
593 for (i = 0; i < nb_pkts; i++) {
594 cslave_idx = (slave_idx + i) % num_of_slaves;
595 slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
598 /* Increment current slave index so the next tx burst call starts on the next slave */
600 slave_idx = ++cslave_idx;
602 /* Send packet burst on each slave device */
603 for (i = 0; i < num_of_slaves; i++) {
604 if (slave_nb_pkts[i] > 0) {
605 num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
606 slave_bufs[i], slave_nb_pkts[i]);
608 /* if tx burst fails move packets to end of bufs */
609 if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
610 int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
612 tx_fail_total += tx_fail_slave;
614 memcpy(&bufs[nb_pkts - tx_fail_total],
615 &slave_bufs[i][num_tx_slave],
616 tx_fail_slave * sizeof(bufs[0]));
618 num_tx_total += num_tx_slave;
626 bond_ethdev_tx_burst_active_backup(void *queue,
627 struct rte_mbuf **bufs, uint16_t nb_pkts)
629 struct bond_dev_private *internals;
630 struct bond_tx_queue *bd_tx_q;
632 bd_tx_q = (struct bond_tx_queue *)queue;
633 internals = bd_tx_q->dev_private;
635 if (internals->active_slave_count < 1)
638 return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
642 static inline uint16_t
643 ether_hash(struct rte_ether_hdr *eth_hdr)
645 unaligned_uint16_t *word_src_addr =
646 (unaligned_uint16_t *)eth_hdr->src_addr.addr_bytes;
647 unaligned_uint16_t *word_dst_addr =
648 (unaligned_uint16_t *)eth_hdr->dst_addr.addr_bytes;
650 return (word_src_addr[0] ^ word_dst_addr[0]) ^
651 (word_src_addr[1] ^ word_dst_addr[1]) ^
652 (word_src_addr[2] ^ word_dst_addr[2]);
655 static inline uint32_t
656 ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
658 return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
661 static inline uint32_t
662 ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
664 unaligned_uint32_t *word_src_addr =
665 (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
666 unaligned_uint32_t *word_dst_addr =
667 (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
669 return (word_src_addr[0] ^ word_dst_addr[0]) ^
670 (word_src_addr[1] ^ word_dst_addr[1]) ^
671 (word_src_addr[2] ^ word_dst_addr[2]) ^
672 (word_src_addr[3] ^ word_dst_addr[3]);
677 burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
678 uint16_t slave_count, uint16_t *slaves)
680 struct rte_ether_hdr *eth_hdr;
684 for (i = 0; i < nb_pkts; i++) {
685 eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
687 hash = ether_hash(eth_hdr);
689 slaves[i] = (hash ^= hash >> 8) % slave_count;
694 burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
695 uint16_t slave_count, uint16_t *slaves)
698 struct rte_ether_hdr *eth_hdr;
701 uint32_t hash, l3hash;
703 for (i = 0; i < nb_pkts; i++) {
704 eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
707 proto = eth_hdr->ether_type;
708 hash = ether_hash(eth_hdr);
710 vlan_offset = get_vlan_offset(eth_hdr, &proto);
712 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
713 struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
714 ((char *)(eth_hdr + 1) + vlan_offset);
715 l3hash = ipv4_hash(ipv4_hdr);
717 } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
718 struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
719 ((char *)(eth_hdr + 1) + vlan_offset);
720 l3hash = ipv6_hash(ipv6_hdr);
723 hash = hash ^ l3hash;
727 slaves[i] = hash % slave_count;
732 burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
733 uint16_t slave_count, uint16_t *slaves)
735 struct rte_ether_hdr *eth_hdr;
740 struct rte_udp_hdr *udp_hdr;
741 struct rte_tcp_hdr *tcp_hdr;
742 uint32_t hash, l3hash, l4hash;
744 for (i = 0; i < nb_pkts; i++) {
745 eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
746 size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
747 proto = eth_hdr->ether_type;
748 vlan_offset = get_vlan_offset(eth_hdr, &proto);
752 if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
753 struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
754 ((char *)(eth_hdr + 1) + vlan_offset);
755 size_t ip_hdr_offset;
757 l3hash = ipv4_hash(ipv4_hdr);
759 /* there is no L4 header in a fragmented packet */
760 if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
762 ip_hdr_offset = (ipv4_hdr->version_ihl
763 & RTE_IPV4_HDR_IHL_MASK) *
764 RTE_IPV4_IHL_MULTIPLIER;
766 if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
767 tcp_hdr = (struct rte_tcp_hdr *)
770 if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
772 l4hash = HASH_L4_PORTS(tcp_hdr);
773 } else if (ipv4_hdr->next_proto_id ==
775 udp_hdr = (struct rte_udp_hdr *)
778 if ((size_t)udp_hdr + sizeof(*udp_hdr)
780 l4hash = HASH_L4_PORTS(udp_hdr);
783 } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
784 struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
785 ((char *)(eth_hdr + 1) + vlan_offset);
786 l3hash = ipv6_hash(ipv6_hdr);
788 if (ipv6_hdr->proto == IPPROTO_TCP) {
789 tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
790 l4hash = HASH_L4_PORTS(tcp_hdr);
791 } else if (ipv6_hdr->proto == IPPROTO_UDP) {
792 udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
793 l4hash = HASH_L4_PORTS(udp_hdr);
797 hash = l3hash ^ l4hash;
801 slaves[i] = hash % slave_count;
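/*
 * Each transmit policy above reduces its hash modulo the slave count, so
 * all packets of a flow map to the same slave: L2 hashes only the Ethernet
 * addresses, L23 also folds in the IPv4/IPv6 addresses, and L34 further
 * folds in the TCP/UDP ports when the packet is not an IP fragment.
 */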
806 uint64_t bwg_left_int;
807 uint64_t bwg_left_remainder;
812 bond_tlb_activate_slave(struct bond_dev_private *internals) {
815 for (i = 0; i < internals->active_slave_count; i++) {
816 tlb_last_obytets[internals->active_slaves[i]] = 0;
821 bandwidth_cmp(const void *a, const void *b)
823 const struct bwg_slave *bwg_a = a;
824 const struct bwg_slave *bwg_b = b;
825 int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
826 int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
827 (int64_t)bwg_a->bwg_left_remainder;
841 bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
842 struct bwg_slave *bwg_slave)
844 struct rte_eth_link link_status;
847 ret = rte_eth_link_get_nowait(port_id, &link_status);
849 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
850 port_id, rte_strerror(-ret));
853 uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
856 link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
857 bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
858 bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
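/*
 * TLB bandwidth bookkeeping: bandwidth_left() estimates, from the link
 * speed and the bytes sent since the last sample, how much capacity a
 * slave has left; the alarm callback below re-sorts the slaves by that
 * estimate every REORDER_PERIOD_MS so the TLB Tx path prefers the least
 * loaded slave.
 */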
862 bond_ethdev_update_tlb_slave_cb(void *arg)
864 struct bond_dev_private *internals = arg;
865 struct rte_eth_stats slave_stats;
866 struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
867 uint16_t slave_count;
870 uint8_t update_stats = 0;
874 internals->slave_update_idx++;
877 if (internals->slave_update_idx >= REORDER_PERIOD_MS)
880 for (i = 0; i < internals->active_slave_count; i++) {
881 slave_id = internals->active_slaves[i];
882 rte_eth_stats_get(slave_id, &slave_stats);
883 tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
884 bandwidth_left(slave_id, tx_bytes,
885 internals->slave_update_idx, &bwg_array[i]);
886 bwg_array[i].slave = slave_id;
889 tlb_last_obytets[slave_id] = slave_stats.obytes;
893 if (update_stats == 1)
894 internals->slave_update_idx = 0;
897 qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
898 for (i = 0; i < slave_count; i++)
899 internals->tlb_slaves_order[i] = bwg_array[i].slave;
901 rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
902 (struct bond_dev_private *)internals);
906 bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
908 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
909 struct bond_dev_private *internals = bd_tx_q->dev_private;
911 struct rte_eth_dev *primary_port =
912 &rte_eth_devices[internals->primary_port];
913 uint16_t num_tx_total = 0;
916 uint16_t num_of_slaves = internals->active_slave_count;
917 uint16_t slaves[RTE_MAX_ETHPORTS];
919 struct rte_ether_hdr *ether_hdr;
920 struct rte_ether_addr primary_slave_addr;
921 struct rte_ether_addr active_slave_addr;
923 if (num_of_slaves < 1)
926 memcpy(slaves, internals->tlb_slaves_order,
927 sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
930 rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
933 for (i = 0; i < 3; i++)
934 rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
937 for (i = 0; i < num_of_slaves; i++) {
938 rte_eth_macaddr_get(slaves[i], &active_slave_addr);
939 for (j = num_tx_total; j < nb_pkts; j++) {
941 rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
943 ether_hdr = rte_pktmbuf_mtod(bufs[j],
944 struct rte_ether_hdr *);
945 if (rte_is_same_ether_addr(&ether_hdr->src_addr,
946 &primary_slave_addr))
947 rte_ether_addr_copy(&active_slave_addr,
948 &ether_hdr->src_addr);
949 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
950 mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
954 num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
955 bufs + num_tx_total, nb_pkts - num_tx_total);
957 if (num_tx_total == nb_pkts)
965 bond_tlb_disable(struct bond_dev_private *internals)
967 rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
971 bond_tlb_enable(struct bond_dev_private *internals)
973 bond_ethdev_update_tlb_slave_cb(internals);
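/*
 * Mode 6 (ALB) Tx: ARP packets are assigned to slaves via the ALB client
 * table and their source MAC is rewritten to that slave's address,
 * periodic ARP updates are generated when the client table changes, and
 * all other traffic is sent with the TLB policy.
 */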
977 bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
979 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
980 struct bond_dev_private *internals = bd_tx_q->dev_private;
982 struct rte_ether_hdr *eth_h;
983 uint16_t ether_type, offset;
985 struct client_data *client_info;
988 * We create transmit buffers for every slave and one additional to send
989 * through tlb. In the worst case every packet will be sent on one port.
991 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
992 uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
995 * We create separate transmit buffers for update packets as they won't
996 * be counted in num_tx_total.
998 struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
999 uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
1001 struct rte_mbuf *upd_pkt;
1004 uint16_t num_send, num_not_send = 0;
1005 uint16_t num_tx_total = 0;
1010 /* Search tx buffer for ARP packets and forward them to alb */
1011 for (i = 0; i < nb_pkts; i++) {
1012 eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
1013 ether_type = eth_h->ether_type;
1014 offset = get_vlan_offset(eth_h, &ether_type);
1016 if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
1017 slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
1019 /* Change src mac in eth header */
1020 rte_eth_macaddr_get(slave_idx, &eth_h->src_addr);
1022 /* Add packet to slave tx buffer */
1023 slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
1024 slave_bufs_pkts[slave_idx]++;
1026 /* If packet is not ARP, send it with TLB policy */
1027 slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
1029 slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
1033 /* Update connected client ARP tables */
1034 if (internals->mode6.ntt) {
1035 for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
1036 client_info = &internals->mode6.client_table[i];
1038 if (client_info->in_use) {
1039 /* Allocate new packet to send ARP update on current slave */
1040 upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
1041 if (upd_pkt == NULL) {
1043 "Failed to allocate ARP packet from pool");
1046 pkt_size = sizeof(struct rte_ether_hdr) +
1047 sizeof(struct rte_arp_hdr) +
1048 client_info->vlan_count *
1049 sizeof(struct rte_vlan_hdr);
1050 upd_pkt->data_len = pkt_size;
1051 upd_pkt->pkt_len = pkt_size;
1053 slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
1056 /* Add packet to update tx buffer */
1057 update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
1058 update_bufs_pkts[slave_idx]++;
1061 internals->mode6.ntt = 0;
1064 /* Send ARP packets on proper slaves */
1065 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
1066 if (slave_bufs_pkts[i] > 0) {
1067 num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
1068 slave_bufs[i], slave_bufs_pkts[i]);
1069 for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
1070 bufs[nb_pkts - 1 - num_not_send - j] =
1071 slave_bufs[i][nb_pkts - 1 - j];
1074 num_tx_total += num_send;
1075 num_not_send += slave_bufs_pkts[i] - num_send;
1077 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
1078 /* Print TX stats including update packets */
1079 for (j = 0; j < slave_bufs_pkts[i]; j++) {
1080 eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
1081 struct rte_ether_hdr *);
1082 mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
1088 /* Send update packets on proper slaves */
1089 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
1090 if (update_bufs_pkts[i] > 0) {
1091 num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
1092 update_bufs_pkts[i]);
1093 for (j = num_send; j < update_bufs_pkts[i]; j++) {
1094 rte_pktmbuf_free(update_bufs[i][j]);
1096 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
1097 for (j = 0; j < update_bufs_pkts[i]; j++) {
1098 eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
1099 struct rte_ether_hdr *);
1100 mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
1106 /* Send non-ARP packets using tlb policy */
1107 if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
1108 num_send = bond_ethdev_tx_burst_tlb(queue,
1109 slave_bufs[RTE_MAX_ETHPORTS],
1110 slave_bufs_pkts[RTE_MAX_ETHPORTS]);
1112 for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
1113 bufs[nb_pkts - 1 - num_not_send - j] =
1114 slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
1117 num_tx_total += num_send;
1120 return num_tx_total;
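/*
 * Common helper for the balance and 802.3ad Tx paths: the configured
 * burst_xmit_hash callback maps every mbuf to one of the given slaves,
 * per-slave bursts are transmitted, and mbufs a slave could not send are
 * moved to the tail of bufs for the caller to handle.
 */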
1123 static inline uint16_t
1124 tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
1125 uint16_t *slave_port_ids, uint16_t slave_count)
1127 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
1128 struct bond_dev_private *internals = bd_tx_q->dev_private;
1130 /* Array to sort mbufs for transmission on each slave into */
1131 struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
1132 /* Number of mbufs for transmission on each slave */
1133 uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
1134 /* Mapping array generated by hash function to map mbufs to slaves */
1135 uint16_t bufs_slave_port_idxs[nb_bufs];
1137 uint16_t slave_tx_count;
1138 uint16_t total_tx_count = 0, total_tx_fail_count = 0;
1143 * Populate each slave's mbuf array with the packets to be sent on it,
1144 * selecting the output slave using a hash based on the xmit policy
1146 internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
1147 bufs_slave_port_idxs);
1149 for (i = 0; i < nb_bufs; i++) {
1150 /* Populate slave mbuf arrays with mbufs for that slave. */
1151 uint16_t slave_idx = bufs_slave_port_idxs[i];
1153 slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
1156 /* Send packet burst on each slave device */
1157 for (i = 0; i < slave_count; i++) {
1158 if (slave_nb_bufs[i] == 0)
1161 slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
1162 bd_tx_q->queue_id, slave_bufs[i],
1165 total_tx_count += slave_tx_count;
1167 /* If tx burst fails move packets to end of bufs */
1168 if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
1169 int slave_tx_fail_count = slave_nb_bufs[i] -
1171 total_tx_fail_count += slave_tx_fail_count;
1172 memcpy(&bufs[nb_bufs - total_tx_fail_count],
1173 &slave_bufs[i][slave_tx_count],
1174 slave_tx_fail_count * sizeof(bufs[0]));
1178 return total_tx_count;
1182 bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
1185 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
1186 struct bond_dev_private *internals = bd_tx_q->dev_private;
1188 uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
1189 uint16_t slave_count;
1191 if (unlikely(nb_bufs == 0))
1194 /* Copy slave list to protect against slave up/down changes during tx
1197 slave_count = internals->active_slave_count;
1198 if (unlikely(slave_count < 1))
1201 memcpy(slave_port_ids, internals->active_slaves,
1202 sizeof(slave_port_ids[0]) * slave_count);
1203 return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
1207 static inline uint16_t
1208 tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
1211 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
1212 struct bond_dev_private *internals = bd_tx_q->dev_private;
1214 uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
1215 uint16_t slave_count;
1217 uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
1218 uint16_t dist_slave_count;
1220 uint16_t slave_tx_count;
1224 /* Copy slave list to protect against slave up/down changes during tx
1226 slave_count = internals->active_slave_count;
1227 if (unlikely(slave_count < 1))
1230 memcpy(slave_port_ids, internals->active_slaves,
1231 sizeof(slave_port_ids[0]) * slave_count);
1236 /* Check for LACP control packets and send if available */
1237 for (i = 0; i < slave_count; i++) {
1238 struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
1239 struct rte_mbuf *ctrl_pkt = NULL;
1241 if (likely(rte_ring_empty(port->tx_ring)))
1244 if (rte_ring_dequeue(port->tx_ring,
1245 (void **)&ctrl_pkt) != -ENOENT) {
1246 slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
1247 bd_tx_q->queue_id, &ctrl_pkt, 1);
1249 * re-enqueue LAG control plane packets to buffering
1250 * ring if transmission fails so the packet isn't lost.
1252 if (slave_tx_count != 1)
1253 rte_ring_enqueue(port->tx_ring, ctrl_pkt);
1258 if (unlikely(nb_bufs == 0))
1261 dist_slave_count = 0;
1262 for (i = 0; i < slave_count; i++) {
1263 struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
1265 if (ACTOR_STATE(port, DISTRIBUTING))
1266 dist_slave_port_ids[dist_slave_count++] =
1270 if (unlikely(dist_slave_count < 1))
1273 return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
1278 bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
1281 return tx_burst_8023ad(queue, bufs, nb_bufs, false);
1285 bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
1288 return tx_burst_8023ad(queue, bufs, nb_bufs, true);
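/*
 * Mode 3 (broadcast) Tx: each mbuf's reference count is raised so the same
 * packet can be sent on every active slave; on partial failure the unsent
 * copies of every slave except the most successful one are freed, and that
 * slave's count is returned.
 */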
1292 bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
1295 struct bond_dev_private *internals;
1296 struct bond_tx_queue *bd_tx_q;
1298 uint16_t slaves[RTE_MAX_ETHPORTS];
1299 uint8_t tx_failed_flag = 0;
1300 uint16_t num_of_slaves;
1302 uint16_t max_nb_of_tx_pkts = 0;
1304 int slave_tx_total[RTE_MAX_ETHPORTS];
1305 int i, most_successful_tx_slave = -1;
1307 bd_tx_q = (struct bond_tx_queue *)queue;
1308 internals = bd_tx_q->dev_private;
1310 /* Copy slave list to protect against slave up/down changes during tx
1312 num_of_slaves = internals->active_slave_count;
1313 memcpy(slaves, internals->active_slaves,
1314 sizeof(internals->active_slaves[0]) * num_of_slaves);
1316 if (num_of_slaves < 1)
1319 /* Increment reference count on mbufs */
1320 for (i = 0; i < nb_pkts; i++)
1321 rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
1323 /* Transmit burst on each active slave */
1324 for (i = 0; i < num_of_slaves; i++) {
1325 slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
1328 if (unlikely(slave_tx_total[i] < nb_pkts))
1331 /* record the value and slave index for the slave which transmits the
1332 * maximum number of packets */
1333 if (slave_tx_total[i] > max_nb_of_tx_pkts) {
1334 max_nb_of_tx_pkts = slave_tx_total[i];
1335 most_successful_tx_slave = i;
1339 /* if slaves fail to transmit packets from burst, the calling application
1340 * is not expected to know about multiple references to packets so we must
1341 * handle failures of all packets except those of the most successful slave
1343 if (unlikely(tx_failed_flag))
1344 for (i = 0; i < num_of_slaves; i++)
1345 if (i != most_successful_tx_slave)
1346 while (slave_tx_total[i] < nb_pkts)
1347 rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
1349 return max_nb_of_tx_pkts;
1353 link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
1355 struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1357 if (bond_ctx->mode == BONDING_MODE_8023AD) {
1359 * If in mode 4 then save the link properties of the first
1360 * slave; all subsequent slaves must match these properties
1362 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1364 bond_link->link_autoneg = slave_link->link_autoneg;
1365 bond_link->link_duplex = slave_link->link_duplex;
1366 bond_link->link_speed = slave_link->link_speed;
1369 * In any other mode the link properties are set to default
1370 * values of AUTONEG/DUPLEX
1372 ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
1373 ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1378 link_properties_valid(struct rte_eth_dev *ethdev,
1379 struct rte_eth_link *slave_link)
1381 struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
1383 if (bond_ctx->mode == BONDING_MODE_8023AD) {
1384 struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;
1386 if (bond_link->link_duplex != slave_link->link_duplex ||
1387 bond_link->link_autoneg != slave_link->link_autoneg ||
1388 bond_link->link_speed != slave_link->link_speed)
1396 mac_address_get(struct rte_eth_dev *eth_dev,
1397 struct rte_ether_addr *dst_mac_addr)
1399 struct rte_ether_addr *mac_addr;
1401 if (eth_dev == NULL) {
1402 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1406 if (dst_mac_addr == NULL) {
1407 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1411 mac_addr = eth_dev->data->mac_addrs;
1413 rte_ether_addr_copy(mac_addr, dst_mac_addr);
1418 mac_address_set(struct rte_eth_dev *eth_dev,
1419 struct rte_ether_addr *new_mac_addr)
1421 struct rte_ether_addr *mac_addr;
1423 if (eth_dev == NULL) {
1424 RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
1428 if (new_mac_addr == NULL) {
1429 RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
1433 mac_addr = eth_dev->data->mac_addrs;
1435 /* If the new MAC differs from the current MAC then update it */
1436 if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
1437 memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
1442 static const struct rte_ether_addr null_mac_addr;
1445 * Add additional MAC addresses to the slave
1448 slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1449 uint16_t slave_port_id)
1452 struct rte_ether_addr *mac_addr;
1454 for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1455 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1456 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1459 ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
1462 for (i--; i > 0; i--)
1463 rte_eth_dev_mac_addr_remove(slave_port_id,
1464 &bonded_eth_dev->data->mac_addrs[i]);
1473 * Remove additional MAC addresses from the slave
1476 slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
1477 uint16_t slave_port_id)
1480 struct rte_ether_addr *mac_addr;
1483 for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
1484 mac_addr = &bonded_eth_dev->data->mac_addrs[i];
1485 if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
1488 ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
1489 /* save only the first error */
1490 if (ret < 0 && rc == 0)
1498 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
1500 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1504 /* Update slave devices MAC addresses */
1505 if (internals->slave_count < 1)
1508 switch (internals->mode) {
1509 case BONDING_MODE_ROUND_ROBIN:
1510 case BONDING_MODE_BALANCE:
1511 case BONDING_MODE_BROADCAST:
1512 for (i = 0; i < internals->slave_count; i++) {
1513 if (rte_eth_dev_default_mac_addr_set(
1514 internals->slaves[i].port_id,
1515 bonded_eth_dev->data->mac_addrs)) {
1516 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1517 internals->slaves[i].port_id);
1522 case BONDING_MODE_8023AD:
1523 bond_mode_8023ad_mac_address_update(bonded_eth_dev);
1525 case BONDING_MODE_ACTIVE_BACKUP:
1526 case BONDING_MODE_TLB:
1527 case BONDING_MODE_ALB:
1530 for (i = 0; i < internals->slave_count; i++) {
1531 if (internals->slaves[i].port_id ==
1532 internals->current_primary_port) {
1533 if (rte_eth_dev_default_mac_addr_set(
1534 internals->current_primary_port,
1535 bonded_eth_dev->data->mac_addrs)) {
1536 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1537 internals->current_primary_port);
1541 if (rte_eth_dev_default_mac_addr_set(
1542 internals->slaves[i].port_id,
1543 &internals->slaves[i].persisted_mac_addr)) {
1544 RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
1545 internals->slaves[i].port_id);
1557 bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
1559 struct bond_dev_private *internals;
1561 internals = eth_dev->data->dev_private;
1564 case BONDING_MODE_ROUND_ROBIN:
1565 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
1566 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1568 case BONDING_MODE_ACTIVE_BACKUP:
1569 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
1570 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1572 case BONDING_MODE_BALANCE:
1573 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
1574 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1576 case BONDING_MODE_BROADCAST:
1577 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
1578 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
1580 case BONDING_MODE_8023AD:
1581 if (bond_mode_8023ad_enable(eth_dev) != 0)
1584 if (internals->mode4.dedicated_queues.enabled == 0) {
1585 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
1586 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
1587 RTE_BOND_LOG(WARNING,
1588 "Using mode 4, it is necessary to do TX burst "
1589 "and RX burst at least every 100ms.");
1591 /* Use flow director's optimization */
1592 eth_dev->rx_pkt_burst =
1593 bond_ethdev_rx_burst_8023ad_fast_queue;
1594 eth_dev->tx_pkt_burst =
1595 bond_ethdev_tx_burst_8023ad_fast_queue;
1598 case BONDING_MODE_TLB:
1599 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
1600 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
1602 case BONDING_MODE_ALB:
1603 if (bond_mode_alb_enable(eth_dev) != 0)
1606 eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
1607 eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
1613 internals->mode = mode;
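/*
 * Illustrative sketch (not part of this driver): applications normally
 * reach bond_ethdev_mode_set() through the public API in rte_eth_bond.h,
 * roughly as follows (device name and slave port id are assumptions):
 *
 *     int bond_port = rte_eth_bond_create("net_bonding0",
 *                                         BONDING_MODE_8023AD, 0);
 *     rte_eth_bond_slave_add(bond_port, slave_port_id);
 *     // the mode can be changed later with rte_eth_bond_mode_set()
 *
 * The selected mode determines which rx_pkt_burst/tx_pkt_burst handlers
 * are installed above.
 */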
1620 slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
1621 struct rte_eth_dev *slave_eth_dev)
1624 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1625 struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
1627 if (port->slow_pool == NULL) {
1629 int slave_id = slave_eth_dev->data->port_id;
1631 snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
1633 port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
1634 250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
1635 slave_eth_dev->data->numa_node);
1637 /* Any memory allocation failure in initialization is critical because
1638 * resources can't be freed, so reinitialization is impossible. */
1639 if (port->slow_pool == NULL) {
1640 rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
1641 slave_id, mem_name, rte_strerror(rte_errno));
1645 if (internals->mode4.dedicated_queues.enabled == 1) {
1646 /* Configure slow Rx queue */
1648 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
1649 internals->mode4.dedicated_queues.rx_qid, 128,
1650 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1651 NULL, port->slow_pool);
1654 "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1655 slave_eth_dev->data->port_id,
1656 internals->mode4.dedicated_queues.rx_qid,
1661 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
1662 internals->mode4.dedicated_queues.tx_qid, 512,
1663 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1667 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1668 slave_eth_dev->data->port_id,
1669 internals->mode4.dedicated_queues.tx_qid,
1678 slave_configure(struct rte_eth_dev *bonded_eth_dev,
1679 struct rte_eth_dev *slave_eth_dev)
1681 struct bond_rx_queue *bd_rx_q;
1682 struct bond_tx_queue *bd_tx_q;
1683 uint16_t nb_rx_queues;
1684 uint16_t nb_tx_queues;
1688 struct rte_flow_error flow_error;
1690 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
1693 errval = rte_eth_dev_stop(slave_eth_dev->data->port_id);
1695 RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)",
1696 slave_eth_dev->data->port_id, errval);
1698 /* Enable interrupts on slave device if supported */
1699 if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1700 slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
1702 /* If RSS is enabled for bonding, try to enable it for slaves */
1703 if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
1704 /* rss_key won't be empty if RSS is configured in bonded dev */
1705 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
1706 internals->rss_key_len;
1707 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
1710 slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1711 bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
1712 slave_eth_dev->data->dev_conf.rxmode.mq_mode =
1713 bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
1716 if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
1717 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1718 slave_eth_dev->data->dev_conf.rxmode.offloads |=
1719 RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
1721 slave_eth_dev->data->dev_conf.rxmode.offloads &=
1722 ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
1724 slave_eth_dev->data->dev_conf.rxmode.mtu =
1725 bonded_eth_dev->data->dev_conf.rxmode.mtu;
1727 nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
1728 nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
1730 if (internals->mode == BONDING_MODE_8023AD) {
1731 if (internals->mode4.dedicated_queues.enabled == 1) {
1737 errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
1738 bonded_eth_dev->data->mtu);
1739 if (errval != 0 && errval != -ENOTSUP) {
1740 RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
1741 slave_eth_dev->data->port_id, errval);
1745 /* Configure device */
1746 errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
1747 nb_rx_queues, nb_tx_queues,
1748 &(slave_eth_dev->data->dev_conf));
1750 RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
1751 slave_eth_dev->data->port_id, errval);
1755 /* Setup Rx Queues */
1756 for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
1757 bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
1759 errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
1760 bd_rx_q->nb_rx_desc,
1761 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1762 &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
1765 "rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
1766 slave_eth_dev->data->port_id, q_id, errval);
1771 /* Setup Tx Queues */
1772 for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
1773 bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
1775 errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
1776 bd_tx_q->nb_tx_desc,
1777 rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
1781 "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
1782 slave_eth_dev->data->port_id, q_id, errval);
1787 if (internals->mode == BONDING_MODE_8023AD &&
1788 internals->mode4.dedicated_queues.enabled == 1) {
1789 if (slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev)
1793 errval = bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
1794 slave_eth_dev->data->port_id);
1797 "bond_ethdev_8023ad_flow_verify: port=%d, err (%d)",
1798 slave_eth_dev->data->port_id, errval);
1802 if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
1803 rte_flow_destroy(slave_eth_dev->data->port_id,
1804 internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
1807 errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
1808 slave_eth_dev->data->port_id);
1811 "bond_ethdev_8023ad_flow_set: port=%d, err (%d)",
1812 slave_eth_dev->data->port_id, errval);
1818 errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
1820 RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
1821 slave_eth_dev->data->port_id, errval);
1825 /* If RSS is enabled for bonding, synchronize RETA */
1826 if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
1828 struct bond_dev_private *internals;
1830 internals = bonded_eth_dev->data->dev_private;
1832 for (i = 0; i < internals->slave_count; i++) {
1833 if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
1834 errval = rte_eth_dev_rss_reta_update(
1835 slave_eth_dev->data->port_id,
1836 &internals->reta_conf[0],
1837 internals->slaves[i].reta_size);
1839 RTE_BOND_LOG(WARNING,
1840 "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
1841 " RSS Configuration for bonding may be inconsistent.",
1842 slave_eth_dev->data->port_id, errval);
1849 /* If lsc interrupt is set, check initial slave's link status */
1850 if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1851 slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
1852 bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
1853 RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
1861 slave_remove(struct bond_dev_private *internals,
1862 struct rte_eth_dev *slave_eth_dev)
1866 for (i = 0; i < internals->slave_count; i++)
1867 if (internals->slaves[i].port_id ==
1868 slave_eth_dev->data->port_id)
1871 if (i < (internals->slave_count - 1)) {
1872 struct rte_flow *flow;
1874 memmove(&internals->slaves[i], &internals->slaves[i + 1],
1875 sizeof(internals->slaves[0]) *
1876 (internals->slave_count - i - 1));
1877 TAILQ_FOREACH(flow, &internals->flow_list, next) {
1878 memmove(&flow->flows[i], &flow->flows[i + 1],
1879 sizeof(flow->flows[0]) *
1880 (internals->slave_count - i - 1));
1881 flow->flows[internals->slave_count - 1] = NULL;
1885 internals->slave_count--;
1887 /* force reconfiguration of slave interfaces */
1888 rte_eth_dev_internal_reset(slave_eth_dev);
1892 bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
1895 slave_add(struct bond_dev_private *internals,
1896 struct rte_eth_dev *slave_eth_dev)
1898 struct bond_slave_details *slave_details =
1899 &internals->slaves[internals->slave_count];
1901 slave_details->port_id = slave_eth_dev->data->port_id;
1902 slave_details->last_link_status = 0;
1904 /* Mark slave devices that don't support interrupts so we can
1905 * compensate when we start the bond
1907 if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1908 slave_details->link_status_poll_enabled = 1;
1911 slave_details->link_status_wait_to_complete = 0;
1912 /* save the slave's original MAC address so it can be restored later */
1913 memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
1914 sizeof(struct rte_ether_addr));
1918 bond_ethdev_primary_set(struct bond_dev_private *internals,
1919 uint16_t slave_port_id)
1923 if (internals->active_slave_count < 1)
1924 internals->current_primary_port = slave_port_id;
1926 /* Search bonded device slave ports for new proposed primary port */
1927 for (i = 0; i < internals->active_slave_count; i++) {
1928 if (internals->active_slaves[i] == slave_port_id)
1929 internals->current_primary_port = slave_port_id;
1934 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
1937 bond_ethdev_start(struct rte_eth_dev *eth_dev)
1939 struct bond_dev_private *internals;
1942 /* slave eth dev will be started by bonded device */
1943 if (check_for_bonded_ethdev(eth_dev)) {
1944 RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
1945 eth_dev->data->port_id);
1949 eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
1950 eth_dev->data->dev_started = 1;
1952 internals = eth_dev->data->dev_private;
1954 if (internals->slave_count == 0) {
1955 RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
1959 if (internals->user_defined_mac == 0) {
1960 struct rte_ether_addr *new_mac_addr = NULL;
1962 for (i = 0; i < internals->slave_count; i++)
1963 if (internals->slaves[i].port_id == internals->primary_port)
1964 new_mac_addr = &internals->slaves[i].persisted_mac_addr;
1966 if (new_mac_addr == NULL)
1969 if (mac_address_set(eth_dev, new_mac_addr) != 0) {
1970 RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
1971 eth_dev->data->port_id);
1976 if (internals->mode == BONDING_MODE_8023AD) {
1977 if (internals->mode4.dedicated_queues.enabled == 1) {
1978 internals->mode4.dedicated_queues.rx_qid =
1979 eth_dev->data->nb_rx_queues;
1980 internals->mode4.dedicated_queues.tx_qid =
1981 eth_dev->data->nb_tx_queues;
1986 /* Reconfigure each slave device if starting bonded device */
1987 for (i = 0; i < internals->slave_count; i++) {
1988 struct rte_eth_dev *slave_ethdev =
1989 &(rte_eth_devices[internals->slaves[i].port_id]);
1990 if (slave_configure(eth_dev, slave_ethdev) != 0) {
1992 "bonded port (%d) failed to reconfigure slave device (%d)",
1993 eth_dev->data->port_id,
1994 internals->slaves[i].port_id);
1997 /* We will need to poll for link status if any slave doesn't
1998 * support interrupts
2000 if (internals->slaves[i].link_status_poll_enabled)
2001 internals->link_status_polling_enabled = 1;
2004 /* start polling if needed */
2005 if (internals->link_status_polling_enabled) {
2007 internals->link_status_polling_interval_ms * 1000,
2008 bond_ethdev_slave_link_status_change_monitor,
2009 (void *)&rte_eth_devices[internals->port_id]);
2012 /* Update all slave devices' MACs */
2013 if (mac_address_slaves_update(eth_dev) != 0)
2016 if (internals->user_defined_primary_port)
2017 bond_ethdev_primary_set(internals, internals->primary_port);
2019 if (internals->mode == BONDING_MODE_8023AD)
2020 bond_mode_8023ad_start(eth_dev);
2022 if (internals->mode == BONDING_MODE_TLB ||
2023 internals->mode == BONDING_MODE_ALB)
2024 bond_tlb_enable(internals);
2029 eth_dev->data->dev_started = 0;
2034 bond_ethdev_free_queues(struct rte_eth_dev *dev)
2038 if (dev->data->rx_queues != NULL) {
2039 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2040 rte_free(dev->data->rx_queues[i]);
2041 dev->data->rx_queues[i] = NULL;
2043 dev->data->nb_rx_queues = 0;
2046 if (dev->data->tx_queues != NULL) {
2047 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2048 rte_free(dev->data->tx_queues[i]);
2049 dev->data->tx_queues[i] = NULL;
2051 dev->data->nb_tx_queues = 0;
2056 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
2058 struct bond_dev_private *internals = eth_dev->data->dev_private;
2062 if (internals->mode == BONDING_MODE_8023AD) {
2066 bond_mode_8023ad_stop(eth_dev);
2068 /* Discard all messages to/from mode 4 state machines */
2069 for (i = 0; i < internals->active_slave_count; i++) {
2070 port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
2072 RTE_ASSERT(port->rx_ring != NULL);
2073 while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
2074 rte_pktmbuf_free(pkt);
2076 RTE_ASSERT(port->tx_ring != NULL);
2077 while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
2078 rte_pktmbuf_free(pkt);
2082 if (internals->mode == BONDING_MODE_TLB ||
2083 internals->mode == BONDING_MODE_ALB) {
2084 bond_tlb_disable(internals);
2085 for (i = 0; i < internals->active_slave_count; i++)
2086 tlb_last_obytets[internals->active_slaves[i]] = 0;
2089 eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
2090 eth_dev->data->dev_started = 0;
2092 internals->link_status_polling_enabled = 0;
2093 for (i = 0; i < internals->slave_count; i++) {
2094 uint16_t slave_id = internals->slaves[i].port_id;
2095 if (find_slave_by_id(internals->active_slaves,
2096 internals->active_slave_count, slave_id) !=
2097 internals->active_slave_count) {
2098 internals->slaves[i].last_link_status = 0;
2099 ret = rte_eth_dev_stop(slave_id);
2101 RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
2105 deactivate_slave(eth_dev, slave_id);
2113 bond_ethdev_close(struct rte_eth_dev *dev)
2115 struct bond_dev_private *internals = dev->data->dev_private;
2116 uint16_t bond_port_id = internals->port_id;
2118 struct rte_flow_error ferror;
2120 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2123 RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
2124 while (internals->slave_count != skipped) {
2125 uint16_t port_id = internals->slaves[skipped].port_id;
2127 if (rte_eth_dev_stop(port_id) != 0) {
2128 RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
2133 if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
2135 "Failed to remove port %d from bonded device %s",
2136 port_id, dev->device->name);
2140 bond_flow_ops.flush(dev, &ferror);
2141 bond_ethdev_free_queues(dev);
2142 rte_bitmap_reset(internals->vlan_filter_bmp);
2143 rte_bitmap_free(internals->vlan_filter_bmp);
2144 rte_free(internals->vlan_filter_bmpmem);
2146 /* Try to release mempool used in mode6. If the bond
2147 * device is not in mode 6, freeing a NULL pointer is not a problem.
2149 rte_mempool_free(internals->mode6.mempool);
2151 if (internals->kvlist != NULL)
2152 rte_kvargs_free(internals->kvlist);
2157 /* forward declaration */
2158 static int bond_ethdev_configure(struct rte_eth_dev *dev);
2161 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2163 struct bond_dev_private *internals = dev->data->dev_private;
2164 struct bond_slave_details slave;
2167 uint16_t max_nb_rx_queues = UINT16_MAX;
2168 uint16_t max_nb_tx_queues = UINT16_MAX;
2169 uint16_t max_rx_desc_lim = UINT16_MAX;
2170 uint16_t max_tx_desc_lim = UINT16_MAX;
2172 dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
2174 dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
2175 internals->candidate_max_rx_pktlen :
2176 RTE_ETHER_MAX_JUMBO_FRAME_LEN;
2178 /* Max number of tx/rx queues that the bonded device can support is the
2179 * minimum of the values across the bonded slaves, as all slaves must be capable
2180 * of supporting the same number of tx/rx queues.
2182 if (internals->slave_count > 0) {
2183 struct rte_eth_dev_info slave_info;
2186 for (idx = 0; idx < internals->slave_count; idx++) {
2187 slave = internals->slaves[idx];
2188 ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
2191 "%s: Failed to get device (port %u) info: %s\n",
2199 if (slave_info.max_rx_queues < max_nb_rx_queues)
2200 max_nb_rx_queues = slave_info.max_rx_queues;
2202 if (slave_info.max_tx_queues < max_nb_tx_queues)
2203 max_nb_tx_queues = slave_info.max_tx_queues;
2205 if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
2206 max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
2208 if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
2209 max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
2213 dev_info->max_rx_queues = max_nb_rx_queues;
2214 dev_info->max_tx_queues = max_nb_tx_queues;
2216 memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
2217 sizeof(dev_info->default_rxconf));
2218 memcpy(&dev_info->default_txconf, &internals->default_txconf,
2219 sizeof(dev_info->default_txconf));
2221 dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
2222 dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
2225 * If dedicated HW queues are enabled for the link bonding device in
2226 * LACP mode, the maximum number of data path queues is reduced by 1.
2228 if (internals->mode == BONDING_MODE_8023AD &&
2229 internals->mode4.dedicated_queues.enabled == 1) {
2230 dev_info->max_rx_queues--;
2231 dev_info->max_tx_queues--;
2234 dev_info->min_rx_bufsize = 0;
2236 dev_info->rx_offload_capa = internals->rx_offload_capa;
2237 dev_info->tx_offload_capa = internals->tx_offload_capa;
2238 dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
2239 dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
2240 dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
2242 dev_info->reta_size = internals->reta_size;
2243 dev_info->hash_key_size = internals->rss_key_len;
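/*
 * Enable or disable a VLAN filter entry: record it in the bonded device's
 * VLAN bitmap under the lock and propagate the setting to every slave,
 * logging a warning for slaves that do not support VLAN filtering.
 */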
2249 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2253 struct bond_dev_private *internals = dev->data->dev_private;
2255 /* don't do this while a slave is being added */
2256 rte_spinlock_lock(&internals->lock);
2259 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
2261 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
2263 for (i = 0; i < internals->slave_count; i++) {
2264 uint16_t port_id = internals->slaves[i].port_id;
2266 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2268 RTE_BOND_LOG(WARNING,
2269 "Setting VLAN filter on slave port %u not supported.",
2273 rte_spinlock_unlock(&internals->lock);
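/*
 * Rx queue setup only records the requested configuration (descriptor
 * count, rxconf, mempool) in a bond_rx_queue structure; the slave ports'
 * queues are configured from this stored configuration elsewhere in the
 * driver.
 */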
2278 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
2279 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
2280 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
2282 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
2283 rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
2284 0, dev->data->numa_node);
2285 if (bd_rx_q == NULL)
2288 bd_rx_q->queue_id = rx_queue_id;
2289 bd_rx_q->dev_private = dev->data->dev_private;
2291 bd_rx_q->nb_rx_desc = nb_rx_desc;
2293 memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
2294 bd_rx_q->mb_pool = mb_pool;
2296 dev->data->rx_queues[rx_queue_id] = bd_rx_q;
2302 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
2303 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
2304 const struct rte_eth_txconf *tx_conf)
2306 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
2307 rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
2308 0, dev->data->numa_node);
2310 if (bd_tx_q == NULL)
2313 bd_tx_q->queue_id = tx_queue_id;
2314 bd_tx_q->dev_private = dev->data->dev_private;
2316 bd_tx_q->nb_tx_desc = nb_tx_desc;
2317 memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
2319 dev->data->tx_queues[tx_queue_id] = bd_tx_q;
2325 bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
2327 void *queue = dev->data->rx_queues[queue_id];
2336 bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
2338 void *queue = dev->data->tx_queues[queue_id];
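/*
 * Periodic EAL alarm callback that polls the link status of slaves which
 * use polling instead of LSC interrupts. Any status change is forwarded
 * through bond_ethdev_lsc_event_callback, and the alarm is re-armed while
 * at least one polled slave remains.
 */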
2347 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
2349 struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
2350 struct bond_dev_private *internals;
2352 /* Default value for polling slave found is true as we don't want to
2353 * disable the polling thread if we cannot get the lock */
2354 int i, polling_slave_found = 1;
2359 bonded_ethdev = cb_arg;
2360 internals = bonded_ethdev->data->dev_private;
2362 if (!bonded_ethdev->data->dev_started ||
2363 !internals->link_status_polling_enabled)
2366 /* If the device is currently being configured then don't check the
2367 * slaves' link status; wait until the next period */
2368 if (rte_spinlock_trylock(&internals->lock)) {
2369 if (internals->slave_count > 0)
2370 polling_slave_found = 0;
2372 for (i = 0; i < internals->slave_count; i++) {
2373 if (!internals->slaves[i].link_status_poll_enabled)
2376 slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
2377 polling_slave_found = 1;
2379 /* Update slave link status */
2380 (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
2381 internals->slaves[i].link_status_wait_to_complete);
2383 /* if the link status has changed since it was last checked then call the LSC event callback */
2385 if (slave_ethdev->data->dev_link.link_status !=
2386 internals->slaves[i].last_link_status) {
2387 internals->slaves[i].last_link_status =
2388 slave_ethdev->data->dev_link.link_status;
2390 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
2391 RTE_ETH_EVENT_INTR_LSC,
2392 &bonded_ethdev->data->port_id,
2396 rte_spinlock_unlock(&internals->lock);
2399 if (polling_slave_found)
2400 /* Set alarm to continue monitoring the link status of the slave ethdevs */
2401 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
2402 bond_ethdev_slave_link_status_change_monitor, cb_arg);
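/*
 * Derive the bonded device's link state from its active slaves. The
 * reported speed depends on the mode: broadcast uses the minimum slave
 * speed, active-backup uses the current primary's speed, and the
 * remaining modes report the sum of the active slaves' speeds.
 */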
2406 bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
2408 int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
2410 struct bond_dev_private *bond_ctx;
2411 struct rte_eth_link slave_link;
2413 bool one_link_update_succeeded;
2417 bond_ctx = ethdev->data->dev_private;
2419 ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2421 if (ethdev->data->dev_started == 0 ||
2422 bond_ctx->active_slave_count == 0) {
2423 ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
2427 ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
2429 if (wait_to_complete)
2430 link_update = rte_eth_link_get;
2432 link_update = rte_eth_link_get_nowait;
2434 switch (bond_ctx->mode) {
2435 case BONDING_MODE_BROADCAST:
2437 * Setting link speed to UINT32_MAX to ensure we pick up the
2438 * value of the first active slave
2440 ethdev->data->dev_link.link_speed = UINT32_MAX;
2443 * The link speed is the minimum of all the slaves' link speeds, as
2444 * packet loss will occur on the slowest slave if transmission at a
2445 * rate greater than its speed is attempted
2447 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2448 ret = link_update(bond_ctx->active_slaves[idx],
2451 ethdev->data->dev_link.link_speed =
2452 RTE_ETH_SPEED_NUM_NONE;
2454 "Slave (port %u) link get failed: %s",
2455 bond_ctx->active_slaves[idx],
2456 rte_strerror(-ret));
2460 if (slave_link.link_speed <
2461 ethdev->data->dev_link.link_speed)
2462 ethdev->data->dev_link.link_speed =
2463 slave_link.link_speed;
2466 case BONDING_MODE_ACTIVE_BACKUP:
2467 /* Current primary slave */
2468 ret = link_update(bond_ctx->current_primary_port, &slave_link);
2470 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
2471 bond_ctx->current_primary_port,
2472 rte_strerror(-ret));
2476 ethdev->data->dev_link.link_speed = slave_link.link_speed;
2478 case BONDING_MODE_8023AD:
2479 ethdev->data->dev_link.link_autoneg =
2480 bond_ctx->mode4.slave_link.link_autoneg;
2481 ethdev->data->dev_link.link_duplex =
2482 bond_ctx->mode4.slave_link.link_duplex;
2484 /* fall through to update link speed */
2485 case BONDING_MODE_ROUND_ROBIN:
2486 case BONDING_MODE_BALANCE:
2487 case BONDING_MODE_TLB:
2488 case BONDING_MODE_ALB:
2491 * In these modes the maximum theoretical link speed is the sum
2494 ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2495 one_link_update_succeeded = false;
2497 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2498 ret = link_update(bond_ctx->active_slaves[idx],
2502 "Slave (port %u) link get failed: %s",
2503 bond_ctx->active_slaves[idx],
2504 rte_strerror(-ret));
2508 one_link_update_succeeded = true;
2509 ethdev->data->dev_link.link_speed +=
2510 slave_link.link_speed;
2513 if (!one_link_update_succeeded) {
2514 RTE_BOND_LOG(ERR, "All slaves link get failed");
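/*
 * Aggregate statistics: sum the basic and per-queue counters of every
 * slave into the bonded device's stats.
 */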
2525 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2527 struct bond_dev_private *internals = dev->data->dev_private;
2528 struct rte_eth_stats slave_stats;
2531 for (i = 0; i < internals->slave_count; i++) {
2532 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
2534 stats->ipackets += slave_stats.ipackets;
2535 stats->opackets += slave_stats.opackets;
2536 stats->ibytes += slave_stats.ibytes;
2537 stats->obytes += slave_stats.obytes;
2538 stats->imissed += slave_stats.imissed;
2539 stats->ierrors += slave_stats.ierrors;
2540 stats->oerrors += slave_stats.oerrors;
2541 stats->rx_nombuf += slave_stats.rx_nombuf;
2543 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
2544 stats->q_ipackets[j] += slave_stats.q_ipackets[j];
2545 stats->q_opackets[j] += slave_stats.q_opackets[j];
2546 stats->q_ibytes[j] += slave_stats.q_ibytes[j];
2547 stats->q_obytes[j] += slave_stats.q_obytes[j];
2548 stats->q_errors[j] += slave_stats.q_errors[j];
2557 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
2559 struct bond_dev_private *internals = dev->data->dev_private;
2564 for (i = 0, err = 0; i < internals->slave_count; i++) {
2565 ret = rte_eth_stats_reset(internals->slaves[i].port_id);
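/*
 * Promiscuous and allmulticast handling below is mode dependent: in
 * round-robin, balance, broadcast and 802.3AD the setting is propagated
 * to every slave, while in active-backup, TLB and ALB it is applied only
 * to the current primary slave. Slaves forced into promiscuous or
 * allmulticast mode by 802.3AD are left untouched on disable.
 */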
2574 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2576 struct bond_dev_private *internals = eth_dev->data->dev_private;
2581 switch (internals->mode) {
2582 /* Promiscuous mode is propagated to all slaves */
2583 case BONDING_MODE_ROUND_ROBIN:
2584 case BONDING_MODE_BALANCE:
2585 case BONDING_MODE_BROADCAST:
2586 case BONDING_MODE_8023AD: {
2587 unsigned int slave_ok = 0;
2589 for (i = 0; i < internals->slave_count; i++) {
2590 port_id = internals->slaves[i].port_id;
2592 ret = rte_eth_promiscuous_enable(port_id);
2595 "Failed to enable promiscuous mode for port %u: %s",
2596 port_id, rte_strerror(-ret));
2601 * Report success if the operation is successful on at least
2602 * one slave. Otherwise return the last error code.
2608 /* Promiscuous mode is propagated only to primary slave */
2609 case BONDING_MODE_ACTIVE_BACKUP:
2610 case BONDING_MODE_TLB:
2611 case BONDING_MODE_ALB:
2613 /* Do not touch promisc when there cannot be primary ports */
2614 if (internals->slave_count == 0)
2616 port_id = internals->current_primary_port;
2617 ret = rte_eth_promiscuous_enable(port_id);
2620 "Failed to enable promiscuous mode for port %u: %s",
2621 port_id, rte_strerror(-ret));
2628 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
2630 struct bond_dev_private *internals = dev->data->dev_private;
2635 switch (internals->mode) {
2636 /* Promiscuous mode is propagated to all slaves */
2637 case BONDING_MODE_ROUND_ROBIN:
2638 case BONDING_MODE_BALANCE:
2639 case BONDING_MODE_BROADCAST:
2640 case BONDING_MODE_8023AD: {
2641 unsigned int slave_ok = 0;
2643 for (i = 0; i < internals->slave_count; i++) {
2644 port_id = internals->slaves[i].port_id;
2646 if (internals->mode == BONDING_MODE_8023AD &&
2647 bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2648 BOND_8023AD_FORCED_PROMISC) {
2652 ret = rte_eth_promiscuous_disable(port_id);
2655 "Failed to disable promiscuous mode for port %u: %s",
2656 port_id, rte_strerror(-ret));
2661 * Report success if the operation is successful on at least
2662 * one slave. Otherwise return the last error code.
2668 /* Promiscuous mode is propagated only to primary slave */
2669 case BONDING_MODE_ACTIVE_BACKUP:
2670 case BONDING_MODE_TLB:
2671 case BONDING_MODE_ALB:
2673 /* Do not touch promisc when there cannot be primary ports */
2674 if (internals->slave_count == 0)
2676 port_id = internals->current_primary_port;
2677 ret = rte_eth_promiscuous_disable(port_id);
2680 "Failed to disable promiscuous mode for port %u: %s",
2681 port_id, rte_strerror(-ret));
2688 bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
2690 struct bond_dev_private *internals = eth_dev->data->dev_private;
2695 switch (internals->mode) {
2696 /* allmulti mode is propagated to all slaves */
2697 case BONDING_MODE_ROUND_ROBIN:
2698 case BONDING_MODE_BALANCE:
2699 case BONDING_MODE_BROADCAST:
2700 case BONDING_MODE_8023AD: {
2701 unsigned int slave_ok = 0;
2703 for (i = 0; i < internals->slave_count; i++) {
2704 port_id = internals->slaves[i].port_id;
2706 ret = rte_eth_allmulticast_enable(port_id);
2709 "Failed to enable allmulti mode for port %u: %s",
2710 port_id, rte_strerror(-ret));
2715 * Report success if the operation is successful on at least
2716 * one slave. Otherwise return the last error code.
2722 /* allmulti mode is propagated only to primary slave */
2723 case BONDING_MODE_ACTIVE_BACKUP:
2724 case BONDING_MODE_TLB:
2725 case BONDING_MODE_ALB:
2727 /* Do not touch allmulti when there cannot be primary ports */
2728 if (internals->slave_count == 0)
2730 port_id = internals->current_primary_port;
2731 ret = rte_eth_allmulticast_enable(port_id);
2734 "Failed to enable allmulti mode for port %u: %s",
2735 port_id, rte_strerror(-ret));
2742 bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
2744 struct bond_dev_private *internals = eth_dev->data->dev_private;
2749 switch (internals->mode) {
2750 /* allmulti mode is propagated to all slaves */
2751 case BONDING_MODE_ROUND_ROBIN:
2752 case BONDING_MODE_BALANCE:
2753 case BONDING_MODE_BROADCAST:
2754 case BONDING_MODE_8023AD: {
2755 unsigned int slave_ok = 0;
2757 for (i = 0; i < internals->slave_count; i++) {
2758 uint16_t port_id = internals->slaves[i].port_id;
2760 if (internals->mode == BONDING_MODE_8023AD &&
2761 bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2762 BOND_8023AD_FORCED_ALLMULTI)
2765 ret = rte_eth_allmulticast_disable(port_id);
2768 "Failed to disable allmulti mode for port %u: %s",
2769 port_id, rte_strerror(-ret));
2774 * Report success if the operation is successful on at least
2775 * one slave. Otherwise return the last error code.
2781 /* allmulti mode is propagated only to primary slave */
2782 case BONDING_MODE_ACTIVE_BACKUP:
2783 case BONDING_MODE_TLB:
2784 case BONDING_MODE_ALB:
2786 /* Do not touch allmulti when there cannot be primary ports */
2787 if (internals->slave_count == 0)
2789 port_id = internals->current_primary_port;
2790 ret = rte_eth_allmulticast_disable(port_id);
2793 "Failed to disable allmulti mode for port %u: %s",
2794 port_id, rte_strerror(-ret));
2801 bond_ethdev_delayed_lsc_propagation(void *arg)
2806 rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2807 RTE_ETH_EVENT_INTR_LSC, NULL);
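/*
 * LSC event callback invoked when a slave's link changes. Under the lsc
 * lock it activates or deactivates the slave, updates the primary port
 * and slave MAC addresses as needed, refreshes the bonded link state and
 * then propagates the event to the application, either immediately or
 * via an EAL alarm when an up/down delay is configured.
 */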
2811 bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2812 void *param, void *ret_param __rte_unused)
2814 struct rte_eth_dev *bonded_eth_dev;
2815 struct bond_dev_private *internals;
2816 struct rte_eth_link link;
2820 uint8_t lsc_flag = 0;
2821 int valid_slave = 0;
2822 uint16_t active_pos;
2825 if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2828 bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
2830 if (check_for_bonded_ethdev(bonded_eth_dev))
2833 internals = bonded_eth_dev->data->dev_private;
2835 /* If the device isn't started don't handle interrupts */
2836 if (!bonded_eth_dev->data->dev_started)
2839 /* verify that port_id is a valid slave of bonded port */
2840 for (i = 0; i < internals->slave_count; i++) {
2841 if (internals->slaves[i].port_id == port_id) {
2850 /* Synchronize parallel LSC callback invocations, whether triggered by a
2851 * real link event from the slave PMDs or by the bonding PMD itself.
2853 rte_spinlock_lock(&internals->lsc_lock);
2855 /* Search for port in active port list */
2856 active_pos = find_slave_by_id(internals->active_slaves,
2857 internals->active_slave_count, port_id);
2859 ret = rte_eth_link_get_nowait(port_id, &link);
2861 RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id);
2863 if (ret == 0 && link.link_status) {
2864 if (active_pos < internals->active_slave_count)
2867 /* check link state properties if the bonded link is up */
2868 if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
2869 if (link_properties_valid(bonded_eth_dev, &link) != 0)
2870 RTE_BOND_LOG(ERR, "Invalid link properties "
2871 "for slave %d in bonding mode %d",
2872 port_id, internals->mode);
2874 /* inherit slave link properties */
2875 link_properties_set(bonded_eth_dev, &link);
2878 /* If there are no active slave ports then set this port to be the primary port */
2881 if (internals->active_slave_count < 1) {
2882 /* If first active slave, then change link status */
2883 bonded_eth_dev->data->dev_link.link_status =
2885 internals->current_primary_port = port_id;
2888 mac_address_slaves_update(bonded_eth_dev);
2891 activate_slave(bonded_eth_dev, port_id);
2893 /* If the user has defined the primary port then default to using it */
2896 if (internals->user_defined_primary_port &&
2897 internals->primary_port == port_id)
2898 bond_ethdev_primary_set(internals, port_id);
2900 if (active_pos == internals->active_slave_count)
2903 /* Remove from active slave list */
2904 deactivate_slave(bonded_eth_dev, port_id);
2906 if (internals->active_slave_count < 1)
2909 /* Update primary id, take first active slave from list or if none
2910 * available set to -1 */
2911 if (port_id == internals->current_primary_port) {
2912 if (internals->active_slave_count > 0)
2913 bond_ethdev_primary_set(internals,
2914 internals->active_slaves[0]);
2916 internals->current_primary_port = internals->primary_port;
2917 mac_address_slaves_update(bonded_eth_dev);
2923 * Update the bonded device's link properties after any change to the active slaves
2926 bond_ethdev_link_update(bonded_eth_dev, 0);
2929 /* Cancel any possible outstanding interrupts if delays are enabled */
2930 if (internals->link_up_delay_ms > 0 ||
2931 internals->link_down_delay_ms > 0)
2932 rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2935 if (bonded_eth_dev->data->dev_link.link_status) {
2936 if (internals->link_up_delay_ms > 0)
2937 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2938 bond_ethdev_delayed_lsc_propagation,
2939 (void *)bonded_eth_dev);
2941 rte_eth_dev_callback_process(bonded_eth_dev,
2942 RTE_ETH_EVENT_INTR_LSC,
2946 if (internals->link_down_delay_ms > 0)
2947 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2948 bond_ethdev_delayed_lsc_propagation,
2949 (void *)bonded_eth_dev);
2951 rte_eth_dev_callback_process(bonded_eth_dev,
2952 RTE_ETH_EVENT_INTR_LSC,
2957 rte_spinlock_unlock(&internals->lsc_lock);
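/*
 * Update the RSS redirection table: copy the requested entries into the
 * bonded device's RETA, replicate the pattern across the whole table and
 * push it to each slave using that slave's own RETA size.
 */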
2963 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2964 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2968 int slave_reta_size;
2969 unsigned reta_count;
2970 struct bond_dev_private *internals = dev->data->dev_private;
2972 if (reta_size != internals->reta_size)
2975 /* Copy RETA table */
2976 reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
2977 RTE_ETH_RETA_GROUP_SIZE;
2979 for (i = 0; i < reta_count; i++) {
2980 internals->reta_conf[i].mask = reta_conf[i].mask;
2981 for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
2982 if ((reta_conf[i].mask >> j) & 0x01)
2983 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2986 /* Fill rest of array */
2987 for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2988 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2989 sizeof(internals->reta_conf[0]) * reta_count);
2991 /* Propagate RETA over slaves */
2992 for (i = 0; i < internals->slave_count; i++) {
2993 slave_reta_size = internals->slaves[i].reta_size;
2994 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2995 &internals->reta_conf[0], slave_reta_size);
3004 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
3005 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
3008 struct bond_dev_private *internals = dev->data->dev_private;
3010 if (reta_size != internals->reta_size)
3013 /* Copy RETA table */
3014 for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
3015 for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
3016 if ((reta_conf[i].mask >> j) & 0x01)
3017 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
3023 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
3024 struct rte_eth_rss_conf *rss_conf)
3027 struct bond_dev_private *internals = dev->data->dev_private;
3028 struct rte_eth_rss_conf bond_rss_conf;
3030 memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
3032 bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
3034 if (bond_rss_conf.rss_hf != 0)
3035 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
3037 if (bond_rss_conf.rss_key) {
3038 if (bond_rss_conf.rss_key_len < internals->rss_key_len)
3040 else if (bond_rss_conf.rss_key_len > internals->rss_key_len)
3041 RTE_BOND_LOG(WARNING, "rss_key will be truncated");
3043 memcpy(internals->rss_key, bond_rss_conf.rss_key,
3044 internals->rss_key_len);
3045 bond_rss_conf.rss_key_len = internals->rss_key_len;
3048 for (i = 0; i < internals->slave_count; i++) {
3049 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
3059 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
3060 struct rte_eth_rss_conf *rss_conf)
3062 struct bond_dev_private *internals = dev->data->dev_private;
3064 rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
3065 rss_conf->rss_key_len = internals->rss_key_len;
3066 if (rss_conf->rss_key)
3067 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
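/*
 * Set the MTU on the bonded device only if every slave implements the
 * mtu_set operation; the new MTU is then applied to each slave under the
 * device lock.
 */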
3073 bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3075 struct rte_eth_dev *slave_eth_dev;
3076 struct bond_dev_private *internals = dev->data->dev_private;
3079 rte_spinlock_lock(&internals->lock);
3081 for (i = 0; i < internals->slave_count; i++) {
3082 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3083 if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
3084 rte_spinlock_unlock(&internals->lock);
3088 for (i = 0; i < internals->slave_count; i++) {
3089 ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
3091 rte_spinlock_unlock(&internals->lock);
3096 rte_spinlock_unlock(&internals->lock);
3101 bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
3102 struct rte_ether_addr *addr)
3104 if (mac_address_set(dev, addr)) {
3105 RTE_BOND_LOG(ERR, "Failed to update MAC address");
3113 bond_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
3114 const struct rte_flow_ops **ops)
3116 *ops = &bond_flow_ops;
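/*
 * Secondary MAC address management: an address is added to, or removed
 * from, every slave, and a failed add is rolled back on the slaves that
 * already accepted it.
 */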
3121 bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
3122 struct rte_ether_addr *mac_addr,
3123 __rte_unused uint32_t index, uint32_t vmdq)
3125 struct rte_eth_dev *slave_eth_dev;
3126 struct bond_dev_private *internals = dev->data->dev_private;
3129 rte_spinlock_lock(&internals->lock);
3131 for (i = 0; i < internals->slave_count; i++) {
3132 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3133 if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
3134 *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
3140 for (i = 0; i < internals->slave_count; i++) {
3141 ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
3145 for (i--; i >= 0; i--)
3146 rte_eth_dev_mac_addr_remove(
3147 internals->slaves[i].port_id, mac_addr);
3154 rte_spinlock_unlock(&internals->lock);
3159 bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
3161 struct rte_eth_dev *slave_eth_dev;
3162 struct bond_dev_private *internals = dev->data->dev_private;
3165 rte_spinlock_lock(&internals->lock);
3167 for (i = 0; i < internals->slave_count; i++) {
3168 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
3169 if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
3173 struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
3175 for (i = 0; i < internals->slave_count; i++)
3176 rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
3180 rte_spinlock_unlock(&internals->lock);
3183 const struct eth_dev_ops default_dev_ops = {
3184 .dev_start = bond_ethdev_start,
3185 .dev_stop = bond_ethdev_stop,
3186 .dev_close = bond_ethdev_close,
3187 .dev_configure = bond_ethdev_configure,
3188 .dev_infos_get = bond_ethdev_info,
3189 .vlan_filter_set = bond_ethdev_vlan_filter_set,
3190 .rx_queue_setup = bond_ethdev_rx_queue_setup,
3191 .tx_queue_setup = bond_ethdev_tx_queue_setup,
3192 .rx_queue_release = bond_ethdev_rx_queue_release,
3193 .tx_queue_release = bond_ethdev_tx_queue_release,
3194 .link_update = bond_ethdev_link_update,
3195 .stats_get = bond_ethdev_stats_get,
3196 .stats_reset = bond_ethdev_stats_reset,
3197 .promiscuous_enable = bond_ethdev_promiscuous_enable,
3198 .promiscuous_disable = bond_ethdev_promiscuous_disable,
3199 .allmulticast_enable = bond_ethdev_allmulticast_enable,
3200 .allmulticast_disable = bond_ethdev_allmulticast_disable,
3201 .reta_update = bond_ethdev_rss_reta_update,
3202 .reta_query = bond_ethdev_rss_reta_query,
3203 .rss_hash_update = bond_ethdev_rss_hash_update,
3204 .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get,
3205 .mtu_set = bond_ethdev_mtu_set,
3206 .mac_addr_set = bond_ethdev_mac_address_set,
3207 .mac_addr_add = bond_ethdev_mac_addr_add,
3208 .mac_addr_remove = bond_ethdev_mac_addr_remove,
3209 .flow_ops_get = bond_flow_ops_get
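/*
 * Allocate and initialise a bonded ethdev: reserve the ethdev entry and
 * MAC address array, set default private-data values, apply the mode 4
 * defaults and the requested bonding mode, and create the VLAN filter
 * bitmap.
 */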
3213 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
3215 const char *name = rte_vdev_device_name(dev);
3216 uint8_t socket_id = dev->device.numa_node;
3217 struct bond_dev_private *internals = NULL;
3218 struct rte_eth_dev *eth_dev = NULL;
3219 uint32_t vlan_filter_bmp_size;
3221 /* now do all data allocation - for eth_dev structure, dummy pci driver
3222 * and internal (private) data
3225 /* reserve an ethdev entry */
3226 eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
3227 if (eth_dev == NULL) {
3228 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
3232 internals = eth_dev->data->dev_private;
3233 eth_dev->data->nb_rx_queues = (uint16_t)1;
3234 eth_dev->data->nb_tx_queues = (uint16_t)1;
3236 /* Allocate memory for storing MAC addresses */
3237 eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
3238 BOND_MAX_MAC_ADDRS, 0, socket_id);
3239 if (eth_dev->data->mac_addrs == NULL) {
3241 "Failed to allocate %u bytes needed to store MAC addresses",
3242 RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
3246 eth_dev->dev_ops = &default_dev_ops;
3247 eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
3248 RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3250 rte_spinlock_init(&internals->lock);
3251 rte_spinlock_init(&internals->lsc_lock);
3253 internals->port_id = eth_dev->data->port_id;
3254 internals->mode = BONDING_MODE_INVALID;
3255 internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
3256 internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
3257 internals->burst_xmit_hash = burst_xmit_l2_hash;
3258 internals->user_defined_mac = 0;
3260 internals->link_status_polling_enabled = 0;
3262 internals->link_status_polling_interval_ms =
3263 DEFAULT_POLLING_INTERVAL_10_MS;
3264 internals->link_down_delay_ms = 0;
3265 internals->link_up_delay_ms = 0;
3267 internals->slave_count = 0;
3268 internals->active_slave_count = 0;
3269 internals->rx_offload_capa = 0;
3270 internals->tx_offload_capa = 0;
3271 internals->rx_queue_offload_capa = 0;
3272 internals->tx_queue_offload_capa = 0;
3273 internals->candidate_max_rx_pktlen = 0;
3274 internals->max_rx_pktlen = 0;
3276 /* Initially allow any RSS offload type to be chosen */
3277 internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
3279 memset(&internals->default_rxconf, 0,
3280 sizeof(internals->default_rxconf));
3281 memset(&internals->default_txconf, 0,
3282 sizeof(internals->default_txconf));
3284 memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
3285 memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
3287 memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
3288 memset(internals->slaves, 0, sizeof(internals->slaves));
3290 TAILQ_INIT(&internals->flow_list);
3291 internals->flow_isolated_valid = 0;
3293 /* Set mode 4 default configuration */
3294 bond_mode_8023ad_setup(eth_dev, NULL);
3295 if (bond_ethdev_mode_set(eth_dev, mode)) {
3296 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
3297 eth_dev->data->port_id, mode);
3301 vlan_filter_bmp_size =
3302 rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
3303 internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
3304 RTE_CACHE_LINE_SIZE);
3305 if (internals->vlan_filter_bmpmem == NULL) {
3307 "Failed to allocate vlan bitmap for bonded device %u",
3308 eth_dev->data->port_id);
3312 internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
3313 internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
3314 if (internals->vlan_filter_bmp == NULL) {
3316 "Failed to init vlan bitmap for bonded device %u",
3317 eth_dev->data->port_id);
3318 rte_free(internals->vlan_filter_bmpmem);
3322 return eth_dev->data->port_id;
3325 rte_free(internals);
3326 if (eth_dev != NULL)
3327 eth_dev->data->dev_private = NULL;
3328 rte_eth_dev_release_port(eth_dev);
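/*
 * Vdev probe entry point. In a secondary process the existing device is
 * simply attached; otherwise the mode, socket_id and aggregator-mode
 * kvargs are parsed and the bonded device is created. A typical
 * invocation (illustrative only, using the documented kvargs) is:
 *   --vdev 'net_bonding0,mode=2,slave=0000:02:00.0,slave=0000:02:00.1'
 */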
3333 bond_probe(struct rte_vdev_device *dev)
3336 struct bond_dev_private *internals;
3337 struct rte_kvargs *kvlist;
3338 uint8_t bonding_mode;
3339 int arg_count, port_id;
3342 struct rte_eth_dev *eth_dev;
3347 name = rte_vdev_device_name(dev);
3348 RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
3350 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
3351 eth_dev = rte_eth_dev_attach_secondary(name);
3353 RTE_BOND_LOG(ERR, "Failed to probe %s", name);
3356 /* TODO: request info from primary to set up Rx and Tx */
3357 eth_dev->dev_ops = &default_dev_ops;
3358 eth_dev->device = &dev->device;
3359 rte_eth_dev_probing_finish(eth_dev);
3363 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
3364 pmd_bond_init_valid_arguments);
3368 /* Parse link bonding mode */
3369 if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
3370 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
3371 &bond_ethdev_parse_slave_mode_kvarg,
3372 &bonding_mode) != 0) {
3373 RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
3378 RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded "
3383 /* Parse socket id to create bonding device on */
3384 arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
3385 if (arg_count == 1) {
3386 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
3387 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
3389 RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
3390 "bonded device %s", name);
3393 } else if (arg_count > 1) {
3394 RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
3395 "bonded device %s", name);
3398 socket_id = rte_socket_id();
3401 dev->device.numa_node = socket_id;
3403 /* Create link bonding eth device */
3404 port_id = bond_alloc(dev, bonding_mode);
3406 RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u on "
3407 "socket %u.", name, bonding_mode, socket_id);
3410 internals = rte_eth_devices[port_id].data->dev_private;
3411 internals->kvlist = kvlist;
3413 if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3414 if (rte_kvargs_process(kvlist,
3415 PMD_BOND_AGG_MODE_KVARG,
3416 &bond_ethdev_parse_slave_agg_mode_kvarg,
3419 "Failed to parse agg selection mode for bonded device %s",
3424 if (internals->mode == BONDING_MODE_8023AD)
3425 internals->mode4.agg_selection = agg_mode;
3427 internals->mode4.agg_selection = AGG_STABLE;
3430 rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
3431 RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
3432 "socket %u.", name, port_id, bonding_mode, socket_id);
3436 rte_kvargs_free(kvlist);
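/*
 * Vdev remove entry point: look up the ethdev by name, and in the primary
 * process stop and close it before releasing the port.
 */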
3442 bond_remove(struct rte_vdev_device *dev)
3444 struct rte_eth_dev *eth_dev;
3445 struct bond_dev_private *internals;
3452 name = rte_vdev_device_name(dev);
3453 RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
3455 /* find an ethdev entry */
3456 eth_dev = rte_eth_dev_allocated(name);
3457 if (eth_dev == NULL)
3458 return 0; /* port already released */
3460 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3461 return rte_eth_dev_release_port(eth_dev);
3463 RTE_ASSERT(eth_dev->device == &dev->device);
3465 internals = eth_dev->data->dev_private;
3466 if (internals->slave_count != 0)
3469 if (eth_dev->data->dev_started == 1) {
3470 ret = bond_ethdev_stop(eth_dev);
3471 bond_ethdev_close(eth_dev);
3473 rte_eth_dev_release_port(eth_dev);
3478 /* This part resolves the slave port IDs after all the other physical and
3479 * virtual devices have been allocated */
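/*
 * Configuration applies the RSS defaults (key and RETA) when RSS is
 * enabled and then processes the remaining kvargs: bonded MAC address,
 * transmit policy, aggregator mode, slave and primary-slave ports, LSC
 * polling interval and link up/down propagation delays.
 */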
3481 bond_ethdev_configure(struct rte_eth_dev *dev)
3483 const char *name = dev->device->name;
3484 struct bond_dev_private *internals = dev->data->dev_private;
3485 struct rte_kvargs *kvlist = internals->kvlist;
3487 uint16_t port_id = dev - rte_eth_devices;
3490 static const uint8_t default_rss_key[40] = {
3491 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
3492 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3493 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
3494 0xBE, 0xAC, 0x01, 0xFA
3500 * If RSS is enabled, fill the RETA with default values and
3501 * set the key to the value specified in the port RSS configuration.
3502 * Fall back to the default RSS key if no key is specified
3504 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
3505 struct rte_eth_rss_conf *rss_conf =
3506 &dev->data->dev_conf.rx_adv_conf.rss_conf;
3507 if (rss_conf->rss_key != NULL) {
3508 if (internals->rss_key_len > rss_conf->rss_key_len) {
3509 RTE_BOND_LOG(ERR, "Invalid RSS key length (%u)",
3510 rss_conf->rss_key_len);
3514 memcpy(internals->rss_key, rss_conf->rss_key,
3515 internals->rss_key_len);
3517 if (internals->rss_key_len > sizeof(default_rss_key)) {
3519 "There is no suitable default hash key");
3523 memcpy(internals->rss_key, default_rss_key,
3524 internals->rss_key_len);
3527 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
3528 internals->reta_conf[i].mask = ~0LL;
3529 for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
3530 internals->reta_conf[i].reta[j] =
3531 (i * RTE_ETH_RETA_GROUP_SIZE + j) %
3532 dev->data->nb_rx_queues;
3536 /* set the max_rx_pktlen */
3537 internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
3540 * If there is no kvlist, this bonded device was created through the
3541 * bonding API.
3546 /* Parse MAC address for bonded device */
3547 arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
3548 if (arg_count == 1) {
3549 struct rte_ether_addr bond_mac;
3551 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
3552 &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
3553 RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
3558 /* Set MAC address */
3559 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
3561 "Failed to set mac address on bonded device %s",
3565 } else if (arg_count > 1) {
3567 "MAC address can be specified only once for bonded device %s",
3572 /* Parse/set balance mode transmit policy */
3573 arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
3574 if (arg_count == 1) {
3575 uint8_t xmit_policy;
3577 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
3578 &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
3581 "Invalid xmit policy specified for bonded device %s",
3586 /* Set the balance mode transmit policy */
3587 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
3589 "Failed to set balance xmit policy on bonded device %s",
3593 } else if (arg_count > 1) {
3595 "Transmit policy can be specified only once for bonded device %s",
3600 if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3601 if (rte_kvargs_process(kvlist,
3602 PMD_BOND_AGG_MODE_KVARG,
3603 &bond_ethdev_parse_slave_agg_mode_kvarg,
3606 "Failed to parse agg selection mode for bonded device %s",
3609 if (internals->mode == BONDING_MODE_8023AD) {
3610 int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
3614 "Invalid args for agg selection set for bonded device %s",
3621 /* Parse/add slave ports to bonded device */
3622 if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
3623 struct bond_ethdev_slave_ports slave_ports;
3626 memset(&slave_ports, 0, sizeof(slave_ports));
3628 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
3629 &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
3631 "Failed to parse slave ports for bonded device %s",
3636 for (i = 0; i < slave_ports.slave_count; i++) {
3637 if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
3639 "Failed to add port %d as slave to bonded device %s",
3640 slave_ports.slaves[i], name);
3645 RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
3649 /* Parse/set primary slave port id */
3650 arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
3651 if (arg_count == 1) {
3652 uint16_t primary_slave_port_id;
3654 if (rte_kvargs_process(kvlist,
3655 PMD_BOND_PRIMARY_SLAVE_KVARG,
3656 &bond_ethdev_parse_primary_slave_port_id_kvarg,
3657 &primary_slave_port_id) < 0) {
3659 "Invalid primary slave port id specified for bonded device %s",
3664 /* Set the primary slave port id */
3665 if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
3668 "Failed to set primary slave port %d on bonded device %s",
3669 primary_slave_port_id, name);
3672 } else if (arg_count > 1) {
3674 "Primary slave can be specified only once for bonded device %s",
3679 /* Parse link status monitor polling interval */
3680 arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
3681 if (arg_count == 1) {
3682 uint32_t lsc_poll_interval_ms;
3684 if (rte_kvargs_process(kvlist,
3685 PMD_BOND_LSC_POLL_PERIOD_KVARG,
3686 &bond_ethdev_parse_time_ms_kvarg,
3687 &lsc_poll_interval_ms) < 0) {
3689 "Invalid lsc polling interval value specified for bonded"
3690 " device %s", name);
3694 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
3697 "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
3698 lsc_poll_interval_ms, name);
3701 } else if (arg_count > 1) {
3703 "LSC polling interval can be specified only once for bonded"
3704 " device %s", name);
3708 /* Parse link up interrupt propagation delay */
3709 arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
3710 if (arg_count == 1) {
3711 uint32_t link_up_delay_ms;
3713 if (rte_kvargs_process(kvlist,
3714 PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
3715 &bond_ethdev_parse_time_ms_kvarg,
3716 &link_up_delay_ms) < 0) {
3718 "Invalid link up propagation delay value specified for"
3719 " bonded device %s", name);
3723 /* Set the link up propagation delay */
3724 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
3727 "Failed to set link up propagation delay (%u ms) on bonded"
3728 " device %s", link_up_delay_ms, name);
3731 } else if (arg_count > 1) {
3733 "Link up propagation delay can be specified only once for"
3734 " bonded device %s", name);
3738 /* Parse link down interrupt propagation delay */
3739 arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
3740 if (arg_count == 1) {
3741 uint32_t link_down_delay_ms;
3743 if (rte_kvargs_process(kvlist,
3744 PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
3745 &bond_ethdev_parse_time_ms_kvarg,
3746 &link_down_delay_ms) < 0) {
3748 "Invalid link down propagation delay value specified for"
3749 " bonded device %s", name);
3753 /* Set the link down propagation delay */
3754 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
3757 "Failed to set link down propagation delay (%u ms) on bonded device %s",
3758 link_down_delay_ms, name);
3761 } else if (arg_count > 1) {
3763 "Link down propagation delay can be specified only once for bonded device %s",
3771 struct rte_vdev_driver pmd_bond_drv = {
3772 .probe = bond_probe,
3773 .remove = bond_remove,
3776 RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
3777 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
3779 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
3783 "xmit_policy=[l2 | l23 | l34] "
3784 "agg_mode=[count | stable | bandwidth] "
3787 "lsc_poll_period_ms=<int> "
3789 "down_delay=<int>");
3791 /* We can't use RTE_LOG_REGISTER_DEFAULT because of the forced name for
3792 * this library, see meson.build.
3794 RTE_LOG_REGISTER(bond_logtype, pmd.net.bonding, NOTICE);