/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
#define BOND_MAX_MAC_ADDRS 16

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
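/*
 * Return the combined size of any VLAN (and stacked QinQ) headers that
 * follow the Ethernet header, updating *proto to the inner EtherType so
 * callers can locate the L3 header regardless of tagging.
 */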
static inline size_t
get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
		rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
		struct rte_vlan_hdr *vlan_hdr =
			(struct rte_vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct rte_vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct rte_vlan_hdr);
		}
	}
	return vlan_offset;
}
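/*
 * Round-robin RX: poll each active slave in turn, starting from the slave
 * after the one polled on the previous call, until the burst is filled or
 * every slave has been polled once.
 */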
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_total = 0;
	uint16_t slave_count;
	uint16_t active_slave;
	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	internals = bd_rx_q->dev_private;
	slave_count = internals->active_slave_count;
	active_slave = internals->active_slave;

	for (i = 0; i < slave_count && nb_pkts; i++) {
		uint16_t num_rx_slave;

		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave =
			rte_eth_rx_burst(internals->active_slaves[active_slave],
					 bd_rx_q->queue_id,
					 bufs + num_rx_total, nb_pkts);
		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
		if (++active_slave == slave_count)
			active_slave = 0;
	}

	if (++internals->active_slave >= slave_count)
		internals->active_slave = 0;
	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
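/*
 * Identify untagged slow-protocol frames (LACPDUs and marker PDUs) that
 * must be diverted to the mode 4 state machines rather than delivered to
 * the application.
 */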
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
	const uint16_t ether_type_slow_be =
		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);

	return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
		(ethertype == ether_type_slow_be &&
		(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
/*****************************************************************************
 * Flow director's setup for mode 4 optimization
 */

static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
	.dst.addr_bytes = { 0 },
	.src.addr_bytes = { 0 },
	.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};

static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
	.dst.addr_bytes = { 0 },
	.src.addr_bytes = { 0 },
	.type = 0xFFFF,
};

static struct rte_flow_item flow_item_8023ad[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &flow_item_eth_type_8023ad,
		.last = NULL,
		.mask = &flow_item_eth_mask_type_8023ad,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_END,
	}
};

const struct rte_flow_attr flow_attr_8023ad = {
	.ingress = 1,
};
int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
		uint16_t slave_port) {
	struct rte_eth_dev_info slave_info;
	struct rte_flow_error error;
	struct bond_dev_private *internals = bond_dev->data->dev_private;

	const struct rte_flow_action_queue lacp_queue_conf = {
		.index = 0,
	};

	const struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &lacp_queue_conf
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		}
	};

	int ret = rte_flow_validate(slave_port, &flow_attr_8023ad,
			flow_item_8023ad, actions, &error);
	if (ret != 0) {
		RTE_BOND_LOG(ERR, "%s: %s (slave_port=%d queue_id=%d)",
				__func__, error.message, slave_port,
				internals->mode4.dedicated_queues.rx_qid);
		return -1;
	}

	ret = rte_eth_dev_info_get(slave_port, &slave_info);
	if (ret != 0) {
		RTE_BOND_LOG(ERR,
			"%s: Error during getting device (port %u) info: %s\n",
			__func__, slave_port, strerror(-ret));
		return ret;
	}

	if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
			slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
		RTE_BOND_LOG(ERR,
			"%s: Slave %d capabilities don't allow allocating additional queues",
			__func__, slave_port);
		return -1;
	}

	return 0;
}
int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
	struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct rte_eth_dev_info bond_info;
	uint16_t idx;
	int ret;

	/* Verify that all slaves in the bond support flow director */
	if (internals->slave_count > 0) {
		ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
		if (ret != 0) {
			RTE_BOND_LOG(ERR,
				"%s: Error during getting device (port %u) info: %s\n",
				__func__, bond_dev->data->port_id,
				strerror(-ret));
			return ret;
		}

		internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
		internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;

		for (idx = 0; idx < internals->slave_count; idx++) {
			if (bond_ethdev_8023ad_flow_verify(bond_dev,
					internals->slaves[idx].port_id) != 0)
				return -1;
		}
	}

	return 0;
}
int
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {

	struct rte_flow_error error;
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct rte_flow_action_queue lacp_queue_conf = {
		.index = internals->mode4.dedicated_queues.rx_qid,
	};

	const struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
			.conf = &lacp_queue_conf
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		}
	};

	internals->mode4.dedicated_queues.flow[slave_port] = rte_flow_create(slave_port,
			&flow_attr_8023ad, flow_item_8023ad, actions, &error);
	if (internals->mode4.dedicated_queues.flow[slave_port] == NULL) {
		RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_set: %s "
				"(slave_port=%d queue_id=%d)",
				error.message, slave_port,
				internals->mode4.dedicated_queues.rx_qid);
		return -1;
	}

	return 0;
}
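/*
 * A minimal application-side sketch of how the dedicated slow-protocol
 * queues above are typically enabled; illustrative only, assuming a bonded
 * port already created in mode 4 (802.3AD) and currently stopped:
 *
 *	uint16_t bond_port = ...; // existing mode 4 bonded port, stopped
 *
 *	// Reserve one extra RX/TX queue pair per slave and steer LACPDUs
 *	// to it using the rte_flow rule built above.
 *	if (rte_eth_bond_8023ad_dedicated_queues_enable(bond_port) != 0)
 *		...; // fall back to software demultiplexing of slow packets
 *
 *	// Then (re)configure and start the bonded port as usual.
 */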
static inline uint16_t
rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
		bool dedicated_rxq)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct rte_eth_dev *bonded_eth_dev =
			&rte_eth_devices[internals->port_id];
	struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
	struct rte_ether_hdr *hdr;

	const uint16_t ether_type_slow_be =
		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint16_t slaves[RTE_MAX_ETHPORTS];
	uint16_t slave_count, idx;

	uint8_t collecting;  /* current slave collecting status */
	const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
	const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
	uint8_t subtype;
	uint16_t i;
	uint16_t j;
	uint16_t k;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	idx = internals->active_slave;
	if (idx >= slave_count) {
		internals->active_slave = 0;
		idx = 0;
	}
	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
				COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
			subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;

			/* Remove packet from array if:
			 * - it is a slow packet but no dedicated rxq is present,
			 * - the slave is not in collecting state,
			 * - the bonding interface is not in promiscuous mode and
			 *   - the packet is unicast and the address does not match, or
			 *   - the packet is multicast and the bonding interface
			 *     is not in allmulti.
			 */
			if (unlikely(
				(!dedicated_rxq &&
				 is_lacp_packets(hdr->ether_type, subtype,
						bufs[j])) ||
				!collecting ||
				(!promisc &&
				 ((rte_is_unicast_ether_addr(&hdr->d_addr) &&
				   !rte_is_same_ether_addr(bond_mac,
						&hdr->d_addr)) ||
				  (!allmulti &&
				   rte_is_multicast_ether_addr(&hdr->d_addr)))))) {
				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(
						internals, slaves[idx], bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
						(num_rx_total - j));
				}
			} else
				j++;
		}
		if (unlikely(++idx == slave_count))
			idx = 0;
	}

	if (++internals->active_slave >= slave_count)
		internals->active_slave = 0;

	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	return rx_burst_8023ad(queue, bufs, nb_pkts, false);
}

static uint16_t
bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	return rx_burst_8023ad(queue, bufs, nb_pkts, true);
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
	switch (arp_op) {
	case RTE_ARP_OP_REQUEST:
		strlcpy(buf, "ARP Request", buf_len);
		return;
	case RTE_ARP_OP_REPLY:
		strlcpy(buf, "ARP Reply", buf_len);
		return;
	case RTE_ARP_OP_REVREQUEST:
		strlcpy(buf, "Reverse ARP Request", buf_len);
		return;
	case RTE_ARP_OP_REVREPLY:
		strlcpy(buf, "Reverse ARP Reply", buf_len);
		return;
	case RTE_ARP_OP_INVREQUEST:
		strlcpy(buf, "Peer Identify Request", buf_len);
		return;
	case RTE_ARP_OP_INVREPLY:
		strlcpy(buf, "Peer Identify Reply", buf_len);
		return;
	default:
		break;
	}
	strlcpy(buf, "Unknown", buf_len);
}
#endif
#define MaxIPv4String	16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}
#define MAX_CLIENTS_NUMBER	128
uint8_t active_clients;
struct client_stats_t {
	uint16_t port;
	uint32_t ipv4_addr;
	uint32_t ipv4_rx_packets;
	uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
static void
update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
{
	int i = 0;

	for (; i < MAX_CLIENTS_NUMBER; i++) {
		if ((client_stats[i].ipv4_addr == addr) &&
				(client_stats[i].port == port)) {
			/* Just update the RX/TX packet count for this client */
			if (TXorRXindicator == &burstnumberRX)
				client_stats[i].ipv4_rx_packets++;
			else
				client_stats[i].ipv4_tx_packets++;
			return;
		}
	}
	/* We have a new client. Insert it into the table and update the stats */
	if (TXorRXindicator == &burstnumberRX)
		client_stats[active_clients].ipv4_rx_packets++;
	else
		client_stats[active_clients].ipv4_tx_packets++;
	client_stats[active_clients].ipv4_addr = addr;
	client_stats[active_clients].port = port;
	active_clients++;
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
	rte_log(RTE_LOG_DEBUG, bond_logtype,				\
		"%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
		info,							\
		port,							\
		eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
		eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
		eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
		src_ip,							\
		eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
		eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
		eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
		dst_ip,							\
		arp_op, ++burstnumber)
#endif
static void
mode6_debug(const char __attribute__((unused)) *info,
	struct rte_ether_hdr *eth_h, uint16_t port,
	uint32_t __attribute__((unused)) *burstnumber)
{
	struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	struct rte_arp_hdr *arp_h;
	char dst_ip[16];
	char ArpOp[24];
	char buf[16];
#endif
	char src_ip[16];

	uint16_t ether_type = eth_h->ether_type;
	uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	strlcpy(buf, info, 16);
#endif

	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
		update_client_stats(ipv4_h->src_addr, port, burstnumber);
	}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
		arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
		arp_op_name(rte_be_to_cpu_16(arp_h->arp_opcode),
				ArpOp, sizeof(ArpOp));
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
	}
#endif
}
#endif
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;
	struct rte_ether_hdr *eth_h;
	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;
	int i;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
			bond_mode_alb_arp_recv(eth_h, offset, internals);
		}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
	}

	return nb_recv_pkts;
}
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint16_t num_of_slaves;
	uint16_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate the per-slave mbuf arrays with the packets to send on each slave */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
				       &slave_bufs[i][num_tx_slave],
				       tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
static inline uint16_t
ether_hash(struct rte_ether_hdr *eth_hdr)
{
	unaligned_uint16_t *word_src_addr =
		(unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
	unaligned_uint16_t *word_dst_addr =
		(unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}
static inline uint32_t
ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}
static inline uint32_t
ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
	unaligned_uint32_t *word_src_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
	unaligned_uint32_t *word_dst_addr =
		(unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
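/*
 * The three helpers below implement the BALANCE_XMIT_POLICY_LAYER2,
 * LAYER23 and LAYER34 transmit policies: each packet's header fields are
 * folded into a hash and the packet is assigned to slave
 * (hash % slave_count), so a given flow stays on one slave while distinct
 * flows spread across all of them.
 */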
void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
		uint16_t slave_count, uint16_t *slaves)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t hash;
	int i;

	for (i = 0; i < nb_pkts; i++) {
		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);

		hash = ether_hash(eth_hdr);

		slaves[i] = (hash ^= hash >> 8) % slave_count;
	}
}
void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
		uint16_t slave_count, uint16_t *slaves)
{
	uint16_t i;
	struct rte_ether_hdr *eth_hdr;
	uint16_t proto;
	size_t vlan_offset;
	uint32_t hash, l3hash;

	for (i = 0; i < nb_pkts; i++) {
		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
		l3hash = 0;

		proto = eth_hdr->ether_type;
		hash = ether_hash(eth_hdr);

		vlan_offset = get_vlan_offset(eth_hdr, &proto);

		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			l3hash = ipv4_hash(ipv4_hdr);

		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			l3hash = ipv6_hash(ipv6_hdr);
		}

		hash = hash ^ l3hash;
		hash ^= hash >> 16;
		hash ^= hash >> 8;

		slaves[i] = hash % slave_count;
	}
}
void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
		uint16_t slave_count, uint16_t *slaves)
{
	struct rte_ether_hdr *eth_hdr;
	uint16_t proto;
	size_t vlan_offset;
	int i;

	struct rte_udp_hdr *udp_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	uint32_t hash, l3hash, l4hash;

	for (i = 0; i < nb_pkts; i++) {
		eth_hdr = rte_pktmbuf_mtod(buf[i], struct rte_ether_hdr *);
		size_t pkt_end = (size_t)eth_hdr + rte_pktmbuf_data_len(buf[i]);
		proto = eth_hdr->ether_type;
		vlan_offset = get_vlan_offset(eth_hdr, &proto);
		l3hash = 0;
		l4hash = 0;

		if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
			struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			size_t ip_hdr_offset;

			l3hash = ipv4_hash(ipv4_hdr);

			/* there is no L4 header in a fragmented packet */
			if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
								== 0)) {
				ip_hdr_offset = (ipv4_hdr->version_ihl
					& RTE_IPV4_HDR_IHL_MASK) *
					RTE_IPV4_IHL_MULTIPLIER;

				if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
					tcp_hdr = (struct rte_tcp_hdr *)
						((char *)ipv4_hdr +
							ip_hdr_offset);
					if ((size_t)tcp_hdr + sizeof(*tcp_hdr)
							< pkt_end)
						l4hash = HASH_L4_PORTS(tcp_hdr);
				} else if (ipv4_hdr->next_proto_id ==
								IPPROTO_UDP) {
					udp_hdr = (struct rte_udp_hdr *)
						((char *)ipv4_hdr +
							ip_hdr_offset);
					if ((size_t)udp_hdr + sizeof(*udp_hdr)
							< pkt_end)
						l4hash = HASH_L4_PORTS(udp_hdr);
				}
			}
		} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
			struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
					((char *)(eth_hdr + 1) + vlan_offset);
			l3hash = ipv6_hash(ipv6_hdr);

			if (ipv6_hdr->proto == IPPROTO_TCP) {
				tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
				l4hash = HASH_L4_PORTS(tcp_hdr);
			} else if (ipv6_hdr->proto == IPPROTO_UDP) {
				udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
				l4hash = HASH_L4_PORTS(udp_hdr);
			}
		}

		hash = l3hash ^ l4hash;
		hash ^= hash >> 16;
		hash ^= hash >> 8;

		slaves[i] = hash % slave_count;
	}
}
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint16_t slave;
};
void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
	int i;

	for (i = 0; i < internals->active_slave_count; i++) {
		tlb_last_obytets[internals->active_slaves[i]] = 0;
	}
}
static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;

	/* sort in descending order of bandwidth left */
	if (diff != 0)
		return diff > 0 ? 1 : -1;
	if (diff2 != 0)
		return diff2 > 0 ? 1 : -1;
	return 0;
}
static void
bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;

	rte_eth_link_get_nowait(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
	if (link_bwg == 0)
		return;
	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
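/*
 * Example of the arithmetic above: a 10G slave reports link_speed = 10000
 * (Mbps), so link_bwg = 10000 * 1000000 / 8 = 1.25e9 bytes/s before it is
 * scaled to the elapsed update window. The slave's transmitted bytes since
 * the last stats reset (load) are then subtracted, and bandwidth_cmp()
 * sorts the slaves so those with the most bandwidth left head
 * tlb_slaves_order.
 */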
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint16_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint16_t slave_id;
	uint16_t i;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats)
			tlb_last_obytets[slave_id] = slave_stats.obytes;
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint16_t i, j;

	uint16_t num_of_slaves = internals->active_slave_count;
	uint16_t slaves[RTE_MAX_ETHPORTS];

	struct rte_ether_hdr *ether_hdr;
	struct rte_ether_addr primary_slave_addr;
	struct rte_ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	rte_ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j],
					struct rte_ether_hdr *);
			if (rte_is_same_ether_addr(&ether_hdr->s_addr,
					&primary_slave_addr))
				rte_ether_addr_copy(&active_slave_addr,
						&ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
void
bond_tlb_disable(struct bond_dev_private *internals)
{
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
	bond_ethdev_update_tlb_slave_cb(internals);
}
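/*
 * Adaptive load balancing (mode 6) TX: ARP packets are rewritten and
 * steered per client by the ALB logic, periodic ARP updates advertise the
 * current client-to-slave mapping, and all other traffic falls back to the
 * TLB transmit policy.
 */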
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In the worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;
	size_t pkt_size;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;
	uint16_t slave_idx;

	int i, j;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;
		} else {
			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
		}
	}

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_BOND_LOG(ERR,
						     "Failed to allocate ARP packet from pool");
					continue;
				}
				pkt_size = sizeof(struct rte_ether_hdr) +
					sizeof(struct rte_arp_hdr) +
					client_info->vlan_count *
					sizeof(struct rte_vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;
			}
		}
		internals->mode6.ntt = 0;
	}

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];
			}

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			/* Print TX stats including update packets */
			for (j = 0; j < slave_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j],
						struct rte_ether_hdr *);
				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);
			}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			for (j = 0; j < update_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(update_bufs[i][j],
						struct rte_ether_hdr *);
				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
		}

		num_tx_total += num_send;
	}

	return num_tx_total;
}
static inline uint16_t
tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
		uint16_t *slave_port_ids, uint16_t slave_count)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	/* Array to sort mbufs for transmission on each slave into */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
	/* Number of mbufs for transmission on each slave */
	uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
	/* Mapping array generated by hash function to map mbufs to slaves */
	uint16_t bufs_slave_port_idxs[nb_bufs];

	uint16_t slave_tx_count;
	uint16_t total_tx_count = 0, total_tx_fail_count = 0;

	uint16_t i;

	/*
	 * Populate the per-slave mbuf arrays with the packets to be sent on
	 * each slave, selecting the output slave using the hash chosen by the
	 * xmit policy.
	 */
	internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
			bufs_slave_port_idxs);

	for (i = 0; i < nb_bufs; i++) {
		/* Populate slave mbuf arrays with mbufs for that slave. */
		uint16_t slave_idx = bufs_slave_port_idxs[i];

		slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < slave_count; i++) {
		if (slave_nb_bufs[i] == 0)
			continue;

		slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
				bd_tx_q->queue_id, slave_bufs[i],
				slave_nb_bufs[i]);

		total_tx_count += slave_tx_count;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
			int slave_tx_fail_count = slave_nb_bufs[i] -
					slave_tx_count;
			total_tx_fail_count += slave_tx_fail_count;
			memcpy(&bufs[nb_bufs - total_tx_fail_count],
			       &slave_bufs[i][slave_tx_count],
			       slave_tx_fail_count * sizeof(bufs[0]));
		}
	}

	return total_tx_count;
}
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_bufs)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
	uint16_t slave_count;

	if (unlikely(nb_bufs == 0))
		return 0;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting
	 */
	slave_count = internals->active_slave_count;
	if (unlikely(slave_count < 1))
		return 0;

	memcpy(slave_port_ids, internals->active_slaves,
			sizeof(slave_port_ids[0]) * slave_count);
	return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
			slave_count);
}
static inline uint16_t
tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
		bool dedicated_txq)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
	uint16_t slave_count;

	uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
	uint16_t dist_slave_count;

	uint16_t slave_tx_count;

	uint16_t i;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	if (unlikely(slave_count < 1))
		return 0;

	memcpy(slave_port_ids, internals->active_slaves,
			sizeof(slave_port_ids[0]) * slave_count);

	if (dedicated_txq)
		goto skip_tx_ring;

	/* Check for LACP control packets and send if available */
	for (i = 0; i < slave_count; i++) {
		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
		struct rte_mbuf *ctrl_pkt = NULL;

		if (likely(rte_ring_empty(port->tx_ring)))
			continue;

		if (rte_ring_dequeue(port->tx_ring,
				(void **)&ctrl_pkt) != -ENOENT) {
			slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
					bd_tx_q->queue_id, &ctrl_pkt, 1);
			/*
			 * re-enqueue LAG control plane packets to buffering
			 * ring if transmission fails so the packet isn't lost.
			 */
			if (slave_tx_count != 1)
				rte_ring_enqueue(port->tx_ring, ctrl_pkt);
		}
	}

skip_tx_ring:
	if (unlikely(nb_bufs == 0))
		return 0;

	dist_slave_count = 0;
	for (i = 0; i < slave_count; i++) {
		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];

		if (ACTOR_STATE(port, DISTRIBUTING))
			dist_slave_port_ids[dist_slave_count++] =
					slave_port_ids[i];
	}

	if (unlikely(dist_slave_count < 1))
		return 0;

	return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
			dist_slave_count);
}
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_bufs)
{
	return tx_burst_8023ad(queue, bufs, nb_bufs, false);
}

static uint16_t
bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_bufs)
{
	return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint16_t slaves[RTE_MAX_ETHPORTS];
	uint8_t tx_failed_flag = 0;
	uint16_t num_of_slaves;

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
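/*
 * In mode 4 the link properties (speed/duplex/autoneg) of the first slave
 * are recorded and every later slave must match them; in all other modes
 * the bonded device simply reports autonegotiated full duplex.
 */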
static void
link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
{
	struct bond_dev_private *bond_ctx = ethdev->data->dev_private;

	if (bond_ctx->mode == BONDING_MODE_8023AD) {
		/**
		 * If in mode 4 then save the link properties of the first
		 * slave, all subsequent slaves must match these properties
		 */
		struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;

		bond_link->link_autoneg = slave_link->link_autoneg;
		bond_link->link_duplex = slave_link->link_duplex;
		bond_link->link_speed = slave_link->link_speed;
	} else {
		/**
		 * In any other mode the link properties are set to default
		 * values of AUTONEG/DUPLEX
		 */
		ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
		ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	}
}
static int
link_properties_valid(struct rte_eth_dev *ethdev,
		struct rte_eth_link *slave_link)
{
	struct bond_dev_private *bond_ctx = ethdev->data->dev_private;

	if (bond_ctx->mode == BONDING_MODE_8023AD) {
		struct rte_eth_link *bond_link = &bond_ctx->mode4.slave_link;

		if (bond_link->link_duplex != slave_link->link_duplex ||
			bond_link->link_autoneg != slave_link->link_autoneg ||
			bond_link->link_speed != slave_link->link_speed)
			return -1;
	}

	return 0;
}
int
mac_address_get(struct rte_eth_dev *eth_dev,
		struct rte_ether_addr *dst_mac_addr)
{
	struct rte_ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	rte_ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}
int
mac_address_set(struct rte_eth_dev *eth_dev,
		struct rte_ether_addr *new_mac_addr)
{
	struct rte_ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If the new MAC is different to the current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}

static const struct rte_ether_addr null_mac_addr;
/*
 * Add additional MAC addresses to the slave
 */
int
slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
		uint16_t slave_port_id)
{
	int i, ret;
	struct rte_ether_addr *mac_addr;

	for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
		mac_addr = &bonded_eth_dev->data->mac_addrs[i];
		if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
			break;

		ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
		if (ret < 0) {
			/* rollback the addresses added so far */
			for (i--; i > 0; i--)
				rte_eth_dev_mac_addr_remove(slave_port_id,
					&bonded_eth_dev->data->mac_addrs[i]);
			return ret;
		}
	}

	return 0;
}
/*
 * Remove additional MAC addresses from the slave
 */
int
slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
		uint16_t slave_port_id)
{
	int i, rc = 0;
	struct rte_ether_addr *mac_addr;
	int ret;

	for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
		mac_addr = &bonded_eth_dev->data->mac_addrs[i];
		if (rte_is_same_ether_addr(mac_addr, &null_mac_addr))
			break;

		ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
		/* save only the first error */
		if (ret < 0 && rc == 0)
			rc = ret;
	}

	return rc;
}
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++) {
			if (rte_eth_dev_default_mac_addr_set(
					internals->slaves[i].port_id,
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (rte_eth_dev_default_mac_addr_set(
						internals->current_primary_port,
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					return -1;
				}
			} else {
				if (rte_eth_dev_default_mac_addr_set(
						internals->slaves[i].port_id,
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
					return -1;
				}
			}
		}
	}

	return 0;
}
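/*
 * Select the RX/TX burst handlers that implement the requested bonding
 * mode; in mode 4 the "fast queue" variants are used when dedicated
 * slow-protocol queues are enabled.
 */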
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		if (internals->mode4.dedicated_queues.enabled == 0) {
			eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
			eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
			RTE_BOND_LOG(WARNING,
				"Using mode 4, it is necessary to do TX burst "
				"and RX burst at least every 100ms.");
		} else {
			/* Use flow director's optimization */
			eth_dev->rx_pkt_burst =
					bond_ethdev_rx_burst_8023ad_fast_queue;
			eth_dev->tx_pkt_burst =
					bond_ethdev_tx_burst_8023ad_fast_queue;
		}
		break;
	case BONDING_MODE_TLB:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
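/*
 * A minimal sketch of the application-side calls that exercise the mode
 * dispatch above; illustrative only, with error handling elided and the
 * slave port ids assumed to be already probed:
 *
 *	int bond_port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_BALANCE, rte_socket_id());
 *	rte_eth_bond_slave_add(bond_port, slave0_port_id);
 *	rte_eth_bond_slave_add(bond_port, slave1_port_id);
 *	rte_eth_bond_xmit_policy_set(bond_port, BALANCE_XMIT_POLICY_LAYER34);
 *	// then rte_eth_dev_configure(), queue setup and rte_eth_dev_start()
 */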
static int
slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	int errval = 0;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];

	if (port->slow_pool == NULL) {
		char mem_name[256];
		int slave_id = slave_eth_dev->data->port_id;

		snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_slow_pool",
				slave_id);
		port->slow_pool = rte_pktmbuf_pool_create(mem_name, 8191,
			250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			slave_eth_dev->data->numa_node);

		/* Any memory allocation failure in initialization is critical because
		 * resources can't be freed, so reinitialization is impossible. */
		if (port->slow_pool == NULL) {
			rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
				slave_id, mem_name, rte_strerror(rte_errno));
		}
	}

	if (internals->mode4.dedicated_queues.enabled == 1) {
		/* Configure slow Rx queue */
		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.rx_qid, 128,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				NULL, port->slow_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.rx_qid,
				errval);
			return errval;
		}

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.tx_qid, 512,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				NULL);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id,
				internals->mode4.dedicated_queues.tx_qid,
				errval);
			return errval;
		}
	}
	return 0;
}
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;

	int errval;
	uint16_t q_id;
	struct rte_flow_error flow_error;

	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* If RSS is enabled for bonding, try to enable it for slaves */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (internals->rss_key_len != 0) {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
					internals->rss_key_len;
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
					internals->rss_key;
		} else {
			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		}

		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
	}

	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
		slave_eth_dev->data->dev_conf.rxmode.offloads |=
				DEV_RX_OFFLOAD_VLAN_FILTER;
	else
		slave_eth_dev->data->dev_conf.rxmode.offloads &=
				~DEV_RX_OFFLOAD_VLAN_FILTER;

	nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
	nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;

	if (internals->mode == BONDING_MODE_8023AD) {
		if (internals->mode4.dedicated_queues.enabled == 1) {
			nb_rx_queues++;
			nb_tx_queues++;
		}
	}

	errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
				     bonded_eth_dev->data->mtu);
	if (errval != 0 && errval != -ENOTSUP) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			nb_rx_queues, nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	if (internals->mode == BONDING_MODE_8023AD &&
			internals->mode4.dedicated_queues.enabled == 1) {
		if (slave_configure_slow_queue(bonded_eth_dev, slave_eth_dev)
				!= 0)
			return errval;

		if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
				slave_eth_dev->data->port_id) != 0) {
			RTE_BOND_LOG(ERR,
				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id, q_id, errval);
			return -1;
		}

		if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
			rte_flow_destroy(slave_eth_dev->data->port_id,
					internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
					&flow_error);

		bond_ethdev_8023ad_flow_set(bonded_eth_dev,
				slave_eth_dev->data->port_id);
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	/* If RSS is enabled for bonding, synchronize RETA */
	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int i;
		struct bond_dev_private *internals;

		internals = bonded_eth_dev->data->dev_private;

		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
				errval = rte_eth_dev_rss_reta_update(
						slave_eth_dev->data->port_id,
						&internals->reta_conf[0],
						internals->slaves[i].reta_size);
				if (errval != 0)
					RTE_BOND_LOG(WARNING,
						"rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
						" RSS Configuration for bonding may be inconsistent.",
						slave_eth_dev->data->port_id, errval);
				break;
			}
		}
	}

	/* If lsc interrupt is set, check initial slave's link status */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
		bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
			RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
			NULL);
	}

	return 0;
}
static void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	uint16_t i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1)) {
		struct rte_flow *flow;

		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));
		TAILQ_FOREACH(flow, &internals->flow_list, next) {
			memmove(&flow->flows[i], &flow->flows[i + 1],
				sizeof(flow->flows[0]) *
				(internals->slave_count - i - 1));
			flow->flows[internals->slave_count - 1] = NULL;
		}
	}

	internals->slave_count--;

	/* force reconfiguration of slave interfaces */
	_rte_eth_dev_reset(slave_eth_dev);
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
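/*
 * Record a new slave's details, including its original MAC address so it
 * can be restored if the slave is later removed, and flag slaves without
 * LSC interrupt support for link-status polling.
 */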
static void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* Mark slave devices that don't support interrupts so we can
	 * compensate when we start the bond
	 */
	if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;
	}

	slave_details->link_status_wait_to_complete = 0;
	/* clean tlb_last_obytes when adding port for bonding device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct rte_ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint16_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
static int
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (check_for_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		goto out_err;
	}

	if (internals->user_defined_mac == 0) {
		struct rte_ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			goto out_err;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			goto out_err;
		}
	}

	if (internals->mode == BONDING_MODE_8023AD) {
		if (internals->mode4.dedicated_queues.enabled == 1) {
			internals->mode4.dedicated_queues.rx_qid =
					eth_dev->data->nb_rx_queues;
			internals->mode4.dedicated_queues.tx_qid =
					eth_dev->data->nb_tx_queues;
		}
	}

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		struct rte_eth_dev *slave_ethdev =
				&(rte_eth_devices[internals->slaves[i].port_id]);
		if (slave_configure(eth_dev, slave_ethdev) != 0) {
			RTE_BOND_LOG(ERR,
				"bonded port (%d) failed to reconfigure slave device (%d)",
				eth_dev->data->port_id,
				internals->slaves[i].port_id);
			goto out_err;
		}
		/* We will need to poll for link status if any slave doesn't
		 * support interrupts
		 */
		if (internals->slaves[i].link_status_poll_enabled)
			internals->link_status_polling_enabled = 1;
	}

	/* start polling if needed */
	if (internals->link_status_polling_enabled) {
		rte_eal_alarm_set(
			internals->link_status_polling_interval_ms * 1000,
			bond_ethdev_slave_link_status_change_monitor,
			(void *)&rte_eth_devices[internals->port_id]);
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		goto out_err;

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);

	return 0;

out_err:
	eth_dev->data->dev_started = 0;
	return -1;
}
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	if (dev->data->rx_queues != NULL) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rte_free(dev->data->rx_queues[i]);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->nb_rx_queues = 0;
	}

	if (dev->data->tx_queues != NULL) {
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			rte_free(dev->data->tx_queues[i]);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->nb_tx_queues = 0;
	}
}
static void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint16_t i;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->active_slave_count; i++) {
			port = &bond_mode_8023ad_ports[internals->active_slaves[i]];

			RTE_ASSERT(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_ASSERT(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;
	}

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	internals->link_status_polling_enabled = 0;
	for (i = 0; i < internals->slave_count; i++) {
		uint16_t slave_id = internals->slaves[i].port_id;
		if (find_slave_by_id(internals->active_slaves,
				internals->active_slave_count, slave_id) !=
						internals->active_slave_count) {
			internals->slaves[i].last_link_status = 0;
			rte_eth_dev_stop(slave_id);
			deactivate_slave(eth_dev, slave_id);
		}
	}
}
static void
bond_ethdev_close(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	uint16_t bond_port_id = internals->port_id;
	int skipped = 0;
	struct rte_flow_error ferror;

	RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
	while (internals->slave_count != skipped) {
		uint16_t port_id = internals->slaves[skipped].port_id;

		rte_eth_dev_stop(port_id);

		if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
			RTE_BOND_LOG(ERR,
				     "Failed to remove port %d from bonded device %s",
				     port_id, dev->device->name);
			skipped++;
		}
	}
	bond_flow_ops.flush(dev, &ferror);
	bond_ethdev_free_queues(dev);
	rte_bitmap_reset(internals->vlan_filter_bmp);
}
/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
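/*
 * Report device capabilities as the minimum over all slaves, since every
 * slave must be able to support whatever configuration is applied to the
 * bonded device.
 */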
2119 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2121 struct bond_dev_private *internals = dev->data->dev_private;
2122 struct bond_slave_details slave;
2125 uint16_t max_nb_rx_queues = UINT16_MAX;
2126 uint16_t max_nb_tx_queues = UINT16_MAX;
2127 uint16_t max_rx_desc_lim = UINT16_MAX;
2128 uint16_t max_tx_desc_lim = UINT16_MAX;
2130 dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
2132 dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
2133 internals->candidate_max_rx_pktlen :
2134 RTE_ETHER_MAX_JUMBO_FRAME_LEN;
2136 /* Max number of tx/rx queues that the bonded device can support is the
2137 * minimum values of the bonded slaves, as all slaves must be capable
2138 * of supporting the same number of tx/rx queues.
2140 if (internals->slave_count > 0) {
2141 struct rte_eth_dev_info slave_info;
2144 for (idx = 0; idx < internals->slave_count; idx++) {
2145 slave = internals->slaves[idx];
2146 ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
2149 "%s: Error during getting device (port %u) info: %s\n",
2157 if (slave_info.max_rx_queues < max_nb_rx_queues)
2158 max_nb_rx_queues = slave_info.max_rx_queues;
2160 if (slave_info.max_tx_queues < max_nb_tx_queues)
2161 max_nb_tx_queues = slave_info.max_tx_queues;
2163 if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
2164 max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
2166 if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
2167 max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
2171 dev_info->max_rx_queues = max_nb_rx_queues;
2172 dev_info->max_tx_queues = max_nb_tx_queues;
2174 memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
2175 sizeof(dev_info->default_rxconf));
2176 memcpy(&dev_info->default_txconf, &internals->default_txconf,
2177 sizeof(dev_info->default_txconf));
2179 dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
2180 dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
2183 * If dedicated hw queues are enabled for the link bonding device in LACP
2184 * mode then we need to reduce the maximum number of data path queues by 1.
2186 if (internals->mode == BONDING_MODE_8023AD &&
2187 internals->mode4.dedicated_queues.enabled == 1) {
2188 dev_info->max_rx_queues--;
2189 dev_info->max_tx_queues--;
2192 dev_info->min_rx_bufsize = 0;
2194 dev_info->rx_offload_capa = internals->rx_offload_capa;
2195 dev_info->tx_offload_capa = internals->tx_offload_capa;
2196 dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
2197 dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
2198 dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
2200 dev_info->reta_size = internals->reta_size;
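/*
 * Illustrative sketch (not part of the driver): the limits aggregated above
 * are what an application sees when querying the bonded port; "bond_port"
 * is a hypothetical port id.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(bond_port, &info) == 0) {
 *		info.max_rx_queues and info.max_tx_queues now hold the minima
 *		across all slaves (minus one queue each in LACP
 *		dedicated-queue mode)
 *	}
 */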
2206 bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2210 struct bond_dev_private *internals = dev->data->dev_private;
2212 /* don't do this while a slave is being added */
2213 rte_spinlock_lock(&internals->lock);
2216 rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
2218 rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
2220 for (i = 0; i < internals->slave_count; i++) {
2221 uint16_t port_id = internals->slaves[i].port_id;
2223 res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2225 RTE_BOND_LOG(WARNING,
2226 "Setting VLAN filter on slave port %u not supported.",
2230 rte_spinlock_unlock(&internals->lock);
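/*
 * Illustrative sketch (not part of the driver): one VLAN filter call on the
 * bonded port is fanned out to every slave by the loop above; "bond_port"
 * and the VLAN id are hypothetical.
 *
 *	rte_eth_dev_vlan_filter(bond_port, 100, 1);
 */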
2235 bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
2236 uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
2237 const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
2239 struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
2240 rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
2241 0, dev->data->numa_node);
2242 if (bd_rx_q == NULL)
2245 bd_rx_q->queue_id = rx_queue_id;
2246 bd_rx_q->dev_private = dev->data->dev_private;
2248 bd_rx_q->nb_rx_desc = nb_rx_desc;
2250 memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
2251 bd_rx_q->mb_pool = mb_pool;
2253 dev->data->rx_queues[rx_queue_id] = bd_rx_q;
2259 bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
2260 uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
2261 const struct rte_eth_txconf *tx_conf)
2263 struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
2264 rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
2265 0, dev->data->numa_node);
2267 if (bd_tx_q == NULL)
2270 bd_tx_q->queue_id = tx_queue_id;
2271 bd_tx_q->dev_private = dev->data->dev_private;
2273 bd_tx_q->nb_tx_desc = nb_tx_desc;
2274 memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
2276 dev->data->tx_queues[tx_queue_id] = bd_tx_q;
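/*
 * Illustrative sketch (not part of the driver): queue setup on the bonded
 * port only records the parameters in bond_rx_queue/bond_tx_queue; they are
 * applied to the slaves when the bonded device is configured and started.
 * "bond_port" and "pool" are hypothetical.
 *
 *	rte_eth_rx_queue_setup(bond_port, 0, 256, rte_socket_id(), NULL, pool);
 *	rte_eth_tx_queue_setup(bond_port, 0, 256, rte_socket_id(), NULL);
 */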
2282 bond_ethdev_rx_queue_release(void *queue)
2291 bond_ethdev_tx_queue_release(void *queue)
2300 bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
2302 struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
2303 struct bond_dev_private *internals;
2305 /* Default value of "polling slave found" is true, as we don't want to
2306 * disable the polling thread if we cannot get the lock */
2307 int i, polling_slave_found = 1;
2312 bonded_ethdev = cb_arg;
2313 internals = bonded_ethdev->data->dev_private;
2315 if (!bonded_ethdev->data->dev_started ||
2316 !internals->link_status_polling_enabled)
2319 /* If the device is currently being configured then don't check the
2320 * slaves' link status; wait until the next period */
2321 if (rte_spinlock_trylock(&internals->lock)) {
2322 if (internals->slave_count > 0)
2323 polling_slave_found = 0;
2325 for (i = 0; i < internals->slave_count; i++) {
2326 if (!internals->slaves[i].link_status_poll_enabled)
2329 slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
2330 polling_slave_found = 1;
2332 /* Update slave link status */
2333 (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
2334 internals->slaves[i].link_status_wait_to_complete);
2336 /* if the link status has changed since last checked then invoke the
2337 * lsc callback */
2338 if (slave_ethdev->data->dev_link.link_status !=
2339 internals->slaves[i].last_link_status) {
2340 internals->slaves[i].last_link_status =
2341 slave_ethdev->data->dev_link.link_status;
2343 bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
2344 RTE_ETH_EVENT_INTR_LSC,
2345 &bonded_ethdev->data->port_id,
2349 rte_spinlock_unlock(&internals->lock);
2352 if (polling_slave_found)
2353 /* Set alarm to continue monitoring link status of slave ethdevs */
2354 rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
2355 bond_ethdev_slave_link_status_change_monitor, cb_arg);
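/*
 * Illustrative sketch (not part of the driver): the interval of the polling
 * alarm above is configurable through the bonding API; "bond_port" is a
 * hypothetical port id.
 *
 *	rte_eth_bond_link_monitoring_set(bond_port, 100);
 *	(check slave link status every 100 ms instead of the 10 ms default)
 */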
2359 bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
2361 void (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
2363 struct bond_dev_private *bond_ctx;
2364 struct rte_eth_link slave_link;
2368 bond_ctx = ethdev->data->dev_private;
2370 ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2372 if (ethdev->data->dev_started == 0 ||
2373 bond_ctx->active_slave_count == 0) {
2374 ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
2378 ethdev->data->dev_link.link_status = ETH_LINK_UP;
2380 if (wait_to_complete)
2381 link_update = rte_eth_link_get;
2383 link_update = rte_eth_link_get_nowait;
2385 switch (bond_ctx->mode) {
2386 case BONDING_MODE_BROADCAST:
2388 * Setting link speed to UINT32_MAX to ensure we pick up the
2389 * value of the first active slave
2391 ethdev->data->dev_link.link_speed = UINT32_MAX;
2394 * link speed is the minimum value of all the slaves' link speeds, as
2395 * packet loss will occur on a slave if transmission at rates
2396 * greater than its link speed is attempted
2398 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2399 link_update(bond_ctx->active_slaves[idx], &slave_link);
2401 if (slave_link.link_speed <
2402 ethdev->data->dev_link.link_speed)
2403 ethdev->data->dev_link.link_speed =
2404 slave_link.link_speed;
2407 case BONDING_MODE_ACTIVE_BACKUP:
2408 /* Current primary slave */
2409 link_update(bond_ctx->current_primary_port, &slave_link);
2411 ethdev->data->dev_link.link_speed = slave_link.link_speed;
2413 case BONDING_MODE_8023AD:
2414 ethdev->data->dev_link.link_autoneg =
2415 bond_ctx->mode4.slave_link.link_autoneg;
2416 ethdev->data->dev_link.link_duplex =
2417 bond_ctx->mode4.slave_link.link_duplex;
2418 /* fall through to update link speed */
2419 case BONDING_MODE_ROUND_ROBIN:
2420 case BONDING_MODE_BALANCE:
2421 case BONDING_MODE_TLB:
2422 case BONDING_MODE_ALB:
2425 * In these modes the maximum theoretical link speed is the sum of the active slaves' link speeds
2428 ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
2430 for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
2431 link_update(bond_ctx->active_slaves[idx], &slave_link);
2433 ethdev->data->dev_link.link_speed +=
2434 slave_link.link_speed;
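/*
 * Worked example for the aggregation above (illustrative): with two active
 * slaves at 10G and 25G, broadcast mode reports min(10G, 25G) = 10G because
 * every packet must be transmitted on all slaves, while the round-robin,
 * balance, TLB, ALB and 802.3AD modes report the sum, 35G, the theoretical
 * aggregate capacity.
 */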
2444 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2446 struct bond_dev_private *internals = dev->data->dev_private;
2447 struct rte_eth_stats slave_stats;
2450 for (i = 0; i < internals->slave_count; i++) {
2451 rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
2453 stats->ipackets += slave_stats.ipackets;
2454 stats->opackets += slave_stats.opackets;
2455 stats->ibytes += slave_stats.ibytes;
2456 stats->obytes += slave_stats.obytes;
2457 stats->imissed += slave_stats.imissed;
2458 stats->ierrors += slave_stats.ierrors;
2459 stats->oerrors += slave_stats.oerrors;
2460 stats->rx_nombuf += slave_stats.rx_nombuf;
2462 for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
2463 stats->q_ipackets[j] += slave_stats.q_ipackets[j];
2464 stats->q_opackets[j] += slave_stats.q_opackets[j];
2465 stats->q_ibytes[j] += slave_stats.q_ibytes[j];
2466 stats->q_obytes[j] += slave_stats.q_obytes[j];
2467 stats->q_errors[j] += slave_stats.q_errors[j];
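/*
 * Illustrative sketch (not part of the driver): reading statistics on the
 * bonded port returns the per-slave sums computed above; "bond_port" is a
 * hypothetical port id.
 *
 *	struct rte_eth_stats stats;
 *
 *	rte_eth_stats_get(bond_port, &stats);
 *	stats.ipackets is now the total received over all slaves
 */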
2476 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
2478 struct bond_dev_private *internals = dev->data->dev_private;
2481 for (i = 0; i < internals->slave_count; i++)
2482 rte_eth_stats_reset(internals->slaves[i].port_id);
2486 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2488 struct bond_dev_private *internals = eth_dev->data->dev_private;
2493 switch (internals->mode) {
2494 /* Promiscuous mode is propagated to all slaves */
2495 case BONDING_MODE_ROUND_ROBIN:
2496 case BONDING_MODE_BALANCE:
2497 case BONDING_MODE_BROADCAST:
2498 case BONDING_MODE_8023AD:
2499 for (i = 0; i < internals->slave_count; i++) {
2500 port_id = internals->slaves[i].port_id;
2502 ret = rte_eth_promiscuous_enable(port_id);
2505 "Failed to enable promiscuous mode for port %u: %s",
2506 port_id, rte_strerror(-ret));
2509 /* Promiscuous mode is propagated only to primary slave */
2510 case BONDING_MODE_ACTIVE_BACKUP:
2511 case BONDING_MODE_TLB:
2512 case BONDING_MODE_ALB:
2514 /* Do not touch promisc when there cannot be primary ports */
2515 if (internals->slave_count == 0)
2517 port_id = internals->current_primary_port;
2518 ret = rte_eth_promiscuous_enable(port_id);
2521 "Failed to enable promiscuous mode for port %u: %s",
2522 port_id, rte_strerror(-ret));
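/*
 * Illustrative sketch (not part of the driver): a single call on the bonded
 * port fans out according to the mode, as handled above; "bond_port" is a
 * hypothetical port id.
 *
 *	rte_eth_promiscuous_enable(bond_port);
 *	(reaches all slaves in round-robin/balance/broadcast/802.3AD,
 *	 only the current primary in active-backup/TLB/ALB)
 */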
2527 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
2529 struct bond_dev_private *internals = dev->data->dev_private;
2534 switch (internals->mode) {
2535 /* Promiscuous mode is propagated to all slaves */
2536 case BONDING_MODE_ROUND_ROBIN:
2537 case BONDING_MODE_BALANCE:
2538 case BONDING_MODE_BROADCAST:
2539 case BONDING_MODE_8023AD:
2540 for (i = 0; i < internals->slave_count; i++) {
2541 port_id = internals->slaves[i].port_id;
2543 if (internals->mode == BONDING_MODE_8023AD &&
2544 bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2545 BOND_8023AD_FORCED_PROMISC)
2547 ret = rte_eth_promiscuous_disable(port_id);
2550 "Failed to disable promiscuous mode for port %u: %s",
2551 port_id, rte_strerror(-ret));
2554 /* Promiscuous mode is propagated only to primary slave */
2555 case BONDING_MODE_ACTIVE_BACKUP:
2556 case BONDING_MODE_TLB:
2557 case BONDING_MODE_ALB:
2559 /* Do not touch promisc when there cannot be primary ports */
2560 if (internals->slave_count == 0)
2562 port_id = internals->current_primary_port;
2563 ret = rte_eth_promiscuous_disable(port_id);
2566 "Failed to disable promiscuous mode for port %u: %s",
2567 port_id, rte_strerror(-ret));
2572 bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
2574 struct bond_dev_private *internals = eth_dev->data->dev_private;
2577 switch (internals->mode) {
2578 /* allmulti mode is propagated to all slaves */
2579 case BONDING_MODE_ROUND_ROBIN:
2580 case BONDING_MODE_BALANCE:
2581 case BONDING_MODE_BROADCAST:
2582 case BONDING_MODE_8023AD:
2583 for (i = 0; i < internals->slave_count; i++) {
2584 uint16_t port_id = internals->slaves[i].port_id;
2586 rte_eth_allmulticast_enable(port_id);
2589 /* allmulti mode is propagated only to primary slave */
2590 case BONDING_MODE_ACTIVE_BACKUP:
2591 case BONDING_MODE_TLB:
2592 case BONDING_MODE_ALB:
2594 /* Do not touch allmulti when there cannot be primary ports */
2595 if (internals->slave_count == 0)
2597 rte_eth_allmulticast_enable(internals->current_primary_port);
2602 bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
2604 struct bond_dev_private *internals = eth_dev->data->dev_private;
2607 switch (internals->mode) {
2608 /* allmulti mode is propagated to all slaves */
2609 case BONDING_MODE_ROUND_ROBIN:
2610 case BONDING_MODE_BALANCE:
2611 case BONDING_MODE_BROADCAST:
2612 case BONDING_MODE_8023AD:
2613 for (i = 0; i < internals->slave_count; i++) {
2614 uint16_t port_id = internals->slaves[i].port_id;
2616 if (internals->mode == BONDING_MODE_8023AD &&
2617 bond_mode_8023ad_ports[port_id].forced_rx_flags ==
2618 BOND_8023AD_FORCED_ALLMULTI)
2620 rte_eth_allmulticast_disable(port_id);
2623 /* allmulti mode is propagated only to primary slave */
2624 case BONDING_MODE_ACTIVE_BACKUP:
2625 case BONDING_MODE_TLB:
2626 case BONDING_MODE_ALB:
2628 /* Do not touch allmulti when there cannot be primary ports */
2629 if (internals->slave_count == 0)
2631 rte_eth_allmulticast_disable(internals->current_primary_port);
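/*
 * Illustrative sketch (not part of the driver): allmulticast follows the
 * same per-mode fan-out rules as promiscuous mode, e.g. on a hypothetical
 * bonded port id "bond_port":
 *
 *	rte_eth_allmulticast_enable(bond_port);
 */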
2636 bond_ethdev_delayed_lsc_propagation(void *arg)
2641 _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
2642 RTE_ETH_EVENT_INTR_LSC, NULL);
2646 bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
2647 void *param, void *ret_param __rte_unused)
2649 struct rte_eth_dev *bonded_eth_dev;
2650 struct bond_dev_private *internals;
2651 struct rte_eth_link link;
2654 uint8_t lsc_flag = 0;
2655 int valid_slave = 0;
2656 uint16_t active_pos;
2659 if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
2662 bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
2664 if (check_for_bonded_ethdev(bonded_eth_dev))
2667 internals = bonded_eth_dev->data->dev_private;
2669 /* If the device isn't started don't handle interrupts */
2670 if (!bonded_eth_dev->data->dev_started)
2673 /* verify that port_id is a valid slave of bonded port */
2674 for (i = 0; i < internals->slave_count; i++) {
2675 if (internals->slaves[i].port_id == port_id) {
2684 /* Synchronize parallel lsc callback invocations, whether triggered by a
2685 * real link event from the slave PMDs or by the bonding PMD itself.
2687 rte_spinlock_lock(&internals->lsc_lock);
2689 /* Search for port in active port list */
2690 active_pos = find_slave_by_id(internals->active_slaves,
2691 internals->active_slave_count, port_id);
2693 rte_eth_link_get_nowait(port_id, &link);
2694 if (link.link_status) {
2695 if (active_pos < internals->active_slave_count)
2698 /* check link state properties if bonded link is up */
2699 if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
2700 if (link_properties_valid(bonded_eth_dev, &link) != 0)
2701 RTE_BOND_LOG(ERR, "Invalid link properties "
2702 "for slave %d in bonding mode %d",
2703 port_id, internals->mode);
2705 /* inherit slave link properties */
2706 link_properties_set(bonded_eth_dev, &link);
2709 /* If no active slave ports then set this port to be the primary port */
2712 if (internals->active_slave_count < 1) {
2713 /* If first active slave, then change link status */
2714 bonded_eth_dev->data->dev_link.link_status =
2716 internals->current_primary_port = port_id;
2719 mac_address_slaves_update(bonded_eth_dev);
2722 activate_slave(bonded_eth_dev, port_id);
2724 /* If the user has defined the primary port then default to using it */
2727 if (internals->user_defined_primary_port &&
2728 internals->primary_port == port_id)
2729 bond_ethdev_primary_set(internals, port_id);
2731 if (active_pos == internals->active_slave_count)
2734 /* Remove from active slave list */
2735 deactivate_slave(bonded_eth_dev, port_id);
2737 if (internals->active_slave_count < 1)
2740 /* Update primary id: take the first active slave from the list, or if
2741 * none is available fall back to the configured primary port */
2742 if (port_id == internals->current_primary_port) {
2743 if (internals->active_slave_count > 0)
2744 bond_ethdev_primary_set(internals,
2745 internals->active_slaves[0]);
2747 internals->current_primary_port = internals->primary_port;
2753 * Update bonded device link properties after any change to active slaves
2756 bond_ethdev_link_update(bonded_eth_dev, 0);
2759 /* Cancel any possible outstanding interrupts if delays are enabled */
2760 if (internals->link_up_delay_ms > 0 ||
2761 internals->link_down_delay_ms > 0)
2762 rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
2765 if (bonded_eth_dev->data->dev_link.link_status) {
2766 if (internals->link_up_delay_ms > 0)
2767 rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
2768 bond_ethdev_delayed_lsc_propagation,
2769 (void *)bonded_eth_dev);
2771 _rte_eth_dev_callback_process(bonded_eth_dev,
2772 RTE_ETH_EVENT_INTR_LSC,
2776 if (internals->link_down_delay_ms > 0)
2777 rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
2778 bond_ethdev_delayed_lsc_propagation,
2779 (void *)bonded_eth_dev);
2781 _rte_eth_dev_callback_process(bonded_eth_dev,
2782 RTE_ETH_EVENT_INTR_LSC,
2787 rte_spinlock_unlock(&internals->lsc_lock);
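/*
 * Illustrative sketch (not part of the driver): an application can observe
 * the (possibly delayed) LSC events propagated above by registering its own
 * callback on the bonded port; all names below are hypothetical.
 *
 *	static int
 *	app_lsc_cb(uint16_t port_id, enum rte_eth_event_type type,
 *			void *cb_arg, void *ret_param)
 *	{
 *		react to the bonded link going up or down
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(bond_port, RTE_ETH_EVENT_INTR_LSC,
 *			app_lsc_cb, NULL);
 */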
2793 bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
2794 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2798 int slave_reta_size;
2799 unsigned int reta_count;
2800 struct bond_dev_private *internals = dev->data->dev_private;
2802 if (reta_size != internals->reta_size)
2805 /* Copy RETA table */
2806 reta_count = reta_size / RTE_RETA_GROUP_SIZE;
2808 for (i = 0; i < reta_count; i++) {
2809 internals->reta_conf[i].mask = reta_conf[i].mask;
2810 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2811 if ((reta_conf[i].mask >> j) & 0x01)
2812 internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
2815 /* Fill rest of array */
2816 for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
2817 memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
2818 sizeof(internals->reta_conf[0]) * reta_count);
2820 /* Propagate RETA over slaves */
2821 for (i = 0; i < internals->slave_count; i++) {
2822 slave_reta_size = internals->slaves[i].reta_size;
2823 result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
2824 &internals->reta_conf[0], slave_reta_size);
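/*
 * Illustrative sketch (not part of the driver): updating the RETA on the
 * bonded port, which the loop above then pushes to every slave using each
 * slave's own reta_size; "bond_port" is hypothetical and reta_size is
 * assumed to be 128 (two groups of RTE_RETA_GROUP_SIZE entries).
 *
 *	struct rte_eth_rss_reta_entry64 reta[2];
 *	unsigned int k;
 *
 *	for (k = 0; k < 2; k++) {
 *		reta[k].mask = ~0ULL;
 *		spread reta[k].reta[] over the configured rx queues
 *	}
 *	rte_eth_dev_rss_reta_update(bond_port, reta, 128);
 */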
2833 bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
2834 struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
2837 struct bond_dev_private *internals = dev->data->dev_private;
2839 if (reta_size != internals->reta_size)
2842 /* Copy RETA table */
2843 for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
2844 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
2845 if ((reta_conf[i].mask >> j) & 0x01)
2846 reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
2852 bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
2853 struct rte_eth_rss_conf *rss_conf)
2856 struct bond_dev_private *internals = dev->data->dev_private;
2857 struct rte_eth_rss_conf bond_rss_conf;
2859 memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
2861 bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
2863 if (bond_rss_conf.rss_hf != 0)
2864 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
2866 if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
2867 sizeof(internals->rss_key)) {
2868 if (bond_rss_conf.rss_key_len == 0)
2869 bond_rss_conf.rss_key_len = 40; /* default RSS hash key length */
2870 internals->rss_key_len = bond_rss_conf.rss_key_len;
2871 memcpy(internals->rss_key, bond_rss_conf.rss_key,
2872 internals->rss_key_len);
2875 for (i = 0; i < internals->slave_count; i++) {
2876 result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
2886 bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
2887 struct rte_eth_rss_conf *rss_conf)
2889 struct bond_dev_private *internals = dev->data->dev_private;
2891 rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
2892 rss_conf->rss_key_len = internals->rss_key_len;
2893 if (rss_conf->rss_key)
2894 memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
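/*
 * Illustrative sketch (not part of the driver): narrowing the RSS hash
 * functions at runtime; the update path above clips the requested rss_hf
 * to what every slave supports before propagating it. "bond_port" is a
 * hypothetical port id.
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,	(keep the current key)
 *		.rss_hf = ETH_RSS_IP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(bond_port, &conf);
 */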
2900 bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2902 struct rte_eth_dev *slave_eth_dev;
2903 struct bond_dev_private *internals = dev->data->dev_private;
2906 rte_spinlock_lock(&internals->lock);
2908 for (i = 0; i < internals->slave_count; i++) {
2909 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
2910 if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
2911 rte_spinlock_unlock(&internals->lock);
2915 for (i = 0; i < internals->slave_count; i++) {
2916 ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
2918 rte_spinlock_unlock(&internals->lock);
2923 rte_spinlock_unlock(&internals->lock);
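/*
 * Illustrative sketch (not part of the driver): the first loop above only
 * verifies that every slave implements mtu_set before anything is changed;
 * note that a failure in the second loop can still leave slaves with
 * different MTUs. "bond_port" is a hypothetical port id.
 *
 *	if (rte_eth_dev_set_mtu(bond_port, 9000) != 0)
 *		not all slaves accepted the new MTU
 */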
2928 bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
2929 struct rte_ether_addr *addr)
2931 if (mac_address_set(dev, addr)) {
2932 RTE_BOND_LOG(ERR, "Failed to update MAC address");
2940 bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
2941 enum rte_filter_type type, enum rte_filter_op op, void *arg)
2943 if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
2944 *(const void **)arg = &bond_flow_ops;
2951 bond_ethdev_mac_addr_add(struct rte_eth_dev *dev,
2952 struct rte_ether_addr *mac_addr,
2953 __rte_unused uint32_t index, uint32_t vmdq)
2955 struct rte_eth_dev *slave_eth_dev;
2956 struct bond_dev_private *internals = dev->data->dev_private;
2959 rte_spinlock_lock(&internals->lock);
2961 for (i = 0; i < internals->slave_count; i++) {
2962 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
2963 if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
2964 *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
2970 for (i = 0; i < internals->slave_count; i++) {
2971 ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
2975 for (i--; i >= 0; i--)
2976 rte_eth_dev_mac_addr_remove(
2977 internals->slaves[i].port_id, mac_addr);
2984 rte_spinlock_unlock(&internals->lock);
2989 bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2991 struct rte_eth_dev *slave_eth_dev;
2992 struct bond_dev_private *internals = dev->data->dev_private;
2995 rte_spinlock_lock(&internals->lock);
2997 for (i = 0; i < internals->slave_count; i++) {
2998 slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
2999 if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
3003 struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
3005 for (i = 0; i < internals->slave_count; i++)
3006 rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
3010 rte_spinlock_unlock(&internals->lock);
3013 const struct eth_dev_ops default_dev_ops = {
3014 .dev_start = bond_ethdev_start,
3015 .dev_stop = bond_ethdev_stop,
3016 .dev_close = bond_ethdev_close,
3017 .dev_configure = bond_ethdev_configure,
3018 .dev_infos_get = bond_ethdev_info,
3019 .vlan_filter_set = bond_ethdev_vlan_filter_set,
3020 .rx_queue_setup = bond_ethdev_rx_queue_setup,
3021 .tx_queue_setup = bond_ethdev_tx_queue_setup,
3022 .rx_queue_release = bond_ethdev_rx_queue_release,
3023 .tx_queue_release = bond_ethdev_tx_queue_release,
3024 .link_update = bond_ethdev_link_update,
3025 .stats_get = bond_ethdev_stats_get,
3026 .stats_reset = bond_ethdev_stats_reset,
3027 .promiscuous_enable = bond_ethdev_promiscuous_enable,
3028 .promiscuous_disable = bond_ethdev_promiscuous_disable,
3029 .allmulticast_enable = bond_ethdev_allmulticast_enable,
3030 .allmulticast_disable = bond_ethdev_allmulticast_disable,
3031 .reta_update = bond_ethdev_rss_reta_update,
3032 .reta_query = bond_ethdev_rss_reta_query,
3033 .rss_hash_update = bond_ethdev_rss_hash_update,
3034 .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get,
3035 .mtu_set = bond_ethdev_mtu_set,
3036 .mac_addr_set = bond_ethdev_mac_address_set,
3037 .mac_addr_add = bond_ethdev_mac_addr_add,
3038 .mac_addr_remove = bond_ethdev_mac_addr_remove,
3039 .filter_ctrl = bond_filter_ctrl
3043 bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
3045 const char *name = rte_vdev_device_name(dev);
3046 uint8_t socket_id = dev->device.numa_node;
3047 struct bond_dev_private *internals = NULL;
3048 struct rte_eth_dev *eth_dev = NULL;
3049 uint32_t vlan_filter_bmp_size;
3051 /* now do all data allocation - for eth_dev structure, dummy pci driver
3052 * and internal (private) data
3055 /* reserve an ethdev entry */
3056 eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
3057 if (eth_dev == NULL) {
3058 RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
3062 internals = eth_dev->data->dev_private;
3063 eth_dev->data->nb_rx_queues = (uint16_t)1;
3064 eth_dev->data->nb_tx_queues = (uint16_t)1;
3066 /* Allocate memory for storing MAC addresses */
3067 eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
3068 BOND_MAX_MAC_ADDRS, 0, socket_id);
3069 if (eth_dev->data->mac_addrs == NULL) {
3071 "Failed to allocate %u bytes needed to store MAC addresses",
3072 RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
3076 eth_dev->dev_ops = &default_dev_ops;
3077 eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
3079 rte_spinlock_init(&internals->lock);
3080 rte_spinlock_init(&internals->lsc_lock);
3082 internals->port_id = eth_dev->data->port_id;
3083 internals->mode = BONDING_MODE_INVALID;
3084 internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
3085 internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
3086 internals->burst_xmit_hash = burst_xmit_l2_hash;
3087 internals->user_defined_mac = 0;
3089 internals->link_status_polling_enabled = 0;
3091 internals->link_status_polling_interval_ms =
3092 DEFAULT_POLLING_INTERVAL_10_MS;
3093 internals->link_down_delay_ms = 0;
3094 internals->link_up_delay_ms = 0;
3096 internals->slave_count = 0;
3097 internals->active_slave_count = 0;
3098 internals->rx_offload_capa = 0;
3099 internals->tx_offload_capa = 0;
3100 internals->rx_queue_offload_capa = 0;
3101 internals->tx_queue_offload_capa = 0;
3102 internals->candidate_max_rx_pktlen = 0;
3103 internals->max_rx_pktlen = 0;
3105 /* Initially allow any RSS offload type to be chosen */
3106 internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
3108 memset(&internals->default_rxconf, 0,
3109 sizeof(internals->default_rxconf));
3110 memset(&internals->default_txconf, 0,
3111 sizeof(internals->default_txconf));
3113 memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
3114 memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
3116 memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
3117 memset(internals->slaves, 0, sizeof(internals->slaves));
3119 TAILQ_INIT(&internals->flow_list);
3120 internals->flow_isolated_valid = 0;
3122 /* Set mode 4 default configuration */
3123 bond_mode_8023ad_setup(eth_dev, NULL);
3124 if (bond_ethdev_mode_set(eth_dev, mode)) {
3125 RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
3126 eth_dev->data->port_id, mode);
3130 vlan_filter_bmp_size =
3131 rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
3132 internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
3133 RTE_CACHE_LINE_SIZE);
3134 if (internals->vlan_filter_bmpmem == NULL) {
3136 "Failed to allocate vlan bitmap for bonded device %u",
3137 eth_dev->data->port_id);
3141 internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
3142 internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
3143 if (internals->vlan_filter_bmp == NULL) {
3145 "Failed to init vlan bitmap for bonded device %u",
3146 eth_dev->data->port_id);
3147 rte_free(internals->vlan_filter_bmpmem);
3151 return eth_dev->data->port_id;
3154 rte_free(internals);
3155 if (eth_dev != NULL)
3156 eth_dev->data->dev_private = NULL;
3157 rte_eth_dev_release_port(eth_dev);
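/*
 * Illustrative sketch (not part of the driver): bond_alloc() is also
 * reached, via the vdev probe path, when a bonded device is created
 * programmatically; the function names below are the public bonding API,
 * the slave port ids are hypothetical.
 *
 *	int bond_port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 *
 *	rte_eth_bond_slave_add(bond_port, slave_port_0);
 *	rte_eth_bond_slave_add(bond_port, slave_port_1);
 */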
3162 bond_probe(struct rte_vdev_device *dev)
3165 struct bond_dev_private *internals;
3166 struct rte_kvargs *kvlist;
3167 uint8_t bonding_mode, socket_id, agg_mode;
3168 int arg_count, port_id;
3170 struct rte_eth_dev *eth_dev;
3175 name = rte_vdev_device_name(dev);
3176 RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
3178 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
3179 eth_dev = rte_eth_dev_attach_secondary(name);
3181 RTE_BOND_LOG(ERR, "Failed to probe %s", name);
3184 /* TODO: request info from primary to set up Rx and Tx */
3185 eth_dev->dev_ops = &default_dev_ops;
3186 eth_dev->device = &dev->device;
3187 rte_eth_dev_probing_finish(eth_dev);
3191 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
3192 pmd_bond_init_valid_arguments);
3196 /* Parse link bonding mode */
3197 if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
3198 if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
3199 &bond_ethdev_parse_slave_mode_kvarg,
3200 &bonding_mode) != 0) {
3201 RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
3206 RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded "
3211 /* Parse socket id to create bonding device on */
3212 arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
3213 if (arg_count == 1) {
3214 if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
3215 &bond_ethdev_parse_socket_id_kvarg, &socket_id)
3217 RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
3218 "bonded device %s", name);
3221 } else if (arg_count > 1) {
3222 RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
3223 "bonded device %s", name);
3226 socket_id = rte_socket_id();
3229 dev->device.numa_node = socket_id;
3231 /* Create link bonding eth device */
3232 port_id = bond_alloc(dev, bonding_mode);
3234 RTE_BOND_LOG(ERR, "Failed to create bonded device %s in mode %u on "
3235 "socket %u.", name, bonding_mode, socket_id);
3238 internals = rte_eth_devices[port_id].data->dev_private;
3239 internals->kvlist = kvlist;
3241 if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3242 if (rte_kvargs_process(kvlist,
3243 PMD_BOND_AGG_MODE_KVARG,
3244 &bond_ethdev_parse_slave_agg_mode_kvarg,
3247 "Failed to parse agg selection mode for bonded device %s",
3252 if (internals->mode == BONDING_MODE_8023AD)
3253 internals->mode4.agg_selection = agg_mode;
3254 } else {
3255 internals->mode4.agg_selection = AGG_STABLE;
3258 rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
3259 RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
3260 "socket %u.", name, port_id, bonding_mode, socket_id);
3264 rte_kvargs_free(kvlist);
3270 bond_remove(struct rte_vdev_device *dev)
3272 struct rte_eth_dev *eth_dev;
3273 struct bond_dev_private *internals;
3279 name = rte_vdev_device_name(dev);
3280 RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
3282 /* now free all data allocation - for eth_dev structure,
3283 * dummy pci driver and internal (private) data
3286 /* find an ethdev entry */
3287 eth_dev = rte_eth_dev_allocated(name);
3288 if (eth_dev == NULL)
3291 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3292 return rte_eth_dev_release_port(eth_dev);
3294 RTE_ASSERT(eth_dev->device == &dev->device);
3296 internals = eth_dev->data->dev_private;
3297 if (internals->slave_count != 0)
3300 if (eth_dev->data->dev_started == 1) {
3301 bond_ethdev_stop(eth_dev);
3302 bond_ethdev_close(eth_dev);
3305 eth_dev->dev_ops = NULL;
3306 eth_dev->rx_pkt_burst = NULL;
3307 eth_dev->tx_pkt_burst = NULL;
3309 internals = eth_dev->data->dev_private;
3310 /* Try to release the mempool used in mode 6. If the bonded device
3311 * is not in mode 6, the pointer is NULL, so freeing it is harmless.
3312 */
3313 rte_mempool_free(internals->mode6.mempool);
3314 rte_bitmap_free(internals->vlan_filter_bmp);
3315 rte_free(internals->vlan_filter_bmpmem);
3317 rte_eth_dev_release_port(eth_dev);
3322 /* this part resolves the slave port ids after all the other pdevs and
3323 * vdevs have been allocated */
3325 bond_ethdev_configure(struct rte_eth_dev *dev)
3327 const char *name = dev->device->name;
3328 struct bond_dev_private *internals = dev->data->dev_private;
3329 struct rte_kvargs *kvlist = internals->kvlist;
3331 uint16_t port_id = dev - rte_eth_devices;
3334 static const uint8_t default_rss_key[40] = {
3335 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
3336 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3337 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
3338 0xBE, 0xAC, 0x01, 0xFA
3344 * If RSS is enabled, fill table with default values and
3345 * set key to the value specified in the port RSS configuration.
3346 * Fall back to default RSS key if the key is not specified
3348 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
3349 if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
3350 internals->rss_key_len =
3351 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
3352 memcpy(internals->rss_key,
3353 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
3354 internals->rss_key_len);
3356 internals->rss_key_len = sizeof(default_rss_key);
3357 memcpy(internals->rss_key, default_rss_key,
3358 internals->rss_key_len);
3361 for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
3362 internals->reta_conf[i].mask = ~0LL;
3363 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
3364 internals->reta_conf[i].reta[j] =
3365 (i * RTE_RETA_GROUP_SIZE + j) %
3366 dev->data->nb_rx_queues;
3370 /* set the max_rx_pktlen */
3371 internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
3374 * if no kvlist, it means that this bonded device has been created
3375 * through the bonding API.
3380 /* Parse MAC address for bonded device */
3381 arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
3382 if (arg_count == 1) {
3383 struct rte_ether_addr bond_mac;
3385 if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
3386 &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
3387 RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
3392 /* Set MAC address */
3393 if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
3395 "Failed to set mac address on bonded device %s",
3399 } else if (arg_count > 1) {
3401 "MAC address can be specified only once for bonded device %s",
3406 /* Parse/set balance mode transmit policy */
3407 arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
3408 if (arg_count == 1) {
3409 uint8_t xmit_policy;
3411 if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
3412 &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
3415 "Invalid xmit policy specified for bonded device %s",
3420 /* Set balance mode transmit policy */
3421 if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
3423 "Failed to set balance xmit policy on bonded device %s",
3427 } else if (arg_count > 1) {
3429 "Transmit policy can be specified only once for bonded device %s",
3434 if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
3435 if (rte_kvargs_process(kvlist,
3436 PMD_BOND_AGG_MODE_KVARG,
3437 &bond_ethdev_parse_slave_agg_mode_kvarg,
3440 "Failed to parse agg selection mode for bonded device %s",
3443 if (internals->mode == BONDING_MODE_8023AD) {
3444 int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
3448 "Invalid args for agg selection set for bonded device %s",
3455 /* Parse/add slave ports to bonded device */
3456 if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
3457 struct bond_ethdev_slave_ports slave_ports;
3460 memset(&slave_ports, 0, sizeof(slave_ports));
3462 if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
3463 &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
3465 "Failed to parse slave ports for bonded device %s",
3470 for (i = 0; i < slave_ports.slave_count; i++) {
3471 if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
3473 "Failed to add port %d as slave to bonded device %s",
3474 slave_ports.slaves[i], name);
3479 RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
3483 /* Parse/set primary slave port id*/
3484 arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
3485 if (arg_count == 1) {
3486 uint16_t primary_slave_port_id;
3488 if (rte_kvargs_process(kvlist,
3489 PMD_BOND_PRIMARY_SLAVE_KVARG,
3490 &bond_ethdev_parse_primary_slave_port_id_kvarg,
3491 &primary_slave_port_id) < 0) {
3493 "Invalid primary slave port id specified for bonded device %s",
3498 /* Set primary slave port id */
3499 if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
3502 "Failed to set primary slave port %d on bonded device %s",
3503 primary_slave_port_id, name);
3506 } else if (arg_count > 1) {
3508 "Primary slave can be specified only once for bonded device %s",
3513 /* Parse link status monitor polling interval */
3514 arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
3515 if (arg_count == 1) {
3516 uint32_t lsc_poll_interval_ms;
3518 if (rte_kvargs_process(kvlist,
3519 PMD_BOND_LSC_POLL_PERIOD_KVARG,
3520 &bond_ethdev_parse_time_ms_kvarg,
3521 &lsc_poll_interval_ms) < 0) {
3523 "Invalid lsc polling interval value specified for bonded"
3524 " device %s", name);
3528 if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
3531 "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
3532 lsc_poll_interval_ms, name);
3535 } else if (arg_count > 1) {
3537 "LSC polling interval can be specified only once for bonded"
3538 " device %s", name);
3542 /* Parse link up interrupt propagation delay */
3543 arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
3544 if (arg_count == 1) {
3545 uint32_t link_up_delay_ms;
3547 if (rte_kvargs_process(kvlist,
3548 PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
3549 &bond_ethdev_parse_time_ms_kvarg,
3550 &link_up_delay_ms) < 0) {
3552 "Invalid link up propagation delay value specified for"
3553 " bonded device %s", name);
3557 /* Set link up propagation delay */
3558 if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
3561 "Failed to set link up propagation delay (%u ms) on bonded"
3562 " device %s", link_up_delay_ms, name);
3565 } else if (arg_count > 1) {
3567 "Link up propagation delay can be specified only once for"
3568 " bonded device %s", name);
3572 /* Parse link down interrupt propagation delay */
3573 arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
3574 if (arg_count == 1) {
3575 uint32_t link_down_delay_ms;
3577 if (rte_kvargs_process(kvlist,
3578 PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
3579 &bond_ethdev_parse_time_ms_kvarg,
3580 &link_down_delay_ms) < 0) {
3582 "Invalid link down propagation delay value specified for"
3583 " bonded device %s", name);
3587 /* Set link down propagation delay */
3588 if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
3591 "Failed to set link down propagation delay (%u ms) on bonded device %s",
3592 link_down_delay_ms, name);
3595 } else if (arg_count > 1) {
3597 "Link down propagation delay can be specified only once for bonded device %s",
3605 struct rte_vdev_driver pmd_bond_drv = {
3606 .probe = bond_probe,
3607 .remove = bond_remove,
3610 RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
3611 RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
3613 RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
3617 "xmit_policy=[l2 | l23 | l34] "
3618 "agg_mode=[count | stable | bandwidth] "
3621 "lsc_poll_period_ms=<int> "
3623 "down_delay=<int>");
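/*
 * Illustrative sketch (not part of the driver): the kvargs registered above
 * map to a vdev argument string on the EAL command line, e.g. (interface
 * PCI addresses hypothetical):
 *
 *	--vdev 'net_bonding0,mode=2,slave=0000:04:00.0,slave=0000:04:00.1,
 *		xmit_policy=l34,socket_id=0'
 */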
3627 RTE_INIT(bond_init_log)
3629 bond_logtype = rte_log_register("pmd.net.bond");
3630 if (bond_logtype >= 0)
3631 rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);