/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
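/* XOR of the L4 ports is symmetric, so both directions of a flow hash to
 * the same value and therefore to the same slave. */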
#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
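/* Return the total size of the VLAN headers (up to two, for QinQ) that
 * follow the Ethernet header, advancing *proto to the encapsulated
 * EtherType as it goes. */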
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}
	return vlan_offset;
}
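/* Round-robin RX: poll each active slave in turn until the requested burst
 * is filled or every slave has been polled once. */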
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
	}

	return num_rx_total;
}

static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
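/* Mode 4 (802.3ad) RX: besides polling the slaves, LACP slow protocol
 * frames are handed to the mode 4 state machines, and frames received on a
 * slave that is not collecting (or not addressed to the bond, outside
 * promiscuous mode) are dropped. */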
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct ether_addr bond_mac;

	struct ether_hdr *hdr;

	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint8_t slaves[RTE_MAX_ETHPORTS];
	uint8_t slave_count;

	uint8_t collecting;  /* current slave collecting status */
	const uint8_t promisc = internals->promiscuous_en;
	uint8_t i, j, k;

	rte_eth_macaddr_get(internals->port_id, &bond_mac);
	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			/* Remove the packet from the array if it is a slow packet, or the
			 * slave is not in collecting state, or the bonding interface is
			 * not in promiscuous mode and the packet address does not match. */
			if (unlikely(hdr->ether_type == ether_type_slow_be ||
				!collecting || (!promisc &&
					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
							bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
							(num_rx_total - j));
				}
			} else
				j++;
		}
	}

	return num_rx_total;
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf)
{
	switch (arp_op) {
	case ARP_OP_REQUEST:
		snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
		return;
	case ARP_OP_REPLY:
		snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
		return;
	case ARP_OP_REVREQUEST:
		snprintf(buf, sizeof("Reverse ARP Request"), "%s",
				"Reverse ARP Request");
		return;
	case ARP_OP_REVREPLY:
		snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
				"Reverse ARP Reply");
		return;
	case ARP_OP_INVREQUEST:
		snprintf(buf, sizeof("Peer Identify Request"), "%s",
				"Peer Identify Request");
		return;
	case ARP_OP_INVREPLY:
		snprintf(buf, sizeof("Peer Identify Reply"), "%s",
				"Peer Identify Reply");
		return;
	default:
		break;
	}

	snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
	return;
}
#endif

#define MaxIPv4String	16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}
#define MAX_CLIENTS_NUMBER	128
uint8_t active_clients;
struct client_stats_t {
	uint8_t port;
	uint32_t ipv4_addr;
	uint32_t ipv4_rx_packets;
	uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];

static void
update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
{
	int i = 0;

	for (; i < MAX_CLIENTS_NUMBER; i++) {
		if ((client_stats[i].ipv4_addr == addr) &&
				(client_stats[i].port == port)) {
			/* Just update the RX/TX packet count for this client */
			if (TXorRXindicator == &burstnumberRX)
				client_stats[i].ipv4_rx_packets++;
			else
				client_stats[i].ipv4_tx_packets++;
			return;
		}
	}
	/* We have a new client. Insert it into the table and update the stats */
	if (TXorRXindicator == &burstnumberRX)
		client_stats[active_clients].ipv4_rx_packets++;
	else
		client_stats[active_clients].ipv4_tx_packets++;
	client_stats[active_clients].ipv4_addr = addr;
	client_stats[active_clients].port = port;
	active_clients++;
}

void print_client_stats(void);
void print_client_stats(void)
{
	int i = 0;
	char buf[MaxIPv4String];

	for (; i < active_clients; i++) {
		ipv4_addr_to_dot(client_stats[i].ipv4_addr, buf, MaxIPv4String);
		printf("port:%d client:%s RX:%d TX:%d\n", client_stats[i].port, buf,
				client_stats[i].ipv4_rx_packets,
				client_stats[i].ipv4_tx_packets);
	}
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
	RTE_LOG(DEBUG, PMD, \
		"%s " \
		"port:%d " \
		"SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"SrcIP:%s " \
		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"DstIP:%s " \
		"%s " \
		"%d\n", \
		info, \
		port, \
		eth_h->s_addr.addr_bytes[0], \
		eth_h->s_addr.addr_bytes[1], \
		eth_h->s_addr.addr_bytes[2], \
		eth_h->s_addr.addr_bytes[3], \
		eth_h->s_addr.addr_bytes[4], \
		eth_h->s_addr.addr_bytes[5], \
		src_ip, \
		eth_h->d_addr.addr_bytes[0], \
		eth_h->d_addr.addr_bytes[1], \
		eth_h->d_addr.addr_bytes[2], \
		eth_h->d_addr.addr_bytes[3], \
		eth_h->d_addr.addr_bytes[4], \
		eth_h->d_addr.addr_bytes[5], \
		dst_ip, \
		arp_op, \
		++burstnumber)
#endif
static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
		uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
{
	struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	struct arp_hdr *arp_h;
	char dst_ip[16];
	char ArpOp[24];
	char buf[16];
#endif
	char src_ip[16];

	uint16_t ether_type = eth_h->ether_type;
	uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	snprintf(buf, 16, "%s", info);
#endif

	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
		update_client_stats(ipv4_h->src_addr, port, burstnumber);
	}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
		arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
		arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
	}
#endif
}
#endif
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;
	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;
	int i;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
			bond_mode_alb_arp_recv(eth_h, offset, internals);
		}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
		else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
	}

	return nb_recv_pkts;
}
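/* Round-robin TX: spread the burst across all active slaves, starting on
 * the slave after the one used by the previous call. */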
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate the per-slave mbuf arrays with the packets to be sent on each
	 * slave */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}

static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
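/* Flow-hash helpers for the balance and 802.3ad transmit policies. Each
 * xmit_*_hash() below folds its hash modulo the slave count, so all packets
 * of one flow are consistently mapped to the same slave. */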
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
	uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
	uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
static uint32_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

	uint32_t hash = ether_hash(eth_hdr);

	return (hash ^= hash >> 8) % slave_count;
}

static uint32_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
	uint32_t hash, l3hash = 0;

	hash = ether_hash(eth_hdr);

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv4_hash(ipv4_hdr);

	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);
	}

	hash = hash ^ l3hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}

static uint32_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	struct udp_hdr *udp_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	uint32_t hash, l3hash = 0, l4hash = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		size_t ip_hdr_offset;

		l3hash = ipv4_hash(ipv4_hdr);

		ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
				IPV4_IHL_MULTIPLIER;

		if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

		if (ipv6_hdr->proto == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	}

	hash = l3hash ^ l4hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint8_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals) {
	int i;

	for (i = 0; i < internals->active_slave_count; i++) {
		tlb_last_obytets[internals->active_slaves[i]] = 0;
	}
}
static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;

	if (diff > 0)
		return 1;
	else if (diff < 0)
		return -1;
	else if (diff2 > 0)
		return 1;
	else if (diff2 < 0)
		return -1;
	else
		return 0;
}
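/* Estimate the transmit bandwidth still unused on a slave in the current
 * reorder window, given the bytes it sent since the last update; the
 * integer part and remainder are kept separately so the comparator above
 * stays exact. */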
static void
bandwidth_left(int port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;

	rte_eth_link_get(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
	if (link_bwg == 0)
		return;
	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}

static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint8_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint8_t i, slave_id;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats) {
			tlb_last_obytets[slave_id] = slave_stats.obytes;
		}
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
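/* Mode 5 (TLB) TX: walk the slaves in the bandwidth order computed by the
 * alarm callback above; packets carrying the primary's source MAC are
 * rewritten to the transmitting slave's MAC so peers learn that slave. */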
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint8_t i, j;

	uint8_t num_of_slaves = internals->active_slave_count;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	struct ether_hdr *ether_hdr;
	struct ether_addr primary_slave_addr;
	struct ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}

void
bond_tlb_disable(struct bond_dev_private *internals)
{
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
	bond_ethdev_update_tlb_slave_cb(internals);
}
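/* Mode 6 (ALB) TX: ARP packets are steered through the ALB client table
 * (with their source MAC rewritten to the chosen slave), pending ARP update
 * packets are generated when the table is marked dirty, and all other
 * traffic falls back to the TLB policy. */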
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In the worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;
	size_t pkt_size;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;
	uint8_t slave_idx;

	int i, j;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;
		} else {
			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
		}
	}

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
					continue;
				}
				pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
						+ client_info->vlan_count * sizeof(struct vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;
			}
		}
		internals->mode6.ntt = 0;
	}

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];
			}

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			/* Print TX stats including update packets */
			for (j = 0; j < slave_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);
			}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			for (j = 0; j < update_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
		}

		num_tx_total += num_send;
		num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate the per-slave mbuf arrays with the packets to be sent on each
	 * slave */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += slave_tx_fail_count;
				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						slave_tx_fail_count * sizeof(bufs[0]));
			}

			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
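/* Mode 4 (802.3ad) TX: LACPDUs queued by the mode 4 state machines are
 * drained from each slave's tx_ring and sent first; data packets are then
 * hashed only across the slaves currently in the DISTRIBUTING state. */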
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];
	/* positions in slaves, not ID */
	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
	uint8_t distributing_count;

	uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
	uint16_t i, j, op_slave_idx;
	const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

	/* Allocate space for additional slow (LACP) packets in 802.3ad mode. */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
	void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

	/* Total amount of packets in slave_bufs */
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
	/* Slow packets placed in each slave */
	uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

	distributing_count = 0;
	for (i = 0; i < num_of_slaves; i++) {
		struct port *port = &mode_8023ad_ports[slaves[i]];

		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
		slave_nb_pkts[i] = slave_slow_nb_pkts[i];

		for (j = 0; j < slave_slow_nb_pkts[i]; j++)
			slave_bufs[i][j] = slow_pkts[j];

		if (ACTOR_STATE(port, DISTRIBUTING))
			distributing_offsets[distributing_count++] = i;
	}

	if (likely(distributing_count > 0)) {
		/* Populate the per-slave mbuf arrays with the packets to be sent */
		for (i = 0; i < nb_pkts; i++) {
			/* Select output slave using hash based on xmit policy */
			op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

			/* Populate slave mbuf arrays with mbufs for that slave. Use only
			 * slaves that are currently distributing. */
			uint8_t slave_offset = distributing_offsets[op_slave_idx];
			slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
			slave_nb_pkts[slave_offset]++;
		}
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] == 0)
			continue;

		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				slave_bufs[i], slave_nb_pkts[i]);

		/* If tx burst fails drop slow packets */
		for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
			rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

		num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
		num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
			uint16_t j = nb_pkts - num_tx_fail_total;
			for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
				bufs[j] = slave_bufs[i][num_tx_slave];
		}
	}

	return num_tx_total;
}
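/* Broadcast TX: every packet is sent on every active slave, so each mbuf's
 * reference count is bumped once per additional slave before transmission. */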
#ifdef RTE_MBUF_REFCNT
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t tx_failed_flag = 0, num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
#endif
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
		bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;
		internals->link_props_set = 1;
	}
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));
	internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
		bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}

int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;
	ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If the new MAC differs from the current MAC then update it */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
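/* Push MAC addresses down to the slaves: in round-robin, balance and
 * broadcast modes every slave carries the bond's MAC; in active-backup,
 * TLB and ALB only the primary does, while the other slaves keep the MAC
 * they had when they were added. */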
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	case BONDING_MODE_ALB:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (mac_address_set(&rte_eth_devices[internals->primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					return -1;
				}
			} else {
				if (mac_address_set(
						&rte_eth_devices[internals->slaves[i].port_id],
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
					return -1;
				}
			}
		}
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#endif
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
		RTE_LOG(WARNING, PMD,
				"When using mode 4, the application must invoke the TX and "
				"RX burst functions at least once every 100ms.\n");
		break;
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
static int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	int errval, q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	return 0;
}
static void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	int i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1))
		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));

	internals->slave_count--;
}

static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);

void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* If the slave device doesn't support interrupts then we need to enable
	 * polling to monitor link status */
	if (!(slave_eth_dev->pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;

		if (!internals->link_status_polling_enabled) {
			internals->link_status_polling_enabled = 1;

			rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
					bond_ethdev_slave_link_status_change_monitor,
					(void *)&rte_eth_devices[internals->port_id]);
		}
	}

	slave_details->link_status_wait_to_complete = 0;
	/* clean tlb_last_obytes when adding port for bonding device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for the new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);

static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth devs will be started by the bonded device */
	if (valid_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		return -1;
	}

	if (internals->user_defined_mac == 0) {
		struct ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			return -1;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			return -1;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If the bonded device is configured in promiscuous mode then re-apply
	 * the configuration */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev,
				&(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
			RTE_BOND_LOG(ERR,
					"bonded port (%d) failed to reconfigure slave device (%d)",
					eth_dev->data->port_id, internals->slaves[i].port_id);
			return -1;
		}
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);

	return 0;
}
static void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t i;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->slave_count; i++) {
			port = &mode_8023ad_ports[internals->slaves[i].port_id];

			RTE_VERIFY(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_VERIFY(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;
	}

	internals->active_slave_count = 0;
	internals->link_status_polling_enabled = 0;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
}

static void
bond_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{
}

/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	dev_info->driver_name = driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = dev->pci_dev;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
}

static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->pci_dev->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}

static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->pci_dev->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdevs */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
		internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = 0;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == 1) {
				link_up = 1;
				break;
			}
		}
		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;

	int i;

	/* clear bonded stats before populating from slaves */
	memset(stats, 0, sizeof(*stats));

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->imcasts += slave_stats.imcasts;
		stats->rx_nombuf += slave_stats.rx_nombuf;
		stats->fdirmatch += slave_stats.fdirmatch;
		stats->fdirmiss += slave_stats.fdirmiss;
		stats->tx_pause_xon += slave_stats.tx_pause_xon;
		stats->rx_pause_xon += slave_stats.rx_pause_xon;
		stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
		stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
	}
}

static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i].port_id);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
		break;
	/* In mode 4 promiscuous mode is managed when a slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
	}
}

static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
		break;
	/* In mode 4 promiscuous mode is managed when a slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
	}
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC);
}
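/* Slave link status change handler: (de)activates the slave, elects a new
 * primary when needed, and propagates the bonded device's own LSC event,
 * optionally deferred by the configured up/down delays. */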
void
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0;
	uint8_t active_pos;
	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (valid_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos < internals->active_slave_count)
			return;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = 1;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);

			/* Inherit eth dev link properties from first active slave */
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			return;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = 0;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or if none
		 * available set to -1 */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		}
	}
}
struct eth_dev_ops default_dev_ops = {
		.dev_start = bond_ethdev_start,
		.dev_stop = bond_ethdev_stop,
		.dev_close = bond_ethdev_close,
		.dev_configure = bond_ethdev_configure,
		.dev_infos_get = bond_ethdev_info,
		.rx_queue_setup = bond_ethdev_rx_queue_setup,
		.tx_queue_setup = bond_ethdev_tx_queue_setup,
		.rx_queue_release = bond_ethdev_rx_queue_release,
		.tx_queue_release = bond_ethdev_tx_queue_release,
		.link_update = bond_ethdev_link_update,
		.stats_get = bond_ethdev_stats_get,
		.stats_reset = bond_ethdev_stats_reset,
		.promiscuous_enable = bond_ethdev_promiscuous_enable,
		.promiscuous_disable = bond_ethdev_promiscuous_disable
};
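/*
 * Parse the EAL --vdev arguments and create the bonded device. Illustrative
 * invocation only (the kvarg names are those accepted by this PMD's parser;
 * the port ids and policy are assumptions for the example):
 *
 *   --vdev 'eth_bond0,mode=2,slave=0,slave=1,xmit_policy=l34'
 */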
static int
bond_init(const char *name, const char *params)
{
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
					name);
			goto parse_error;
		}
	} else {
		RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
				"device %s\n", name);
		goto parse_error;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
					"bonded device %s\n", name);
			goto parse_error;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
				"bonded device %s\n", name);
		goto parse_error;
	} else {
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
				"socket %u.\n", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL, "Created bonded device %s on port %d in mode %u on "
			"socket %u.\n", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);
	return -1;
}
/* this part will resolve the slave portids after all the other pdev and vdev
 * have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count, port_id = dev - rte_eth_devices;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
						0) {
			RTE_LOG(INFO, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;
		unsigned i;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}

		/* Set primary slave port */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}

		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}
static struct rte_driver bond_drv = {
	.name = "eth_bond",
	.type = PMD_BDEV,
	.init = bond_init,
};

PMD_REGISTER_DRIVER(bond_drv);