/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
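
/*
 * Burst RX used by round-robin and balance modes: poll each active slave in
 * turn, appending received packets to bufs until the burst is full or every
 * slave has been polled once.
 */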
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);

		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
	}

	return num_rx_total;
}
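
/* Active-backup RX: receive only from the current primary slave. */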
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
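
/*
 * Mode 4 (802.3ad) RX: receive from each collecting slave and filter out
 * slow protocol (LACP/marker) frames, which are handed to the mode 4 state
 * machines rather than returned to the application.
 */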
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct ether_addr bond_mac;

	struct ether_hdr *hdr;

	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint8_t slaves[RTE_MAX_ETHPORTS];
	uint8_t slave_count;

	uint8_t collecting;  /* current slave collecting status */
	const uint8_t promisc = internals->promiscuous_en;
	uint8_t i;
	uint16_t j, k;

	rte_eth_macaddr_get(internals->port_id, &bond_mac);
	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			/* Remove packet from array if it is a slow packet or the slave
			 * is not in collecting state, or the bonding interface is not in
			 * promiscuous mode and the packet address does not match. */
			if (unlikely(hdr->ether_type == ether_type_slow_be ||
				!collecting || (!promisc &&
					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
							bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
							(num_rx_total - j));
				}
			} else
				j++;
		}
	}

	return num_rx_total;
}
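
/*
 * Round-robin TX: spread the burst evenly over the active slaves, resuming
 * after the slave used by the previous call; packets that a slave fails to
 * send are compacted to the tail of bufs for the caller to retry.
 */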
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate the slave mbuf arrays with the packets to be sent on each slave */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						tx_fail_slave * sizeof(bufs[0]));
			}

			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
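
/* Active-backup TX: transmit the whole burst on the current primary slave. */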
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
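
/*
 * XOR-fold hash helpers over the Ethernet, IPv4 and IPv6 headers, used by
 * the transmit policies below to select an output slave.
 */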
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
	uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}
static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
}
static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
	uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
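
/* Return the total size of the VLAN tag(s), if any, following the Ethernet
 * header. */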
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr)
{
	size_t vlan_offset = 0;

	/* Calculate VLAN offset */
	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == eth_hdr->ether_type) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
		vlan_offset = sizeof(struct vlan_hdr);

		while (rte_cpu_to_be_16(ETHER_TYPE_VLAN) ==
				vlan_hdr->eth_proto) {
			vlan_hdr = vlan_hdr + 1;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}

	return vlan_offset;
}
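
/* Transmit policies: select an output slave from L2, L2+L3 or L3+L4 fields. */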
static uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

	uint32_t hash = ether_hash(eth_hdr);

	return (hash ^= hash >> 8) % slave_count;
}
static uint16_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	size_t vlan_offset = get_vlan_offset(eth_hdr);
	uint32_t hash, l3hash = 0;

	hash = ether_hash(eth_hdr);

	if (buf->ol_flags & PKT_RX_IPV4_HDR) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv4_hash(ipv4_hdr);

	} else if (buf->ol_flags & PKT_RX_IPV6_HDR) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);
	}

	hash = hash ^ l3hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
static uint16_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	size_t vlan_offset = get_vlan_offset(eth_hdr);
	struct udp_hdr *udp_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	uint32_t hash, l3hash = 0, l4hash = 0;

	if (buf->ol_flags & PKT_RX_IPV4_HDR) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		size_t ip_hdr_offset;

		l3hash = ipv4_hash(ipv4_hdr);

		ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
				IPV4_IHL_MULTIPLIER;

		if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	} else if (buf->ol_flags & PKT_RX_IPV6_HDR) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

		if (ipv6_hdr->proto == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	}

	hash = l3hash ^ l4hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
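
/*
 * Per-slave bandwidth accounting for mode 5 (TLB): bwg_left_int and
 * bwg_left_remainder hold the integer and remainder parts of the bandwidth
 * estimated to be left on the slave, used to sort slaves by load.
 */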
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint8_t slave;
};

static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;

	if (diff != 0)
		return diff > 0 ? 1 : -1;
	if (diff2 != 0)
		return diff2 > 0 ? 1 : -1;
	return 0;
}
static void
bandwidth_left(int port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;

	rte_eth_link_get(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;

	if (link_bwg == 0)
		return;

	link_bwg = (link_bwg * (update_idx+1) * REORDER_PERIOD_MS);
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
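
/*
 * Periodic alarm callback for mode 5 (TLB): sample each active slave's TX
 * byte count, estimate its remaining bandwidth and re-order active_slaves so
 * the least loaded slave is used first by the next TX burst.
 */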
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint8_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint8_t i, slave_id;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats)
			tlb_last_obytets[slave_id] = slave_stats.obytes;
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->active_slaves[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
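
/*
 * Mode 5 (TLB) TX: rewrite the source MAC of outgoing packets to the current
 * slave's address, spilling the remainder of the burst onto the next slave
 * when a slave cannot accept more packets.
 */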
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint8_t i;
	uint16_t j;

	uint8_t num_of_slaves = internals->active_slave_count;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	struct ether_hdr *ether_hdr;
	struct ether_addr primary_slave_addr;
	struct ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		ether_addr_copy(&internals->slaves[slaves[i]].persisted_mac_addr,
				&active_slave_addr);

		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
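
/*
 * Balance mode TX: the configured xmit_hash policy picks an output slave per
 * packet; packets that fail to send are moved to the end of bufs so the
 * caller can retry them.
 */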
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate the slave mbuf arrays with the packets to be sent on each slave */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += slave_tx_fail_count;
				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						slave_tx_fail_count * sizeof(bufs[0]));
			}

			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
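
/*
 * Mode 4 (802.3ad) TX: LACP control frames queued by the state machines are
 * always sent first on each slave; data packets are hashed only across
 * slaves currently in the DISTRIBUTING state.
 */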
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];
	/* positions in slaves, not ID */
	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
	uint8_t distributing_count;

	uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
	uint16_t i, j, op_slave_idx;
	const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

	/* Allocate additional packets in case 8023AD mode. */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
	void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

	/* Total amount of packets in slave_bufs */
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
	/* Slow packets placed in each slave */
	uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

	distributing_count = 0;
	for (i = 0; i < num_of_slaves; i++) {
		struct port *port = &mode_8023ad_ports[slaves[i]];

		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
		slave_nb_pkts[i] = slave_slow_nb_pkts[i];

		for (j = 0; j < slave_slow_nb_pkts[i]; j++)
			slave_bufs[i][j] = slow_pkts[j];

		if (ACTOR_STATE(port, DISTRIBUTING))
			distributing_offsets[distributing_count++] = i;
	}

	if (likely(distributing_count > 0)) {
		/* Populate the slave mbuf arrays with the packets to be sent on them */
		for (i = 0; i < nb_pkts; i++) {
			/* Select output slave using hash based on xmit policy */
			op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

			/* Populate slave mbuf arrays with mbufs for that slave. Use only
			 * slaves that are currently distributing. */
			uint8_t slave_offset = distributing_offsets[op_slave_idx];
			slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
			slave_nb_pkts[slave_offset]++;
		}
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] == 0)
			continue;

		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				slave_bufs[i], slave_nb_pkts[i]);

		/* If tx burst fails drop slow packets */
		for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
			rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

		num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
		num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
			uint16_t j = nb_pkts - num_tx_fail_total;
			for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
				bufs[j] = slave_bufs[i][num_tx_slave];
		}
	}

	return num_tx_total;
}
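
/*
 * Broadcast TX (requires mbuf refcounting): transmit every packet on every
 * active slave, bumping each mbuf's reference count by num_of_slaves - 1
 * beforehand and freeing the references of the less successful slaves on
 * failure.
 */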
#ifdef RTE_MBUF_REFCNT
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t tx_failed_flag = 0, num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
#endif
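
/* Inherit the bonded device's link speed/duplex from the given slave's link. */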
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
		bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;
	}
}
void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;
}
int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
		bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}
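
/* NULL-checked helpers to read and write a port's primary MAC address. */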
int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}
int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different to current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
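
/*
 * Re-program slave MACs for the current mode: round-robin/balance/broadcast
 * give every slave the bonded MAC, mode 4 delegates to its own handler, and
 * active-backup/TLB set it only on the primary while the other slaves keep
 * their persisted addresses.
 */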
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (mac_address_set(&rte_eth_devices[internals->primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					return -1;
				}
			} else {
				if (mac_address_set(
						&rte_eth_devices[internals->slaves[i].port_id],
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
					return -1;
				}
			}
		}
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#endif
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
		RTE_BOND_LOG(WARNING,
				"Using mode 4, it is necessary to do TX burst and RX burst "
				"at least every 100ms.");
		break;
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
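
/* Stop, reconfigure and restart a slave so it mirrors the bonded device's
 * queue setup. */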
static int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	int errval;
	uint16_t q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	return 0;
}
static void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	int i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1))
		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));

	internals->slave_count--;
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
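
/* Record a new slave's details; enable link-status polling for slaves whose
 * drivers cannot raise LSC interrupts. */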
static void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* If slave device doesn't support interrupts then we need to enable
	 * polling to monitor link status */
	if (!(slave_eth_dev->pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;

		if (!internals->link_status_polling_enabled) {
			internals->link_status_polling_enabled = 1;

			rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
					bond_ethdev_slave_link_status_change_monitor,
					(void *)&rte_eth_devices[internals->port_id]);
		}
	}

	slave_details->link_status_wait_to_complete = 0;
	/* clean tlb_last_obytets when adding port for bonding device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (valid_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		return -1;
	}

	if (internals->user_defined_mac == 0) {
		struct ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			return -1;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			return -1;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev,
				&(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
			RTE_BOND_LOG(ERR,
					"bonded port (%d) failed to reconfigure slave device (%d)",
					eth_dev->data->port_id, internals->slaves[i].port_id);
			return -1;
		}
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING)
		bond_ethdev_update_tlb_slave_cb(internals);

	return 0;
}
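
/* Stop the bonded device: drain the mode 4 rings, cancel the TLB alarm and
 * mark all slaves inactive. */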
static void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t i;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->slave_count; i++) {
			port = &mode_8023ad_ports[internals->slaves[i].port_id];

			RTE_VERIFY(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_VERIFY(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING) {
		rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
	}

	internals->active_slave_count = 0;
	internals->link_status_polling_enabled = 0;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
}
static void
bond_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{
}
/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	dev_info->driver_name = driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = dev->pci_dev;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
}
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->pci_dev->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}
static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->pci_dev->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}
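
/*
 * Alarm callback that polls link status of slaves that cannot generate LSC
 * interrupts, forwarding any change to bond_ethdev_lsc_event_callback and
 * re-arming itself while at least one polled slave remains.
 */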
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdev's */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
		internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = 0;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == 1) {
				link_up = 1;
				break;
			}
		}

		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;
	int i;

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->imcasts += slave_stats.imcasts;
		stats->rx_nombuf += slave_stats.rx_nombuf;
		stats->fdirmatch += slave_stats.fdirmatch;
		stats->fdirmiss += slave_stats.fdirmiss;
		stats->tx_pause_xon += slave_stats.tx_pause_xon;
		stats->rx_pause_xon += slave_stats.rx_pause_xon;
		stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
		stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
	}
}
static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i].port_id);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
		break;
	/* In mode4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
	}
}
static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
		break;
	/* In mode4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
	}
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC);
}
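
/*
 * LSC handler for slave ports: (de)activate the slave on the bonded device,
 * elect a new primary when needed, and propagate the bonded link state to
 * the application, optionally after the configured up/down delays.
 */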
void
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0;
	uint8_t active_pos;
	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (valid_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos < internals->active_slave_count)
			return;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = 1;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);

			/* Inherit eth dev link properties from first active slave */
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			return;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = 0;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or if none
		 * available set to -1 */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		}
	}
}
struct eth_dev_ops default_dev_ops = {
		.dev_start = bond_ethdev_start,
		.dev_stop = bond_ethdev_stop,
		.dev_close = bond_ethdev_close,
		.dev_configure = bond_ethdev_configure,
		.dev_infos_get = bond_ethdev_info,
		.rx_queue_setup = bond_ethdev_rx_queue_setup,
		.tx_queue_setup = bond_ethdev_tx_queue_setup,
		.rx_queue_release = bond_ethdev_rx_queue_release,
		.tx_queue_release = bond_ethdev_tx_queue_release,
		.link_update = bond_ethdev_link_update,
		.stats_get = bond_ethdev_stats_get,
		.stats_reset = bond_ethdev_stats_reset,
		.promiscuous_enable = bond_ethdev_promiscuous_enable,
		.promiscuous_disable = bond_ethdev_promiscuous_disable
};
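
/* Entry point of the vdev driver: parse mode and socket id arguments and
 * create the bonded device. */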
static int
bond_init(const char *name, const char *params)
{
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", name);
			return -1;
		}
	} else {
		RTE_LOG(ERR, EAL,
				"Mode must be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) {
			RTE_LOG(ERR, EAL,
					"Invalid socket Id specified for bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Socket Id can be specified only once for bonded device %s\n",
				name);
		return -1;
	} else {
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL,
				"Failed to create bonded device %s in mode %u on socket %u.\n",
				name, bonding_mode, socket_id);
		return -1;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL,
			"Create bonded device %s on port %d in mode %u on socket %u.\n",
			name, port_id, bonding_mode, socket_id);
	return 0;
}
/* this part will resolve the slave portids after all the other pdev and vdev
 * have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count, port_id = dev - rte_eth_devices;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;
	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}
	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
						0) {
			RTE_LOG(INFO, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}
	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;
		unsigned i;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}
	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}

		/* Set primary slave port */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}
	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}

		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}
	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}
	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}
static struct rte_driver bond_drv = {
	.name = "eth_bond",
	.type = PMD_VDEV,
	.init = bond_init,
};

PMD_REGISTER_DRIVER(bond_drv);