/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);

		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
	}

	return num_rx_total;
}
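
/*
 * The burst functions in this file are not called directly by applications;
 * they are installed as eth_dev->rx_pkt_burst / eth_dev->tx_pkt_burst in
 * bond_ethdev_mode_set() and reached through the generic ethdev API. A
 * minimal usage sketch (bonded_port_id and mempool setup assumed, not shown):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(bonded_port_id, 0, pkts, 32);
 *	// n packets gathered across all active slaves, round-robin style
 */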
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct ether_addr bond_mac;

	struct ether_hdr *hdr;

	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint8_t slaves[RTE_MAX_ETHPORTS];
	uint8_t slave_count;

	uint8_t collecting;	/* current slave collecting status */
	const uint8_t promisc = internals->promiscuous_en;
	uint8_t i, j, k;

	rte_eth_macaddr_get(internals->port_id, &bond_mac);
	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			/* Remove packet from array if it is a slow packet, or the slave
			 * is not in collecting state, or the bonding interface is not in
			 * promiscuous mode and the packet address does not match. */
			if (unlikely(hdr->ether_type == ether_type_slow_be ||
				!collecting || (!promisc &&
					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
							bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
							(num_rx_total - j));
				}
			} else
				j++;
		}
	}

	return num_rx_total;
}
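
/*
 * Illustrative trace of the compaction above (values assumed): if a slave's
 * burst returns { data, LACPDU, data }, the LACPDU at index 1 is handed to
 * bond_mode_8023ad_handle_slow_pkt(), bufs[2] is shifted down to index 1 by
 * the memmove(), and num_rx_total drops from 3 to 2 before j advances, so the
 * caller only ever sees data packets.
 */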
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate each slave's mbuf array with the packets to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
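
/*
 * Worked example (assumed values): with num_of_slaves = 3, slave_idx = 0 and
 * nb_pkts = 8, packets 0..7 land on slaves 0,1,2,0,1,2,0,1, i.e. 3/3/2, and
 * the next call starts from the slave after the last one used. Packets a
 * slave fails to accept are compacted to the tail of bufs[], so the caller
 * can retry or free exactly nb_pkts - num_tx_total mbufs.
 */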
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
	uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
	uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;

			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}
	return vlan_offset;
}
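
/*
 * Example (assumed frame layouts): for an untagged frame the function leaves
 * *proto untouched and returns 0; for a single-tagged frame it returns
 * sizeof(struct vlan_hdr) == 4 with *proto set to the inner EtherType; for a
 * double-tagged (QinQ) frame it returns 8. Callers add the returned offset to
 * (eth_hdr + 1) to locate the L3 header.
 */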
static uint32_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

	uint32_t hash = ether_hash(eth_hdr);

	return (hash ^= hash >> 8) % slave_count;
}
static uint32_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
	uint32_t hash, l3hash = 0;

	hash = ether_hash(eth_hdr);

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv4_hash(ipv4_hdr);

	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);
	}

	hash = hash ^ l3hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
static uint32_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	struct udp_hdr *udp_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	uint32_t hash, l3hash = 0, l4hash = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		size_t ip_hdr_offset;

		l3hash = ipv4_hash(ipv4_hdr);

		ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
				IPV4_IHL_MULTIPLIER;

		if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

		if (ipv6_hdr->proto == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	}

	hash = l3hash ^ l4hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
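
/*
 * Which of the three xmit_l*_hash() helpers ends up as internals->xmit_hash
 * is selected elsewhere by the balance transmit policy. A sketch, assuming
 * the BALANCE_XMIT_POLICY_* values from rte_eth_bond.h:
 *
 *	case BALANCE_XMIT_POLICY_LAYER2:  internals->xmit_hash = xmit_l2_hash;
 *	case BALANCE_XMIT_POLICY_LAYER23: internals->xmit_hash = xmit_l23_hash;
 *	case BALANCE_XMIT_POLICY_LAYER34: internals->xmit_hash = xmit_l34_hash;
 */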
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint8_t slave;
};

static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;

	if (diff > 0)
		return 1;
	else if (diff < 0)
		return -1;
	else if (diff2 > 0)
		return 1;
	else if (diff2 < 0)
		return -1;
	else
		return 0;
}
static void
bandwidth_left(int port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;

	rte_eth_link_get(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
	if (link_bwg == 0)
		return;
	link_bwg = (link_bwg * (update_idx+1) * REORDER_PERIOD_MS);
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
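
/*
 * Worked example (assumed numbers): for a 10 Gbps slave, link_speed is
 * 10000 (Mbps), so link_bwg starts at 10000 * 1000000 / 8 = 1.25e9 bytes/s
 * and is then scaled by the number of elapsed update periods. The quotient
 * and remainder of (link_bwg - 1000*load) / link_bwg form a two-part sort key
 * for bandwidth_cmp(), so slaves with more headroom sort first.
 */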
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint8_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint8_t i, slave_id;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats)
			tlb_last_obytets[slave_id] = slave_stats.obytes;
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->active_slaves[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint8_t i, j;

	uint8_t num_of_slaves = internals->active_slave_count;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	struct ether_hdr *ether_hdr;
	struct ether_addr primary_slave_addr;
	struct ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		ether_addr_copy(&internals->slaves[slaves[i]].persisted_mac_addr,
				&active_slave_addr);

		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
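
/*
 * Note on the source MAC rewrite above: in TLB mode each slave keeps its own
 * (persisted) MAC address, so frames that still carry the bonded device's
 * primary MAC are rewritten to the transmitting slave's address before the
 * burst. This keeps the peer's MAC learning tables pointing at the slave that
 * actually sent the traffic.
 */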
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate each slave's mbuf array with the packets to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += slave_tx_fail_count;
				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						slave_tx_fail_count * sizeof(bufs[0]));
			}

			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];
	/* positions in slaves, not ID */
	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
	uint8_t distributing_count;

	uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
	uint16_t i, j, op_slave_idx;
	const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

	/* Allocate additional packets in case 8023AD mode. */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
	void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

	/* Total amount of packets in slave_bufs */
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
	/* Slow packets placed in each slave */
	uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

	distributing_count = 0;
	for (i = 0; i < num_of_slaves; i++) {
		struct port *port = &mode_8023ad_ports[slaves[i]];

		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
		slave_nb_pkts[i] = slave_slow_nb_pkts[i];

		for (j = 0; j < slave_slow_nb_pkts[i]; j++)
			slave_bufs[i][j] = slow_pkts[j];

		if (ACTOR_STATE(port, DISTRIBUTING))
			distributing_offsets[distributing_count++] = i;
	}

	if (likely(distributing_count > 0)) {
		/* Populate each slave's mbuf array with the packets to be sent on it */
		for (i = 0; i < nb_pkts; i++) {
			/* Select output slave using hash based on xmit policy */
			op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

			/* Populate slave mbuf arrays with mbufs for that slave. Use only
			 * slaves that are currently distributing. */
			uint8_t slave_offset = distributing_offsets[op_slave_idx];
			slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
			slave_nb_pkts[slave_offset]++;
		}
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] == 0)
			continue;

		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				slave_bufs[i], slave_nb_pkts[i]);

		/* If tx burst fails drop slow packets */
		for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
			rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

		num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
		num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
			uint16_t j = nb_pkts - num_tx_fail_total;

			for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
				bufs[j] = slave_bufs[i][num_tx_slave];
		}
	}

	return num_tx_total;
}
#ifdef RTE_MBUF_REFCNT
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t tx_failed_flag = 0, num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
#endif
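
/*
 * Refcount bookkeeping sketch (assumed burst of 1 mbuf, 3 slaves): the mbuf's
 * refcnt is bumped by num_of_slaves - 1 = 2 up front, and each successful
 * rte_eth_tx_burst() consumes one reference. If a slave rejects the packet,
 * the cleanup loop above frees that slave's outstanding reference, so the
 * caller only ever accounts for the most successful slave's count.
 */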
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
		bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;
	}
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
		bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}
int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}

int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different from the current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (mac_address_set(&rte_eth_devices[internals->primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					return -1;
				}
			} else {
				if (mac_address_set(
						&rte_eth_devices[internals->slaves[i].port_id],
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
					return -1;
				}
			}
		}
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#endif
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
		RTE_BOND_LOG(WARNING,
				"Using mode 4, it is necessary to do TX burst and RX burst "
				"at least every 100ms.");
		break;
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	int errval, q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	return 0;
}
void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	int i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1))
		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));

	internals->slave_count--;
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);

void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* If slave device doesn't support interrupts then we need to enable
	 * polling to monitor link status */
	if (!(slave_eth_dev->pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;

		if (!internals->link_status_polling_enabled) {
			internals->link_status_polling_enabled = 1;

			rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
					bond_ethdev_slave_link_status_change_monitor,
					(void *)&rte_eth_devices[internals->port_id]);
		}
	}

	slave_details->link_status_wait_to_complete = 0;
	/* clean tlb_last_obytes when adding port for bonding device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);

static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (valid_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		return -1;
	}

	if (internals->user_defined_mac == 0) {
		struct ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			return -1;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			return -1;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev,
				&(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
			RTE_BOND_LOG(ERR,
					"bonded port (%d) failed to reconfigure slave device (%d)",
					eth_dev->data->port_id, internals->slaves[i].port_id);
			return -1;
		}
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING)
		bond_ethdev_update_tlb_slave_cb(internals);

	return 0;
}
static void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t i;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->slave_count; i++) {
			port = &mode_8023ad_ports[internals->slaves[i].port_id];

			RTE_VERIFY(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_VERIFY(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING) {
		rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
	}

	internals->active_slave_count = 0;
	internals->link_status_polling_enabled = 0;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
}
static void
bond_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{
}

/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);

static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	dev_info->driver_name = driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = dev->pci_dev;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
}
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->pci_dev->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}

static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->pci_dev->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}

static void
bond_ethdev_rx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdev's */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
		internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = 0;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == 1) {
				link_up = 1;
				break;
			}
		}
		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;

	int i;

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->imcasts += slave_stats.imcasts;
		stats->rx_nombuf += slave_stats.rx_nombuf;
		stats->fdirmatch += slave_stats.fdirmatch;
		stats->fdirmiss += slave_stats.fdirmiss;
		stats->tx_pause_xon += slave_stats.tx_pause_xon;
		stats->rx_pause_xon += slave_stats.rx_pause_xon;
		stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
		stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
	}
}

static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i].port_id);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
		break;
	/* In mode 4, promiscuous mode is managed when a slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
	}
}

static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
		break;
	/* In mode 4, promiscuous mode is managed when a slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
	}
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC);
}

void
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0;
	uint8_t active_pos;
	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (valid_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos < internals->active_slave_count)
			return;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = 1;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);

			/* Inherit eth dev link properties from first active slave */
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			return;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = 0;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or if none
		 * available set to -1 */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		}
	}
}
struct eth_dev_ops default_dev_ops = {
		.dev_start = bond_ethdev_start,
		.dev_stop = bond_ethdev_stop,
		.dev_close = bond_ethdev_close,
		.dev_configure = bond_ethdev_configure,
		.dev_infos_get = bond_ethdev_info,
		.rx_queue_setup = bond_ethdev_rx_queue_setup,
		.tx_queue_setup = bond_ethdev_tx_queue_setup,
		.rx_queue_release = bond_ethdev_rx_queue_release,
		.tx_queue_release = bond_ethdev_tx_queue_release,
		.link_update = bond_ethdev_link_update,
		.stats_get = bond_ethdev_stats_get,
		.stats_reset = bond_ethdev_stats_reset,
		.promiscuous_enable = bond_ethdev_promiscuous_enable,
		.promiscuous_disable = bond_ethdev_promiscuous_disable
};
static int
bond_init(const char *name, const char *params)
{
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", name);
			return -1;
		}
	} else {
		RTE_LOG(ERR, EAL,
				"Mode must be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) {
			RTE_LOG(ERR, EAL,
					"Invalid socket Id specified for bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Socket Id can be specified only once for bonded device %s\n",
				name);
		return -1;
	} else {
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL,
				"Failed to create bonded device %s in mode %u on socket %u.\n",
				name, bonding_mode, socket_id);
		return -1;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL,
			"Create bonded device %s on port %d in mode %u on socket %u.\n",
			name, port_id, bonding_mode, socket_id);
	return 0;
}
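
/*
 * A device taking this init path is typically requested on the EAL command
 * line (a sketch; kvarg names taken from the PMD_BOND_*_KVARG keys parsed
 * here and in bond_ethdev_configure() below, slave addresses assumed):
 *
 *	--vdev 'eth_bond0,mode=2,slave=0000:0a:00.01,slave=0000:04:00.00,
 *		xmit_policy=l34,socket_id=0'
 */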
/* this part will resolve the slave portids after all the other pdev and vdev
 * have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count, port_id = dev - rte_eth_devices;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
						0) {
			RTE_LOG(INFO, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;
		unsigned i;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}

		/* Set primary slave port id */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}

		/* Set link status monitor polling interval */
		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}
static struct rte_driver bond_drv = {
	.name = "eth_bond",
	.type = PMD_VDEV,
	.init = bond_init,
};

PMD_REGISTER_DRIVER(bond_drv);