/*
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <netinet/in.h>

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"

#define REORDER_PERIOD_MS 10
#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
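/*
 * Note: XORing the source and destination ports makes the L4 hash
 * symmetric, so both directions of a flow map to the same slave.
 */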
/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
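/*
 * get_vlan_offset() walks over up to two stacked VLAN tags (QinQ) and
 * returns the byte offset from the end of the Ethernet header to the
 * encapsulated payload, updating *proto to the inner EtherType.
 *
 * Usage sketch (mirroring the callers below):
 *
 *	uint16_t proto = eth_hdr->ether_type;
 *	size_t off = get_vlan_offset(eth_hdr, &proto);
 *	struct ipv4_hdr *ip4 = (struct ipv4_hdr *)((char *)(eth_hdr + 1) + off);
 */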
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	/* Cast to structure containing the bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);

		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
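/*
 * Receive burst in active-backup mode: packets are only read from the
 * current primary slave; backup slaves stay idle until a fail-over.
 */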
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
	struct bond_dev_private *internals;

	/* Cast to structure containing the bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
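/*
 * Receive burst in mode 4 (802.3ad): slow protocol frames (LACPDUs) are
 * handed to the mode 4 state machine, and data frames are kept only when
 * the receiving slave is in the COLLECTING state.
 */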
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
	/* Cast to structure containing the bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct ether_addr bond_mac;

	struct ether_hdr *hdr;

	const uint16_t ether_type_slow_be = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint8_t collecting;	/* current slave collecting status */
	const uint8_t promisc = internals->promiscuous_en;

	rte_eth_macaddr_get(internals->port_id, &bond_mac);
	/* Copy slave list to protect against slave up/down changes during tx
	 * burst */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			/* Remove packet from array if it is a slow packet, or if the
			 * slave is not in collecting state, or if the bonding interface
			 * is not in promiscuous mode and the packet's destination
			 * address does not match. */
			if (unlikely(hdr->ether_type == ether_type_slow_be ||
				!collecting || (!promisc &&
					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
						bufs[j]);
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
						(num_rx_total - j));
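/*
 * Receive burst in mode 6 (ALB): packets are read as in mode 0, but ARP
 * frames are additionally passed to the ALB logic to keep the client table
 * up to date.
 */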
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct ether_hdr *eth_h;

	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
			bond_mode_alb_arp_recv(eth_h, offset, internals);
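/*
 * Transmit burst in round-robin mode: packets are spread evenly across all
 * active slaves, resuming after the slave used by the previous call.
 */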
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * burst */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate each slave's mbuf array with the packets to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];

	/* increment current slave index so the next call to tx burst starts on
	 * the next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						tx_fail_slave * sizeof(bufs[0]));

			num_tx_total += num_tx_slave;
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
	uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
	uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
	return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
	uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
	uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);

xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

	uint32_t hash = ether_hash(eth_hdr);

	return (hash ^= hash >> 8) % slave_count;
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
	uint32_t hash, l3hash = 0;

	hash = ether_hash(eth_hdr);

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv4_hash(ipv4_hdr);

	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

	hash = hash ^ l3hash;

	return hash % slave_count;

xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	struct udp_hdr *udp_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	uint32_t hash, l3hash = 0, l4hash = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		size_t ip_hdr_offset;

		l3hash = ipv4_hash(ipv4_hdr);

		ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
				IPV4_IHL_MULTIPLIER;

		if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(udp_hdr);

	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

		if (ipv6_hdr->proto == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(udp_hdr);

	hash = l3hash ^ l4hash;

	return hash % slave_count;
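/*
 * Illustrative sketch (application side, not part of this file): which of
 * the three hash functions above is used in balance mode is selected
 * through the public API, e.g.
 *
 *	rte_eth_bond_xmit_policy_set(bond_port, BALANCE_XMIT_POLICY_LAYER34);
 *
 * where bond_port is assumed to be the port id of a bonded device created
 * with rte_eth_bond_create().
 */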
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint8_t slave;
};

bond_tlb_activate_slave(struct bond_dev_private *internals) {
	for (i = 0; i < internals->active_slave_count; i++) {
		tlb_last_obytets[internals->active_slaves[i]] = 0;
bandwidth_cmp(const void *a, const void *b)
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;

bandwidth_left(int port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
	struct rte_eth_link link_status;

	rte_eth_link_get(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;

	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
bond_ethdev_update_tlb_slave_cb(void *arg)
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;

	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint8_t update_stats = 0;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		tlb_last_obytets[slave_id] = slave_stats.obytes;

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
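/*
 * Transmit burst in mode 5 (TLB): slaves are tried in the order computed by
 * the callback above (most spare bandwidth first), and the source MAC of
 * frames carrying the primary slave's address is rewritten to that of the
 * transmitting slave.
 */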
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;

	uint8_t num_of_slaves = internals->active_slave_count;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	struct ether_hdr *ether_hdr;
	struct ether_addr primary_slave_addr;
	struct ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	for (i = 0; i < 3; i++)
		rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
bond_tlb_disable(struct bond_dev_private *internals)
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);

bond_tlb_enable(struct bond_dev_private *internals)
	bond_ethdev_update_tlb_slave_cb(internals);
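/*
 * Transmit burst in mode 6 (ALB): ARP frames are steered per client by the
 * ALB logic, pending ARP update packets are sent on their assigned slaves,
 * and all other traffic falls back to the TLB policy above.
 */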
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In the worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;

			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");

				pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
						+ client_info->vlan_count * sizeof(struct vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;

		internals->mode6.ntt = 0;

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];

		num_tx_total += num_send;
		num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
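/*
 * Transmit burst in balance mode: each packet is assigned to a slave by the
 * configured transmit hash policy (l2, l23 or l34 above).
 */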
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * burst */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate each slave's mbuf array with the packets to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += slave_tx_fail_count;
				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						slave_tx_fail_count * sizeof(bufs[0]));

			num_tx_total += num_tx_slave;
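/*
 * Transmit burst in mode 4 (802.3ad): queued slow protocol frames are
 * dequeued from each slave's tx_ring and sent first; data frames are hashed
 * only onto slaves currently in the DISTRIBUTING state.
 */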
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];
	/* positions in slaves, not ID */
	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
	uint8_t distributing_count;

	uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
	uint16_t i, j, op_slave_idx;
	const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

	/* Allocate space for additional slow protocol packets in 802.3ad mode. */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
	void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

	/* Total number of packets in slave_bufs */
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
	/* Slow packets placed in each slave */
	uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * burst */
	num_of_slaves = internals->active_slave_count;
	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

	distributing_count = 0;
	for (i = 0; i < num_of_slaves; i++) {
		struct port *port = &mode_8023ad_ports[slaves[i]];

		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
		slave_nb_pkts[i] = slave_slow_nb_pkts[i];

		for (j = 0; j < slave_slow_nb_pkts[i]; j++)
			slave_bufs[i][j] = slow_pkts[j];

		if (ACTOR_STATE(port, DISTRIBUTING))
			distributing_offsets[distributing_count++] = i;

	if (likely(distributing_count > 0)) {
		/* Populate each slave's mbuf array with the packets to be sent on it */
		for (i = 0; i < nb_pkts; i++) {
			/* Select output slave using hash based on xmit policy */
			op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

			/* Populate slave mbuf arrays with mbufs for that slave. Use only
			 * slaves that are currently distributing. */
			uint8_t slave_offset = distributing_offsets[op_slave_idx];
			slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
			slave_nb_pkts[slave_offset]++;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] == 0)
			continue;

		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				slave_bufs[i], slave_nb_pkts[i]);

		/* If tx burst fails drop slow packets */
		for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
			rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

		num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
		num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
			uint16_t j = nb_pkts - num_tx_fail_total;
			for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
				bufs[j] = slave_bufs[i][num_tx_slave];
#ifdef RTE_MBUF_REFCNT
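/*
 * Transmit burst in broadcast mode: every packet is replicated to every
 * active slave, which is why mbuf reference counting (RTE_MBUF_REFCNT) is
 * required.
 */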
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t tx_failed_flag = 0, num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * burst */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
		bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;

link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;

link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
		bonded_dev_link->link_speed != slave_dev_link->link_speed)
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);

	if (dst_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);

	mac_addr = eth_dev->data->mac_addrs;

	ether_addr_copy(mac_addr, dst_mac_addr);

mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");

	mac_addr = eth_dev->data->mac_addrs;

	/* If the new MAC is different from the current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	/* Update slave devices' MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);

		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	case BONDING_MODE_ALB:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (mac_address_set(&rte_eth_devices[internals->primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);

				if (mac_address_set(
						&rte_eth_devices[internals->slaves[i].port_id],
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#endif
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
		RTE_LOG(WARNING, PMD,
				"Using mode 4, it is necessary to do TX burst and RX burst "
				"at least every 100ms.\n");
		break;
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;

	internals->mode = mode;
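/*
 * Illustrative sketch (application side, not part of this file): the mode
 * handlers above can also be selected at runtime through the public API,
 * e.g.
 *
 *	if (rte_eth_bond_mode_set(bond_port, BONDING_MODE_8023AD) != 0)
 *		rte_exit(EXIT_FAILURE, "Failed to set bonding mode\n");
 *
 * where bond_port is assumed to come from rte_eth_bond_create().
 */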
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));

		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);

				"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id, q_id, errval);

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);

				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
				slave_eth_dev->data->port_id, q_id, errval);

	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);

		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1))
		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
						(internals->slave_count - i - 1));

	internals->slave_count--;

static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);

slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* If slave device doesn't support interrupts then we need to enable
	 * polling to monitor link status */
	if (!(slave_eth_dev->pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;

		if (!internals->link_status_polling_enabled) {
			internals->link_status_polling_enabled = 1;

			rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
					bond_ethdev_slave_link_status_change_monitor,
					(void *)&rte_eth_devices[internals->port_id]);

	slave_details->link_status_wait_to_complete = 0;
	/* Persist the slave's current MAC address so it can be restored when the
	 * slave is removed from the bonded device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;

	/* Search bonded device slave ports for new proposed primary port */
	for (i = 0; i < internals->active_slave_count; i++) {
		if (internals->active_slaves[i] == slave_port_id)
			internals->current_primary_port = slave_port_id;
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);

bond_ethdev_start(struct rte_eth_dev *eth_dev)
	struct bond_dev_private *internals;

	/* slave eth dev will be started by bonded device */
	if (valid_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");

	if (internals->user_defined_mac == 0) {
		struct ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			return -1;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);

	/* Update all slave devices' MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev,
				&(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
			RTE_BOND_LOG(ERR,
					"bonded port (%d) failed to reconfigure slave device (%d)",
					eth_dev->data->port_id, internals->slaves[i].port_id);

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
	struct bond_dev_private *internals = eth_dev->data->dev_private;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->slave_count; i++) {
			port = &mode_8023ad_ports[internals->slaves[i].port_id];

			RTE_VERIFY(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_VERIFY(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;

	internals->active_slave_count = 0;
	internals->link_status_polling_enabled = 0;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
bond_ethdev_close(struct rte_eth_dev *dev __rte_unused)

/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);

bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	struct bond_dev_private *internals = dev->data->dev_private;

	dev_info->driver_name = driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = dev->pci_dev;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->pci_dev->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;

bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->pci_dev->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
bond_ethdev_rx_queue_release(void *queue)

bond_ethdev_tx_queue_release(void *queue)
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id);

		rte_spinlock_unlock(&internals->lock);

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdevs */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
			internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = 0;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == 1) {
				link_up = 1;
				break;
			}
		}

		bonded_eth_dev->data->dev_link.link_status = link_up;
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;

	/* clear bonded stats before populating from slaves */
	memset(stats, 0, sizeof(*stats));

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->imcasts += slave_stats.imcasts;
		stats->rx_nombuf += slave_stats.rx_nombuf;
		stats->fdirmatch += slave_stats.fdirmatch;
		stats->fdirmiss += slave_stats.fdirmiss;
		stats->tx_pause_xon += slave_stats.tx_pause_xon;
		stats->rx_pause_xon += slave_stats.rx_pause_xon;
		stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
		stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
	struct bond_dev_private *internals = dev->data->dev_private;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i].port_id);

bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
	struct bond_dev_private *internals = eth_dev->data->dev_private;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
		break;
	/* In mode 4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
	struct bond_dev_private *internals = dev->data->dev_private;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
		break;
	/* In mode 4 promiscuous mode is managed when slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
bond_ethdev_delayed_lsc_propagation(void *arg)
	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC);
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0;

	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (valid_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos < internals->active_slave_count)
			return;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = 1;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);

			/* Inherit eth dev link properties from first active slave */
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			return;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = 0;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or if none
		 * available set to -1 */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
struct eth_dev_ops default_dev_ops = {
		.dev_start = bond_ethdev_start,
		.dev_stop = bond_ethdev_stop,
		.dev_close = bond_ethdev_close,
		.dev_configure = bond_ethdev_configure,
		.dev_infos_get = bond_ethdev_info,
		.rx_queue_setup = bond_ethdev_rx_queue_setup,
		.tx_queue_setup = bond_ethdev_tx_queue_setup,
		.rx_queue_release = bond_ethdev_rx_queue_release,
		.tx_queue_release = bond_ethdev_tx_queue_release,
		.link_update = bond_ethdev_link_update,
		.stats_get = bond_ethdev_stats_get,
		.stats_reset = bond_ethdev_stats_reset,
		.promiscuous_enable = bond_ethdev_promiscuous_enable,
		.promiscuous_disable = bond_ethdev_promiscuous_disable
};
bond_init(const char *name, const char *params)
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
					name);
	} else {
		RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
				"device %s\n", name);

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_LOG(ERR, EAL, "Invalid socket id specified for "
					"bonded device %s\n", name);
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL, "Socket id can be specified only once for "
				"bonded device %s\n", name);
	} else
		socket_id = rte_socket_id();

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
				"socket %u.\n", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
			"socket %u.\n", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);
	return -1;
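/*
 * Illustrative usage (assumed EAL command line, not part of this file): a
 * bonded device can be created at application startup with a --vdev
 * argument such as
 *
 *	--vdev 'eth_bond0,mode=2,slave=0000:0a:00.0,slave=0000:0a:00.1'
 *
 * The exact device prefix and kvarg names ("mode", "slave", "primary",
 * "socket_id", ...) are those accepted by the parsers referenced in
 * bond_init() and bond_ethdev_configure().
 */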
/* this part will resolve the slave portids after all the other pdev and vdev
 * have been allocated */
bond_ethdev_configure(struct rte_eth_dev *dev)
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count, port_id = dev - rte_eth_devices;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(INFO, EAL, "Invalid MAC address for bonded device %s\n",
					name);

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
				"Failed to set MAC address on bonded device %s\n",
				name);
	} else if (arg_count > 1) {
				"MAC address can be specified only once for bonded device %s\n",
				name);

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
				0) {
				"Invalid xmit policy specified for bonded device %s\n",
				name);

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
				"Failed to set balance xmit policy on bonded device %s\n",
				name);
	} else if (arg_count > 1) {
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
				"Failed to parse slave ports for bonded device %s\n",
				name);

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
					"Failed to add port %d as slave to bonded device %s\n",
					slave_ports.slaves[i], name);

	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
				"Invalid primary slave port id specified for bonded device"
				" %s\n", name);

		/* Set primary slave port id */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
				"Failed to set primary slave port %d on bonded device %s\n",
				primary_slave_port_id, name);
	} else if (arg_count > 1) {
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
				"Invalid lsc polling interval value specified for bonded"
				" device %s\n", name);

		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
				"Failed to set lsc monitor polling interval (%u ms) on"
				" bonded device %s\n", lsc_poll_interval_ms, name);
	} else if (arg_count > 1) {
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
				"Invalid link up propagation delay value specified for"
				" bonded device %s\n", name);

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
				"Failed to set link up propagation delay (%u ms) on bonded"
				" device %s\n", link_up_delay_ms, name);
	} else if (arg_count > 1) {
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
				"Invalid link down propagation delay value specified for"
				" bonded device %s\n", name);

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
				"Failed to set link down propagation delay (%u ms) on"
				" bonded device %s\n", link_down_delay_ms, name);
	} else if (arg_count > 1) {
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
static struct rte_driver bond_drv = {
	.init = bond_init,
};

PMD_REGISTER_DRIVER(bond_drv);