/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdlib.h>
#include <netinet/in.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10

#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)

/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
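/*
 * Parse up to two stacked 802.1Q (VLAN) tags. On return, *proto holds the
 * inner EtherType and the returned value is the byte offset from the end of
 * the Ethernet header to the start of the L3 header.
 */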
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;
			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}
	return vlan_offset;
}
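/*
 * Mode 0 (round-robin) RX: poll each active slave in turn, appending the
 * received packets to bufs until either nb_pkts packets have been gathered
 * or every active slave has been polled once.
 */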
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
		if (num_rx_slave) {
			num_rx_total += num_rx_slave;
			nb_pkts -= num_rx_slave;
		}
	}

	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
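/*
 * Mode 4 (802.3ad) RX: besides reading packets, this path filters out
 * LACP/marker ("slow protocol") frames and hands them to the mode 4 state
 * machines, drops frames received on slaves that are not in COLLECTING
 * state, and, when the bonded device itself is not promiscuous, drops
 * frames not addressed to the bond MAC (slaves in mode 4 are left in
 * promiscuous mode, so address filtering has to be emulated here).
 */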
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
	struct bond_dev_private *internals = bd_rx_q->dev_private;
	struct ether_addr bond_mac;

	struct ether_hdr *hdr;

	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
	uint16_t num_rx_total = 0;	/* Total number of received packets */
	uint8_t slaves[RTE_MAX_ETHPORTS];
	uint8_t slave_count;

	uint8_t collecting;	/* current slave collecting status */
	const uint8_t promisc = internals->promiscuous_en;
	uint8_t i, j, k;

	rte_eth_macaddr_get(internals->port_id, &bond_mac);
	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	slave_count = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * slave_count);

	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		j = num_rx_total;
		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);

		/* Read packets from this slave */
		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
				&bufs[num_rx_total], nb_pkts - num_rx_total);

		for (k = j; k < 2 && k < num_rx_total; k++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[k], void *));

		/* Handle slow protocol packets. */
		while (j < num_rx_total) {
			if (j + 3 < num_rx_total)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));

			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			/* Remove packet from array if it is a slow packet, or the slave
			 * is not in collecting state, or the bonding interface is not in
			 * promiscuous mode and the packet address does not match. */
			if (unlikely(hdr->ether_type == ether_type_slow_be ||
				!collecting || (!promisc &&
					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {

				if (hdr->ether_type == ether_type_slow_be) {
					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
							bufs[j]);
				} else
					rte_pktmbuf_free(bufs[j]);

				/* Packet is managed by mode 4 or dropped, shift the array */
				num_rx_total--;
				if (j < num_rx_total) {
					memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *
							(num_rx_total - j));
				}
			} else
				j++;
		}
	}

	return num_rx_total;
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB

static void
arp_op_name(uint16_t arp_op, char *buf)
{
	switch (arp_op) {
	case ARP_OP_REQUEST:
		snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
		return;
	case ARP_OP_REPLY:
		snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
		return;
	case ARP_OP_REVREQUEST:
		snprintf(buf, sizeof("Reverse ARP Request"), "%s",
				"Reverse ARP Request");
		return;
	case ARP_OP_REVREPLY:
		snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
				"Reverse ARP Reply");
		return;
	case ARP_OP_INVREQUEST:
		snprintf(buf, sizeof("Peer Identify Request"), "%s",
				"Peer Identify Request");
		return;
	case ARP_OP_INVREPLY:
		snprintf(buf, sizeof("Peer Identify Reply"), "%s",
				"Peer Identify Reply");
		return;
	default:
		break;
	}

	snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
	return;
}
#endif
#define MaxIPv4String	16
static void
ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
{
	uint32_t ipv4_addr;

	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
		ipv4_addr & 0xFF);
}
#define MAX_CLIENTS_NUMBER	128
uint8_t active_clients;
struct client_stats_t {
	uint8_t port;
	uint32_t ipv4_addr;
	uint32_t ipv4_rx_packets;
	uint32_t ipv4_tx_packets;
};
struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
static void
update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
{
	int i = 0;

	for (; i < MAX_CLIENTS_NUMBER; i++) {
		if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
			/* Just update RX packets number for this client */
			if (TXorRXindicator == &burstnumberRX)
				client_stats[i].ipv4_rx_packets++;
			else
				client_stats[i].ipv4_tx_packets++;
			return;
		}
	}
	/* We have a new client. Insert it into the table and update the stats */
	if (TXorRXindicator == &burstnumberRX)
		client_stats[active_clients].ipv4_rx_packets++;
	else
		client_stats[active_clients].ipv4_tx_packets++;
	client_stats[active_clients].ipv4_addr = addr;
	client_stats[active_clients].port = port;
	active_clients++;
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
	RTE_LOG(DEBUG, PMD, \
		"%s " \
		"port:%d " \
		"SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"SrcIP:%s " \
		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
		"DstIP:%s " \
		"%s " \
		"%d\n", \
		info, \
		port, \
		eth_h->s_addr.addr_bytes[0], \
		eth_h->s_addr.addr_bytes[1], \
		eth_h->s_addr.addr_bytes[2], \
		eth_h->s_addr.addr_bytes[3], \
		eth_h->s_addr.addr_bytes[4], \
		eth_h->s_addr.addr_bytes[5], \
		src_ip, \
		eth_h->d_addr.addr_bytes[0], \
		eth_h->d_addr.addr_bytes[1], \
		eth_h->d_addr.addr_bytes[2], \
		eth_h->d_addr.addr_bytes[3], \
		eth_h->d_addr.addr_bytes[4], \
		eth_h->d_addr.addr_bytes[5], \
		dst_ip, \
		arp_op, \
		++burstnumber)
#else
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber)
#endif
static void
mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
		uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
{
	struct ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	struct arp_hdr *arp_h;
	char dst_ip[16];
	char ArpOp[24];
	char buf[16];
#endif
	char src_ip[16];

	uint16_t ether_type = eth_h->ether_type;
	uint16_t offset = get_vlan_offset(eth_h, &ether_type);

#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	snprintf(buf, 16, "%s", info);
#endif

	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
#endif
		update_client_stats(ipv4_h->src_addr, port, burstnumber);
	}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
	else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
		arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
		arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
	}
#endif
}
#endif
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;
	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;
	uint16_t nb_recv_pkts;
	int i;

	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);

	for (i = 0; i < nb_recv_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
			bond_mode_alb_arp_recv(eth_h, offset, internals);
		}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
		else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
			mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
	}

	return nb_recv_pkts;
}
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave;

	static int slave_idx = 0;
	int i, cslave_idx = 0, tx_fail_total = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate each slave's mbuf array with the packets to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		cslave_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
	}

	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
	slave_idx = ++cslave_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += tx_fail_slave;

				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						tx_fail_slave * sizeof(bufs[0]));
			}
			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
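/*
 * The hash helpers below implement the BALANCE mode transmit policies:
 * layer 2 (MAC addresses), layer 2+3 (MAC + IP addresses) and layer 3+4
 * (IP addresses + L4 ports). The selected policy is stored in
 * internals->xmit_hash; since each is a pure function of packet headers,
 * all packets of one flow map to the same slave and are never reordered
 * across slaves.
 */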
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
	uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}
static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
}
static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
	uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}
static uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

	uint32_t hash = ether_hash(eth_hdr);

	return (hash ^= hash >> 8) % slave_count;
}
static uint16_t
xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
	uint32_t hash, l3hash = 0;

	hash = ether_hash(eth_hdr);

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv4_hash(ipv4_hdr);

	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);
	}

	hash = hash ^ l3hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
static uint16_t
xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
	uint16_t proto = eth_hdr->ether_type;
	size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);

	struct udp_hdr *udp_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	uint32_t hash, l3hash = 0, l4hash = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
		struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		size_t ip_hdr_offset;

		l3hash = ipv4_hash(ipv4_hdr);

		ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
				IPV4_IHL_MULTIPLIER;

		if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
					ip_hdr_offset);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
				((char *)(eth_hdr + 1) + vlan_offset);
		l3hash = ipv6_hash(ipv6_hdr);

		if (ipv6_hdr->proto == IPPROTO_TCP) {
			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(tcp_hdr);
		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
			l4hash = HASH_L4_PORTS(udp_hdr);
		}
	}

	hash = l3hash ^ l4hash;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % slave_count;
}
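/*
 * Example (layer 3+4 policy, untagged TCP/IPv4): for a packet from
 * 10.0.0.1:5000 to 10.0.0.2:80, l3hash XORs the two IP addresses and
 * l4hash XORs the two ports; the combined hash is then folded down by 16
 * and 8 bits before the modulo over slave_count picks the output slave.
 * Because every reduction is a symmetric XOR, swapping source and
 * destination selects the same slave, so both directions of a connection
 * travel on one link.
 */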
struct bwg_slave {
	uint64_t bwg_left_int;
	uint64_t bwg_left_remainder;
	uint8_t slave;
};

void
bond_tlb_activate_slave(struct bond_dev_private *internals)
{
	int i;

	for (i = 0; i < internals->active_slave_count; i++) {
		tlb_last_obytets[internals->active_slaves[i]] = 0;
	}
}
static int
bandwidth_cmp(const void *a, const void *b)
{
	const struct bwg_slave *bwg_a = a;
	const struct bwg_slave *bwg_b = b;
	int64_t diff = (int64_t)bwg_b->bwg_left_int - (int64_t)bwg_a->bwg_left_int;
	int64_t diff2 = (int64_t)bwg_b->bwg_left_remainder -
			(int64_t)bwg_a->bwg_left_remainder;

	if (diff > 0)
		return 1;
	else if (diff < 0)
		return -1;
	else if (diff2 > 0)
		return 1;
	else if (diff2 < 0)
		return -1;
	else
		return 0;
}
static void
bandwidth_left(int port_id, uint64_t load, uint8_t update_idx,
		struct bwg_slave *bwg_slave)
{
	struct rte_eth_link link_status;

	rte_eth_link_get(port_id, &link_status);
	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
	if (link_bwg == 0)
		return;
	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
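/*
 * One reading of the arithmetic above: link_speed is in Mbps, so
 * link_speed * 1000000 / 8 is the link capacity in bytes per second.
 * Scaling by (update_idx + 1) * REORDER_PERIOD_MS and comparing against
 * 1000 * load (bytes sent in the elapsed window, scaled from ms to s)
 * makes bwg_left_int/bwg_left_remainder a fixed-point fraction of the
 * capacity still unused in the current reorder window. For example, on an
 * idle 10 Gbps link (link_speed == 10000, load == 0) the integer part is 1
 * with remainder 0, and both shrink toward 0 as the slave approaches line
 * rate.
 */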
static void
bond_ethdev_update_tlb_slave_cb(void *arg)
{
	struct bond_dev_private *internals = arg;
	struct rte_eth_stats slave_stats;
	struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
	uint8_t slave_count;
	uint64_t tx_bytes;

	uint8_t update_stats = 0;
	uint8_t i, slave_id;

	internals->slave_update_idx++;

	if (internals->slave_update_idx >= REORDER_PERIOD_MS)
		update_stats = 1;

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		rte_eth_stats_get(slave_id, &slave_stats);
		tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id];
		bandwidth_left(slave_id, tx_bytes,
				internals->slave_update_idx, &bwg_array[i]);
		bwg_array[i].slave = slave_id;

		if (update_stats)
			tlb_last_obytets[slave_id] = slave_stats.obytes;
	}

	if (update_stats == 1)
		internals->slave_update_idx = 0;

	slave_count = i;
	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
	for (i = 0; i < slave_count; i++)
		internals->tlb_slaves_order[i] = bwg_array[i].slave;

	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
			(struct bond_dev_private *)internals);
}
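/*
 * Mode 5 (TLB) TX: slaves are tried in the order computed by the callback
 * above (most available bandwidth first). Before a packet goes out on a
 * given slave, any source MAC matching the bond (primary) address is
 * rewritten to that slave's own address, so the attached switch learns a
 * distinct source MAC per slave and return traffic stays balanced.
 */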
static uint16_t
bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct rte_eth_dev *primary_port =
			&rte_eth_devices[internals->primary_port];
	uint16_t num_tx_total = 0;
	uint8_t i, j;

	uint8_t num_of_slaves = internals->active_slave_count;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	struct ether_hdr *ether_hdr;
	struct ether_addr primary_slave_addr;
	struct ether_addr active_slave_addr;

	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->tlb_slaves_order,
			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

	if (nb_pkts > 3) {
		for (i = 0; i < 3; i++)
			rte_prefetch0(rte_pktmbuf_mtod(bufs[i], void*));
	}

	for (i = 0; i < num_of_slaves; i++) {
		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
		for (j = num_tx_total; j < nb_pkts; j++) {
			if (j + 3 < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));

			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
		}

		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs + num_tx_total, nb_pkts - num_tx_total);

		if (num_tx_total == nb_pkts)
			break;
	}

	return num_tx_total;
}
void
bond_tlb_disable(struct bond_dev_private *internals)
{
	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
}

void
bond_tlb_enable(struct bond_dev_private *internals)
{
	bond_ethdev_update_tlb_slave_cb(internals);
}
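/*
 * Mode 6 (ALB) TX: ARP packets are steered by the mode 6 client table so
 * that each client stays pinned to one slave (bond_mode_alb_arp_xmit),
 * while all other traffic falls through to the TLB transmit path. When the
 * client table is marked dirty (mode6.ntt), a batch of ARP update packets
 * is generated so clients re-learn the MAC of their assigned slave.
 */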
static uint16_t
bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
	struct bond_dev_private *internals = bd_tx_q->dev_private;

	struct ether_hdr *eth_h;
	uint16_t ether_type, offset;

	struct client_data *client_info;

	/*
	 * We create transmit buffers for every slave and one additional to send
	 * through tlb. In the worst case every packet will be sent on one port.
	 */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };

	/*
	 * We create separate transmit buffers for update packets as they won't
	 * be counted in num_tx_total.
	 */
	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };

	struct rte_mbuf *upd_pkt;
	size_t pkt_size;

	uint16_t num_send, num_not_send = 0;
	uint16_t num_tx_total = 0;
	uint8_t slave_idx;

	int i, j;

	/* Search tx buffer for ARP packets and forward them to alb */
	for (i = 0; i < nb_pkts; i++) {
		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
		ether_type = eth_h->ether_type;
		offset = get_vlan_offset(eth_h, &ether_type);

		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);

			/* Change src mac in eth header */
			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);

			/* Add packet to slave tx buffer */
			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
			slave_bufs_pkts[slave_idx]++;
		} else {
			/* If packet is not ARP, send it with TLB policy */
			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
					bufs[i];
			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
		}
	}

	/* Update connected client ARP tables */
	if (internals->mode6.ntt) {
		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
			client_info = &internals->mode6.client_table[i];

			if (client_info->in_use) {
				/* Allocate new packet to send ARP update on current slave */
				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
				if (upd_pkt == NULL) {
					RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
					continue;
				}
				pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
						+ client_info->vlan_count * sizeof(struct vlan_hdr);
				upd_pkt->data_len = pkt_size;
				upd_pkt->pkt_len = pkt_size;

				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
						internals);

				/* Add packet to update tx buffer */
				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
				update_bufs_pkts[slave_idx]++;
			}
		}
		internals->mode6.ntt = 0;
	}

	/* Send ARP packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (slave_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
					slave_bufs[i], slave_bufs_pkts[i]);
			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
				bufs[nb_pkts - 1 - num_not_send - j] =
						slave_bufs[i][nb_pkts - 1 - j];
			}

			num_tx_total += num_send;
			num_not_send += slave_bufs_pkts[i] - num_send;

#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			/* Print TX stats including update packets */
			for (j = 0; j < slave_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send update packets on proper slaves */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (update_bufs_pkts[i] > 0) {
			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
					update_bufs_pkts[i]);
			for (j = num_send; j < update_bufs_pkts[i]; j++) {
				rte_pktmbuf_free(update_bufs[i][j]);
			}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
			for (j = 0; j < update_bufs_pkts[i]; j++) {
				eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
			}
#endif
		}
	}

	/* Send non-ARP packets using tlb policy */
	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
		num_send = bond_ethdev_tx_burst_tlb(queue,
				slave_bufs[RTE_MAX_ETHPORTS],
				slave_bufs_pkts[RTE_MAX_ETHPORTS]);

		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
			bufs[nb_pkts - 1 - num_not_send - j] =
					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
		}

		num_tx_total += num_send;
		num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate each slave's mbuf array with the packets to be sent on it */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);

			/* if tx burst fails move packets to end of bufs */
			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;

				tx_fail_total += slave_tx_fail_count;
				memcpy(&bufs[nb_pkts - tx_fail_total],
						&slave_bufs[i][num_tx_slave],
						slave_tx_fail_count * sizeof(bufs[0]));
			}

			num_tx_total += num_tx_slave;
		}
	}

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];
	/* positions in slaves, not ID */
	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
	uint8_t distributing_count;

	uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
	uint16_t i, j, op_slave_idx;
	const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;

	/* Allocate additional packets in case of 8023AD mode. */
	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
	void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };

	/* Total amount of packets in slave_bufs */
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
	/* Slow packets placed in each slave */
	uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	if (num_of_slaves < 1)
		return num_tx_total;

	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);

	distributing_count = 0;
	for (i = 0; i < num_of_slaves; i++) {
		struct port *port = &mode_8023ad_ports[slaves[i]];

		slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
				slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
		slave_nb_pkts[i] = slave_slow_nb_pkts[i];

		for (j = 0; j < slave_slow_nb_pkts[i]; j++)
			slave_bufs[i][j] = slow_pkts[j];

		if (ACTOR_STATE(port, DISTRIBUTING))
			distributing_offsets[distributing_count++] = i;
	}

	if (likely(distributing_count > 0)) {
		/* Populate each slave's mbuf array with the packets to be sent on it */
		for (i = 0; i < nb_pkts; i++) {
			/* Select output slave using hash based on xmit policy */
			op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

			/* Populate slave mbuf arrays with mbufs for that slave. Use only
			 * slaves that are currently distributing. */
			uint8_t slave_offset = distributing_offsets[op_slave_idx];
			slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
			slave_nb_pkts[slave_offset]++;
		}
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] == 0)
			continue;

		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				slave_bufs[i], slave_nb_pkts[i]);

		/* If tx burst fails drop slow packets */
		for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
			rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);

		num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
		num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;

		/* If tx burst fails move packets to end of bufs */
		if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
			uint16_t j = nb_pkts - num_tx_fail_total;
			for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
				bufs[j] = slave_bufs[i][num_tx_slave];
		}
	}

	return num_tx_total;
}
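/*
 * Mode 3 (broadcast) TX: every packet is sent on every active slave, so each
 * mbuf's reference count is bumped by (num_of_slaves - 1) up front. On
 * partial failure, the extra references held for every slave other than the
 * most successful one are freed here, because the caller only learns that
 * max_nb_of_tx_pkts packets were sent and knows nothing about the cloned
 * references.
 */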
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t tx_failed_flag = 0, num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t max_nb_of_tx_pkts = 0;

	int slave_tx_total[RTE_MAX_ETHPORTS];
	int i, most_successful_tx_slave = -1;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++) {
		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

		if (unlikely(slave_tx_total[i] < nb_pkts))
			tx_failed_flag = 1;

		/* record the value and slave index for the slave which transmits the
		 * maximum number of packets */
		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
			max_nb_of_tx_pkts = slave_tx_total[i];
			most_successful_tx_slave = i;
		}
	}

	/* if slaves fail to transmit packets from burst, the calling application
	 * is not expected to know about multiple references to packets so we must
	 * handle failures of all packets except those of the most successful slave
	 */
	if (unlikely(tx_failed_flag))
		for (i = 0; i < num_of_slaves; i++)
			if (i != most_successful_tx_slave)
				while (slave_tx_total[i] < nb_pkts)
					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);

	return max_nb_of_tx_pkts;
}
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
		bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;
	}
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;
}
int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
		bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}
int
mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (dst_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	ether_addr_copy(mac_addr, dst_mac_addr);
	return 0;
}
int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
		return -1;
	}

	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different to current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
						internals->slaves[i].port_id);
				return -1;
			}
		}
		break;
	case BONDING_MODE_8023AD:
		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i].port_id ==
					internals->current_primary_port) {
				if (mac_address_set(&rte_eth_devices[internals->primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->current_primary_port);
					return -1;
				}
			} else {
				if (mac_address_set(
						&rte_eth_devices[internals->slaves[i].port_id],
						&internals->slaves[i].persisted_mac_addr)) {
					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
							internals->slaves[i].port_id);
					return -1;
				}
			}
		}
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_8023AD:
		if (bond_mode_8023ad_enable(eth_dev) != 0)
			return -1;

		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
		RTE_LOG(WARNING, PMD,
				"Using mode 4, it is necessary to do TX burst and RX burst "
				"at least every 100ms.\n");
		break;
	case BONDING_MODE_TLB:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_ALB:
		if (bond_mode_alb_enable(eth_dev) != 0)
			return -1;

		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
		break;
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	int errval, q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device if supported */
	if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	/* Configure device */
	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf));
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_rx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf);
		if (errval != 0) {
			RTE_BOND_LOG(ERR,
					"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
					slave_eth_dev->data->port_id, q_id, errval);
			return errval;
		}
	}

	/* Start device */
	errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
	if (errval != 0) {
		RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
				slave_eth_dev->data->port_id, errval);
		return -1;
	}

	return 0;
}
void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	uint8_t i;

	for (i = 0; i < internals->slave_count; i++)
		if (internals->slaves[i].port_id ==
				slave_eth_dev->data->port_id)
			break;

	if (i < (internals->slave_count - 1))
		memmove(&internals->slaves[i], &internals->slaves[i + 1],
				sizeof(internals->slaves[0]) *
				(internals->slave_count - i - 1));

	internals->slave_count--;
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg);
void
slave_add(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_slave_details *slave_details =
			&internals->slaves[internals->slave_count];

	slave_details->port_id = slave_eth_dev->data->port_id;
	slave_details->last_link_status = 0;

	/* If slave device doesn't support interrupts then we need to enable
	 * polling to monitor link status */
	if (!(slave_eth_dev->pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
		slave_details->link_status_poll_enabled = 1;

		if (!internals->link_status_polling_enabled) {
			internals->link_status_polling_enabled = 1;

			rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
					bond_ethdev_slave_link_status_change_monitor,
					(void *)&rte_eth_devices[internals->port_id]);
		}
	}

	slave_details->link_status_wait_to_complete = 0;
	/* clean tlb_last_obytes when adding port for bonding device */
	memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth dev will be started by bonded device */
	if (valid_bonded_ethdev(eth_dev)) {
		RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
				eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
		return -1;
	}

	if (internals->user_defined_mac == 0) {
		struct ether_addr *new_mac_addr = NULL;

		for (i = 0; i < internals->slave_count; i++)
			if (internals->slaves[i].port_id == internals->primary_port)
				new_mac_addr = &internals->slaves[i].persisted_mac_addr;

		if (new_mac_addr == NULL)
			return -1;

		if (mac_address_set(eth_dev, new_mac_addr) != 0) {
			RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
					eth_dev->data->port_id);
			return -1;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev,
				&(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
			RTE_BOND_LOG(ERR,
					"bonded port (%d) failed to reconfigure slave device (%d)",
					eth_dev->data->port_id, internals->slaves[i].port_id);
			return -1;
		}
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_start(eth_dev);

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB)
		bond_tlb_enable(internals);

	return 0;
}
static void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint8_t i;

	if (internals->mode == BONDING_MODE_8023AD) {
		struct port *port;
		void *pkt = NULL;

		bond_mode_8023ad_stop(eth_dev);

		/* Discard all messages to/from mode 4 state machines */
		for (i = 0; i < internals->slave_count; i++) {
			port = &mode_8023ad_ports[internals->slaves[i].port_id];

			RTE_VERIFY(port->rx_ring != NULL);
			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);

			RTE_VERIFY(port->tx_ring != NULL);
			while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
				rte_pktmbuf_free(pkt);
		}
	}

	if (internals->mode == BONDING_MODE_TLB ||
			internals->mode == BONDING_MODE_ALB) {
		bond_tlb_disable(internals);
		for (i = 0; i < internals->active_slave_count; i++)
			tlb_last_obytets[internals->active_slaves[i]] = 0;
	}

	internals->active_slave_count = 0;
	internals->link_status_polling_enabled = 0;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
}
static void
bond_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{
}
/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	dev_info->driver_name = driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = dev->pci_dev;

	dev_info->rx_offload_capa = internals->rx_offload_capa;
	dev_info->tx_offload_capa = internals->tx_offload_capa;
}
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->pci_dev->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}
static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->pci_dev->numa_node);

	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	if (queue == NULL)
		return;

	rte_free(queue);
}
static void
bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
{
	struct rte_eth_dev *bonded_ethdev, *slave_ethdev;
	struct bond_dev_private *internals;

	/* Default value for polling slave found is true as we don't want to
	 * disable the polling thread if we cannot get the lock */
	int i, polling_slave_found = 1;

	if (cb_arg == NULL)
		return;

	bonded_ethdev = (struct rte_eth_dev *)cb_arg;
	internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;

	if (!bonded_ethdev->data->dev_started ||
		!internals->link_status_polling_enabled)
		return;

	/* If device is currently being configured then don't check slaves link
	 * status, wait until next period */
	if (rte_spinlock_trylock(&internals->lock)) {
		if (internals->slave_count > 0)
			polling_slave_found = 0;

		for (i = 0; i < internals->slave_count; i++) {
			if (!internals->slaves[i].link_status_poll_enabled)
				continue;

			slave_ethdev = &rte_eth_devices[internals->slaves[i].port_id];
			polling_slave_found = 1;

			/* Update slave link status */
			(*slave_ethdev->dev_ops->link_update)(slave_ethdev,
					internals->slaves[i].link_status_wait_to_complete);

			/* if link status has changed since last checked then call lsc
			 * event callback */
			if (slave_ethdev->data->dev_link.link_status !=
					internals->slaves[i].last_link_status) {
				internals->slaves[i].last_link_status =
						slave_ethdev->data->dev_link.link_status;

				bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
						RTE_ETH_EVENT_INTR_LSC,
						&bonded_ethdev->data->port_id);
			}
		}
		rte_spinlock_unlock(&internals->lock);
	}

	if (polling_slave_found)
		/* Set alarm to continue monitoring link status of slave ethdev's */
		rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
				bond_ethdev_slave_link_status_change_monitor, cb_arg);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
		internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = 0;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == 1) {
				link_up = 1;
				break;
			}
		}
		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;

	int i;

	/* clear bonded stats before populating from slaves */
	memset(stats, 0, sizeof(*stats));

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->imcasts += slave_stats.imcasts;
		stats->rx_nombuf += slave_stats.rx_nombuf;
		stats->fdirmatch += slave_stats.fdirmatch;
		stats->fdirmiss += slave_stats.fdirmiss;
		stats->tx_pause_xon += slave_stats.tx_pause_xon;
		stats->rx_pause_xon += slave_stats.rx_pause_xon;
		stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
		stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
	}
}
static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i].port_id);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
		break;
	/* In mode 4 promiscuous mode is managed when a slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
	}
}
static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
		break;
	/* In mode 4 promiscuous mode is managed when a slave is added/removed */
	case BONDING_MODE_8023AD:
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	case BONDING_MODE_TLB:
	case BONDING_MODE_ALB:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
	}
}
static void
bond_ethdev_delayed_lsc_propagation(void *arg)
{
	if (arg == NULL)
		return;

	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
			RTE_ETH_EVENT_INTR_LSC);
}
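/*
 * Slave link status change handler: on link up the slave is activated (and
 * becomes primary and donates its link properties if it is the first active
 * slave); on link down it is deactivated and the primary is re-elected.
 * When this changes the bonded device's own link state, propagation of the
 * LSC event to the application is either immediate or deferred through
 * rte_eal_alarm_set() according to the configured up/down delays.
 */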
void
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0;
	uint8_t active_pos;
	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (valid_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i].port_id == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	active_pos = find_slave_by_id(internals->active_slaves,
			internals->active_slave_count, port_id);

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos < internals->active_slave_count)
			return;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = 1;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			mac_address_slaves_update(bonded_eth_dev);

			/* Inherit eth dev link properties from first active slave */
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}

		activate_slave(bonded_eth_dev, port_id);

		/* If user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos == internals->active_slave_count)
			return;

		/* Remove from active slave list */
		deactivate_slave(bonded_eth_dev, port_id);

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = 0;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or if none
		 * available set to -1 */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag) {
		/* Cancel any possible outstanding interrupts if delays are enabled */
		if (internals->link_up_delay_ms > 0 ||
				internals->link_down_delay_ms > 0)
			rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
					bonded_eth_dev);

		if (bonded_eth_dev->data->dev_link.link_status) {
			if (internals->link_up_delay_ms > 0)
				rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		} else {
			if (internals->link_down_delay_ms > 0)
				rte_eal_alarm_set(internals->link_down_delay_ms * 1000,
						bond_ethdev_delayed_lsc_propagation,
						(void *)bonded_eth_dev);
			else
				_rte_eth_dev_callback_process(bonded_eth_dev,
						RTE_ETH_EVENT_INTR_LSC);
		}
	}
}
struct eth_dev_ops default_dev_ops = {
	.dev_start = bond_ethdev_start,
	.dev_stop = bond_ethdev_stop,
	.dev_close = bond_ethdev_close,
	.dev_configure = bond_ethdev_configure,
	.dev_infos_get = bond_ethdev_info,
	.rx_queue_setup = bond_ethdev_rx_queue_setup,
	.tx_queue_setup = bond_ethdev_tx_queue_setup,
	.rx_queue_release = bond_ethdev_rx_queue_release,
	.tx_queue_release = bond_ethdev_tx_queue_release,
	.link_update = bond_ethdev_link_update,
	.stats_get = bond_ethdev_stats_get,
	.stats_reset = bond_ethdev_stats_reset,
	.promiscuous_enable = bond_ethdev_promiscuous_enable,
	.promiscuous_disable = bond_ethdev_promiscuous_disable
};
static int
bond_init(const char *name, const char *params)
{
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg,
				&bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
					name);
			goto parse_error;
		}
	} else {
		RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
				"device %s\n", name);
		goto parse_error;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
				!= 0) {
			RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
					"bonded device %s\n", name);
			goto parse_error;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
				"bonded device %s\n", name);
		goto parse_error;
	} else {
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL, "Failed to create bonded device %s in mode %u on "
				"socket %u.\n", name, bonding_mode, socket_id);
		goto parse_error;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
			"socket %u.\n", name, port_id, bonding_mode, socket_id);
	return 0;

parse_error:
	rte_kvargs_free(kvlist);
	return -1;
}
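/*
 * For reference, a bonded device is typically created at EAL init time via
 * a --vdev argument; a sketch (key names follow
 * pmd_bond_init_valid_arguments, and the exact value spelling should be
 * checked against the bonding documentation):
 *
 *   --vdev 'eth_bond0,mode=2,slave=0000:02:00.0,slave=0000:02:00.1,socket_id=0'
 *
 * bond_init() above parses "mode" and "socket_id" and creates the device;
 * the remaining keys are resolved later in bond_ethdev_configure(), once
 * all physical and virtual devices have been allocated.
 */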
/* this part will resolve the slave portids after all the other pdev and vdev
 * have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count, port_id = dev - rte_eth_devices;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (!kvlist)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
						0) {
			RTE_LOG(INFO, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;
		unsigned i;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid primary slave port id specified for bonded device"
					" %s\n", name);
			return -1;
		}

		/* Set primary slave port */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Primary slave can be specified only once for bonded device"
				" %s\n", name);
		return -1;
	}

	/* Parse link status monitor polling interval */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LSC_POLL_PERIOD_KVARG);
	if (arg_count == 1) {
		uint32_t lsc_poll_interval_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LSC_POLL_PERIOD_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&lsc_poll_interval_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid lsc polling interval value specified for bonded"
					" device %s\n", name);
			return -1;
		}

		if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set lsc monitor polling interval (%u ms) on"
					" bonded device %s\n", lsc_poll_interval_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"LSC polling interval can be specified only once for bonded"
				" device %s\n", name);
		return -1;
	}

	/* Parse link up interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_UP_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_up_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_up_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link up propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link up propagation delay */
		if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link up propagation delay (%u ms) on bonded"
					" device %s\n", link_up_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link up propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	/* Parse link down interrupt propagation delay */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG);
	if (arg_count == 1) {
		uint32_t link_down_delay_ms;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
				&bond_ethdev_parse_time_ms_kvarg,
				&link_down_delay_ms) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid link down propagation delay value specified for"
					" bonded device %s\n", name);
			return -1;
		}

		/* Set link down propagation delay */
		if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set link down propagation delay (%u ms) on"
					" bonded device %s\n", link_down_delay_ms, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Link down propagation delay can be specified only once for"
				" bonded device %s\n", name);
		return -1;
	}

	return 0;
}
static struct rte_driver bond_drv = {
	.name = "eth_bond",
	.type = PMD_VDEV,
	.init = bond_init,
};

PMD_REGISTER_DRIVER(bond_drv);