/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <netinet/in.h>
#include <sys/queue.h>
#include <linux/binfmts.h>

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_cycles.h>
#include <rte_devargs.h>
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure containing the bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BROADCAST:
	case BONDING_MODE_BALANCE:
		for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
			/* Offset into *bufs increases as packets are received
			 * from earlier slaves */
			num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
					bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);

			num_rx_total += num_rx_slave;
			nb_pkts -= num_rx_slave;
		}
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		num_rx_slave = rte_eth_rx_burst(internals->current_primary_port,
				bd_rx_q->queue_id, bufs, nb_pkts);

		num_rx_total = num_rx_slave;
		break;
	}

	return num_rx_total;
}
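
/*
 * TX burst handler for round-robin mode. Packets are spread one at a time
 * across all active slaves; the starting slave index is kept in a static
 * variable so successive bursts continue where the previous one left off.
 */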
static uint16_t
bond_ethdev_tx_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *dev_private;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0;

	static int slave_idx = 0;
	int i, cs_idx = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	dev_private = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = dev_private->active_slave_count;
	memcpy(slaves, dev_private->active_slaves,
			sizeof(dev_private->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Partition the packets among the slaves in round-robin order */
	for (i = 0; i < nb_pkts; i++) {
		cs_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cs_idx][(slave_nb_pkts[cs_idx])++] = bufs[i];
	}

	/* Increment current slave index so the next call to tx burst starts on
	 * the next slave */
	slave_idx = ++cs_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++)
		if (slave_nb_pkts[i] > 0)
			num_tx_total += rte_eth_tx_burst(slaves[i],
					bd_tx_q->queue_id, slave_bufs[i], slave_nb_pkts[i]);

	return num_tx_total;
}
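
/*
 * TX burst handler for active-backup mode: all traffic is forwarded on the
 * current primary slave, while the remaining slaves act purely as standbys.
 */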
static uint16_t
bond_ethdev_tx_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
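
/*
 * Flow hash helpers used by the balance mode transmit policies. Each helper
 * XOR-folds the source and destination addresses (or ports) of its layer
 * into a single word; xmit_slave_hash() below reduces that word modulo the
 * slave count to pick an output slave.
 */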
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
	uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
	uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}

static inline uint32_t
udp_hash(struct udp_hdr *hdr)
{
	return hdr->src_port ^ hdr->dst_port;
}
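
/*
 * Map a packet to an output slave according to the configured transmit
 * policy: LAYER2 hashes the Ethernet addresses, LAYER23 additionally mixes
 * in the IPv4/IPv6 addresses, and LAYER34 hashes the IP addresses plus the
 * UDP ports when present. VLAN-tagged packets are handled by offsetting
 * past the VLAN header before locating the IP header.
 */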
static inline uint16_t
xmit_slave_hash(const struct rte_mbuf *buf, uint8_t slave_count, uint8_t policy)
{
	struct ether_hdr *eth_hdr;
	struct udp_hdr *udp_hdr;
	size_t eth_offset = 0;

	uint32_t hash = 0;

	if (slave_count == 1)
		return 0;

	switch (policy) {
	case BALANCE_XMIT_POLICY_LAYER2:
		eth_hdr = (struct ether_hdr *)buf->pkt.data;

		hash = ether_hash(eth_hdr);

		return hash % slave_count;

	case BALANCE_XMIT_POLICY_LAYER23:
		eth_hdr = (struct ether_hdr *)buf->pkt.data;

		if (buf->ol_flags & PKT_RX_VLAN_PKT)
			eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
		else
			eth_offset = sizeof(struct ether_hdr);

		if (buf->ol_flags & PKT_RX_IPV4_HDR) {
			struct ipv4_hdr *ipv4_hdr;
			ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(buf,
					unsigned char *) + eth_offset);

			hash = ether_hash(eth_hdr) ^ ipv4_hash(ipv4_hdr);
		} else {
			struct ipv6_hdr *ipv6_hdr;

			ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(buf,
					unsigned char *) + eth_offset);

			hash = ether_hash(eth_hdr) ^ ipv6_hash(ipv6_hdr);
		}
		break;

	case BALANCE_XMIT_POLICY_LAYER34:
		if (buf->ol_flags & PKT_RX_VLAN_PKT)
			eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
		else
			eth_offset = sizeof(struct ether_hdr);

		if (buf->ol_flags & PKT_RX_IPV4_HDR) {
			struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
					(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);

			if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *)
						(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
								sizeof(struct ipv4_hdr));
				hash = ipv4_hash(ipv4_hdr) ^ udp_hash(udp_hdr);
			} else {
				hash = ipv4_hash(ipv4_hdr);
			}
		} else {
			struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
					(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);

			if (ipv6_hdr->proto == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *)
						(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
								sizeof(struct ipv6_hdr));
				hash = ipv6_hash(ipv6_hdr) ^ udp_hash(udp_hdr);
			} else {
				hash = ipv6_hash(ipv6_hdr);
			}
		}
		break;
	}

	return hash % slave_count;
}
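
/*
 * TX burst handler for balance mode. Each packet is assigned to a slave by
 * xmit_slave_hash(), queued into a per-slave staging array, and then
 * flushed with one rte_eth_tx_burst() call per slave.
 */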
static uint16_t
bond_ethdev_tx_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = xmit_slave_hash(bufs[i], num_of_slaves,
				internals->balance_xmit_policy);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);
		}
	}

	return num_tx_total;
}
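
/*
 * TX burst handler for broadcast mode. Every mbuf is transmitted on every
 * active slave, so each refcnt is raised by (num_of_slaves - 1): together
 * with the caller's single reference this gives one reference per slave
 * transmit, and the mbuf is only freed once the last slave releases it.
 */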
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0;

	int i;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++)
		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

	return num_tx_total;
}
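
/*
 * The bonded device advertises the link speed/duplex of the first active
 * slave; the helpers below set, clear and validate those inherited
 * properties as slaves come and go.
 */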
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
			bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;
	}
}
void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;
}
int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
			bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}
int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	/* Dereference eth_dev only after the NULL checks above */
	mac_addr = eth_dev->data->mac_addrs;

	/* If new MAC is different to current MAC then update */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
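
/*
 * Push the bonded device's MAC address down to the slaves. In the modes
 * that transmit on all slaves every slave is given the bonded MAC; in
 * active-backup only the primary carries it, while the other slaves are
 * restored to their original (persisted) addresses.
 */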
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i]],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_LOG(ERR, PMD,
						"%s: Failed to update port Id %d MAC address\n",
						__func__, internals->slaves[i]);
				return -1;
			}
		}
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i] == internals->current_primary_port) {
				if (mac_address_set(
						&rte_eth_devices[internals->current_primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_LOG(ERR, PMD,
							"%s: Failed to update port Id %d MAC address\n",
							__func__, internals->current_primary_port);
					return -1;
				}
			} else {
				struct slave_conf *conf =
						slave_config_get(internals, internals->slaves[i]);

				if (mac_address_set(&rte_eth_devices[internals->slaves[i]],
						&conf->mac_addr)) {
					RTE_LOG(ERR, PMD,
							"%s: Failed to update port Id %d MAC address\n",
							__func__, internals->slaves[i]);
					return -1;
				}
			}
		}
		break;
	}

	return 0;
}
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_round_robin;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_balance;
		break;
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		break;
	default:
		return -1;
	}

	eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
	internals->mode = mode;

	return 0;
}
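
/*
 * (Re)configure a slave to mirror the bonded device: stop it, enable link
 * state change interrupts, configure it with the bonded device's queue
 * counts, clone each bonded RX/TX queue's settings onto it and start it
 * again.
 */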
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	int q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device */
	slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	if (rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf)) != 0) {
		RTE_LOG(ERR, PMD, "Cannot configure slave device: port=%u\n",
				slave_eth_dev->data->port_id);
		return -1;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		if (rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool) != 0) {
			RTE_LOG(ERR, PMD, "rte_eth_rx_queue_setup: port=%d queue_id %d\n",
					slave_eth_dev->data->port_id, q_id);
			return -1;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		if (rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf) != 0) {
			RTE_LOG(ERR, PMD, "rte_eth_tx_queue_setup: port=%d queue_id %d\n",
					slave_eth_dev->data->port_id, q_id);
			return -1;
		}
	}

	/* Start device */
	if (rte_eth_dev_start(slave_eth_dev->data->port_id) != 0) {
		RTE_LOG(ERR, PMD, "rte_eth_dev_start: port=%u\n",
				slave_eth_dev->data->port_id);
		return -1;
	}

	return 0;
}
struct slave_conf *
slave_config_get(struct bond_dev_private *internals, uint8_t slave_port_id)
{
	int i;

	for (i = 0; i < internals->slave_count; i++) {
		if (internals->presisted_slaves_conf[i].port_id == slave_port_id)
			return &internals->presisted_slaves_conf[i];
	}

	return NULL;
}
void
slave_config_clear(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	int i, found = 0;

	for (i = 0; i < internals->slave_count; i++) {
		if (internals->presisted_slaves_conf[i].port_id ==
				slave_eth_dev->data->port_id) {
			found = 1;
			memset(&internals->presisted_slaves_conf[i], 0,
					sizeof(internals->presisted_slaves_conf[i]));
		}
		if (found && i < (internals->slave_count - 1)) {
			memcpy(&internals->presisted_slaves_conf[i],
					&internals->presisted_slaves_conf[i+1],
					sizeof(internals->presisted_slaves_conf[i]));
		}
	}
}
void
slave_config_store(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct slave_conf *presisted_slave_conf =
			&internals->presisted_slaves_conf[internals->slave_count];

	presisted_slave_conf->port_id = slave_eth_dev->data->port_id;

	memcpy(&(presisted_slave_conf->mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}
int
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1) {
		internals->current_primary_port = slave_port_id;
	} else {
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
	}

	return 0;
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
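
/*
 * Start the bonded device: resolve its MAC address, propagate MACs and
 * promiscuous state to the slaves, then reconfigure and start each slave.
 */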
static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* Slave eth devs will be started by the bonded device */
	if (valid_bonded_ethdev(eth_dev)) {
		RTE_LOG(ERR, PMD,
				"%s: user tried to explicitly start a slave eth_dev (%d) of the bonded eth_dev\n",
				__func__, eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = 1;
	eth_dev->data->dev_started = 1;

	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_LOG(ERR, PMD,
				"%s: Cannot start port since there are no slave devices\n",
				__func__);
		return -1;
	}

	if (internals->user_defined_mac == 0) {
		struct slave_conf *conf = slave_config_get(internals,
				internals->primary_port);

		if (mac_address_set(eth_dev, &(conf->mac_addr)) != 0) {
			RTE_LOG(ERR, PMD,
					"bonded port (%d) failed to update MAC address\n",
					eth_dev->data->port_id);
			return -1;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev, &(rte_eth_devices[internals->slaves[i]]))
				!= 0) {
			RTE_LOG(ERR, PMD,
					"bonded port (%d) failed to reconfigure slave device (%d)\n",
					eth_dev->data->port_id, internals->slaves[i]);
			return -1;
		}
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	return 0;
}
static void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;

	internals->active_slave_count = 0;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
}
static void
bond_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static int
bond_ethdev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = dev->pci_dev;
}
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->pci_dev->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}
static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->pci_dev->numa_node);
	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	rte_free(queue);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
			internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == 1) {
				link_up = 1;
				break;
			}
		}
		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;

	int i;

	/* Clear bonded stats before populating from slaves */
	memset(stats, 0, sizeof(*stats));

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i], &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->imcasts += slave_stats.imcasts;
		stats->rx_nombuf += slave_stats.rx_nombuf;
		stats->fdirmatch += slave_stats.fdirmatch;
		stats->fdirmiss += slave_stats.fdirmiss;
		stats->tx_pause_xon += slave_stats.tx_pause_xon;
		stats->rx_pause_xon += slave_stats.rx_pause_xon;
		stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
		stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
	}
}
static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i]);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;

	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i]);
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
		break;
	}
}
static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;

	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
	case BONDING_MODE_BROADCAST:
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i]);
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
		break;
	}
}
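
/*
 * Link status change callback registered against every slave. On link up
 * the slave is appended to the active list (becoming primary, and donating
 * its link properties, if it is the first); on link down it is removed and,
 * if it was the current primary, a replacement primary is elected.
 */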
static void
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, bonded_port_id, valid_slave = 0, active_pos = -1;

	if (type != RTE_ETH_EVENT_INTR_LSC)
		return;

	if (param == NULL)
		return;

	bonded_port_id = *(uint8_t *)param;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (valid_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* Verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i] == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	for (i = 0; i < internals->active_slave_count; i++) {
		if (port_id == internals->active_slaves[i]) {
			active_pos = i;
			break;
		}
	}

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos == -1) {
			/* If no active slave ports then set this port to be primary port */
			if (internals->active_slave_count == 0) {
				/* If first active slave, then change link status */
				bonded_eth_dev->data->dev_link.link_status = 1;
				internals->current_primary_port = port_id;

				/* Inherit eth dev link properties from first active slave */
				link_properties_set(bonded_eth_dev,
						&(slave_eth_dev->data->dev_link));
			}
			internals->active_slaves[internals->active_slave_count++] = port_id;

			/* If user has defined the primary port then default to using it */
			if (internals->user_defined_primary_port &&
					internals->primary_port == port_id)
				bond_ethdev_primary_set(internals, port_id);
		}
	} else {
		if (active_pos != -1) {
			/* Remove from active slave list */
			for (i = active_pos; i < (internals->active_slave_count - 1); i++)
				internals->active_slaves[i] = internals->active_slaves[i+1];

			internals->active_slave_count--;

			/* No active slaves, change link status to down and reset other
			 * link properties */
			if (internals->active_slave_count == 0)
				link_properties_reset(bonded_eth_dev);

			/* Update primary id: take the first active slave from the list,
			 * or fall back to the configured primary port if none remain */
			if (port_id == internals->current_primary_port) {
				if (internals->active_slave_count > 0)
					bond_ethdev_primary_set(internals,
							internals->active_slaves[0]);
				else
					internals->current_primary_port = internals->primary_port;
			}
		}
	}
}
struct eth_dev_ops default_dev_ops = {
	.dev_start = bond_ethdev_start,
	.dev_stop = bond_ethdev_stop,
	.dev_close = bond_ethdev_close,
	.dev_configure = bond_ethdev_configure,
	.dev_infos_get = bond_ethdev_info,
	.rx_queue_setup = bond_ethdev_rx_queue_setup,
	.tx_queue_setup = bond_ethdev_tx_queue_setup,
	.rx_queue_release = bond_ethdev_rx_queue_release,
	.tx_queue_release = bond_ethdev_tx_queue_release,
	.link_update = bond_ethdev_link_update,
	.stats_get = bond_ethdev_stats_get,
	.stats_reset = bond_ethdev_stats_reset,
	.promiscuous_enable = bond_ethdev_promiscuous_enable,
	.promiscuous_disable = bond_ethdev_promiscuous_disable
};
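
/*
 * Driver init routine, invoked when a bonded device is requested on the EAL
 * command line. The device is described with key/value pairs matched
 * against pmd_bond_init_valid_arguments; e.g. (illustrative values only,
 * the accepted keys are the PMD_BOND_*_KVARG strings defined in the
 * bonding headers):
 *
 *   --vdev 'eth_bond0,mode=2,slave=0,slave=1,primary=0,socket_id=0'
 */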
static int
bond_init(const char *name, const char *params)
{
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", name);
			return -1;
		}
	} else {
		RTE_LOG(ERR, EAL,
				"Mode must be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) {
			RTE_LOG(ERR, EAL,
					"Invalid socket Id specified for bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Socket Id can be specified only once for bonded device %s\n",
				name);
		return -1;
	} else {
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL,
				"Failed to create bonded device %s in mode %u on socket %u.\n",
				name, bonding_mode, socket_id);
		return -1;
	}

	RTE_LOG(INFO, EAL,
			"Created bonded device %s on port %d in mode %u on socket %u.\n",
			name, port_id, bonding_mode, socket_id);

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(ERR, EAL, "Invalid MAC address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set MAC address on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy)
						!= 0) {
			RTE_LOG(ERR, EAL,
					"Invalid xmit policy specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;
		unsigned i;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n",
					name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(ERR, EAL,
					"Invalid primary slave port id specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set primary slave port */
		if (rte_eth_bond_primary_set(port_id, primary_slave_port_id) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Primary slave can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	return 0;
}
static struct rte_driver bond_drv = {
	.name = PMD_BOND_NAME,
	.type = PMD_BDEV,
	.init = bond_init,
};

PMD_REGISTER_DRIVER(bond_drv);