/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <netinet/in.h>

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>

#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
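/* Burst RX used by the round-robin, balance and broadcast modes: poll each
 * active slave in turn, appending received packets to *bufs, until either
 * nb_pkts packets have been gathered or every active slave has been polled. */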
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	uint16_t num_rx_slave = 0;
	uint16_t num_rx_total = 0;

	int i;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
		/* Offset of pointer to *bufs increases as packets are received
		 * from other slaves */
		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);

		num_rx_total += num_rx_slave;
		nb_pkts -= num_rx_slave;
	}

	return num_rx_total;
}
static uint16_t
bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;

	/* Cast to structure, containing bonded device's port id and queue id */
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

	internals = bd_rx_q->dev_private;

	return rte_eth_rx_burst(internals->current_primary_port,
			bd_rx_q->queue_id, bufs, nb_pkts);
}
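/* Round-robin TX: the burst is spread evenly over the active slaves. The
 * static slave_idx carries the starting slave between calls, so e.g. with
 * three active slaves a six packet burst is split 0,1,2,0,1,2 across the
 * slave array and the next burst starts on the slave following the last one
 * used. */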
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *dev_private;
	struct bond_tx_queue *bd_tx_q;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0;

	static int slave_idx = 0;
	int i, cs_idx = 0;

	bd_tx_q = (struct bond_tx_queue *)queue;
	dev_private = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = dev_private->active_slave_count;
	memcpy(slaves, dev_private->active_slaves,
			sizeof(dev_private->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Distribute the packets across the per-slave mbuf arrays */
	for (i = 0; i < nb_pkts; i++) {
		cs_idx = (slave_idx + i) % num_of_slaves;
		slave_bufs[cs_idx][(slave_nb_pkts[cs_idx])++] = bufs[i];
	}

	/* Increment current slave index so the next call to tx burst starts on
	 * the next slave */
	slave_idx = ++cs_idx;

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++)
		if (slave_nb_pkts[i] > 0)
			num_tx_total += rte_eth_tx_burst(slaves[i],
					bd_tx_q->queue_id, slave_bufs[i], slave_nb_pkts[i]);

	return num_tx_total;
}
static uint16_t
bond_ethdev_tx_burst_active_backup(void *queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	if (internals->active_slave_count < 1)
		return 0;

	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
			bufs, nb_pkts);
}
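/* XOR-fold helpers used by the balance transmit policies below: each one
 * reduces the source/destination MAC addresses, IPv4/IPv6 addresses or UDP
 * ports of a packet to a small hash value. */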
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
	uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
	uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]);
}

static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
	return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
}

static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
	uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
	uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);

	return (word_src_addr[0] ^ word_dst_addr[0]) ^
			(word_src_addr[1] ^ word_dst_addr[1]) ^
			(word_src_addr[2] ^ word_dst_addr[2]) ^
			(word_src_addr[3] ^ word_dst_addr[3]);
}

static inline uint32_t
udp_hash(struct udp_hdr *hdr)
{
	return hdr->src_port ^ hdr->dst_port;
}
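/* Map a packet to an output slave according to the configured transmit
 * policy: LAYER2 hashes the Ethernet addresses, LAYER23 additionally XORs in
 * the IPv4/IPv6 addresses and LAYER34 uses the IP addresses plus the UDP
 * ports. For example, under LAYER34 every packet of a given IPv4/UDP flow
 * produces the same (src_addr ^ dst_addr) ^ (src_port ^ dst_port) value and
 * is therefore always transmitted on the same slave. */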
static inline uint16_t
xmit_slave_hash(const struct rte_mbuf *buf, uint8_t slave_count, uint8_t policy)
{
	struct ether_hdr *eth_hdr;
	struct udp_hdr *udp_hdr;
	size_t eth_offset = 0;
	uint32_t hash = 0;

	if (slave_count == 1)
		return 0;

	switch (policy) {
	case BALANCE_XMIT_POLICY_LAYER2:
		eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

		hash = ether_hash(eth_hdr);
		return hash % slave_count;

	case BALANCE_XMIT_POLICY_LAYER23:
		eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

		if (buf->ol_flags & PKT_RX_VLAN_PKT)
			eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
		else
			eth_offset = sizeof(struct ether_hdr);

		if (buf->ol_flags & PKT_RX_IPV4_HDR) {
			struct ipv4_hdr *ipv4_hdr;
			ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(buf,
					unsigned char *) + eth_offset);

			hash = ether_hash(eth_hdr) ^ ipv4_hash(ipv4_hdr);
		} else {
			struct ipv6_hdr *ipv6_hdr;

			ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(buf,
					unsigned char *) + eth_offset);

			hash = ether_hash(eth_hdr) ^ ipv6_hash(ipv6_hdr);
		}
		break;

	case BALANCE_XMIT_POLICY_LAYER34:
		if (buf->ol_flags & PKT_RX_VLAN_PKT)
			eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
		else
			eth_offset = sizeof(struct ether_hdr);

		if (buf->ol_flags & PKT_RX_IPV4_HDR) {
			struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
					(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);

			if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *)
						(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
								sizeof(struct ipv4_hdr));
				hash = ipv4_hash(ipv4_hdr) ^ udp_hash(udp_hdr);
			} else {
				hash = ipv4_hash(ipv4_hdr);
			}
		} else {
			struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
					(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);

			if (ipv6_hdr->proto == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr *)
						(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
								sizeof(struct ipv6_hdr));
				hash = ipv6_hash(ipv6_hdr) ^ udp_hash(udp_hdr);
			} else {
				hash = ipv6_hash(ipv6_hdr);
			}
		}
		break;
	}

	return hash % slave_count;
}
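/* Balance TX: each packet in the burst is assigned to a slave by
 * xmit_slave_hash() and the per-slave arrays are then flushed with one
 * rte_eth_tx_burst() call per slave. */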
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0;

	int i, op_slave_id;

	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return num_tx_total;

	/* Populate the per-slave mbuf arrays with the packets to be sent on each
	 * slave */
	for (i = 0; i < nb_pkts; i++) {
		/* Select output slave using hash based on xmit policy */
		op_slave_id = xmit_slave_hash(bufs[i], num_of_slaves,
				internals->balance_xmit_policy);

		/* Populate slave mbuf arrays with mbufs for that slave */
		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
	}

	/* Send packet burst on each slave device */
	for (i = 0; i < num_of_slaves; i++) {
		if (slave_nb_pkts[i] > 0) {
			num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
					slave_bufs[i], slave_nb_pkts[i]);
		}
	}

	return num_tx_total;
}
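/* Broadcast TX (only compiled in when mbuf reference counting is enabled):
 * every packet is transmitted on every active slave, so the mbuf reference
 * count is raised by the number of extra transmissions before the bursts are
 * issued. */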
#ifdef RTE_MBUF_REFCNT
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct bond_dev_private *internals;
	struct bond_tx_queue *bd_tx_q;

	uint8_t num_of_slaves;
	uint8_t slaves[RTE_MAX_ETHPORTS];

	uint16_t num_tx_total = 0;
	int i;

	bd_tx_q = (struct bond_tx_queue *)queue;
	internals = bd_tx_q->dev_private;

	/* Copy slave list to protect against slave up/down changes during tx
	 * bursting */
	num_of_slaves = internals->active_slave_count;
	memcpy(slaves, internals->active_slaves,
			sizeof(internals->active_slaves[0]) * num_of_slaves);

	if (num_of_slaves < 1)
		return 0;

	/* Increment reference count on mbufs */
	for (i = 0; i < nb_pkts; i++)
		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);

	/* Transmit burst on each active slave */
	for (i = 0; i < num_of_slaves; i++)
		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
				bufs, nb_pkts);

	return num_tx_total;
}
#endif
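/* Helpers that keep the bonded device's link speed/duplex in sync with its
 * slaves: link_properties_set() inherits the properties from an active slave,
 * link_properties_reset() clears them, and link_properties_valid() reports
 * whether a slave's link matches the bonded device's current properties. */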
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_link *slave_dev_link)
{
	struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (slave_dev_link->link_status &&
			bonded_eth_dev->data->dev_started) {
		bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
		bonded_dev_link->link_speed = slave_dev_link->link_speed;

		internals->link_props_set = 1;
	}
}

void
link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	memset(&(bonded_eth_dev->data->dev_link), 0,
			sizeof(bonded_eth_dev->data->dev_link));

	internals->link_props_set = 0;
}

int
link_properties_valid(struct rte_eth_link *bonded_dev_link,
		struct rte_eth_link *slave_dev_link)
{
	if (bonded_dev_link->link_duplex != slave_dev_link->link_duplex ||
			bonded_dev_link->link_speed != slave_dev_link->link_speed)
		return -1;

	return 0;
}
int
mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
{
	struct ether_addr *mac_addr;

	if (eth_dev == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
		return -1;
	}

	if (new_mac_addr == NULL) {
		RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
		return -1;
	}

	/* Only dereference eth_dev once it is known to be valid */
	mac_addr = eth_dev->data->mac_addrs;

	/* If the new MAC is different from the current MAC then update it */
	if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
		memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));

	return 0;
}
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
	int i;

	/* Update slave devices MAC addresses */
	if (internals->slave_count < 1)
		return -1;

	switch (internals->mode) {
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++) {
			if (mac_address_set(&rte_eth_devices[internals->slaves[i]],
					bonded_eth_dev->data->mac_addrs)) {
				RTE_LOG(ERR, PMD,
						"%s: Failed to update port Id %d MAC address\n",
						__func__, internals->slaves[i]);
				return -1;
			}
		}
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
	default:
		for (i = 0; i < internals->slave_count; i++) {
			if (internals->slaves[i] == internals->current_primary_port) {
				if (mac_address_set(&rte_eth_devices[internals->primary_port],
						bonded_eth_dev->data->mac_addrs)) {
					RTE_LOG(ERR, PMD,
							"%s: Failed to update port Id %d MAC address\n",
							__func__, internals->current_primary_port);
					return -1;
				}
			} else {
				struct slave_conf *conf =
						slave_config_get(internals, internals->slaves[i]);

				if (mac_address_set(&rte_eth_devices[internals->slaves[i]],
						&conf->mac_addr)) {
					RTE_LOG(ERR, PMD,
							"%s: Failed to update port Id %d MAC address\n",
							__func__, internals->slaves[i]);
					return -1;
				}
			}
		}
	}
	return 0;
}
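/* Select the RX/TX burst handlers that implement the requested bonding mode
 * (round-robin, active-backup, balance and, when RTE_MBUF_REFCNT is enabled,
 * broadcast) and record the mode in the device private data. */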
int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
	struct bond_dev_private *internals;

	internals = eth_dev->data->dev_private;

	switch (mode) {
	case BONDING_MODE_ROUND_ROBIN:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
	case BONDING_MODE_ACTIVE_BACKUP:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
		break;
	case BONDING_MODE_BALANCE:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
		break;
#endif
	default:
		return -1;
	}

	internals->mode = mode;

	return 0;
}
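/* (Re)configure a slave to mirror the bonded device: stop it, enable link
 * status change interrupts, configure it with the bonded device's RX/TX queue
 * counts, set up each queue with the bonded device's queue settings and
 * finally start it again. */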
int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;

	int q_id;

	/* Stop slave */
	rte_eth_dev_stop(slave_eth_dev->data->port_id);

	/* Enable interrupts on slave device */
	slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;

	if (rte_eth_dev_configure(slave_eth_dev->data->port_id,
			bonded_eth_dev->data->nb_rx_queues,
			bonded_eth_dev->data->nb_tx_queues,
			&(slave_eth_dev->data->dev_conf)) != 0) {
		RTE_LOG(ERR, PMD, "Cannot configure slave device: port=%u\n",
				slave_eth_dev->data->port_id);
		return -1;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];

		if (rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_rx_q->nb_rx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&(bd_rx_q->rx_conf), bd_rx_q->mb_pool) != 0) {
			RTE_LOG(ERR, PMD, "rte_eth_rx_queue_setup: port=%d queue_id %d\n",
					slave_eth_dev->data->port_id, q_id);
			return -1;
		}
	}

	/* Setup Tx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];

		if (rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
				bd_tx_q->nb_tx_desc,
				rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
				&bd_tx_q->tx_conf) != 0) {
			RTE_LOG(ERR, PMD, "rte_eth_tx_queue_setup: port=%d queue_id %d\n",
					slave_eth_dev->data->port_id, q_id);
			return -1;
		}
	}

	/* Start device */
	if (rte_eth_dev_start(slave_eth_dev->data->port_id) != 0) {
		RTE_LOG(ERR, PMD, "rte_eth_dev_start: port=%u\n",
				slave_eth_dev->data->port_id);
		return -1;
	}

	return 0;
}
struct slave_conf *
slave_config_get(struct bond_dev_private *internals, uint8_t slave_port_id)
{
	int i;

	for (i = 0; i < internals->slave_count; i++) {
		if (internals->presisted_slaves_conf[i].port_id == slave_port_id)
			return &internals->presisted_slaves_conf[i];
	}
	return NULL;
}

void
slave_config_clear(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	int i, found = 0;

	for (i = 0; i < internals->slave_count; i++) {
		if (internals->presisted_slaves_conf[i].port_id ==
				slave_eth_dev->data->port_id) {
			found = 1;
			memset(&internals->presisted_slaves_conf[i], 0,
					sizeof(internals->presisted_slaves_conf[i]));
		}
		if (found && i < (internals->slave_count - 1)) {
			memcpy(&internals->presisted_slaves_conf[i],
					&internals->presisted_slaves_conf[i+1],
					sizeof(internals->presisted_slaves_conf[i]));
		}
	}
}

void
slave_config_store(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev)
{
	struct slave_conf *presisted_slave_conf =
			&internals->presisted_slaves_conf[internals->slave_count];

	presisted_slave_conf->port_id = slave_eth_dev->data->port_id;

	memcpy(&(presisted_slave_conf->mac_addr), slave_eth_dev->data->mac_addrs,
			sizeof(struct ether_addr));
}
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
		uint8_t slave_port_id)
{
	int i;

	if (internals->active_slave_count < 1)
		internals->current_primary_port = slave_port_id;
	else
		/* Search bonded device slave ports for new proposed primary port */
		for (i = 0; i < internals->active_slave_count; i++) {
			if (internals->active_slaves[i] == slave_port_id)
				internals->current_primary_port = slave_port_id;
		}
}
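/* bond_ethdev_start() below brings the bonded device up: it rejects devices
 * that are not bonded devices, applies the stored primary slave MAC when the
 * user has not set one, pushes the bonded MAC to the slaves, re-applies
 * promiscuous mode if it was enabled, reconfigures every slave and restores
 * the user-defined primary port if one was set. */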
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);

static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals;
	int i;

	/* slave eth devs will be started by the bonded device */
	if (valid_bonded_ethdev(eth_dev)) {
		RTE_LOG(ERR, PMD,
				"%s: user tried to explicitly start a slave eth_dev (%d) of the bonded eth_dev\n",
				__func__, eth_dev->data->port_id);
		return -1;
	}

	eth_dev->data->dev_link.link_status = 1;
	eth_dev->data->dev_started = 1;
	internals = eth_dev->data->dev_private;

	if (internals->slave_count == 0) {
		RTE_LOG(ERR, PMD,
				"%s: Cannot start port since there are no slave devices\n",
				__func__);
		return -1;
	}

	if (internals->user_defined_mac == 0) {
		struct slave_conf *conf = slave_config_get(internals,
				internals->primary_port);

		if (mac_address_set(eth_dev, &(conf->mac_addr)) != 0) {
			RTE_LOG(ERR, PMD,
					"bonded port (%d) failed to update MAC address\n",
					eth_dev->data->port_id);
			return -1;
		}
	}

	/* Update all slave devices MACs */
	if (mac_address_slaves_update(eth_dev) != 0)
		return -1;

	/* If bonded device is configured in promiscuous mode then re-apply config */
	if (internals->promiscuous_en)
		bond_ethdev_promiscuous_enable(eth_dev);

	/* Reconfigure each slave device if starting bonded device */
	for (i = 0; i < internals->slave_count; i++) {
		if (slave_configure(eth_dev, &(rte_eth_devices[internals->slaves[i]]))
				!= 0) {
			RTE_LOG(ERR, PMD,
					"bonded port (%d) failed to reconfigure slave device (%d)\n",
					eth_dev->data->port_id, internals->slaves[i]);
			return -1;
		}
	}

	if (internals->user_defined_primary_port)
		bond_ethdev_primary_set(internals, internals->primary_port);

	return 0;
}
static void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;

	internals->active_slave_count = 0;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;
}

static void
bond_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{
}

/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = dev->pci_dev;
}
static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
{
	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
					0, dev->pci_dev->numa_node);
	if (bd_rx_q == NULL)
		return -1;

	bd_rx_q->queue_id = rx_queue_id;
	bd_rx_q->dev_private = dev->data->dev_private;

	bd_rx_q->nb_rx_desc = nb_rx_desc;

	memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
	bd_rx_q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = bd_rx_q;

	return 0;
}
static int
bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
					0, dev->pci_dev->numa_node);
	if (bd_tx_q == NULL)
		return -1;

	bd_tx_q->queue_id = tx_queue_id;
	bd_tx_q->dev_private = dev->data->dev_private;

	bd_tx_q->nb_tx_desc = nb_tx_desc;
	memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

	dev->data->tx_queues[tx_queue_id] = bd_tx_q;

	return 0;
}
static void
bond_ethdev_rx_queue_release(void *queue)
{
	rte_free(queue);
}

static void
bond_ethdev_tx_queue_release(void *queue)
{
	rte_free(queue);
}
static int
bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete)
{
	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;

	if (!bonded_eth_dev->data->dev_started ||
			internals->active_slave_count == 0) {
		bonded_eth_dev->data->dev_link.link_status = 0;
		return 0;
	} else {
		struct rte_eth_dev *slave_eth_dev;
		int i, link_up = 0;

		for (i = 0; i < internals->active_slave_count; i++) {
			slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
					wait_to_complete);
			if (slave_eth_dev->data->dev_link.link_status == 1) {
				link_up = 1;
				break;
			}
		}
		bonded_eth_dev->data->dev_link.link_status = link_up;
	}

	return 0;
}
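/* The bonded device's statistics are simply the sums of the corresponding
 * counters of all of its slaves, and resetting the bonded device's stats
 * resets every slave. */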
static void
bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_eth_stats slave_stats;

	int i;

	/* clear bonded stats before populating from slaves */
	memset(stats, 0, sizeof(*stats));

	for (i = 0; i < internals->slave_count; i++) {
		rte_eth_stats_get(internals->slaves[i], &slave_stats);

		stats->ipackets += slave_stats.ipackets;
		stats->opackets += slave_stats.opackets;
		stats->ibytes += slave_stats.ibytes;
		stats->obytes += slave_stats.obytes;
		stats->ierrors += slave_stats.ierrors;
		stats->oerrors += slave_stats.oerrors;
		stats->imcasts += slave_stats.imcasts;
		stats->rx_nombuf += slave_stats.rx_nombuf;
		stats->fdirmatch += slave_stats.fdirmatch;
		stats->fdirmiss += slave_stats.fdirmiss;
		stats->tx_pause_xon += slave_stats.tx_pause_xon;
		stats->rx_pause_xon += slave_stats.rx_pause_xon;
		stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
		stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
	}
}
static void
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->slave_count; i++)
		rte_eth_stats_reset(internals->slaves[i]);
}
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	int i;

	internals->promiscuous_en = 1;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_enable(internals->slaves[i]);
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	default:
		rte_eth_promiscuous_enable(internals->current_primary_port);
	}
}
static void
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct bond_dev_private *internals = dev->data->dev_private;
	int i;

	internals->promiscuous_en = 0;

	switch (internals->mode) {
	/* Promiscuous mode is propagated to all slaves */
	case BONDING_MODE_ROUND_ROBIN:
	case BONDING_MODE_BALANCE:
#ifdef RTE_MBUF_REFCNT
	case BONDING_MODE_BROADCAST:
#endif
		for (i = 0; i < internals->slave_count; i++)
			rte_eth_promiscuous_disable(internals->slaves[i]);
		break;
	/* Promiscuous mode is propagated only to the primary slave */
	case BONDING_MODE_ACTIVE_BACKUP:
	default:
		rte_eth_promiscuous_disable(internals->current_primary_port);
	}
}
static void
bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
		void *param)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link;

	int i, valid_slave = 0, active_pos = -1;
	uint8_t lsc_flag = 0;

	if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
		return;

	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
	slave_eth_dev = &rte_eth_devices[port_id];

	if (valid_bonded_ethdev(bonded_eth_dev))
		return;

	internals = bonded_eth_dev->data->dev_private;

	/* If the device isn't started don't handle interrupts */
	if (!bonded_eth_dev->data->dev_started)
		return;

	/* verify that port_id is a valid slave of bonded port */
	for (i = 0; i < internals->slave_count; i++) {
		if (internals->slaves[i] == port_id) {
			valid_slave = 1;
			break;
		}
	}

	if (!valid_slave)
		return;

	/* Search for port in active port list */
	for (i = 0; i < internals->active_slave_count; i++) {
		if (port_id == internals->active_slaves[i]) {
			active_pos = i;
			break;
		}
	}

	rte_eth_link_get_nowait(port_id, &link);
	if (link.link_status) {
		if (active_pos >= 0)
			return;

		/* if no active slave ports then set this port to be primary port */
		if (internals->active_slave_count < 1) {
			/* If first active slave, then change link status */
			bonded_eth_dev->data->dev_link.link_status = 1;
			internals->current_primary_port = port_id;
			lsc_flag = 1;

			/* Inherit eth dev link properties from first active slave */
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}
		internals->active_slaves[internals->active_slave_count++] = port_id;

		/* If user has defined the primary port then default to using it */
		if (internals->user_defined_primary_port &&
				internals->primary_port == port_id)
			bond_ethdev_primary_set(internals, port_id);
	} else {
		if (active_pos < 0)
			return;

		/* Remove from active slave list */
		for (i = active_pos; i < (internals->active_slave_count - 1); i++)
			internals->active_slaves[i] = internals->active_slaves[i+1];

		internals->active_slave_count--;

		/* No active slaves, change link status to down and reset other
		 * link properties */
		if (internals->active_slave_count < 1) {
			lsc_flag = 1;
			bonded_eth_dev->data->dev_link.link_status = 0;

			link_properties_reset(bonded_eth_dev);
		}

		/* Update primary id, take first active slave from list or, if none
		 * is available, fall back to the configured primary port */
		if (port_id == internals->current_primary_port) {
			if (internals->active_slave_count > 0)
				bond_ethdev_primary_set(internals,
						internals->active_slaves[0]);
			else
				internals->current_primary_port = internals->primary_port;
		}
	}

	if (lsc_flag)
		_rte_eth_dev_callback_process(bonded_eth_dev, RTE_ETH_EVENT_INTR_LSC);
}
struct eth_dev_ops default_dev_ops = {
		.dev_start = bond_ethdev_start,
		.dev_stop = bond_ethdev_stop,
		.dev_close = bond_ethdev_close,
		.dev_configure = bond_ethdev_configure,
		.dev_infos_get = bond_ethdev_info,
		.rx_queue_setup = bond_ethdev_rx_queue_setup,
		.tx_queue_setup = bond_ethdev_tx_queue_setup,
		.rx_queue_release = bond_ethdev_rx_queue_release,
		.tx_queue_release = bond_ethdev_tx_queue_release,
		.link_update = bond_ethdev_link_update,
		.stats_get = bond_ethdev_stats_get,
		.stats_reset = bond_ethdev_stats_reset,
		.promiscuous_enable = bond_ethdev_promiscuous_enable,
		.promiscuous_disable = bond_ethdev_promiscuous_disable
};
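/* bond_init() is the vdev entry point: it parses the kvargs passed on the EAL
 * command line and creates the bonded device. Illustrative usage only; the
 * exact key names come from the PMD_BOND_*_KVARG macros in
 * rte_eth_bond_private.h and the vdev prefix depends on the registered driver
 * name:
 *
 *   --vdev 'eth_bond0,mode=2,slave=0000:02:00.0,slave=0000:02:00.1'
 *
 * Slave ports, the transmit policy, the MAC address and the primary slave are
 * resolved later, in bond_ethdev_configure(), once all physical and virtual
 * devices have been allocated. */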
static int
bond_init(const char *name, const char *params)
{
	struct bond_dev_private *internals;
	struct rte_kvargs *kvlist;
	uint8_t bonding_mode, socket_id;
	int arg_count, port_id;

	RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);

	kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
	if (kvlist == NULL)
		return -1;

	/* Parse link bonding mode */
	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
				&bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) {
			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", name);
			return -1;
		}
	} else {
		RTE_LOG(ERR, EAL,
				"Mode must be specified only once for bonded device %s\n", name);
		return -1;
	}

	/* Parse socket id to create bonding device on */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
	if (arg_count == 1) {
		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
				&bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) {
			RTE_LOG(ERR, EAL,
					"Invalid socket Id specified for bonded device %s\n", name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Socket Id can be specified only once for bonded device %s\n",
				name);
		return -1;
	} else {
		socket_id = rte_socket_id();
	}

	/* Create link bonding eth device */
	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
	if (port_id < 0) {
		RTE_LOG(ERR, EAL,
				"Failed to create bonded device %s in mode %u on socket %u.\n",
				name, bonding_mode, socket_id);
		return -1;
	}
	internals = rte_eth_devices[port_id].data->dev_private;
	internals->kvlist = kvlist;

	RTE_LOG(INFO, EAL,
			"Create bonded device %s on port %d in mode %u on socket %u.\n",
			name, port_id, bonding_mode, socket_id);
	return 0;
}
/* This function resolves the slave port ids after all of the other physical
 * and virtual devices have been allocated */
static int
bond_ethdev_configure(struct rte_eth_dev *dev)
{
	char *name = dev->data->name;
	struct bond_dev_private *internals = dev->data->dev_private;
	struct rte_kvargs *kvlist = internals->kvlist;
	int arg_count, port_id = dev - rte_eth_devices;

	/*
	 * if no kvlist, it means that this bonded device has been created
	 * through the bonding api.
	 */
	if (kvlist == NULL)
		return 0;

	/* Parse MAC address for bonded device */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_MAC_ADDR_KVARG);
	if (arg_count == 1) {
		struct ether_addr bond_mac;

		if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
				&bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
			RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
					name);
			return -1;
		}

		/* Set MAC address */
		if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set mac address on bonded device %s\n", name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"MAC address can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/set balance mode transmit policy */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_XMIT_POLICY_KVARG);
	if (arg_count == 1) {
		uint8_t xmit_policy;

		if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
				&bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
						0) {
			RTE_LOG(INFO, EAL,
					"Invalid xmit policy specified for bonded device %s\n", name);
			return -1;
		}

		/* Set balance mode transmit policy */
		if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set balance xmit policy on bonded device %s\n",
					name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(ERR, EAL,
				"Transmit policy can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	/* Parse/add slave ports to bonded device */
	if (rte_kvargs_count(kvlist, PMD_BOND_SLAVE_PORT_KVARG) > 0) {
		struct bond_ethdev_slave_ports slave_ports;
		unsigned i;

		memset(&slave_ports, 0, sizeof(slave_ports));

		if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
				&bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
			RTE_LOG(ERR, EAL,
					"Failed to parse slave ports for bonded device %s\n", name);
			return -1;
		}

		for (i = 0; i < slave_ports.slave_count; i++) {
			if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
				RTE_LOG(ERR, EAL,
						"Failed to add port %d as slave to bonded device %s\n",
						slave_ports.slaves[i], name);
			}
		}
	} else {
		RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
		return -1;
	}

	/* Parse/set primary slave port id */
	arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
	if (arg_count == 1) {
		uint8_t primary_slave_port_id;

		if (rte_kvargs_process(kvlist,
				PMD_BOND_PRIMARY_SLAVE_KVARG,
				&bond_ethdev_parse_primary_slave_port_id_kvarg,
				&primary_slave_port_id) < 0) {
			RTE_LOG(INFO, EAL,
					"Invalid primary slave port id specified for bonded device %s\n",
					name);
			return -1;
		}

		/* Set primary slave port id */
		if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
				!= 0) {
			RTE_LOG(ERR, EAL,
					"Failed to set primary slave port %d on bonded device %s\n",
					primary_slave_port_id, name);
			return -1;
		}
	} else if (arg_count > 1) {
		RTE_LOG(INFO, EAL,
				"Primary slave can be specified only once for bonded device %s\n",
				name);
		return -1;
	}

	return 0;
}
static struct rte_driver bond_drv = {
	.name = "eth_bond",
	.type = PMD_VDEV,
	.init = bond_init,
};

PMD_REGISTER_DRIVER(bond_drv);