X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbonding%2Frte_eth_bond_pmd.c;h=7a1ef604e3f7c150b25f30bc54d85c8acdd2bc63;hb=49dad9028e2ac2c9576c41bda13ae62814cb6e97;hp=4bc532926ae563ce64d027daf23845c1181c09d4;hpb=f8244c6399d9fae6afab6770ae367aef38742ea5;p=dpdk.git diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index 4bc532926a..7a1ef604e3 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -1,41 +1,12 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include #include #include #include -#include +#include #include #include #include @@ -43,9 +14,10 @@ #include #include #include -#include +#include #include #include +#include #include "rte_eth_bond.h" #include "rte_eth_bond_private.h" @@ -125,11 +97,12 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs, } static inline uint8_t -is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci) +is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf) { const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW); - return !vlan_tci && (ethertype == ether_type_slow_be && + return !((mbuf->ol_flags & PKT_RX_VLAN) ? 
mbuf->vlan_tci : 0) && + (ethertype == ether_type_slow_be && (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP)); } @@ -308,87 +281,114 @@ bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs, static uint16_t bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs, - uint16_t nb_pkts) + uint16_t nb_bufs) { - struct bond_dev_private *internals; - struct bond_tx_queue *bd_tx_q; + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; - uint16_t num_of_slaves; - uint16_t slaves[RTE_MAX_ETHPORTS]; - /* positions in slaves, not ID */ - uint8_t distributing_offsets[RTE_MAX_ETHPORTS]; - uint8_t distributing_count; + uint16_t slave_port_ids[RTE_MAX_ETHPORTS]; + uint16_t slave_count; - uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0; - uint16_t i, op_slave_idx; + uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS]; + uint16_t dist_slave_count; - struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts]; + /* 2-D array to sort mbufs for transmission on each slave into */ + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs]; + /* Number of mbufs for transmission on each slave */ + uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 }; + /* Mapping array generated by hash function to map mbufs to slaves */ + uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 }; - /* Total amount of packets in slave_bufs */ - uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; - /* Slow packets placed in each slave */ + uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 }; + uint16_t total_tx_count = 0, total_tx_fail_count = 0; - if (unlikely(nb_pkts == 0)) - return 0; + uint16_t i, j; - bd_tx_q = (struct bond_tx_queue *)queue; - internals = bd_tx_q->dev_private; + if (unlikely(nb_bufs == 0)) + return 0; /* Copy slave list to protect against slave up/down changes during tx * bursting */ - num_of_slaves = internals->active_slave_count; - if (num_of_slaves < 1) - return num_tx_total; + slave_count = internals->active_slave_count; + if (unlikely(slave_count < 1)) + return 0; - memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * - num_of_slaves); + memcpy(slave_port_ids, internals->active_slaves, + sizeof(slave_port_ids[0]) * slave_count); + + + dist_slave_count = 0; + for (i = 0; i < slave_count; i++) { + struct port *port = &mode_8023ad_ports[slave_port_ids[i]]; - distributing_count = 0; - for (i = 0; i < num_of_slaves; i++) { - struct port *port = &mode_8023ad_ports[slaves[i]]; if (ACTOR_STATE(port, DISTRIBUTING)) - distributing_offsets[distributing_count++] = i; + dist_slave_port_ids[dist_slave_count++] = + slave_port_ids[i]; } - if (likely(distributing_count > 0)) { - /* Populate slaves mbuf with the packets which are to be sent */ - for (i = 0; i < nb_pkts; i++) { - /* Select output slave using hash based on xmit policy */ - op_slave_idx = internals->xmit_hash(bufs[i], - distributing_count); + if (unlikely(dist_slave_count < 1)) + return 0; - /* Populate slave mbuf arrays with mbufs for that slave. - * Use only slaves that are currently distributing. 
- */ - uint8_t slave_offset = - distributing_offsets[op_slave_idx]; - slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = - bufs[i]; - slave_nb_pkts[slave_offset]++; - } + /* + * Populate slaves mbuf with the packets which are to be sent on it + * selecting output slave using hash based on xmit policy + */ + internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count, + bufs_slave_port_idxs); + + for (i = 0; i < nb_bufs; i++) { + /* Populate slave mbuf arrays with mbufs for that slave. */ + uint8_t slave_idx = bufs_slave_port_idxs[i]; + + slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i]; } + /* Send packet burst on each slave device */ - for (i = 0; i < num_of_slaves; i++) { - if (slave_nb_pkts[i] == 0) + for (i = 0; i < dist_slave_count; i++) { + if (slave_nb_bufs[i] == 0) continue; - num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, - slave_bufs[i], slave_nb_pkts[i]); + slave_tx_count = rte_eth_tx_burst(dist_slave_port_ids[i], + bd_tx_q->queue_id, slave_bufs[i], + slave_nb_bufs[i]); - num_tx_total += num_tx_slave; - num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave; + total_tx_count += slave_tx_count; /* If tx burst fails move packets to end of bufs */ - if (unlikely(num_tx_slave < slave_nb_pkts[i])) { - uint16_t j = nb_pkts - num_tx_fail_total; - for ( ; num_tx_slave < slave_nb_pkts[i]; j++, - num_tx_slave++) - bufs[j] = slave_bufs[i][num_tx_slave]; + if (unlikely(slave_tx_count < slave_nb_bufs[i])) { + slave_tx_fail_count[i] = slave_nb_bufs[i] - + slave_tx_count; + total_tx_fail_count += slave_tx_fail_count[i]; + + /* + * Shift bufs to beginning of array to allow reordering + * later + */ + for (j = 0; j < slave_tx_fail_count[i]; j++) { + slave_bufs[i][j] = + slave_bufs[i][(slave_tx_count - 1) + j]; + } } } - return num_tx_total; + /* + * If there are tx burst failures we move packets to end of bufs to + * preserve expected PMD behaviour of all failed transmitted being + * at the end of the input mbuf array + */ + if (unlikely(total_tx_fail_count > 0)) { + int bufs_idx = nb_bufs - total_tx_fail_count - 1; + + for (i = 0; i < slave_count; i++) { + if (slave_tx_fail_count[i] > 0) { + for (j = 0; j < slave_tx_fail_count[i]; j++) + bufs[bufs_idx++] = slave_bufs[i][j]; + } + } + } + + return total_tx_count; } @@ -455,7 +455,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, /* Remove packet from array if it is slow packet or slave is not * in collecting state or bonding interface is not in promiscuous * mode and packet address does not match. 
*/ - if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) || + if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) || !collecting || (!promisc && !is_multicast_ether_addr(&hdr->d_addr) && !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) { @@ -537,7 +537,7 @@ ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size) #define MAX_CLIENTS_NUMBER 128 uint8_t active_clients; struct client_stats_t { - uint8_t port; + uint16_t port; uint32_t ipv4_addr; uint32_t ipv4_rx_packets; uint32_t ipv4_tx_packets; @@ -545,7 +545,7 @@ struct client_stats_t { struct client_stats_t client_stats[MAX_CLIENTS_NUMBER]; static void -update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator) +update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator) { int i = 0; @@ -603,7 +603,7 @@ update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator) static void mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h, - uint8_t port, uint32_t __attribute__((unused)) *burstnumber) + uint16_t port, uint32_t __attribute__((unused)) *burstnumber) { struct ipv4_hdr *ipv4_h; #ifdef RTE_LIBRTE_BOND_DEBUG_ALB @@ -618,7 +618,7 @@ mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h, uint16_t offset = get_vlan_offset(eth_h, ðer_type); #ifdef RTE_LIBRTE_BOND_DEBUG_ALB - snprintf(buf, 16, "%s", info); + strlcpy(buf, info, 16); #endif if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) { @@ -787,96 +787,129 @@ ipv6_hash(struct ipv6_hdr *ipv6_hdr) (word_src_addr[3] ^ word_dst_addr[3]); } -uint16_t -xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count) + +void +burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + uint8_t slave_count, uint16_t *slaves) { - struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); + struct ether_hdr *eth_hdr; + uint32_t hash; + int i; + + for (i = 0; i < nb_pkts; i++) { + eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *); - uint32_t hash = ether_hash(eth_hdr); + hash = ether_hash(eth_hdr); - return (hash ^= hash >> 8) % slave_count; + slaves[i] = (hash ^= hash >> 8) % slave_count; + } } -uint16_t -xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count) +void +burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + uint8_t slave_count, uint16_t *slaves) { - struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); - uint16_t proto = eth_hdr->ether_type; - size_t vlan_offset = get_vlan_offset(eth_hdr, &proto); - uint32_t hash, l3hash = 0; + uint16_t i; + struct ether_hdr *eth_hdr; + uint16_t proto; + size_t vlan_offset; + uint32_t hash, l3hash; - hash = ether_hash(eth_hdr); + for (i = 0; i < nb_pkts; i++) { + eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *); + l3hash = 0; - if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) { - struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *) - ((char *)(eth_hdr + 1) + vlan_offset); - l3hash = ipv4_hash(ipv4_hdr); + proto = eth_hdr->ether_type; + hash = ether_hash(eth_hdr); - } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) { - struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *) - ((char *)(eth_hdr + 1) + vlan_offset); - l3hash = ipv6_hash(ipv6_hdr); - } + vlan_offset = get_vlan_offset(eth_hdr, &proto); - hash = hash ^ l3hash; - hash ^= hash >> 16; - hash ^= hash >> 8; + if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) { + struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv4_hash(ipv4_hdr); - return hash % slave_count; -} + 
} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) { + struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv6_hash(ipv6_hdr); + } -uint16_t -xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count) -{ - struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); - uint16_t proto = eth_hdr->ether_type; - size_t vlan_offset = get_vlan_offset(eth_hdr, &proto); + hash = hash ^ l3hash; + hash ^= hash >> 16; + hash ^= hash >> 8; - struct udp_hdr *udp_hdr = NULL; - struct tcp_hdr *tcp_hdr = NULL; - uint32_t hash, l3hash = 0, l4hash = 0; + slaves[i] = hash % slave_count; + } +} - if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) { - struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *) - ((char *)(eth_hdr + 1) + vlan_offset); - size_t ip_hdr_offset; +void +burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + uint8_t slave_count, uint16_t *slaves) +{ + struct ether_hdr *eth_hdr; + uint16_t proto; + size_t vlan_offset; + int i; - l3hash = ipv4_hash(ipv4_hdr); + struct udp_hdr *udp_hdr; + struct tcp_hdr *tcp_hdr; + uint32_t hash, l3hash, l4hash; - /* there is no L4 header in fragmented packet */ - if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) { - ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) * + for (i = 0; i < nb_pkts; i++) { + eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *); + proto = eth_hdr->ether_type; + vlan_offset = get_vlan_offset(eth_hdr, &proto); + l3hash = 0; + l4hash = 0; + + if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) { + struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + size_t ip_hdr_offset; + + l3hash = ipv4_hash(ipv4_hdr); + + /* there is no L4 header in fragmented packet */ + if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) + == 0)) { + ip_hdr_offset = (ipv4_hdr->version_ihl + & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER; - if (ipv4_hdr->next_proto_id == IPPROTO_TCP) { - tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + - ip_hdr_offset); + if (ipv4_hdr->next_proto_id == IPPROTO_TCP) { + tcp_hdr = (struct tcp_hdr *) + ((char *)ipv4_hdr + + ip_hdr_offset); + l4hash = HASH_L4_PORTS(tcp_hdr); + } else if (ipv4_hdr->next_proto_id == + IPPROTO_UDP) { + udp_hdr = (struct udp_hdr *) + ((char *)ipv4_hdr + + ip_hdr_offset); + l4hash = HASH_L4_PORTS(udp_hdr); + } + } + } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) { + struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv6_hash(ipv6_hdr); + + if (ipv6_hdr->proto == IPPROTO_TCP) { + tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1); l4hash = HASH_L4_PORTS(tcp_hdr); - } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) { - udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr + - ip_hdr_offset); + } else if (ipv6_hdr->proto == IPPROTO_UDP) { + udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1); l4hash = HASH_L4_PORTS(udp_hdr); } } - } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) { - struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *) - ((char *)(eth_hdr + 1) + vlan_offset); - l3hash = ipv6_hash(ipv6_hdr); - - if (ipv6_hdr->proto == IPPROTO_TCP) { - tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1); - l4hash = HASH_L4_PORTS(tcp_hdr); - } else if (ipv6_hdr->proto == IPPROTO_UDP) { - udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1); - l4hash = HASH_L4_PORTS(udp_hdr); - } - } - hash = l3hash ^ l4hash; - hash ^= hash >> 16; - hash ^= hash >> 8; + hash = l3hash ^ l4hash; + hash ^= hash >> 16; + hash ^= hash >> 8; - return hash % slave_count; + slaves[i] = hash % slave_count; + 
} } struct bwg_slave { @@ -1184,156 +1217,239 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) static uint16_t bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs, - uint16_t nb_pkts) + uint16_t nb_bufs) { - struct bond_dev_private *internals; - struct bond_tx_queue *bd_tx_q; + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; - uint16_t num_of_slaves; - uint16_t slaves[RTE_MAX_ETHPORTS]; + uint16_t slave_port_ids[RTE_MAX_ETHPORTS]; + uint16_t slave_count; - uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0; + /* Array to sort mbufs for transmission on each slave into */ + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs]; + /* Number of mbufs for transmission on each slave */ + uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 }; + /* Mapping array generated by hash function to map mbufs to slaves */ + uint16_t bufs_slave_port_idxs[nb_bufs]; - int i, op_slave_id; + uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 }; + uint16_t total_tx_count = 0, total_tx_fail_count = 0; - struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts]; - uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; + uint16_t i, j; - bd_tx_q = (struct bond_tx_queue *)queue; - internals = bd_tx_q->dev_private; + if (unlikely(nb_bufs == 0)) + return 0; /* Copy slave list to protect against slave up/down changes during tx * bursting */ - num_of_slaves = internals->active_slave_count; - memcpy(slaves, internals->active_slaves, - sizeof(internals->active_slaves[0]) * num_of_slaves); + slave_count = internals->active_slave_count; + if (unlikely(slave_count < 1)) + return 0; - if (num_of_slaves < 1) - return num_tx_total; + memcpy(slave_port_ids, internals->active_slaves, + sizeof(slave_port_ids[0]) * slave_count); - /* Populate slaves mbuf with the packets which are to be sent on it */ - for (i = 0; i < nb_pkts; i++) { - /* Select output slave using hash based on xmit policy */ - op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves); + /* + * Populate slaves mbuf with the packets which are to be sent on it + * selecting output slave using hash based on xmit policy + */ + internals->burst_xmit_hash(bufs, nb_bufs, slave_count, + bufs_slave_port_idxs); + + for (i = 0; i < nb_bufs; i++) { + /* Populate slave mbuf arrays with mbufs for that slave. 
*/ + uint8_t slave_idx = bufs_slave_port_idxs[i]; - /* Populate slave mbuf arrays with mbufs for that slave */ - slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i]; + slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i]; } /* Send packet burst on each slave device */ - for (i = 0; i < num_of_slaves; i++) { - if (slave_nb_pkts[i] > 0) { - num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, - slave_bufs[i], slave_nb_pkts[i]); + for (i = 0; i < slave_count; i++) { + if (slave_nb_bufs[i] == 0) + continue; - /* if tx burst fails move packets to end of bufs */ - if (unlikely(num_tx_slave < slave_nb_pkts[i])) { - int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave; + slave_tx_count = rte_eth_tx_burst(slave_port_ids[i], + bd_tx_q->queue_id, slave_bufs[i], + slave_nb_bufs[i]); - tx_fail_total += slave_tx_fail_count; - memcpy(&bufs[nb_pkts - tx_fail_total], - &slave_bufs[i][num_tx_slave], - slave_tx_fail_count * sizeof(bufs[0])); + total_tx_count += slave_tx_count; + + /* If tx burst fails move packets to end of bufs */ + if (unlikely(slave_tx_count < slave_nb_bufs[i])) { + slave_tx_fail_count[i] = slave_nb_bufs[i] - + slave_tx_count; + total_tx_fail_count += slave_tx_fail_count[i]; + + /* + * Shift bufs to beginning of array to allow reordering + * later + */ + for (j = 0; j < slave_tx_fail_count[i]; j++) { + slave_bufs[i][j] = + slave_bufs[i][(slave_tx_count - 1) + j]; } + } + } - num_tx_total += num_tx_slave; + /* + * If there are tx burst failures we move packets to end of bufs to + * preserve expected PMD behaviour of all failed transmitted being + * at the end of the input mbuf array + */ + if (unlikely(total_tx_fail_count > 0)) { + int bufs_idx = nb_bufs - total_tx_fail_count - 1; + + for (i = 0; i < slave_count; i++) { + if (slave_tx_fail_count[i] > 0) { + for (j = 0; j < slave_tx_fail_count[i]; j++) + bufs[bufs_idx++] = slave_bufs[i][j]; + } } } - return num_tx_total; + return total_tx_count; } static uint16_t bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, - uint16_t nb_pkts) + uint16_t nb_bufs) { - struct bond_dev_private *internals; - struct bond_tx_queue *bd_tx_q; + struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue; + struct bond_dev_private *internals = bd_tx_q->dev_private; - uint16_t num_of_slaves; - uint16_t slaves[RTE_MAX_ETHPORTS]; - /* positions in slaves, not ID */ - uint8_t distributing_offsets[RTE_MAX_ETHPORTS]; - uint8_t distributing_count; + uint16_t slave_port_ids[RTE_MAX_ETHPORTS]; + uint16_t slave_count; - uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0; - uint16_t i, j, op_slave_idx; - const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1; + uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS]; + uint16_t dist_slave_count; - /* Allocate additional packets in case 8023AD mode. 
*/ - struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size]; - void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL }; + /* 2-D array to sort mbufs for transmission on each slave into */ + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs]; + /* Number of mbufs for transmission on each slave */ + uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 }; + /* Mapping array generated by hash function to map mbufs to slaves */ + uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 }; - /* Total amount of packets in slave_bufs */ - uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; - /* Slow packets placed in each slave */ - uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; + uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 }; + uint16_t total_tx_count = 0, total_tx_fail_count = 0; - bd_tx_q = (struct bond_tx_queue *)queue; - internals = bd_tx_q->dev_private; + uint16_t i, j; + + if (unlikely(nb_bufs == 0)) + return 0; /* Copy slave list to protect against slave up/down changes during tx * bursting */ - num_of_slaves = internals->active_slave_count; - if (num_of_slaves < 1) - return num_tx_total; - - memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves); - - distributing_count = 0; - for (i = 0; i < num_of_slaves; i++) { - struct port *port = &mode_8023ad_ports[slaves[i]]; + slave_count = internals->active_slave_count; + if (unlikely(slave_count < 1)) + return 0; - slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring, - slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS, - NULL); - slave_nb_pkts[i] = slave_slow_nb_pkts[i]; + memcpy(slave_port_ids, internals->active_slaves, + sizeof(slave_port_ids[0]) * slave_count); - for (j = 0; j < slave_slow_nb_pkts[i]; j++) - slave_bufs[i][j] = slow_pkts[j]; + dist_slave_count = 0; + for (i = 0; i < slave_count; i++) { + struct port *port = &mode_8023ad_ports[slave_port_ids[i]]; if (ACTOR_STATE(port, DISTRIBUTING)) - distributing_offsets[distributing_count++] = i; + dist_slave_port_ids[dist_slave_count++] = + slave_port_ids[i]; } - if (likely(distributing_count > 0)) { - /* Populate slaves mbuf with the packets which are to be sent on it */ - for (i = 0; i < nb_pkts; i++) { - /* Select output slave using hash based on xmit policy */ - op_slave_idx = internals->xmit_hash(bufs[i], distributing_count); + if (likely(dist_slave_count > 1)) { + + /* + * Populate slaves mbuf with the packets which are to be sent + * on it, selecting output slave using hash based on xmit policy + */ + internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count, + bufs_slave_port_idxs); + + for (i = 0; i < nb_bufs; i++) { + /* + * Populate slave mbuf arrays with mbufs for that + * slave + */ + uint8_t slave_idx = bufs_slave_port_idxs[i]; - /* Populate slave mbuf arrays with mbufs for that slave. Use only - * slaves that are currently distributing. 
*/ - uint8_t slave_offset = distributing_offsets[op_slave_idx]; - slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i]; - slave_nb_pkts[slave_offset]++; + slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = + bufs[i]; } - } - /* Send packet burst on each slave device */ - for (i = 0; i < num_of_slaves; i++) { - if (slave_nb_pkts[i] == 0) - continue; - num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id, - slave_bufs[i], slave_nb_pkts[i]); + /* Send packet burst on each slave device */ + for (i = 0; i < dist_slave_count; i++) { + if (slave_nb_bufs[i] == 0) + continue; + + slave_tx_count = rte_eth_tx_burst( + dist_slave_port_ids[i], + bd_tx_q->queue_id, slave_bufs[i], + slave_nb_bufs[i]); + + total_tx_count += slave_tx_count; + + /* If tx burst fails move packets to end of bufs */ + if (unlikely(slave_tx_count < slave_nb_bufs[i])) { + slave_tx_fail_count[i] = slave_nb_bufs[i] - + slave_tx_count; + total_tx_fail_count += slave_tx_fail_count[i]; + + /* + * Shift bufs to beginning of array to allow + * reordering later + */ + for (j = 0; j < slave_tx_fail_count[i]; j++) + slave_bufs[i][j] = + slave_bufs[i] + [(slave_tx_count - 1) + + j]; + } + } + + /* + * If there are tx burst failures we move packets to end of + * bufs to preserve expected PMD behaviour of all failed + * transmitted being at the end of the input mbuf array + */ + if (unlikely(total_tx_fail_count > 0)) { + int bufs_idx = nb_bufs - total_tx_fail_count - 1; + + for (i = 0; i < slave_count; i++) { + if (slave_tx_fail_count[i] > 0) { + for (j = 0; + j < slave_tx_fail_count[i]; + j++) { + bufs[bufs_idx++] = + slave_bufs[i][j]; + } + } + } + } + } - /* If tx burst fails drop slow packets */ - for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++) - rte_pktmbuf_free(slave_bufs[i][num_tx_slave]); + /* Check for LACP control packets and send if available */ + for (i = 0; i < slave_count; i++) { + struct port *port = &mode_8023ad_ports[slave_port_ids[i]]; + struct rte_mbuf *ctrl_pkt = NULL; - num_tx_total += num_tx_slave - slave_slow_nb_pkts[i]; - num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave; + if (likely(rte_ring_empty(port->tx_ring))) + continue; - /* If tx burst fails move packets to end of bufs */ - if (unlikely(num_tx_slave < slave_nb_pkts[i])) { - uint16_t j = nb_pkts - num_tx_fail_total; - for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++) - bufs[j] = slave_bufs[i][num_tx_slave]; + if (rte_ring_dequeue(port->tx_ring, + (void **)&ctrl_pkt) != -ENOENT) { + slave_tx_count = rte_eth_tx_burst(slave_port_ids[i], + bd_tx_q->queue_id, &ctrl_pkt, 1); + /* + * re-enqueue LAG control plane packets to buffering + * ring if transmission fails so the packet isn't lost. 
+ */ + if (slave_tx_count != 1) + rte_ring_enqueue(port->tx_ring, ctrl_pkt); } } - return num_tx_total; + return total_tx_count; } static uint16_t @@ -1499,7 +1615,8 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev) case BONDING_MODE_BALANCE: case BONDING_MODE_BROADCAST: for (i = 0; i < internals->slave_count; i++) { - if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id], + if (rte_eth_dev_default_mac_addr_set( + internals->slaves[i].port_id, bonded_eth_dev->data->mac_addrs)) { RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", internals->slaves[i].port_id); @@ -1517,15 +1634,16 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev) for (i = 0; i < internals->slave_count; i++) { if (internals->slaves[i].port_id == internals->current_primary_port) { - if (mac_address_set(&rte_eth_devices[internals->primary_port], + if (rte_eth_dev_default_mac_addr_set( + internals->primary_port, bonded_eth_dev->data->mac_addrs)) { RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", internals->current_primary_port); return -1; } } else { - if (mac_address_set( - &rte_eth_devices[internals->slaves[i].port_id], + if (rte_eth_dev_default_mac_addr_set( + internals->slaves[i].port_id, &internals->slaves[i].persisted_mac_addr)) { RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address", internals->slaves[i].port_id); @@ -1701,8 +1819,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, bonded_eth_dev->data->dev_conf.rxmode.mq_mode; } - slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter = - bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter; + if (bonded_eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + slave_eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_FILTER; + else + slave_eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_VLAN_FILTER; nb_rx_queues = bonded_eth_dev->data->nb_rx_queues; nb_tx_queues = bonded_eth_dev->data->nb_tx_queues; @@ -1714,6 +1837,14 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, } } + errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id, + bonded_eth_dev->data->mtu); + if (errval != 0 && errval != -ENOTSUP) { + RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)", + slave_eth_dev->data->port_id, errval); + return errval; + } + /* Configure device */ errval = rte_eth_dev_configure(slave_eth_dev->data->port_id, nb_rx_queues, nb_tx_queues, @@ -1833,10 +1964,19 @@ slave_remove(struct bond_dev_private *internals, slave_eth_dev->data->port_id) break; - if (i < (internals->slave_count - 1)) + if (i < (internals->slave_count - 1)) { + struct rte_flow *flow; + memmove(&internals->slaves[i], &internals->slaves[i + 1], sizeof(internals->slaves[0]) * (internals->slave_count - i - 1)); + TAILQ_FOREACH(flow, &internals->flow_list, next) { + memmove(&flow->flows[i], &flow->flows[i + 1], + sizeof(flow->flows[0]) * + (internals->slave_count - i - 1)); + flow->flows[internals->slave_count - 1] = NULL; + } + } internals->slave_count--; @@ -1909,7 +2049,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) if (internals->slave_count == 0) { RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices"); - return -1; + goto out_err; } if (internals->user_defined_mac == 0) { @@ -1920,18 +2060,18 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) new_mac_addr = &internals->slaves[i].persisted_mac_addr; if (new_mac_addr == NULL) - return -1; + goto out_err; if (mac_address_set(eth_dev, new_mac_addr) != 0) { RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC 
address", eth_dev->data->port_id); - return -1; + goto out_err; } } /* Update all slave devices MACs*/ if (mac_address_slaves_update(eth_dev) != 0) - return -1; + goto out_err; /* If bonded device is configure in promiscuous mode then re-apply config */ if (internals->promiscuous_en) @@ -1956,7 +2096,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) "bonded port (%d) failed to reconfigure slave device (%d)", eth_dev->data->port_id, internals->slaves[i].port_id); - return -1; + goto out_err; } /* We will need to poll for link status if any slave doesn't * support interrupts @@ -1964,6 +2104,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) if (internals->slaves[i].link_status_poll_enabled) internals->link_status_polling_enabled = 1; } + /* start polling if needed */ if (internals->link_status_polling_enabled) { rte_eal_alarm_set( @@ -1983,6 +2124,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) bond_tlb_enable(internals); return 0; + +out_err: + eth_dev->data->dev_started = 0; + return -1; } static void @@ -2055,10 +2200,11 @@ bond_ethdev_close(struct rte_eth_dev *dev) struct bond_dev_private *internals = dev->data->dev_private; uint8_t bond_port_id = internals->port_id; int skipped = 0; + struct rte_flow_error ferror; RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name); while (internals->slave_count != skipped) { - uint8_t port_id = internals->slaves[skipped].port_id; + uint16_t port_id = internals->slaves[skipped].port_id; rte_eth_dev_stop(port_id); @@ -2069,6 +2215,7 @@ bond_ethdev_close(struct rte_eth_dev *dev) skipped++; } } + bond_flow_ops.flush(dev, &ferror); bond_ethdev_free_queues(dev); rte_bitmap_reset(internals->vlan_filter_bmp); } @@ -2127,6 +2274,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->rx_offload_capa = internals->rx_offload_capa; dev_info->tx_offload_capa = internals->tx_offload_capa; + dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa; dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads; dev_info->reta_size = internals->reta_size; @@ -2370,7 +2519,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete) } -static void +static int bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct bond_dev_private *internals = dev->data->dev_private; @@ -2398,6 +2547,8 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) } } + + return 0; } static void @@ -2473,7 +2624,7 @@ bond_ethdev_delayed_lsc_propagation(void *arg) return; _rte_eth_dev_callback_process((struct rte_eth_dev *)arg, - RTE_ETH_EVENT_INTR_LSC, NULL, NULL); + RTE_ETH_EVENT_INTR_LSC, NULL); } int @@ -2581,7 +2732,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, else _rte_eth_dev_callback_process(bonded_eth_dev, RTE_ETH_EVENT_INTR_LSC, - NULL, NULL); + NULL); } else { if (internals->link_down_delay_ms > 0) @@ -2591,7 +2742,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, else _rte_eth_dev_callback_process(bonded_eth_dev, RTE_ETH_EVENT_INTR_LSC, - NULL, NULL); + NULL); } } return 0; @@ -2704,6 +2855,56 @@ bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev, return 0; } +static int +bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct rte_eth_dev *slave_eth_dev; + struct bond_dev_private *internals = dev->data->dev_private; + int ret, i; + + rte_spinlock_lock(&internals->lock); + + for 
(i = 0; i < internals->slave_count; i++) { + slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id]; + if (*slave_eth_dev->dev_ops->mtu_set == NULL) { + rte_spinlock_unlock(&internals->lock); + return -ENOTSUP; + } + } + for (i = 0; i < internals->slave_count; i++) { + ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu); + if (ret < 0) { + rte_spinlock_unlock(&internals->lock); + return ret; + } + } + + rte_spinlock_unlock(&internals->lock); + return 0; +} + +static int +bond_ethdev_mac_address_set(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + if (mac_address_set(dev, addr)) { + RTE_BOND_LOG(ERR, "Failed to update MAC address"); + return -EINVAL; + } + + return 0; +} + +static int +bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused, + enum rte_filter_type type, enum rte_filter_op op, void *arg) +{ + if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) { + *(const void **)arg = &bond_flow_ops; + return 0; + } + return -ENOTSUP; +} + const struct eth_dev_ops default_dev_ops = { .dev_start = bond_ethdev_start, .dev_stop = bond_ethdev_stop, @@ -2723,7 +2924,10 @@ const struct eth_dev_ops default_dev_ops = { .reta_update = bond_ethdev_rss_reta_update, .reta_query = bond_ethdev_rss_reta_query, .rss_hash_update = bond_ethdev_rss_hash_update, - .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get + .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get, + .mtu_set = bond_ethdev_mtu_set, + .mac_addr_set = bond_ethdev_mac_address_set, + .filter_ctrl = bond_filter_ctrl }; static int @@ -2758,8 +2962,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) } eth_dev->dev_ops = &default_dev_ops; - eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC | - RTE_ETH_DEV_DETACHABLE; + eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC; rte_spinlock_init(&internals->lock); @@ -2767,7 +2970,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) internals->mode = BONDING_MODE_INVALID; internals->current_primary_port = RTE_MAX_ETHPORTS + 1; internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2; - internals->xmit_hash = xmit_l2_hash; + internals->burst_xmit_hash = burst_xmit_l2_hash; internals->user_defined_mac = 0; internals->link_status_polling_enabled = 0; @@ -2781,6 +2984,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) internals->active_slave_count = 0; internals->rx_offload_capa = 0; internals->tx_offload_capa = 0; + internals->rx_queue_offload_capa = 0; + internals->tx_queue_offload_capa = 0; internals->candidate_max_rx_pktlen = 0; internals->max_rx_pktlen = 0; @@ -2790,10 +2995,13 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) memset(internals->active_slaves, 0, sizeof(internals->active_slaves)); memset(internals->slaves, 0, sizeof(internals->slaves)); + TAILQ_INIT(&internals->flow_list); + internals->flow_isolated_valid = 0; + /* Set mode 4 default configuration */ bond_mode_8023ad_setup(eth_dev, NULL); if (bond_ethdev_mode_set(eth_dev, mode)) { - RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d", + RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d\n", eth_dev->data->port_id, mode); goto err; } @@ -2838,6 +3046,8 @@ bond_probe(struct rte_vdev_device *dev) struct rte_kvargs *kvlist; uint8_t bonding_mode, socket_id/*, agg_mode*/; int arg_count, port_id; + uint8_t agg_mode; + struct rte_eth_dev *eth_dev; if (!dev) return -EINVAL; @@ -2845,6 +3055,18 @@ bond_probe(struct rte_vdev_device *dev) name = rte_vdev_device_name(dev); RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name); + if (rte_eal_process_type() == 
RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + RTE_LOG(ERR, PMD, "Failed to probe %s\n", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &default_dev_ops; + return 0; + } + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), pmd_bond_init_valid_arguments); if (kvlist == NULL) @@ -2895,6 +3117,25 @@ bond_probe(struct rte_vdev_device *dev) internals = rte_eth_devices[port_id].data->dev_private; internals->kvlist = kvlist; + + if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) { + if (rte_kvargs_process(kvlist, + PMD_BOND_AGG_MODE_KVARG, + &bond_ethdev_parse_slave_agg_mode_kvarg, + &agg_mode) != 0) { + RTE_LOG(ERR, EAL, + "Failed to parse agg selection mode for bonded device %s\n", + name); + goto parse_error; + } + + if (internals->mode == BONDING_MODE_8023AD) + rte_eth_bond_8023ad_agg_selection_set(port_id, + agg_mode); + } else { + rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE); + } + RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on " "socket %u.\n", name, port_id, bonding_mode, socket_id); return 0; @@ -2943,6 +3184,10 @@ bond_remove(struct rte_vdev_device *dev) eth_dev->tx_pkt_burst = NULL; internals = eth_dev->data->dev_private; + /* Try to release mempool used in mode6. If the bond + * device is not mode6, free the NULL is not problem. + */ + rte_mempool_free(internals->mode6.mempool); rte_bitmap_free(internals->vlan_filter_bmp); rte_free(internals->vlan_filter_bmpmem); rte_free(eth_dev->data->dev_private); @@ -3061,7 +3306,6 @@ bond_ethdev_configure(struct rte_eth_dev *dev) name); } if (internals->mode == BONDING_MODE_8023AD) - if (agg_mode != 0) rte_eth_bond_8023ad_agg_selection_set(port_id, agg_mode); }
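
Note on the hashing rework above: the per-packet xmit_hash callback is replaced by a burst-oriented burst_xmit_hash that maps a whole array of mbufs to slave indices in a single call. The following standalone sketch (plain C, no DPDK dependencies; the toy header arrays and toy_l2_hash() are simplified stand-ins for rte_mbuf and ether_hash(), not the driver's real types) illustrates that batch-mapping pattern under the assumption slave_count >= 1.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for an L2 header hash; not the driver's ether_hash(). */
static uint32_t
toy_l2_hash(const uint8_t dst[6], const uint8_t src[6])
{
	uint32_t hash = 0;
	for (int i = 0; i < 6; i++)
		hash = (hash * 31) ^ dst[i] ^ ((uint32_t)src[i] << 8);
	return hash;
}

/*
 * Burst-style hash: map nb_pkts packets to slave indices in one call,
 * mirroring the shape of the burst_xmit_l2_hash() added by the patch
 * (packet array in, per-packet slave index array out).
 */
static void
burst_l2_hash(const uint8_t (*dst)[6], const uint8_t (*src)[6],
		uint16_t nb_pkts, uint8_t slave_count, uint16_t *slaves)
{
	for (uint16_t i = 0; i < nb_pkts; i++) {
		uint32_t hash = toy_l2_hash(dst[i], src[i]);
		hash ^= hash >> 8;
		slaves[i] = hash % slave_count;
	}
}

int
main(void)
{
	uint8_t dst[2][6] = { {1, 2, 3, 4, 5, 6}, {6, 5, 4, 3, 2, 1} };
	uint8_t src[2][6] = { {0xaa, 0xbb, 0, 0, 0, 1}, {0xaa, 0xbb, 0, 0, 0, 2} };
	uint16_t slaves[2];

	burst_l2_hash(dst, src, 2, 3, slaves);
	for (int i = 0; i < 2; i++)
		printf("pkt %d -> slave %u\n", i, slaves[i]);
	return 0;
}

A caller can then bucket bufs[i] into per-slave arrays using slaves[i], which is what the reworked balance and 802.3ad tx paths do before calling rte_eth_tx_burst() once per slave.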
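
The reworked tx paths also keep the usual PMD contract that any mbufs the burst could not send are left at the tail of the caller's array. Below is a minimal, standalone sketch of that reordering pattern; the hypothetical send_some() stands in for rte_eth_tx_burst() and plain ints stand in for mbuf pointers. It illustrates the idea rather than reproducing the driver's exact indexing.

#include <stdint.h>
#include <stdio.h>

#define NB_SLAVES 2

/* Hypothetical per-slave send: pretend at most 'can_send' packets fit. */
static uint16_t
send_some(uint16_t slave, int *pkts, uint16_t nb, uint16_t can_send)
{
	(void)slave; (void)pkts;
	return nb < can_send ? nb : can_send;
}

int
main(void)
{
	int bufs[] = {10, 11, 12, 13, 14, 15};
	uint16_t nb_bufs = 6;

	/* Packets already bucketed per slave (as the hash step would do). */
	int slave_bufs[NB_SLAVES][6] = { {10, 12, 14}, {11, 13, 15} };
	uint16_t slave_nb_bufs[NB_SLAVES] = {3, 3};

	uint16_t fail_count[NB_SLAVES] = {0};
	uint16_t total_tx = 0, total_fail = 0;

	for (uint16_t i = 0; i < NB_SLAVES; i++) {
		uint16_t sent = send_some(i, slave_bufs[i],
				slave_nb_bufs[i], 2 /* force a shortfall */);
		total_tx += sent;
		if (sent < slave_nb_bufs[i]) {
			fail_count[i] = slave_nb_bufs[i] - sent;
			total_fail += fail_count[i];
			/* Compact the unsent packets to the front of the
			 * per-slave array so they can be copied out later. */
			for (uint16_t j = 0; j < fail_count[i]; j++)
				slave_bufs[i][j] = slave_bufs[i][sent + j];
		}
	}

	/* Move every unsent packet to the tail of the caller's bufs[] so the
	 * return value (packets sent) pairs with an untouched head of bufs[]. */
	if (total_fail > 0) {
		uint16_t idx = nb_bufs - total_fail;
		for (uint16_t i = 0; i < NB_SLAVES; i++)
			for (uint16_t j = 0; j < fail_count[i]; j++)
				bufs[idx++] = slave_bufs[i][j];
	}

	printf("sent %u, %u left at tail of bufs[]\n", total_tx, total_fail);
	return 0;
}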