/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
+/*
+ * Return the byte offset past any VLAN tag(s) following the Ethernet
+ * header.  Handles up to two stacked tags (QinQ); *proto is advanced to
+ * the EtherType found after the last VLAN header (big-endian, as read
+ * from the wire).
+ */
+static inline size_t
+get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
+{
+ size_t vlan_offset = 0;
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+
+ vlan_offset = sizeof(struct vlan_hdr);
+ *proto = vlan_hdr->eth_proto;
+
+ /* Check for a second, stacked VLAN tag (QinQ). */
+ if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ vlan_hdr = vlan_hdr + 1;
+ *proto = vlan_hdr->eth_proto;
+ vlan_offset += sizeof(struct vlan_hdr);
+ }
+ }
+ return vlan_offset;
+}
+
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
return num_rx_total;
}
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+/*
+ * Running RX/TX burst counters for mode-6 debug logging.  Their
+ * addresses double as direction tags: update_client_stats() compares
+ * its indicator pointer against &burstnumberRX.
+ */
+uint32_t burstnumberRX;
+uint32_t burstnumberTX;
+
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+
+/*
+ * Write a human-readable name for the given ARP opcode into buf.
+ * Each snprintf bounds itself by the length of the literal it writes,
+ * so buf must be large enough for the longest name ("Peer Identify
+ * Request", 22 bytes incl. NUL) -- the caller in mode6_debug() passes
+ * char ArpOp[24].
+ */
+static void
+arp_op_name(uint16_t arp_op, char *buf)
+{
+ switch (arp_op) {
+ case ARP_OP_REQUEST:
+ snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
+ return;
+ case ARP_OP_REPLY:
+ snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
+ return;
+ case ARP_OP_REVREQUEST:
+ snprintf(buf, sizeof("Reverse ARP Request"), "%s",
+ "Reverse ARP Request");
+ return;
+ case ARP_OP_REVREPLY:
+ snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
+ "Reverse ARP Reply");
+ return;
+ case ARP_OP_INVREQUEST:
+ snprintf(buf, sizeof("Peer Identify Request"), "%s",
+ "Peer Identify Request");
+ return;
+ case ARP_OP_INVREPLY:
+ snprintf(buf, sizeof("Peer Identify Reply"), "%s",
+ "Peer Identify Reply");
+ return;
+ default:
+ break;
+ }
+ snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
+ return;
+}
+#endif
+/* Enough for "255.255.255.255" plus the terminating NUL. */
+#define MaxIPv4String 16
+/*
+ * Convert a big-endian IPv4 address to dotted-decimal notation in buf.
+ * buf_size should be at least MaxIPv4String; snprintf truncates safely
+ * otherwise.
+ */
+static void
+ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
+{
+ uint32_t ipv4_addr;
+
+ ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
+ /* %u, not %d: the shifted/masked operands are unsigned (uint32_t). */
+ snprintf(buf, buf_size, "%u.%u.%u.%u", (ipv4_addr >> 24) & 0xFF,
+ (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
+ ipv4_addr & 0xFF);
+}
+
+#define MAX_CLIENTS_NUMBER 128
+/* Number of entries currently in use in client_stats[] below. */
+uint8_t active_clients;
+/* Per-client RX/TX packet counters, keyed by (port, IPv4 address). */
+struct client_stats_t {
+ uint8_t port;
+ uint32_t ipv4_addr;
+ uint32_t ipv4_rx_packets;
+ uint32_t ipv4_tx_packets;
+};
+struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
+
+/*
+ * Record one RX or TX packet for the client identified by (addr, port).
+ * TXorRXindicator selects the direction: it is compared by address
+ * against &burstnumberRX.  Unknown clients are appended to
+ * client_stats[]; once the table is full, further new clients are
+ * silently ignored instead of writing past the end of the array.
+ */
+static void
+update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
+{
+ int i = 0;
+
+ for (; i < MAX_CLIENTS_NUMBER; i++) {
+ if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
+ /* Just update RX packets number for this client */
+ if (TXorRXindicator == &burstnumberRX)
+ client_stats[i].ipv4_rx_packets++;
+ else
+ client_stats[i].ipv4_tx_packets++;
+ return;
+ }
+ }
+ /* Table full: drop rather than overflow client_stats[]. */
+ if (active_clients >= MAX_CLIENTS_NUMBER)
+ return;
+ /* We have a new client. Insert him to the table, and increment stats */
+ if (TXorRXindicator == &burstnumberRX)
+ client_stats[active_clients].ipv4_rx_packets++;
+ else
+ client_stats[active_clients].ipv4_tx_packets++;
+ client_stats[active_clients].ipv4_addr = addr;
+ client_stats[active_clients].port = port;
+ active_clients++;
+
+}
+
+/* Dump the per-client RX/TX packet counters collected in client_stats[]. */
+void print_client_stats(void);
+void print_client_stats(void)
+{
+ int i = 0;
+ char buf[MaxIPv4String];
+
+ for (; i < active_clients; i++) {
+ ipv4_addr_to_dot(client_stats[i].ipv4_addr, buf, MaxIPv4String);
+ printf("port:%d client:%s RX:%d TX:%d\n", client_stats[i].port, buf,
+ client_stats[i].ipv4_rx_packets,
+ client_stats[i].ipv4_tx_packets);
+ }
+}
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+/*
+ * Log one packet's L2/L3 addressing for mode-6 debugging.
+ * NOTE: the macro post-increments the caller's burst counter
+ * (++burstnumber) as a side effect of being logged.
+ */
+#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
+ RTE_LOG(DEBUG, PMD, \
+ "%s " \
+ "port:%d " \
+ "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
+ "SrcIP:%s " \
+ "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
+ "DstIP:%s " \
+ "%s " \
+ "%d\n", \
+ info, \
+ port, \
+ eth_h->s_addr.addr_bytes[0], \
+ eth_h->s_addr.addr_bytes[1], \
+ eth_h->s_addr.addr_bytes[2], \
+ eth_h->s_addr.addr_bytes[3], \
+ eth_h->s_addr.addr_bytes[4], \
+ eth_h->s_addr.addr_bytes[5], \
+ src_ip, \
+ eth_h->d_addr.addr_bytes[0], \
+ eth_h->d_addr.addr_bytes[1], \
+ eth_h->d_addr.addr_bytes[2], \
+ eth_h->d_addr.addr_bytes[3], \
+ eth_h->d_addr.addr_bytes[4], \
+ eth_h->d_addr.addr_bytes[5], \
+ dst_ip, \
+ arp_op, \
+ ++burstnumber)
+#endif
+
+/*
+ * Log one packet for mode-6 debugging and update the per-client stats.
+ * IPv4 packets always feed update_client_stats(); the MODE6_DEBUG
+ * prints and the ARP branch are compiled in only under
+ * RTE_LIBRTE_BOND_DEBUG_ALB.
+ */
+static void
+mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
+ uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
+{
+ struct ipv4_hdr *ipv4_h;
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+ struct arp_hdr *arp_h;
+ char dst_ip[16];
+ char ArpOp[24];
+ char buf[16];
+#endif
+ char src_ip[16];
+
+ uint16_t ether_type = eth_h->ether_type;
+ /* Skip over any VLAN tag(s) to reach the L3 header. */
+ uint16_t offset = get_vlan_offset(eth_h, &ether_type);
+
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+ snprintf(buf, 16, "%s", info);
+#endif
+
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
+ ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+ ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
+ MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
+#endif
+ update_client_stats(ipv4_h->src_addr, port, burstnumber);
+ }
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
+ ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
+ ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
+ arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
+ MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
+ }
+#endif
+}
+#endif
+
+/*
+ * RX burst for mode 6 (ALB): receive through the standard bonded RX
+ * path, then hand any ARP packets to the ALB logic and (in debug
+ * builds) log ARP/IPv4 traffic.
+ */
+static uint16_t
+bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /*
+ * This is an RX handler, so the queue handed in by the ethdev layer
+ * is an RX queue; cast it as such rather than to bond_tx_queue
+ * (the tx-queue cast only worked by struct-layout coincidence).
+ */
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+ struct bond_dev_private *internals = bd_rx_q->dev_private;
+ struct ether_hdr *eth_h;
+ uint16_t ether_type, offset;
+ uint16_t nb_recv_pkts;
+ int i;
+
+ nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
+
+ for (i = 0; i < nb_recv_pkts; i++) {
+ eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+ ether_type = eth_h->ether_type;
+ offset = get_vlan_offset(eth_h, &ether_type);
+
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
+#endif
+ bond_mode_alb_arp_recv(eth_h, offset, internals);
+ }
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
+#endif
+ }
+
+ return nb_recv_pkts;
+}
+
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
(word_src_addr[3] ^ word_dst_addr[3]);
}
-static inline size_t
-get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
-{
- size_t vlan_offset = 0;
-
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
- struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
- vlan_offset = sizeof(struct vlan_hdr);
- *proto = vlan_hdr->eth_proto;
-
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
- vlan_hdr = vlan_hdr + 1;
-
- *proto = vlan_hdr->eth_proto;
- vlan_offset += sizeof(struct vlan_hdr);
- }
- }
- return vlan_offset;
-}
-
uint16_t
xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
uint8_t slave;
};
+/*
+ * Reset the transmitted-bytes baseline for every active slave so TLB
+ * load measurement restarts from zero when slaves (re)activate.
+ */
+void
+bond_tlb_activate_slave(struct bond_dev_private *internals) {
+ int i;
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ tlb_last_obytets[internals->active_slaves[i]] = 0;
+ }
+}
+
static int
bandwidth_cmp(const void *a, const void *b)
{
uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
if (link_bwg == 0)
return;
- link_bwg = (link_bwg * (update_idx+1) * REORDER_PERIOD_MS);
+ link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
internals->slave_update_idx, &bwg_array[i]);
bwg_array[i].slave = slave_id;
- if (update_stats)
+ if (update_stats) {
tlb_last_obytets[slave_id] = slave_stats.obytes;
+ }
}
if (update_stats == 1)
slave_count = i;
qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
for (i = 0; i < slave_count; i++)
- internals->active_slaves[i] = bwg_array[i].slave;
+ internals->tlb_slaves_order[i] = bwg_array[i].slave;
rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
(struct bond_dev_private *)internals);
if (num_of_slaves < 1)
return num_tx_total;
- memcpy(slaves, internals->active_slaves,
- sizeof(internals->active_slaves[0]) * num_of_slaves);
+ memcpy(slaves, internals->tlb_slaves_order,
+ sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
}
for (i = 0; i < num_of_slaves; i++) {
- ether_addr_copy(&internals->slaves[slaves[i]].persisted_mac_addr,
- &active_slave_addr);
-
+ rte_eth_macaddr_get(slaves[i], &active_slave_addr);
for (j = num_tx_total; j < nb_pkts; j++) {
if (j + 3 < nb_pkts)
rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
if (is_same_ether_addr(ðer_hdr->s_addr, &primary_slave_addr))
ether_addr_copy(&active_slave_addr, ðer_hdr->s_addr);
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
+#endif
}
num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
return num_tx_total;
}
+/* Cancel the periodic TLB slave-reordering alarm callback. */
+void
+bond_tlb_disable(struct bond_dev_private *internals)
+{
+ rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
+}
+
+
+/*
+ * Start TLB slave bandwidth accounting; the callback re-arms itself
+ * via rte_eal_alarm_set() every REORDER_PERIOD_MS.
+ */
+void
+bond_tlb_enable(struct bond_dev_private *internals)
+{
+ bond_ethdev_update_tlb_slave_cb(internals);
+}
+
+/*
+ * TX burst for mode 6 (adaptive load balancing).  ARP packets are
+ * assigned to slaves by the ALB logic, driver-generated ARP update
+ * packets are sent when the client table changed (mode6.ntt), and all
+ * other traffic goes through the TLB transmit policy.  Returns the
+ * number of caller packets transmitted; unsent caller packets are
+ * compacted into the tail of bufs[] so ownership returns to the caller.
+ */
+static uint16_t
+bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ struct ether_hdr *eth_h;
+ uint16_t ether_type, offset;
+
+ struct client_data *client_info;
+
+ /*
+ * We create transmit buffers for every slave and one additional to send
+ * through tlb. In worst case every packet will be send on one port.
+ */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
+ uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
+
+ /*
+ * We create separate transmit buffers for update packets as they wont be
+ * counted in num_tx_total.
+ */
+ struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
+ uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
+
+ struct rte_mbuf *upd_pkt;
+ size_t pkt_size;
+
+ uint16_t num_send, num_not_send = 0;
+ uint16_t num_tx_total = 0;
+ uint8_t slave_idx;
+
+ int i, j;
+
+ /* Search tx buffer for ARP packets and forward them to alb */
+ for (i = 0; i < nb_pkts; i++) {
+ eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+ ether_type = eth_h->ether_type;
+ offset = get_vlan_offset(eth_h, &ether_type);
+
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
+
+ /* Change src mac in eth header */
+ rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);
+
+ /* Add packet to slave tx buffer */
+ slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
+ slave_bufs_pkts[slave_idx]++;
+ } else {
+ /* If packet is not ARP, send it with TLB policy */
+ slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
+ bufs[i];
+ slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
+ }
+ }
+
+ /* Update connected client ARP tables */
+ if (internals->mode6.ntt) {
+ for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
+ client_info = &internals->mode6.client_table[i];
+
+ if (client_info->in_use) {
+ /* Allocate new packet to send ARP update on current slave */
+ upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
+ if (upd_pkt == NULL) {
+ RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
+ continue;
+ }
+ pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
+ + client_info->vlan_count * sizeof(struct vlan_hdr);
+ upd_pkt->data_len = pkt_size;
+ upd_pkt->pkt_len = pkt_size;
+
+ slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
+ internals);
+
+ /* Add packet to update tx buffer */
+ update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
+ update_bufs_pkts[slave_idx]++;
+ }
+ }
+ internals->mode6.ntt = 0;
+ }
+
+ /* Send ARP packets on proper slaves */
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if (slave_bufs_pkts[i] > 0) {
+ num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
+ slave_bufs[i], slave_bufs_pkts[i]);
+ /*
+ * Return unsent packets (the tail of slave_bufs[i]) to the
+ * tail of the caller's bufs[].  The source index must be
+ * bounded by slave_bufs_pkts[i], not nb_pkts: only that many
+ * entries of this row were ever written.
+ */
+ for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
+ bufs[nb_pkts - 1 - num_not_send - j] =
+ slave_bufs[i][slave_bufs_pkts[i] - 1 - j];
+ }
+
+ num_tx_total += num_send;
+ num_not_send += slave_bufs_pkts[i] - num_send;
+
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ /* Print TX stats including update packets */
+ for (j = 0; j < slave_bufs_pkts[i]; j++) {
+ eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
+ mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
+ }
+#endif
+ }
+ }
+
+ /* Send update packets on proper slaves */
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if (update_bufs_pkts[i] > 0) {
+ num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
+ update_bufs_pkts[i]);
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+ /* Log before freeing failed packets below (avoid use-after-free). */
+ for (j = 0; j < update_bufs_pkts[i]; j++) {
+ eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
+ mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
+ }
+#endif
+ /* Update packets are driver-generated: free the unsent ones. */
+ for (j = num_send; j < update_bufs_pkts[i]; j++) {
+ rte_pktmbuf_free(update_bufs[i][j]);
+ }
+ }
+ }
+
+ /* Send non-ARP packets using tlb policy */
+ if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
+ num_send = bond_ethdev_tx_burst_tlb(queue,
+ slave_bufs[RTE_MAX_ETHPORTS],
+ slave_bufs_pkts[RTE_MAX_ETHPORTS]);
+
+ /*
+ * As above: hand back only the unsent tail, reading from the
+ * slots actually staged in this row (the previous code walked
+ * every staged packet and read index nb_pkts - 1 - j, which can
+ * reference never-written slots and clobber sent entries).
+ */
+ for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send; j++) {
+ bufs[nb_pkts - 1 - num_not_send - j] =
+ slave_bufs[RTE_MAX_ETHPORTS]
+ [slave_bufs_pkts[RTE_MAX_ETHPORTS] - 1 - j];
+ }
+
+ num_tx_total += num_send;
+ num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
+ }
+
+ return num_tx_total;
+}
+
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
return num_tx_total;
}
-#ifdef RTE_MBUF_REFCNT
static uint16_t
bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
return max_nb_of_tx_pkts;
}
-#endif
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
switch (internals->mode) {
case BONDING_MODE_ROUND_ROBIN:
case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
case BONDING_MODE_BROADCAST:
-#endif
for (i = 0; i < internals->slave_count; i++) {
if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
bonded_eth_dev->data->mac_addrs)) {
bond_mode_8023ad_mac_address_update(bonded_eth_dev);
break;
case BONDING_MODE_ACTIVE_BACKUP:
- case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
default:
for (i = 0; i < internals->slave_count; i++) {
if (internals->slaves[i].port_id ==
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
break;
-#ifdef RTE_MBUF_REFCNT
case BONDING_MODE_BROADCAST:
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
break;
-#endif
case BONDING_MODE_8023AD:
if (bond_mode_8023ad_enable(eth_dev) != 0)
return -1;
eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
- RTE_BOND_LOG(WARNING,
+ RTE_LOG(WARNING, PMD,
"Using mode 4, it is necessary to do TX burst and RX burst "
- "at least every 100ms.");
+ "at least every 100ms.\n");
break;
- case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ case BONDING_MODE_TLB:
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
break;
+ case BONDING_MODE_ALB:
+ if (bond_mode_alb_enable(eth_dev) != 0)
+ return -1;
+
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
+ break;
default:
return -1;
}
if (internals->mode == BONDING_MODE_8023AD)
bond_mode_8023ad_start(eth_dev);
- if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING)
- bond_ethdev_update_tlb_slave_cb(internals);
+ if (internals->mode == BONDING_MODE_TLB ||
+ internals->mode == BONDING_MODE_ALB)
+ bond_tlb_enable(internals);
return 0;
}
}
}
- if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING) {
- rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
+ if (internals->mode == BONDING_MODE_TLB ||
+ internals->mode == BONDING_MODE_ALB) {
+ bond_tlb_disable(internals);
+ for (i = 0; i < internals->active_slave_count; i++)
+ tlb_last_obytets[internals->active_slaves[i]] = 0;
}
internals->active_slave_count = 0;
{
struct bond_dev_private *internals = dev->data->dev_private;
struct rte_eth_stats slave_stats;
+
int i;
+ /* clear bonded stats before populating from slaves */
+ memset(stats, 0, sizeof(*stats));
+
for (i = 0; i < internals->slave_count; i++) {
rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
/* Promiscuous mode is propagated to all slaves */
case BONDING_MODE_ROUND_ROBIN:
case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
case BONDING_MODE_BROADCAST:
-#endif
for (i = 0; i < internals->slave_count; i++)
rte_eth_promiscuous_enable(internals->slaves[i].port_id);
break;
break;
/* Promiscuous mode is propagated only to primary slave */
case BONDING_MODE_ACTIVE_BACKUP:
- case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
default:
rte_eth_promiscuous_enable(internals->current_primary_port);
}
/* Promiscuous mode is propagated to all slaves */
case BONDING_MODE_ROUND_ROBIN:
case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
case BONDING_MODE_BROADCAST:
-#endif
for (i = 0; i < internals->slave_count; i++)
rte_eth_promiscuous_disable(internals->slaves[i].port_id);
break;
break;
/* Promiscuous mode is propagated only to primary slave */
case BONDING_MODE_ACTIVE_BACKUP:
- case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
default:
rte_eth_promiscuous_disable(internals->current_primary_port);
}