diff --git a/lib/librte_pmd_bond/rte_eth_bond_pmd.c b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
index bb4a53793d..7dee5f24e1 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_pmd.c
+++ b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
@@ -31,6 +31,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include <stdlib.h>
+#include <netinet/in.h>
+
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
 #include <rte_ethdev.h>
@@ -48,9 +50,32 @@
 #include "rte_eth_bond_8023ad_private.h"

 #define REORDER_PERIOD_MS 10
+
+#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
+
 /* Table for statistics in mode 5 TLB */
 static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];

+static inline size_t
+get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
+{
+	size_t vlan_offset = 0;
+
+	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+
+		vlan_offset = sizeof(struct vlan_hdr);
+		*proto = vlan_hdr->eth_proto;
+
+		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+			vlan_hdr = vlan_hdr + 1;
+			*proto = vlan_hdr->eth_proto;
+			vlan_offset += sizeof(struct vlan_hdr);
+		}
+	}
+	return vlan_offset;
+}
+
 static uint16_t
 bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
@@ -167,6 +192,210 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	return num_rx_total;
 }

+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+uint32_t burstnumberRX;
+uint32_t burstnumberTX;
+
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+
+static void
+arp_op_name(uint16_t arp_op, char *buf)
+{
+	switch (arp_op) {
+	case ARP_OP_REQUEST:
+		snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
+		return;
+	case ARP_OP_REPLY:
+		snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
+		return;
+	case ARP_OP_REVREQUEST:
+		snprintf(buf, sizeof("Reverse ARP Request"), "%s",
+				"Reverse ARP Request");
+		return;
+	case ARP_OP_REVREPLY:
+		snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
+				"Reverse ARP Reply");
+		return;
+	case ARP_OP_INVREQUEST:
+		snprintf(buf, sizeof("Peer Identify Request"), "%s",
+				"Peer Identify Request");
+		return;
+	case ARP_OP_INVREPLY:
+		snprintf(buf, sizeof("Peer Identify Reply"), "%s",
+				"Peer Identify Reply");
+		return;
+	default:
+		break;
+	}
+	snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
+	return;
+}
+#endif
+#define MaxIPv4String 16
+static void
+ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf, uint8_t buf_size)
+{
+	uint32_t ipv4_addr;
+
+	ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr);
+	snprintf(buf, buf_size, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF,
+		(ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF,
+		ipv4_addr & 0xFF);
+}
+
+#define MAX_CLIENTS_NUMBER 128
+uint8_t active_clients;
+struct client_stats_t {
+	uint8_t port;
+	uint32_t ipv4_addr;
+	uint32_t ipv4_rx_packets;
+	uint32_t ipv4_tx_packets;
+};
+struct client_stats_t client_stats[MAX_CLIENTS_NUMBER];
+
+static void
+update_client_stats(uint32_t addr, uint8_t port, uint32_t *TXorRXindicator)
+{
+	int i = 0;
+
+	for (; i < MAX_CLIENTS_NUMBER; i++) {
+		if ((client_stats[i].ipv4_addr == addr) && (client_stats[i].port == port)) {
+			/* Just update the packet count for this client */
+			if (TXorRXindicator == &burstnumberRX)
+				client_stats[i].ipv4_rx_packets++;
+			else
+				client_stats[i].ipv4_tx_packets++;
+			return;
+		}
+	}
+	/* We have a new client. Insert it into the table and update the stats */
+	if (TXorRXindicator == &burstnumberRX)
+		client_stats[active_clients].ipv4_rx_packets++;
+	else
+		client_stats[active_clients].ipv4_tx_packets++;
+	client_stats[active_clients].ipv4_addr = addr;
+	client_stats[active_clients].port = port;
+	active_clients++;
+
+}
+
+void print_client_stats(void);
+void print_client_stats(void)
+{
+	int i = 0;
+	char buf[MaxIPv4String];
+
+	for (; i < active_clients; i++) {
+		ipv4_addr_to_dot(client_stats[i].ipv4_addr, buf, MaxIPv4String);
+		printf("port:%d client:%s RX:%d TX:%d\n", client_stats[i].port, buf,
+				client_stats[i].ipv4_rx_packets,
+				client_stats[i].ipv4_tx_packets);
+	}
+}
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
+	RTE_LOG(DEBUG, PMD, \
+		"%s " \
+		"port:%d " \
+		"SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
+		"SrcIP:%s " \
+		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
+		"DstIP:%s " \
+		"%s " \
+		"%d\n", \
+		info, \
+		port, \
+		eth_h->s_addr.addr_bytes[0], \
+		eth_h->s_addr.addr_bytes[1], \
+		eth_h->s_addr.addr_bytes[2], \
+		eth_h->s_addr.addr_bytes[3], \
+		eth_h->s_addr.addr_bytes[4], \
+		eth_h->s_addr.addr_bytes[5], \
+		src_ip, \
+		eth_h->d_addr.addr_bytes[0], \
+		eth_h->d_addr.addr_bytes[1], \
+		eth_h->d_addr.addr_bytes[2], \
+		eth_h->d_addr.addr_bytes[3], \
+		eth_h->d_addr.addr_bytes[4], \
+		eth_h->d_addr.addr_bytes[5], \
+		dst_ip, \
+		arp_op, \
+		++burstnumber)
+#endif
+
+static void
+mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
+		uint8_t port, uint32_t __attribute__((unused)) *burstnumber)
+{
+	struct ipv4_hdr *ipv4_h;
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+	struct arp_hdr *arp_h;
+	char dst_ip[16];
+	char ArpOp[24];
+	char buf[16];
+#endif
+	char src_ip[16];
+
+	uint16_t ether_type = eth_h->ether_type;
+	uint16_t offset = get_vlan_offset(eth_h, &ether_type);
+
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+	snprintf(buf, 16, "%s", info);
+#endif
+
+	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+		ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
+		ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+		ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
+		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, "", port, *burstnumber);
+#endif
+		update_client_stats(ipv4_h->src_addr, port, burstnumber);
+	}
+#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
+	else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+		arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
+		ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
+		ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
+		arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
+		MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
+	}
+#endif
+}
+#endif
+
+static uint16_t
+bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+	struct bond_dev_private *internals = bd_tx_q->dev_private;
+	struct ether_hdr *eth_h;
+	uint16_t ether_type, offset;
+	uint16_t nb_recv_pkts;
+	int i;
+
+	nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
+
+	for (i = 0; i < nb_recv_pkts; i++) {
+		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+		ether_type = eth_h->ether_type;
+		offset = get_vlan_offset(eth_h, &ether_type);
+
+		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX); +#endif + bond_mode_alb_arp_recv(eth_h, offset, internals); + } +#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1) + else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) + mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX); +#endif + } + + return nb_recv_pkts; +} + static uint16_t bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) @@ -276,90 +505,89 @@ ipv6_hash(struct ipv6_hdr *ipv6_hdr) (word_src_addr[3] ^ word_dst_addr[3]); } -static uint32_t -udp_hash(struct udp_hdr *hdr) +uint16_t +xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count) { - return hdr->src_port ^ hdr->dst_port; -} + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); -static inline uint16_t -xmit_slave_hash(const struct rte_mbuf *buf, uint8_t slave_count, uint8_t policy) -{ - struct ether_hdr *eth_hdr; - struct udp_hdr *udp_hdr; - size_t eth_offset = 0; - uint32_t hash = 0; - - if (slave_count == 1) - return 0; + uint32_t hash = ether_hash(eth_hdr); - switch (policy) { - case BALANCE_XMIT_POLICY_LAYER2: - eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); - - hash = ether_hash(eth_hdr); - hash ^= hash >> 8; - return hash % slave_count; - - case BALANCE_XMIT_POLICY_LAYER23: - eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); - - if (buf->ol_flags & PKT_RX_VLAN_PKT) - eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr); - else - eth_offset = sizeof(struct ether_hdr); - - if (buf->ol_flags & PKT_RX_IPV4_HDR) { - struct ipv4_hdr *ipv4_hdr; - ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(buf, - unsigned char *) + eth_offset); + return (hash ^= hash >> 8) % slave_count; +} - hash = ether_hash(eth_hdr) ^ ipv4_hash(ipv4_hdr); +uint16_t +xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count) +{ + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); + uint16_t proto = eth_hdr->ether_type; + size_t vlan_offset = get_vlan_offset(eth_hdr, &proto); + uint32_t hash, l3hash = 0; + + hash = ether_hash(eth_hdr); + + if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) { + struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv4_hash(ipv4_hdr); + + } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) { + struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + l3hash = ipv6_hash(ipv6_hdr); + } - } else { - struct ipv6_hdr *ipv6_hdr; + hash = hash ^ l3hash; + hash ^= hash >> 16; + hash ^= hash >> 8; - ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(buf, - unsigned char *) + eth_offset); + return hash % slave_count; +} - hash = ether_hash(eth_hdr) ^ ipv6_hash(ipv6_hdr); +uint16_t +xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count) +{ + struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); + uint16_t proto = eth_hdr->ether_type; + size_t vlan_offset = get_vlan_offset(eth_hdr, &proto); + + struct udp_hdr *udp_hdr = NULL; + struct tcp_hdr *tcp_hdr = NULL; + uint32_t hash, l3hash = 0, l4hash = 0; + + if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) { + struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *) + ((char *)(eth_hdr + 1) + vlan_offset); + size_t ip_hdr_offset; + + l3hash = ipv4_hash(ipv4_hdr); + + ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) * + IPV4_IHL_MULTIPLIER; + + if (ipv4_hdr->next_proto_id == IPPROTO_TCP) { + tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + + ip_hdr_offset); + l4hash = 
+		} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
+			udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
+					ip_hdr_offset);
+			l4hash = HASH_L4_PORTS(udp_hdr);
 		}
-		break;
-
-	case BALANCE_XMIT_POLICY_LAYER34:
-		if (buf->ol_flags & PKT_RX_VLAN_PKT)
-			eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
-		else
-			eth_offset = sizeof(struct ether_hdr);
-
-		if (buf->ol_flags & PKT_RX_IPV4_HDR) {
-			struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
-				(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
-
-			if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
-				udp_hdr = (struct udp_hdr *)
-					(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
-							sizeof(struct ipv4_hdr));
-				hash = ipv4_hash(ipv4_hdr) ^ udp_hash(udp_hdr);
-			} else {
-				hash = ipv4_hash(ipv4_hdr);
-			}
-		} else {
-			struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
-				(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
-
-			if (ipv6_hdr->proto == IPPROTO_UDP) {
-				udp_hdr = (struct udp_hdr *)
-					(rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
-							sizeof(struct ipv6_hdr));
-				hash = ipv6_hash(ipv6_hdr) ^ udp_hash(udp_hdr);
-			} else {
-				hash = ipv6_hash(ipv6_hdr);
-			}
+	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+				((char *)(eth_hdr + 1) + vlan_offset);
+		l3hash = ipv6_hash(ipv6_hdr);
+
+		if (ipv6_hdr->proto == IPPROTO_TCP) {
+			tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
+			l4hash = HASH_L4_PORTS(tcp_hdr);
+		} else if (ipv6_hdr->proto == IPPROTO_UDP) {
+			udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
+			l4hash = HASH_L4_PORTS(udp_hdr);
 		}
-		break;
 	}

+	hash = l3hash ^ l4hash;
 	hash ^= hash >> 16;
 	hash ^= hash >> 8;

@@ -372,6 +600,15 @@ struct bwg_slave {
 	uint8_t slave;
 };

+void
+bond_tlb_activate_slave(struct bond_dev_private *internals) {
+	int i;
+
+	for (i = 0; i < internals->active_slave_count; i++) {
+		tlb_last_obytets[internals->active_slaves[i]] = 0;
+	}
+}
+
 static int
 bandwidth_cmp(const void *a, const void *b)
 {
@@ -402,7 +639,7 @@ bandwidth_left(int port_id, uint64_t load, uint8_t update_idx,
 	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
 	if (link_bwg == 0)
 		return;
-	link_bwg = (link_bwg * (update_idx+1) * REORDER_PERIOD_MS);
+	link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
 	bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
 	bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
 }
@@ -433,8 +670,9 @@ bond_ethdev_update_tlb_slave_cb(void *arg)
 				internals->slave_update_idx, &bwg_array[i]);
 		bwg_array[i].slave = slave_id;

-		if (update_stats)
+		if (update_stats) {
 			tlb_last_obytets[slave_id] = slave_stats.obytes;
+		}
 	}

 	if (update_stats == 1)
@@ -443,7 +681,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg)
 	slave_count = i;
 	qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
 	for (i = 0; i < slave_count; i++)
-		internals->active_slaves[i] = bwg_array[i].slave;
+		internals->tlb_slaves_order[i] = bwg_array[i].slave;

 	rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
 			(struct bond_dev_private *)internals);
@@ -470,8 +708,8 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	if (num_of_slaves < 1)
 		return num_tx_total;

-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * num_of_slaves);
+	memcpy(slaves, internals->tlb_slaves_order,
+			sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);

 	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);

@@ -482,9 +720,7 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}

 	for (i = 0; i < num_of_slaves; i++) {
-		ether_addr_copy(&internals->slaves[slaves[i]].persisted_mac_addr,
-				&active_slave_addr);
-
+		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
 		for (j = num_tx_total; j < nb_pkts; j++) {
 			if (j + 3 < nb_pkts)
 				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
@@ -492,6 +728,9 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			ether_hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
 			if (is_same_ether_addr(&ether_hdr->s_addr, &primary_slave_addr))
 				ether_addr_copy(&active_slave_addr, &ether_hdr->s_addr);
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+			mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
+#endif
 		}

 		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
@@ -504,6 +743,161 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	return num_tx_total;
 }

+void
+bond_tlb_disable(struct bond_dev_private *internals)
+{
+	rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
+}
+
+void
+bond_tlb_enable(struct bond_dev_private *internals)
+{
+	bond_ethdev_update_tlb_slave_cb(internals);
+}
+
+static uint16_t
+bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+	struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+	struct ether_hdr *eth_h;
+	uint16_t ether_type, offset;
+
+	struct client_data *client_info;
+
+	/*
+	 * We create transmit buffers for every slave and one additional to send
+	 * through tlb. In the worst case every packet will be sent on one port.
+	 */
+	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
+	uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
+
+	/*
+	 * We create separate transmit buffers for update packets as they won't be
+	 * counted in num_tx_total.
+	 */
+	struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
+	uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
+
+	struct rte_mbuf *upd_pkt;
+	size_t pkt_size;
+
+	uint16_t num_send, num_not_send = 0;
+	uint16_t num_tx_total = 0;
+	uint8_t slave_idx;
+
+	int i, j;
+
+	/* Search tx buffer for ARP packets and forward them to alb */
+	for (i = 0; i < nb_pkts; i++) {
+		eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+		ether_type = eth_h->ether_type;
+		offset = get_vlan_offset(eth_h, &ether_type);
+
+		if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
+
+			/* Change src mac in eth header */
+			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);
+
+			/* Add packet to slave tx buffer */
+			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
+			slave_bufs_pkts[slave_idx]++;
+		} else {
+			/* If packet is not ARP, send it with TLB policy */
+			slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
+					bufs[i];
+			slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
+		}
+	}
+
+	/* Update connected client ARP tables */
+	if (internals->mode6.ntt) {
+		for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
+			client_info = &internals->mode6.client_table[i];
+
+			if (client_info->in_use) {
+				/* Allocate new packet to send ARP update on current slave */
+				upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
+				if (upd_pkt == NULL) {
+					RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
+					continue;
+				}
+				pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr) +
+						client_info->vlan_count * sizeof(struct vlan_hdr);
+				upd_pkt->data_len = pkt_size;
+				upd_pkt->pkt_len = pkt_size;
+
+				slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
+						internals);
+
+				/* Add packet to update tx buffer */
+				update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
+				update_bufs_pkts[slave_idx]++;
+			}
+		}
+		internals->mode6.ntt = 0;
+	}
+
+	/* Send ARP packets on proper slaves */
+	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+		if (slave_bufs_pkts[i] > 0) {
+			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
+					slave_bufs[i], slave_bufs_pkts[i]);
+			for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
+				bufs[nb_pkts - 1 - num_not_send - j] =
+						slave_bufs[i][nb_pkts - 1 - j];
+			}
+
+			num_tx_total += num_send;
+			num_not_send += slave_bufs_pkts[i] - num_send;
+
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+			/* Print TX stats including update packets */
+			for (j = 0; j < slave_bufs_pkts[i]; j++) {
+				eth_h = rte_pktmbuf_mtod(slave_bufs[i][j], struct ether_hdr *);
+				mode6_debug("TX ARP:", eth_h, i, &burstnumberTX);
+			}
+#endif
+		}
+	}
+
+	/* Send update packets on proper slaves */
+	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+		if (update_bufs_pkts[i] > 0) {
+			num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
+					update_bufs_pkts[i]);
+			for (j = num_send; j < update_bufs_pkts[i]; j++) {
+				rte_pktmbuf_free(update_bufs[i][j]);
+			}
+#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
+			for (j = 0; j < update_bufs_pkts[i]; j++) {
+				eth_h = rte_pktmbuf_mtod(update_bufs[i][j], struct ether_hdr *);
+				mode6_debug("TX ARPupd:", eth_h, i, &burstnumberTX);
+			}
+#endif
+		}
+	}
+
+	/* Send non-ARP packets using tlb policy */
+	if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
+		num_send = bond_ethdev_tx_burst_tlb(queue,
+				slave_bufs[RTE_MAX_ETHPORTS],
+				slave_bufs_pkts[RTE_MAX_ETHPORTS]);
+
+		for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS]; j++) {
+			bufs[nb_pkts - 1 - num_not_send - j] =
+					slave_bufs[RTE_MAX_ETHPORTS][nb_pkts - 1 - j];
+		}
+
+		num_tx_total += num_send;
+		num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
+	}
+
+	return num_tx_total;
+}
+
 static uint16_t
 bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 		uint16_t nb_pkts)
@@ -536,8 +930,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	/* Populate slaves mbuf with the packets which are to be sent on it */
 	for (i = 0; i < nb_pkts; i++) {
 		/* Select output slave using hash based on xmit policy */
-		op_slave_id = xmit_slave_hash(bufs[i], num_of_slaves,
-				internals->balance_xmit_policy);
+		op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);

 		/* Populate slave mbuf arrays with mbufs for that slave */
 		slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
@@ -575,7 +968,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	uint8_t num_of_slaves;
 	uint8_t slaves[RTE_MAX_ETHPORTS];

-	/* possitions in slaves, not ID */
+	/* positions in slaves, not ID */
 	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
 	uint8_t distributing_count;

@@ -622,8 +1015,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	/* Populate slaves mbuf with the packets which are to be sent on it */
 	for (i = 0; i < nb_pkts; i++) {
 		/* Select output slave using hash based on xmit policy */
-		op_slave_idx = xmit_slave_hash(bufs[i], distributing_count,
-				internals->balance_xmit_policy);
+		op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);

 		/* Populate slave mbuf arrays with mbufs for that slave. Use only
 		 * slaves that are currently distributing. */
@@ -659,7 +1051,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	return num_tx_total;
 }

-#ifdef RTE_MBUF_REFCNT
 static uint16_t
 bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 		uint16_t nb_pkts)
@@ -719,7 +1110,6 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 	return max_nb_of_tx_pkts;
 }
-#endif

 void
 link_properties_set(struct rte_eth_dev *bonded_eth_dev,
@@ -817,9 +1207,7 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
 	switch (internals->mode) {
 	case BONDING_MODE_ROUND_ROBIN:
 	case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
 	case BONDING_MODE_BROADCAST:
-#endif
 		for (i = 0; i < internals->slave_count; i++) {
 			if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
 					bonded_eth_dev->data->mac_addrs)) {
@@ -833,7 +1221,8 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
 		bond_mode_8023ad_mac_address_update(bonded_eth_dev);
 		break;
 	case BONDING_MODE_ACTIVE_BACKUP:
-	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+	case BONDING_MODE_TLB:
+	case BONDING_MODE_ALB:
 	default:
 		for (i = 0; i < internals->slave_count; i++) {
 			if (internals->slaves[i].port_id ==
@@ -879,26 +1268,31 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
 		break;
-#ifdef RTE_MBUF_REFCNT
 	case BONDING_MODE_BROADCAST:
 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
 		break;
-#endif
 	case BONDING_MODE_8023AD:
 		if (bond_mode_8023ad_enable(eth_dev) != 0)
 			return -1;

 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
-		RTE_BOND_LOG(WARNING,
+		RTE_LOG(WARNING, PMD,
 				"Using mode 4, it is necessary to do TX burst and RX burst "
-				"at least every 100ms.");
+				"at least every 100ms.\n");
 		break;
-	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+	case BONDING_MODE_TLB:
 		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
 		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
 		break;
+	case BONDING_MODE_ALB:
+		if (bond_mode_alb_enable(eth_dev) != 0)
+			return -1;
+
+		eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
+		eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
+		break;
 	default:
 		return -1;
 	}
@@ -1114,8 +1508,9 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 	if (internals->mode == BONDING_MODE_8023AD)
 		bond_mode_8023ad_start(eth_dev);

-	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING)
-		bond_ethdev_update_tlb_slave_cb(internals);
+	if (internals->mode == BONDING_MODE_TLB ||
+			internals->mode == BONDING_MODE_ALB)
+		bond_tlb_enable(internals);

 	return 0;
 }
@@ -1146,8 +1541,11 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 		}
 	}

-	if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING) {
-		rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
+	if (internals->mode == BONDING_MODE_TLB ||
+			internals->mode == BONDING_MODE_ALB) {
+		bond_tlb_disable(internals);
+		for (i = 0; i < internals->active_slave_count; i++)
+			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}

 	internals->active_slave_count = 0;
@@ -1392,9 +1790,7 @@ bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 	/* Promiscuous mode is propagated to all slaves */
 	case BONDING_MODE_ROUND_ROBIN:
 	case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
 	case BONDING_MODE_BROADCAST:
-#endif
 		for (i = 0; i < internals->slave_count; i++)
 			rte_eth_promiscuous_enable(internals->slaves[i].port_id);
 		break;
@@ -1403,7 +1799,8 @@ bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 		break;
 	/* Promiscuous mode is propagated only to primary slave */
 	case BONDING_MODE_ACTIVE_BACKUP:
-	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+	case BONDING_MODE_TLB:
+	case BONDING_MODE_ALB:
 	default:
 		rte_eth_promiscuous_enable(internals->current_primary_port);
 	}
@@ -1421,9 +1818,7 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
 	/* Promiscuous mode is propagated to all slaves */
 	case BONDING_MODE_ROUND_ROBIN:
 	case BONDING_MODE_BALANCE:
-#ifdef RTE_MBUF_REFCNT
 	case BONDING_MODE_BROADCAST:
-#endif
 		for (i = 0; i < internals->slave_count; i++)
 			rte_eth_promiscuous_disable(internals->slaves[i].port_id);
 		break;
@@ -1432,7 +1827,8 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
 		break;
 	/* Promiscuous mode is propagated only to primary slave */
 	case BONDING_MODE_ACTIVE_BACKUP:
-	case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+	case BONDING_MODE_TLB:
+	case BONDING_MODE_ALB:
 	default:
 		rte_eth_promiscuous_disable(internals->current_primary_port);
 	}
@@ -1604,32 +2000,32 @@ bond_init(const char *name, const char *params)
 	/* Parse link bonding mode */
 	if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
 		if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
-				&bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) {
-			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", name);
-			return -1;
+				&bond_ethdev_parse_slave_mode_kvarg,
+				&bonding_mode) != 0) {
+			RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
+					name);
+			goto parse_error;
 		}
 	} else {
-		RTE_LOG(ERR, EAL,
-				"Mode must be specified only once for bonded device %s\n",
-				name);
-		return -1;
+		RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
+				"device %s\n", name);
+		goto parse_error;
 	}

 	/* Parse socket id to create bonding device on */
 	arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
 	if (arg_count == 1) {
 		if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
-				&bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) {
-			RTE_LOG(ERR, EAL,
-					"Invalid socket Id specified for bonded device %s\n",
-					name);
-			return -1;
+				&bond_ethdev_parse_socket_id_kvarg, &socket_id)
+				!= 0) {
+			RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
+					"bonded device %s\n", name);
+			goto parse_error;
 		}
 	} else if (arg_count > 1) {
-		RTE_LOG(ERR, EAL,
-				"Socket Id can be specified only once for bonded device %s\n",
-				name);
-		return -1;
+		RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
+				"bonded device %s\n", name);
+		goto parse_error;
 	} else {
 		socket_id = rte_socket_id();
 	}
@@ -1637,18 +2033,21 @@ bond_init(const char *name, const char *params)
 	/* Create link bonding eth device */
 	port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
 	if (port_id < 0) {
-		RTE_LOG(ERR, EAL,
-				"Failed to create socket %s in mode %u on socket %u.\n",
-				name, bonding_mode, socket_id);
-		return -1;
+		RTE_LOG(ERR, EAL, "Failed to create socket %s in mode %u on "
+				"socket %u.\n", name, bonding_mode, socket_id);
+		goto parse_error;
 	}
 	internals = rte_eth_devices[port_id].data->dev_private;
 	internals->kvlist = kvlist;

-	RTE_LOG(INFO, EAL,
-			"Create bonded device %s on port %d in mode %u on socket %u.\n",
-			name, port_id, bonding_mode, socket_id);
+	RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
+			"socket %u.\n", name, port_id, bonding_mode, socket_id);
 	return 0;
+
+parse_error:
+	rte_kvargs_free(kvlist);
+
+	return -1;
 }

 /* this part will resolve the slave portids after all the other pdev and vdev
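
Note on the hash rework in this patch: the per-packet policy switch removed from xmit_slave_hash() is replaced by three standalone functions (xmit_l2_hash, xmit_l23_hash, xmit_l34_hash) dispatched through the new internals->xmit_hash function pointer, so the balance policy is resolved once when it is configured instead of on every transmitted packet. A minimal sketch of how that pointer might be wired up, assuming the setter lives in rte_eth_bond_api.c next to the other rte_eth_bond_* calls (illustrative only, not part of this diff):

	/* Sketch: bind the transmit hash once, at policy-set time.
	 * Assumes xmit_l2_hash/xmit_l23_hash/xmit_l34_hash are exported
	 * by rte_eth_bond_pmd.c as in the hunks above. */
	int
	rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
	{
		struct bond_dev_private *internals;

		if (valid_bonded_port_id(bonded_port_id) != 0)
			return -1;

		internals = rte_eth_devices[bonded_port_id].data->dev_private;

		switch (policy) {
		case BALANCE_XMIT_POLICY_LAYER2:
			internals->balance_xmit_policy = policy;
			internals->xmit_hash = xmit_l2_hash;
			break;
		case BALANCE_XMIT_POLICY_LAYER23:
			internals->balance_xmit_policy = policy;
			internals->xmit_hash = xmit_l23_hash;
			break;
		case BALANCE_XMIT_POLICY_LAYER34:
			internals->balance_xmit_policy = policy;
			internals->xmit_hash = xmit_l34_hash;
			break;
		default:
			return -1;
		}
		return 0;
	}

With this arrangement the hot paths (bond_ethdev_tx_burst_balance and bond_ethdev_tx_burst_8023ad) simply call internals->xmit_hash(bufs[i], slave_count), and all three hash functions share the VLAN-aware get_vlan_offset() helper instead of relying on the PKT_RX_VLAN_PKT/PKT_RX_IPV4_HDR offload flags.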