* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
+#include <netinet/in.h>
+
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
+
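+/* XOR of the L4 source and destination ports; valid for both TCP and UDP
+ * headers, which both begin with the src_port/dst_port pair.
+ */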
+#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
+
/* Table for statistics in mode 5 TLB */
static uint64_t tlb_last_obytets[RTE_MAX_ETHPORTS];
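+
+/* Return the total length of the VLAN header(s), if any, that follow the
+ * Ethernet header, and update *proto to the encapsulated EtherType. A single
+ * tag and one level of QinQ stacking are handled.
+ */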
+static inline size_t
+get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
+{
+ size_t vlan_offset = 0;
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+
+ vlan_offset = sizeof(struct vlan_hdr);
+ *proto = vlan_hdr->eth_proto;
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ vlan_hdr = vlan_hdr + 1;
+ *proto = vlan_hdr->eth_proto;
+ vlan_offset += sizeof(struct vlan_hdr);
+ }
+ }
+ return vlan_offset;
+}
+
static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
return num_rx_total;
}
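+
+/* RX burst for mode 6 (ALB): receive through the default RX path, then hand
+ * any ARP packets to the ALB logic so the client table can be updated.
+ */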
+static uint16_t
+bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+ struct bond_dev_private *internals = bd_rx_q->dev_private;
+
+ struct ether_hdr *eth_h;
+
+ uint16_t ether_type, offset;
+ uint16_t nb_recv_pkts;
+
+ int i;
+
+ nb_recv_pkts = bond_ethdev_rx_burst(queue, bufs, nb_pkts);
+
+ for (i = 0; i < nb_recv_pkts; i++) {
+ eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+ ether_type = eth_h->ether_type;
+ offset = get_vlan_offset(eth_h, &ether_type);
+
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ bond_mode_alb_arp_recv(eth_h, offset, internals);
+ }
+ }
+
+ return nb_recv_pkts;
+}
+
static uint16_t
bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
(word_src_addr[3] ^ word_dst_addr[3]);
}
-static uint32_t
-udp_hash(struct udp_hdr *hdr)
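+/* Layer 2 transmit policy: map a packet to a slave index by hashing the
+ * Ethernet header only.
+ */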
+uint16_t
+xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
{
- return hdr->src_port ^ hdr->dst_port;
-}
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
-static inline uint16_t
-xmit_slave_hash(const struct rte_mbuf *buf, uint8_t slave_count, uint8_t policy)
-{
- struct ether_hdr *eth_hdr;
- struct udp_hdr *udp_hdr;
- size_t eth_offset = 0;
- uint32_t hash = 0;
+ uint32_t hash = ether_hash(eth_hdr);
- if (slave_count == 1)
- return 0;
-
- switch (policy) {
- case BALANCE_XMIT_POLICY_LAYER2:
- eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
-
- hash = ether_hash(eth_hdr);
- hash ^= hash >> 8;
- return hash % slave_count;
-
- case BALANCE_XMIT_POLICY_LAYER23:
- eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
-
- if (buf->ol_flags & PKT_RX_VLAN_PKT)
- eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
- else
- eth_offset = sizeof(struct ether_hdr);
-
- if (buf->ol_flags & PKT_RX_IPV4_HDR) {
- struct ipv4_hdr *ipv4_hdr;
- ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(buf,
- unsigned char *) + eth_offset);
+ hash ^= hash >> 8;
+ return hash % slave_count;
+}
- hash = ether_hash(eth_hdr) ^ ipv4_hash(ipv4_hdr);
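+/* Layer 2+3 transmit policy: combine the Ethernet hash with an IPv4/IPv6
+ * address hash when an IP header is present.
+ */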
+uint16_t
+xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
+{
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
+ uint16_t proto = eth_hdr->ether_type;
+ size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
+ uint32_t hash, l3hash = 0;
+
+ hash = ether_hash(eth_hdr);
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ l3hash = ipv4_hash(ipv4_hdr);
+
+ } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ l3hash = ipv6_hash(ipv6_hdr);
+ }
- } else {
- struct ipv6_hdr *ipv6_hdr;
+ hash = hash ^ l3hash;
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
- ipv6_hdr = (struct ipv6_hdr *)(rte_pktmbuf_mtod(buf,
- unsigned char *) + eth_offset);
+ return hash % slave_count;
+}
- hash = ether_hash(eth_hdr) ^ ipv6_hash(ipv6_hdr);
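+/* Layer 3+4 transmit policy: hash on the IP addresses and, for TCP and UDP,
+ * the L4 port pair.
+ */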
+uint16_t
+xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
+{
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
+ uint16_t proto = eth_hdr->ether_type;
+ size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
+
+ struct udp_hdr *udp_hdr = NULL;
+ struct tcp_hdr *tcp_hdr = NULL;
+ uint32_t hash, l3hash = 0, l4hash = 0;
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ size_t ip_hdr_offset;
+
+ l3hash = ipv4_hash(ipv4_hdr);
+
+ ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+ IPV4_IHL_MULTIPLIER;
+
+ if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
+ tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
+ ip_hdr_offset);
+ l4hash = HASH_L4_PORTS(tcp_hdr);
+ } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
+ ip_hdr_offset);
+ l4hash = HASH_L4_PORTS(udp_hdr);
}
- break;
-
- case BALANCE_XMIT_POLICY_LAYER34:
- if (buf->ol_flags & PKT_RX_VLAN_PKT)
- eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
- else
- eth_offset = sizeof(struct ether_hdr);
-
- if (buf->ol_flags & PKT_RX_IPV4_HDR) {
- struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
- (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
-
- if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)
- (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
- sizeof(struct ipv4_hdr));
- hash = ipv4_hash(ipv4_hdr) ^ udp_hash(udp_hdr);
- } else {
- hash = ipv4_hash(ipv4_hdr);
- }
- } else {
- struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
- (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset);
-
- if (ipv6_hdr->proto == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)
- (rte_pktmbuf_mtod(buf, unsigned char *) + eth_offset +
- sizeof(struct ipv6_hdr));
- hash = ipv6_hash(ipv6_hdr) ^ udp_hash(udp_hdr);
- } else {
- hash = ipv6_hash(ipv6_hdr);
- }
+ } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ l3hash = ipv6_hash(ipv6_hdr);
+
+ if (ipv6_hdr->proto == IPPROTO_TCP) {
+ tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
+ l4hash = HASH_L4_PORTS(tcp_hdr);
+ } else if (ipv6_hdr->proto == IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
+ l4hash = HASH_L4_PORTS(udp_hdr);
}
- break;
}
+ hash = l3hash ^ l4hash;
hash ^= hash >> 16;
hash ^= hash >> 8;
uint8_t slave;
};
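+/* Reset the TLB byte counters of all active slaves, so bandwidth statistics
+ * start fresh after slave activation.
+ */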
+void
+bond_tlb_activate_slave(struct bond_dev_private *internals)
+{
+ int i;
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ tlb_last_obytets[internals->active_slaves[i]] = 0;
+ }
+}
+
static int
bandwidth_cmp(const void *a, const void *b)
{
uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
if (link_bwg == 0)
return;
- link_bwg = (link_bwg * (update_idx+1) * REORDER_PERIOD_MS);
+ link_bwg = link_bwg * (update_idx+1) * REORDER_PERIOD_MS;
bwg_slave->bwg_left_int = (link_bwg - 1000*load) / link_bwg;
bwg_slave->bwg_left_remainder = (link_bwg - 1000*load) % link_bwg;
}
internals->slave_update_idx, &bwg_array[i]);
bwg_array[i].slave = slave_id;
- if (update_stats)
+ if (update_stats) {
tlb_last_obytets[slave_id] = slave_stats.obytes;
+ }
}
if (update_stats == 1)
slave_count = i;
qsort(bwg_array, slave_count, sizeof(bwg_array[0]), bandwidth_cmp);
for (i = 0; i < slave_count; i++)
- internals->active_slaves[i] = bwg_array[i].slave;
+ internals->tlb_slaves_order[i] = bwg_array[i].slave;
rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb,
(struct bond_dev_private *)internals);
if (num_of_slaves < 1)
return num_tx_total;
- memcpy(slaves, internals->active_slaves,
- sizeof(internals->active_slaves[0]) * num_of_slaves);
+ memcpy(slaves, internals->tlb_slaves_order,
+ sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
}
for (i = 0; i < num_of_slaves; i++) {
- ether_addr_copy(&internals->slaves[slaves[i]].persisted_mac_addr,
- &active_slave_addr);
-
+ rte_eth_macaddr_get(slaves[i], &active_slave_addr);
for (j = num_tx_total; j < nb_pkts; j++) {
if (j + 3 < nb_pkts)
rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
return num_tx_total;
}
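+
+/* Cancel the periodic TLB slave reordering callback. */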
+void
+bond_tlb_disable(struct bond_dev_private *internals)
+{
+ rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
+}
+
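+/* Schedule the periodic TLB slave reordering callback. */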
+void
+bond_tlb_enable(struct bond_dev_private *internals)
+{
+ bond_ethdev_update_tlb_slave_cb(internals);
+}
+
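+/* TX burst for mode 6 (ALB): ARP packets are assigned to slaves by the ALB
+ * client table; all other traffic is transmitted with the TLB policy.
+ */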
+static uint16_t
+bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ struct ether_hdr *eth_h;
+ uint16_t ether_type, offset;
+
+ struct client_data *client_info;
+
+ /*
+ * Create a transmit buffer for every slave, plus one extra buffer for
+ * packets sent with the TLB policy. In the worst case every packet is
+ * sent on a single port.
+ */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS + 1][nb_pkts];
+ uint16_t slave_bufs_pkts[RTE_MAX_ETHPORTS + 1] = { 0 };
+
+ /*
+ * Create separate transmit buffers for ARP update packets; they are
+ * not counted in num_tx_total.
+ */
+ struct rte_mbuf *update_bufs[RTE_MAX_ETHPORTS][ALB_HASH_TABLE_SIZE];
+ uint16_t update_bufs_pkts[RTE_MAX_ETHPORTS] = { 0 };
+
+ struct rte_mbuf *upd_pkt;
+ size_t pkt_size;
+
+ uint16_t num_send, num_not_send = 0;
+ uint16_t num_tx_total = 0;
+ uint8_t slave_idx;
+
+ int i, j;
+
+ /* Search the TX buffer for ARP packets and hand them to the ALB logic */
+ for (i = 0; i < nb_pkts; i++) {
+ eth_h = rte_pktmbuf_mtod(bufs[i], struct ether_hdr *);
+ ether_type = eth_h->ether_type;
+ offset = get_vlan_offset(eth_h, &ether_type);
+
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
+
+ /* Change src mac in eth header */
+ rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);
+
+ /* Add packet to slave tx buffer */
+ slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
+ slave_bufs_pkts[slave_idx]++;
+ } else {
+ /* If packet is not ARP, send it with TLB policy */
+ slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS]] =
+ bufs[i];
+ slave_bufs_pkts[RTE_MAX_ETHPORTS]++;
+ }
+ }
+
+ /* Update connected client ARP tables */
+ if (internals->mode6.ntt) {
+ for (i = 0; i < ALB_HASH_TABLE_SIZE; i++) {
+ client_info = &internals->mode6.client_table[i];
+
+ if (client_info->in_use) {
+ /* Allocate new packet to send ARP update on current slave */
+ upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
+ if (upd_pkt == NULL) {
+ RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
+ continue;
+ }
+ pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
+ + client_info->vlan_count * sizeof(struct vlan_hdr);
+ upd_pkt->data_len = pkt_size;
+ upd_pkt->pkt_len = pkt_size;
+
+ slave_idx = bond_mode_alb_arp_upd(client_info, upd_pkt,
+ internals);
+
+ /* Add packet to update tx buffer */
+ update_bufs[slave_idx][update_bufs_pkts[slave_idx]] = upd_pkt;
+ update_bufs_pkts[slave_idx]++;
+ }
+ }
+ internals->mode6.ntt = 0;
+ }
+
+ /* Send ARP packets on proper slaves */
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if (slave_bufs_pkts[i] > 0) {
+ num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id,
+ slave_bufs[i], slave_bufs_pkts[i]);
+ /* Return unsent packets to the caller's array */
+ for (j = 0; j < slave_bufs_pkts[i] - num_send; j++) {
+ bufs[nb_pkts - 1 - num_not_send - j] =
+ slave_bufs[i][slave_bufs_pkts[i] - 1 - j];
+ }
+
+ num_tx_total += num_send;
+ num_not_send += slave_bufs_pkts[i] - num_send;
+ }
+ }
+
+ /* Send update packets on proper slaves */
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if (update_bufs_pkts[i] > 0) {
+ num_send = rte_eth_tx_burst(i, bd_tx_q->queue_id, update_bufs[i],
+ update_bufs_pkts[i]);
+ for (j = num_send; j < update_bufs_pkts[i]; j++) {
+ rte_pktmbuf_free(update_bufs[i][j]);
+ }
+ }
+ }
+
+ /* Send non-ARP packets using tlb policy */
+ if (slave_bufs_pkts[RTE_MAX_ETHPORTS] > 0) {
+ num_send = bond_ethdev_tx_burst_tlb(queue,
+ slave_bufs[RTE_MAX_ETHPORTS],
+ slave_bufs_pkts[RTE_MAX_ETHPORTS]);
+
+ /* Return unsent packets to the caller's array */
+ for (j = 0; j < slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send; j++) {
+ bufs[nb_pkts - 1 - num_not_send - j] =
+ slave_bufs[RTE_MAX_ETHPORTS][slave_bufs_pkts[RTE_MAX_ETHPORTS] - 1 - j];
+ }
+
+ num_tx_total += num_send;
+ num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
+ }
+
+ return num_tx_total;
+}
+
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
/* Populate slaves mbuf with the packets which are to be sent on it */
for (i = 0; i < nb_pkts; i++) {
/* Select output slave using hash based on xmit policy */
- op_slave_id = xmit_slave_hash(bufs[i], num_of_slaves,
- internals->balance_xmit_policy);
+ op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);
/* Populate slave mbuf arrays with mbufs for that slave */
slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
uint8_t num_of_slaves;
uint8_t slaves[RTE_MAX_ETHPORTS];
- /* possitions in slaves, not ID */
+ /* positions in slaves, not ID */
uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
uint8_t distributing_count;
/* Allocate additional packets in case 8023AD mode. */
struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
- void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS];
+ void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };
/* Total amount of packets in slave_bufs */
uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
/* Populate slaves mbuf with the packets which are to be sent on it */
for (i = 0; i < nb_pkts; i++) {
/* Select output slave using hash based on xmit policy */
- op_slave_idx = xmit_slave_hash(bufs[i], distributing_count,
- internals->balance_xmit_policy);
+ op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);
/* Populate slave mbuf arrays with mbufs for that slave. Use only
* slaves that are currently distributing. */
{
struct ether_addr *mac_addr;
- mac_addr = eth_dev->data->mac_addrs;
-
if (eth_dev == NULL) {
RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
return -1;
return -1;
}
+ mac_addr = eth_dev->data->mac_addrs;
+
ether_addr_copy(mac_addr, dst_mac_addr);
return 0;
}
{
struct ether_addr *mac_addr;
- mac_addr = eth_dev->data->mac_addrs;
-
if (eth_dev == NULL) {
RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
return -1;
return -1;
}
+ mac_addr = eth_dev->data->mac_addrs;
+
/* If new MAC is different to current MAC then update */
if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
memcpy(mac_addr, new_mac_addr, sizeof(*mac_addr));
break;
case BONDING_MODE_ACTIVE_BACKUP:
case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ case BONDING_MODE_ALB:
default:
for (i = 0; i < internals->slave_count; i++) {
if (internals->slaves[i].port_id ==
eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
- RTE_BOND_LOG(WARNING,
+ RTE_LOG(WARNING, PMD,
"Using mode 4, it is necessary to do TX burst and RX burst "
- "at least every 100ms.");
+ "at least every 100ms.\n");
break;
case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
break;
+ case BONDING_MODE_ALB:
+ if (bond_mode_alb_enable(eth_dev) != 0)
+ return -1;
+
+ eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
+ eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
+ break;
default:
return -1;
}
slave_remove(struct bond_dev_private *internals,
struct rte_eth_dev *slave_eth_dev)
{
- int i, found = 0;
+ uint8_t i;
- for (i = 0; i < internals->slave_count; i++) {
- if (internals->slaves[i].port_id == slave_eth_dev->data->port_id)
- found = 1;
+ for (i = 0; i < internals->slave_count; i++)
+ if (internals->slaves[i].port_id ==
+ slave_eth_dev->data->port_id)
+ break;
- if (found && i < (internals->slave_count - 1))
- memcpy(&internals->slaves[i], &internals->slaves[i+1],
- sizeof(internals->slaves[i]));
- }
+ if (i < (internals->slave_count - 1))
+ memmove(&internals->slaves[i], &internals->slaves[i + 1],
+ sizeof(internals->slaves[0]) *
+ (internals->slave_count - i - 1));
internals->slave_count--;
}
if (internals->mode == BONDING_MODE_8023AD)
bond_mode_8023ad_start(eth_dev);
- if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING)
- bond_ethdev_update_tlb_slave_cb(internals);
+ if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING ||
+ internals->mode == BONDING_MODE_ALB)
+ bond_tlb_enable(internals);
return 0;
}
}
}
- if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING) {
- rte_eal_alarm_cancel(bond_ethdev_update_tlb_slave_cb, internals);
+ if (internals->mode == BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING ||
+ internals->mode == BONDING_MODE_ALB) {
+ bond_tlb_disable(internals);
+ for (i = 0; i < internals->active_slave_count; i++)
+ tlb_last_obytets[internals->active_slaves[i]] = 0;
}
internals->active_slave_count = 0;
static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
+ struct bond_dev_private *internals = dev->data->dev_private;
+
dev_info->driver_name = driver_name;
dev_info->max_mac_addrs = 1;
dev_info->min_rx_bufsize = 0;
dev_info->pci_dev = dev->pci_dev;
+
+ dev_info->rx_offload_capa = internals->rx_offload_capa;
+ dev_info->tx_offload_capa = internals->tx_offload_capa;
}
static int
/* Promiscuous mode is propagated only to primary slave */
case BONDING_MODE_ACTIVE_BACKUP:
case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ case BONDING_MODE_ALB:
default:
rte_eth_promiscuous_enable(internals->current_primary_port);
}
/* Promiscuous mode is propagated only to primary slave */
case BONDING_MODE_ACTIVE_BACKUP:
case BONDING_MODE_ADAPTIVE_TRANSMIT_LOAD_BALANCING:
+ case BONDING_MODE_ALB:
default:
rte_eth_promiscuous_disable(internals->current_primary_port);
}
internals->current_primary_port = port_id;
lsc_flag = 1;
+ mac_address_slaves_update(bonded_eth_dev);
+
/* Inherit eth dev link properties from first active slave */
link_properties_set(bonded_eth_dev,
&(slave_eth_dev->data->dev_link));
/* Parse link bonding mode */
if (rte_kvargs_count(kvlist, PMD_BOND_MODE_KVARG) == 1) {
if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
- &bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) {
- RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", name);
- return -1;
+ &bond_ethdev_parse_slave_mode_kvarg,
+ &bonding_mode) != 0) {
+ RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
+ name);
+ goto parse_error;
}
} else {
- RTE_LOG(ERR, EAL,
- "Mode must be specified only once for bonded device %s\n",
- name);
- return -1;
+ RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
+ "device %s\n", name);
+ goto parse_error;
}
/* Parse socket id to create bonding device on */
arg_count = rte_kvargs_count(kvlist, PMD_BOND_SOCKET_ID_KVARG);
if (arg_count == 1) {
if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
- &bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) {
- RTE_LOG(ERR, EAL,
- "Invalid socket Id specified for bonded device %s\n",
- name);
- return -1;
+ &bond_ethdev_parse_socket_id_kvarg, &socket_id)
+ != 0) {
+ RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
+ "bonded device %s\n", name);
+ goto parse_error;
}
} else if (arg_count > 1) {
- RTE_LOG(ERR, EAL,
- "Socket Id can be specified only once for bonded device %s\n",
- name);
- return -1;
+ RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
+ "bonded device %s\n", name);
+ goto parse_error;
} else {
socket_id = rte_socket_id();
}
/* Create link bonding eth device */
port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
if (port_id < 0) {
- RTE_LOG(ERR, EAL,
- "Failed to create socket %s in mode %u on socket %u.\n",
- name, bonding_mode, socket_id);
- return -1;
+ RTE_LOG(ERR, EAL, "Failed to create socket %s in mode %u on "
+ "socket %u.\n", name, bonding_mode, socket_id);
+ goto parse_error;
}
internals = rte_eth_devices[port_id].data->dev_private;
internals->kvlist = kvlist;
- RTE_LOG(INFO, EAL,
- "Create bonded device %s on port %d in mode %u on socket %u.\n",
- name, port_id, bonding_mode, socket_id);
+ RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
+ "socket %u.\n", name, port_id, bonding_mode, socket_id);
return 0;
+
+parse_error:
+ rte_kvargs_free(kvlist);
+
+ return -1;
}
/* this part will resolve the slave portids after all the other pdev and vdev