{
size_t vlan_offset = 0;
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto ||
+ rte_cpu_to_be_16(ETHER_TYPE_QINQ) == *proto) {
struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
vlan_offset = sizeof(struct vlan_hdr);
{
struct bond_dev_private *internals;
- uint16_t num_rx_slave = 0;
uint16_t num_rx_total = 0;
-
+ uint16_t slave_count;
+ uint16_t active_slave;
int i;
/* Cast to structure, containing bonded device's port id and queue id */
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
-
internals = bd_rx_q->dev_private;
+ slave_count = internals->active_slave_count;
+ active_slave = internals->active_slave;
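+ /* Poll the active slaves in round-robin order, starting from the
+ * slave index remembered from the previous call so that rx load is
+ * spread across all active slaves. */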
+ for (i = 0; i < slave_count && nb_pkts; i++) {
+ uint16_t num_rx_slave;
- for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
/* Offset of pointer to *bufs increases as packets are received
* from other slaves */
- num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
- bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
- if (num_rx_slave) {
- num_rx_total += num_rx_slave;
- nb_pkts -= num_rx_slave;
- }
+ num_rx_slave =
+ rte_eth_rx_burst(internals->active_slaves[active_slave],
+ bd_rx_q->queue_id,
+ bufs + num_rx_total, nb_pkts);
+ num_rx_total += num_rx_slave;
+ nb_pkts -= num_rx_slave;
+ if (++active_slave == slave_count)
+ active_slave = 0;
}
+ if (++internals->active_slave >= slave_count)
+ internals->active_slave = 0;
return num_rx_total;
}
uint16_t num_rx_total = 0; /* Total number of received packets */
uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t slave_count;
-
- uint16_t i, idx;
+ uint16_t active_slave;
+ uint16_t i;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
slave_count = internals->active_slave_count;
+ active_slave = internals->active_slave;
memcpy(slaves, internals->active_slaves,
sizeof(internals->active_slaves[0]) * slave_count);
- for (i = 0, idx = internals->active_slave;
- i < slave_count && num_rx_total < nb_pkts; i++, idx++) {
- idx = idx % slave_count;
+ for (i = 0; i < slave_count && nb_pkts; i++) {
+ uint16_t num_rx_slave;
/* Read packets from this slave */
- num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
- &bufs[num_rx_total], nb_pkts - num_rx_total);
+ num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
+ bd_rx_q->queue_id,
+ bufs + num_rx_total, nb_pkts);
+ num_rx_total += num_rx_slave;
+ nb_pkts -= num_rx_slave;
+
+ if (++active_slave == slave_count)
+ active_slave = 0;
}
- internals->active_slave = idx;
+ if (++internals->active_slave >= slave_count)
+ internals->active_slave = 0;
return num_rx_total;
}
/* Mapping array generated by hash function to map mbufs to slaves */
uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
- uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count;
uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- uint16_t i, j;
+ uint16_t i;
if (unlikely(nb_bufs == 0))
return 0;
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
- struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
if (ACTOR_STATE(port, DISTRIBUTING))
dist_slave_port_ids[dist_slave_count++] =
for (i = 0; i < nb_bufs; i++) {
/* Populate slave mbuf arrays with mbufs for that slave. */
- uint8_t slave_idx = bufs_slave_port_idxs[i];
+ uint16_t slave_idx = bufs_slave_port_idxs[i];
slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
}
/* If tx burst fails move packets to end of bufs */
if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ int slave_tx_fail_count = slave_nb_bufs[i] -
slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count[i];
-
- /*
- * Shift bufs to beginning of array to allow reordering
- * later
- */
- for (j = 0; j < slave_tx_fail_count[i]; j++) {
- slave_bufs[i][j] =
- slave_bufs[i][(slave_tx_count - 1) + j];
- }
- }
- }
-
- /*
- * If there are tx burst failures we move packets to end of bufs to
- * preserve expected PMD behaviour of all failed transmitted being
- * at the end of the input mbuf array
- */
- if (unlikely(total_tx_fail_count > 0)) {
- int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
- for (i = 0; i < slave_count; i++) {
- if (slave_tx_fail_count[i] > 0) {
- for (j = 0; j < slave_tx_fail_count[i]; j++)
- bufs[bufs_idx++] = slave_bufs[i][j];
- }
+ total_tx_fail_count += slave_tx_fail_count;
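+ /* Move the unsent mbufs to the tail of bufs so that, as PMD tx
+ * burst semantics expect, all failed packets end up at the end of
+ * the caller's array. */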
+ memcpy(&bufs[nb_bufs - total_tx_fail_count],
+ &slave_bufs[i][slave_tx_count],
+ slave_tx_fail_count * sizeof(bufs[0]));
}
}
/* Cast to structure, containing bonded device's port id and queue id */
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
struct bond_dev_private *internals = bd_rx_q->dev_private;
- struct ether_addr bond_mac;
-
+ struct rte_eth_dev *bonded_eth_dev =
+ &rte_eth_devices[internals->port_id];
+ struct ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
struct ether_hdr *hdr;
const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
uint8_t collecting; /* current slave collecting status */
const uint8_t promisc = internals->promiscuous_en;
- uint8_t i, j, k;
uint8_t subtype;
+ uint16_t i;
+ uint16_t j;
+ uint16_t k;
- rte_eth_macaddr_get(internals->port_id, &bond_mac);
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
slave_count = internals->active_slave_count;
}
for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
j = num_rx_total;
- collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
+ collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
COLLECTING);
/* Read packets from this slave */
* in collecting state or bonding interface is not in promiscuous
* mode and packet address does not match. */
if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
- !collecting || (!promisc &&
- !is_multicast_ether_addr(&hdr->d_addr) &&
- !is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
+ !collecting ||
+ (!promisc &&
+ !is_multicast_ether_addr(&hdr->d_addr) &&
+ !is_same_ether_addr(bond_mac,
+ &hdr->d_addr)))) {
if (hdr->ether_type == ether_type_slow_be) {
bond_mode_8023ad_handle_slow_pkt(
idx = 0;
}
- internals->active_slave = idx;
+ if (++internals->active_slave >= slave_count)
+ internals->active_slave = 0;
+
return num_rx_total;
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
static void
-arp_op_name(uint16_t arp_op, char *buf)
+arp_op_name(uint16_t arp_op, char *buf, size_t buf_len)
{
switch (arp_op) {
case ARP_OP_REQUEST:
- snprintf(buf, sizeof("ARP Request"), "%s", "ARP Request");
+ snprintf(buf, buf_len, "%s", "ARP Request");
return;
case ARP_OP_REPLY:
- snprintf(buf, sizeof("ARP Reply"), "%s", "ARP Reply");
+ snprintf(buf, buf_len, "%s", "ARP Reply");
return;
case ARP_OP_REVREQUEST:
- snprintf(buf, sizeof("Reverse ARP Request"), "%s",
- "Reverse ARP Request");
+ snprintf(buf, buf_len, "%s", "Reverse ARP Request");
return;
case ARP_OP_REVREPLY:
- snprintf(buf, sizeof("Reverse ARP Reply"), "%s",
- "Reverse ARP Reply");
+ snprintf(buf, buf_len, "%s", "Reverse ARP Reply");
return;
case ARP_OP_INVREQUEST:
- snprintf(buf, sizeof("Peer Identify Request"), "%s",
- "Peer Identify Request");
+ snprintf(buf, buf_len, "%s", "Peer Identify Request");
return;
case ARP_OP_INVREPLY:
- snprintf(buf, sizeof("Peer Identify Reply"), "%s",
- "Peer Identify Reply");
+ snprintf(buf, buf_len, "%s", "Peer Identify Reply");
return;
default:
break;
}
- snprintf(buf, sizeof("Unknown"), "%s", "Unknown");
+ snprintf(buf, buf_len, "%s", "Unknown");
return;
}
#endif
arp_h = (struct arp_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
- arp_op_name(rte_be_to_cpu_16(arp_h->arp_op), ArpOp);
+ arp_op_name(rte_be_to_cpu_16(arp_h->arp_op),
+ ArpOp, sizeof(ArpOp));
MODE6_DEBUG(buf, src_ip, dst_ip, eth_h, ArpOp, port, *burstnumber);
}
#endif
tx_fail_total += tx_fail_slave;
memcpy(&bufs[nb_pkts - tx_fail_total],
- &slave_bufs[i][num_tx_slave],
- tx_fail_slave * sizeof(bufs[0]));
+ &slave_bufs[i][num_tx_slave],
+ tx_fail_slave * sizeof(bufs[0]));
}
num_tx_total += num_tx_slave;
}
void
burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves)
+ uint16_t slave_count, uint16_t *slaves)
{
struct ether_hdr *eth_hdr;
uint32_t hash;
void
burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves)
+ uint16_t slave_count, uint16_t *slaves)
{
uint16_t i;
struct ether_hdr *eth_hdr;
void
burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint8_t slave_count, uint16_t *slaves)
+ uint16_t slave_count, uint16_t *slaves)
{
struct ether_hdr *eth_hdr;
uint16_t proto;
struct bwg_slave {
uint64_t bwg_left_int;
uint64_t bwg_left_remainder;
- uint8_t slave;
+ uint16_t slave;
};
void
struct bond_dev_private *internals = arg;
struct rte_eth_stats slave_stats;
struct bwg_slave bwg_array[RTE_MAX_ETHPORTS];
- uint8_t slave_count;
+ uint16_t slave_count;
uint64_t tx_bytes;
uint8_t update_stats = 0;
- uint8_t i, slave_id;
+ uint16_t slave_id;
+ uint16_t i;
internals->slave_update_idx++;
/* Mapping array generated by hash function to map mbufs to slaves */
uint16_t bufs_slave_port_idxs[nb_bufs];
- uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count;
uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- uint16_t i, j;
+ uint16_t i;
if (unlikely(nb_bufs == 0))
return 0;
for (i = 0; i < nb_bufs; i++) {
/* Populate slave mbuf arrays with mbufs for that slave. */
- uint8_t slave_idx = bufs_slave_port_idxs[i];
+ uint16_t slave_idx = bufs_slave_port_idxs[i];
slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
}
/* If tx burst fails move packets to end of bufs */
if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ int slave_tx_fail_count = slave_nb_bufs[i] -
slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count[i];
-
- /*
- * Shift bufs to beginning of array to allow reordering
- * later
- */
- for (j = 0; j < slave_tx_fail_count[i]; j++) {
- slave_bufs[i][j] =
- slave_bufs[i][(slave_tx_count - 1) + j];
- }
- }
- }
-
- /*
- * If there are tx burst failures we move packets to end of bufs to
- * preserve expected PMD behaviour of all failed transmitted being
- * at the end of the input mbuf array
- */
- if (unlikely(total_tx_fail_count > 0)) {
- int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
- for (i = 0; i < slave_count; i++) {
- if (slave_tx_fail_count[i] > 0) {
- for (j = 0; j < slave_tx_fail_count[i]; j++)
- bufs[bufs_idx++] = slave_bufs[i][j];
- }
+ total_tx_fail_count += slave_tx_fail_count;
+ memcpy(&bufs[nb_bufs - total_tx_fail_count],
+ &slave_bufs[i][slave_tx_count],
+ slave_tx_fail_count * sizeof(bufs[0]));
}
}
/* Mapping array generated by hash function to map mbufs to slaves */
uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
- uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count;
uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- uint16_t i, j;
-
- if (unlikely(nb_bufs == 0))
- return 0;
+ uint16_t i;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
memcpy(slave_port_ids, internals->active_slaves,
sizeof(slave_port_ids[0]) * slave_count);
+ /* Check for LACP control packets and send if available */
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
+ struct rte_mbuf *ctrl_pkt = NULL;
+
+ if (likely(rte_ring_empty(port->tx_ring)))
+ continue;
+
+ if (rte_ring_dequeue(port->tx_ring,
+ (void **)&ctrl_pkt) != -ENOENT) {
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, &ctrl_pkt, 1);
+ /*
+ * Re-enqueue LAG control plane packets to the buffering
+ * ring if transmission fails so the packet isn't lost.
+ */
+ if (slave_tx_count != 1)
+ rte_ring_enqueue(port->tx_ring, ctrl_pkt);
+ }
+ }
+
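+ /* The LACPDUs above are transmitted even when the application
+ * passed no data packets; only now return for an empty data burst. */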
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
- struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
if (ACTOR_STATE(port, DISTRIBUTING))
dist_slave_port_ids[dist_slave_count++] =
slave_port_ids[i];
}
- if (likely(dist_slave_count > 1)) {
+ if (likely(dist_slave_count > 0)) {
/*
* Populate slaves mbuf with the packets which are to be sent
* Populate slave mbuf arrays with mbufs for that
* slave
*/
- uint8_t slave_idx = bufs_slave_port_idxs[i];
+ uint16_t slave_idx = bufs_slave_port_idxs[i];
slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
bufs[i];
/* If tx burst fails move packets to end of bufs */
if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ int slave_tx_fail_count = slave_nb_bufs[i] -
slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count[i];
-
- /*
- * Shift bufs to beginning of array to allow
- * reordering later
- */
- for (j = 0; j < slave_tx_fail_count[i]; j++)
- slave_bufs[i][j] =
- slave_bufs[i]
- [(slave_tx_count - 1)
- + j];
- }
- }
+ total_tx_fail_count += slave_tx_fail_count;
- /*
- * If there are tx burst failures we move packets to end of
- * bufs to preserve expected PMD behaviour of all failed
- * transmitted being at the end of the input mbuf array
- */
- if (unlikely(total_tx_fail_count > 0)) {
- int bufs_idx = nb_bufs - total_tx_fail_count - 1;
-
- for (i = 0; i < slave_count; i++) {
- if (slave_tx_fail_count[i] > 0) {
- for (j = 0;
- j < slave_tx_fail_count[i];
- j++) {
- bufs[bufs_idx++] =
- slave_bufs[i][j];
- }
- }
+ memcpy(&bufs[nb_bufs - total_tx_fail_count],
+ &slave_bufs[i][slave_tx_count],
+ slave_tx_fail_count * sizeof(bufs[0]));
}
}
}
- /* Check for LACP control packets and send if available */
- for (i = 0; i < slave_count; i++) {
- struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
- struct rte_mbuf *ctrl_pkt = NULL;
-
- if (likely(rte_ring_empty(port->tx_ring)))
- continue;
-
- if (rte_ring_dequeue(port->tx_ring,
- (void **)&ctrl_pkt) != -ENOENT) {
- slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
- bd_tx_q->queue_id, &ctrl_pkt, 1);
- /*
- * re-enqueue LAG control plane packets to buffering
- * ring if transmission fails so the packet isn't lost.
- */
- if (slave_tx_count != 1)
- rte_ring_enqueue(port->tx_ring, ctrl_pkt);
- }
- }
-
return total_tx_count;
}
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
- uint8_t tx_failed_flag = 0, num_of_slaves;
uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint8_t tx_failed_flag = 0;
+ uint16_t num_of_slaves;
uint16_t max_nb_of_tx_pkts = 0;
return max_nb_of_tx_pkts;
}
-void
+static void
link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
{
struct bond_dev_private *bond_ctx = ethdev->data->dev_private;
}
}
-int
+static int
link_properties_valid(struct rte_eth_dev *ethdev,
struct rte_eth_link *slave_link)
{
int errval = 0;
struct bond_dev_private *internals = (struct bond_dev_private *)
bonded_eth_dev->data->dev_private;
- struct port *port = &mode_8023ad_ports[slave_eth_dev->data->port_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
if (port->slow_pool == NULL) {
char mem_name[256];
/* If RSS is enabled for bonding, try to enable it for slaves */
if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
- if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
- != 0) {
+ if (internals->rss_key_len != 0) {
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
- bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+ internals->rss_key_len;
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
- bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ internals->rss_key;
} else {
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
}
slave_remove(struct bond_dev_private *internals,
struct rte_eth_dev *slave_eth_dev)
{
- uint8_t i;
+ uint16_t i;
for (i = 0; i < internals->slave_count; i++)
if (internals->slaves[i].port_id ==
static void
bond_ethdev_free_queues(struct rte_eth_dev *dev)
{
- uint8_t i;
+ uint16_t i;
if (dev->data->rx_queues != NULL) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t i;
+ uint16_t i;
if (internals->mode == BONDING_MODE_8023AD) {
struct port *port;
/* Discard all messages to/from mode 4 state machines */
for (i = 0; i < internals->active_slave_count; i++) {
- port = &mode_8023ad_ports[internals->active_slaves[i]];
+ port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
RTE_ASSERT(port->rx_ring != NULL);
while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
- internals->link_status_polling_enabled = 0;
- for (i = 0; i < internals->slave_count; i++)
- internals->slaves[i].last_link_status = 0;
-
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev->data->dev_started = 0;
+
+ internals->link_status_polling_enabled = 0;
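+ /* Stop and deactivate every slave that is still active so that
+ * stopping the bonded device also stops its member ports. */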
+ for (i = 0; i < internals->slave_count; i++) {
+ uint16_t slave_id = internals->slaves[i].port_id;
+ if (find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) !=
+ internals->active_slave_count) {
+ internals->slaves[i].last_link_status = 0;
+ rte_eth_dev_stop(slave_id);
+ deactivate_slave(eth_dev, slave_id);
+ }
+ }
}
void
bond_ethdev_close(struct rte_eth_dev *dev)
{
struct bond_dev_private *internals = dev->data->dev_private;
- uint8_t bond_port_id = internals->port_id;
+ uint16_t bond_port_id = internals->port_id;
int skipped = 0;
struct rte_flow_error ferror;
uint16_t max_nb_rx_queues = UINT16_MAX;
uint16_t max_nb_tx_queues = UINT16_MAX;
+ uint16_t max_rx_desc_lim = UINT16_MAX;
+ uint16_t max_tx_desc_lim = UINT16_MAX;
dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
*/
if (internals->slave_count > 0) {
struct rte_eth_dev_info slave_info;
- uint8_t idx;
+ uint16_t idx;
for (idx = 0; idx < internals->slave_count; idx++) {
rte_eth_dev_info_get(internals->slaves[idx].port_id,
if (slave_info.max_tx_queues < max_nb_tx_queues)
max_nb_tx_queues = slave_info.max_tx_queues;
+
+ if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim)
+ max_rx_desc_lim = slave_info.rx_desc_lim.nb_max;
+
+ if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim)
+ max_tx_desc_lim = slave_info.tx_desc_lim.nb_max;
}
}
dev_info->max_rx_queues = max_nb_rx_queues;
dev_info->max_tx_queues = max_nb_tx_queues;
+ memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
+ sizeof(dev_info->default_rxconf));
+ memcpy(&dev_info->default_txconf, &internals->default_txconf,
+ sizeof(dev_info->default_txconf));
+
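+ /* Report the most restrictive descriptor limits found among the
+ * slave devices. */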
+ dev_info->rx_desc_lim.nb_max = max_rx_desc_lim;
+ dev_info->tx_desc_lim.nb_max = max_tx_desc_lim;
+
/**
* If dedicated hw queues enabled for link bonding device in LACP mode
* then we need to reduce the maximum number of data path queues by 1.
case BONDING_MODE_TLB:
case BONDING_MODE_ALB:
default:
+ /* Do not touch promisc when there are no slaves, hence no primary port */
+ if (internals->slave_count == 0)
+ break;
rte_eth_promiscuous_enable(internals->current_primary_port);
}
}
case BONDING_MODE_TLB:
case BONDING_MODE_ALB:
default:
+ /* Do not touch promisc when there are no slaves, hence no primary port */
+ if (internals->slave_count == 0)
+ break;
rte_eth_promiscuous_disable(internals->current_primary_port);
}
}
struct rte_eth_link link;
int rc = -1;
- int i, valid_slave = 0;
- uint8_t active_pos;
uint8_t lsc_flag = 0;
+ int valid_slave = 0;
+ uint16_t active_pos;
+ uint16_t i;
if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
return rc;
- bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
+ bonded_eth_dev = &rte_eth_devices[*(uint16_t *)param];
if (check_for_bonded_ethdev(bonded_eth_dev))
return rc;
if (active_pos < internals->active_slave_count)
goto link_update;
- /* if no active slave ports then set this port to be primary port */
- if (internals->active_slave_count < 1) {
- /* If first active slave, then change link status */
- bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
- internals->current_primary_port = port_id;
- lsc_flag = 1;
-
- mac_address_slaves_update(bonded_eth_dev);
- }
-
/* check link state properties if bonded link is up*/
if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
if (link_properties_valid(bonded_eth_dev, &link) != 0)
link_properties_set(bonded_eth_dev, &link);
}
+ /* If no active slave ports then set this port to be
+ * the primary port.
+ */
+ if (internals->active_slave_count < 1) {
+ /* If first active slave, then change link status */
+ bonded_eth_dev->data->dev_link.link_status =
+ ETH_LINK_UP;
+ internals->current_primary_port = port_id;
+ lsc_flag = 1;
+
+ mac_address_slaves_update(bonded_eth_dev);
+ }
+
activate_slave(bonded_eth_dev, port_id);
- /* If user has defined the primary port then default to using it */
+ /* If the user has defined the primary port then default to
+ * using it.
+ */
if (internals->user_defined_primary_port &&
internals->primary_port == port_id)
bond_ethdev_primary_set(internals, port_id);
/* Initially allow to choose any offload type */
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ memset(&internals->default_rxconf, 0,
+ sizeof(internals->default_rxconf));
+ memset(&internals->default_txconf, 0,
+ sizeof(internals->default_txconf));
+
+ memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
+ memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
+
memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
memset(internals->slaves, 0, sizeof(internals->slaves));
err:
rte_free(internals);
- if (eth_dev != NULL) {
- rte_free(eth_dev->data->mac_addrs);
- rte_eth_dev_release_port(eth_dev);
- }
+ if (eth_dev != NULL)
+ eth_dev->data->dev_private = NULL;
+ rte_eth_dev_release_port(eth_dev);
return -1;
}
name = rte_vdev_device_name(dev);
RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(rte_vdev_device_args(dev)) == 0) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
RTE_BOND_LOG(ERR, "Failed to probe %s", name);
internals = rte_eth_devices[port_id].data->dev_private;
internals->kvlist = kvlist;
- rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
-
if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
if (rte_kvargs_process(kvlist,
PMD_BOND_AGG_MODE_KVARG,
}
if (internals->mode == BONDING_MODE_8023AD)
- rte_eth_bond_8023ad_agg_selection_set(port_id,
- agg_mode);
+ internals->mode4.agg_selection = agg_mode;
} else {
- rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE);
+ internals->mode4.agg_selection = AGG_STABLE;
}
+ rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
"socket %u.", name, port_id, bonding_mode, socket_id);
return 0;
if (eth_dev == NULL)
return -ENODEV;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return rte_eth_dev_release_port(eth_dev);
+
RTE_ASSERT(eth_dev->device == &dev->device);
internals = eth_dev->data->dev_private;
rte_mempool_free(internals->mode6.mempool);
rte_bitmap_free(internals->vlan_filter_bmp);
rte_free(internals->vlan_filter_bmpmem);
- rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data->mac_addrs);
rte_eth_dev_release_port(eth_dev);
unsigned i, j;
- /* If RSS is enabled, fill table and key with default values */
+ /*
+ * If RSS is enabled, fill table with default values and
+ * set key to the value specified in port RSS configuration.
+ * Fall back to the default RSS key if the key is not specified.
+ */
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
- memcpy(internals->rss_key, default_rss_key, 40);
+ if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
+ internals->rss_key_len =
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+ memcpy(internals->rss_key,
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
+ internals->rss_key_len);
+ } else {
+ internals->rss_key_len = sizeof(default_rss_key);
+ memcpy(internals->rss_key, default_rss_key,
+ internals->rss_key_len);
+ }
for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
internals->reta_conf[i].mask = ~0LL;
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
- internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
+ internals->reta_conf[i].reta[j] =
+ (i * RTE_RETA_GROUP_SIZE + j) %
+ dev->data->nb_rx_queues;
}
}
"Failed to parse agg selection mode for bonded device %s",
name);
}
- if (internals->mode == BONDING_MODE_8023AD)
- rte_eth_bond_8023ad_agg_selection_set(port_id,
- agg_mode);
+ if (internals->mode == BONDING_MODE_8023AD) {
+ int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
+ agg_mode);
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR,
+ "Invalid args for agg selection set for bonded device %s",
+ name);
+ return -1;
+ }
+ }
}
/* Parse/add slave ports to bonded device */
RTE_INIT(bond_init_log)
{
- bond_logtype = rte_log_register("pmd.net.bon");
+ bond_logtype = rte_log_register("pmd.net.bond");
if (bond_logtype >= 0)
rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
}