* Copyright(c) 2010-2017 Intel Corporation
*/
#include <stdlib.h>
+#include <stdbool.h>
#include <netinet/in.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
#include "rte_eth_bond.h"
-#include "rte_eth_bond_private.h"
-#include "rte_eth_bond_8023ad_private.h"
+#include "eth_bond_private.h"
+#include "eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
internals = bd_rx_q->dev_private;
slave_count = internals->active_slave_count;
- active_slave = internals->active_slave;
+ active_slave = bd_rx_q->active_slave;
for (i = 0; i < slave_count && nb_pkts; i++) {
uint16_t num_rx_slave;
active_slave = 0;
}
- if (++internals->active_slave >= slave_count)
- internals->active_slave = 0;
+ if (++bd_rx_q->active_slave >= slave_count)
+ bd_rx_q->active_slave = 0;
return num_rx_total;
}
uint16_t slave_port) {
struct rte_eth_dev_info slave_info;
struct rte_flow_error error;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- (bond_dev->data->dev_private);
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
const struct rte_flow_action_queue lacp_queue_conf = {
.index = 0,
return -1;
}
-	rte_eth_dev_info_get(slave_port, &slave_info);
+	ret = rte_eth_dev_info_get(slave_port, &slave_info);
+	if (ret != 0) {
+		/* RTE_BOND_LOG appends '\n' itself; no trailing newline here. */
+		RTE_BOND_LOG(ERR,
+			"%s: Error during getting device (port %u) info: %s",
+			__func__, slave_port, strerror(-ret));
+
+		return ret;
+	}
+
+
if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues ||
slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) {
RTE_BOND_LOG(ERR,
int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
- struct bond_dev_private *internals = (struct bond_dev_private *)
- (bond_dev->data->dev_private);
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
struct rte_eth_dev_info bond_info;
uint16_t idx;
+ int ret;
/* Verify if all slaves in bonding supports flow director and */
if (internals->slave_count > 0) {
-	rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
+	ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info);
+	if (ret != 0) {
+		/* RTE_BOND_LOG appends '\n' itself; no trailing newline here. */
+		RTE_BOND_LOG(ERR,
+			"%s: Error during getting device (port %u) info: %s",
+			__func__, bond_dev->data->port_id,
+			strerror(-ret));
+
+		return ret;
+	}
internals->mode4.dedicated_queues.rx_qid = bond_info.nb_rx_queues;
internals->mode4.dedicated_queues.tx_qid = bond_info.nb_tx_queues;
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
struct rte_flow_error error;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- (bond_dev->data->dev_private);
-
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
struct rte_flow_action_queue lacp_queue_conf = {
.index = internals->mode4.dedicated_queues.rx_qid,
};
return 0;
}
-static uint16_t
-bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
-{
- struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
- struct bond_dev_private *internals = bd_rx_q->dev_private;
- uint16_t num_rx_total = 0; /* Total number of received packets */
- uint16_t slaves[RTE_MAX_ETHPORTS];
- uint16_t slave_count;
- uint16_t active_slave;
- uint16_t i;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- slave_count = internals->active_slave_count;
- active_slave = internals->active_slave;
- memcpy(slaves, internals->active_slaves,
- sizeof(internals->active_slaves[0]) * slave_count);
-
- for (i = 0; i < slave_count && nb_pkts; i++) {
- uint16_t num_rx_slave;
-
- /* Read packets from this slave */
- num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
- bd_rx_q->queue_id,
- bufs + num_rx_total, nb_pkts);
- num_rx_total += num_rx_slave;
- nb_pkts -= num_rx_slave;
-
- if (++active_slave == slave_count)
- active_slave = 0;
- }
-
- if (++internals->active_slave >= slave_count)
- internals->active_slave = 0;
-
- return num_rx_total;
-}
-
-static uint16_t
-bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
-{
- struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
- struct bond_dev_private *internals = bd_tx_q->dev_private;
-
- uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
- uint16_t slave_count;
-
- uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
- uint16_t dist_slave_count;
-
- /* 2-D array to sort mbufs for transmission on each slave into */
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
- /* Number of mbufs for transmission on each slave */
- uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
- /* Mapping array generated by hash function to map mbufs to slaves */
- uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
- uint16_t slave_tx_count;
- uint16_t total_tx_count = 0, total_tx_fail_count = 0;
-
- uint16_t i;
-
- if (unlikely(nb_bufs == 0))
- return 0;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- slave_count = internals->active_slave_count;
- if (unlikely(slave_count < 1))
- return 0;
-
- memcpy(slave_port_ids, internals->active_slaves,
- sizeof(slave_port_ids[0]) * slave_count);
-
-
- dist_slave_count = 0;
- for (i = 0; i < slave_count; i++) {
- struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
-
- if (ACTOR_STATE(port, DISTRIBUTING))
- dist_slave_port_ids[dist_slave_count++] =
- slave_port_ids[i];
- }
-
- if (unlikely(dist_slave_count < 1))
- return 0;
-
- /*
- * Populate slaves mbuf with the packets which are to be sent on it
- * selecting output slave using hash based on xmit policy
- */
- internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
- bufs_slave_port_idxs);
-
- for (i = 0; i < nb_bufs; i++) {
- /* Populate slave mbuf arrays with mbufs for that slave. */
- uint16_t slave_idx = bufs_slave_port_idxs[i];
-
- slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
- }
-
-
- /* Send packet burst on each slave device */
- for (i = 0; i < dist_slave_count; i++) {
- if (slave_nb_bufs[i] == 0)
- continue;
-
- slave_tx_count = rte_eth_tx_burst(dist_slave_port_ids[i],
- bd_tx_q->queue_id, slave_bufs[i],
- slave_nb_bufs[i]);
-
- total_tx_count += slave_tx_count;
-
- /* If tx burst fails move packets to end of bufs */
- if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- int slave_tx_fail_count = slave_nb_bufs[i] -
- slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count;
- memcpy(&bufs[nb_bufs - total_tx_fail_count],
- &slave_bufs[i][slave_tx_count],
- slave_tx_fail_count * sizeof(bufs[0]));
- }
- }
-
- return total_tx_count;
-}
-
-
-static uint16_t
-bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
+static inline uint16_t
+rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
+ bool dedicated_rxq)
{
/* Cast to structure, containing bonded device's port id and queue id */
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
uint16_t slave_count, idx;
uint8_t collecting; /* current slave collecting status */
- const uint8_t promisc = internals->promiscuous_en;
+ const uint8_t promisc = rte_eth_promiscuous_get(internals->port_id);
+ const uint8_t allmulti = rte_eth_allmulticast_get(internals->port_id);
uint8_t subtype;
uint16_t i;
uint16_t j;
memcpy(slaves, internals->active_slaves,
sizeof(internals->active_slaves[0]) * slave_count);
- idx = internals->active_slave;
+ idx = bd_rx_q->active_slave;
if (idx >= slave_count) {
- internals->active_slave = 0;
+ bd_rx_q->active_slave = 0;
idx = 0;
}
for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
/* Handle slow protocol packets. */
while (j < num_rx_total) {
-
- /* If packet is not pure L2 and is known, skip it */
- if ((bufs[j]->packet_type & ~RTE_PTYPE_L2_ETHER) != 0) {
- j++;
- continue;
- }
-
if (j + 3 < num_rx_total)
rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
hdr = rte_pktmbuf_mtod(bufs[j], struct rte_ether_hdr *);
subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
- /* Remove packet from array if it is slow packet or slave is not
- * in collecting state or bonding interface is not in promiscuous
- * mode and packet address does not match. */
- if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
+ /* Remove packet from array if:
+ * - it is slow packet but no dedicated rxq is present,
+ * - slave is not in collecting state,
+ * - bonding interface is not in promiscuous mode:
+ * - packet is unicast and address does not match,
+ * - packet is multicast and bonding interface
+ * is not in allmulti,
+ */
+ if (unlikely(
+ (!dedicated_rxq &&
+ is_lacp_packets(hdr->ether_type, subtype,
+ bufs[j])) ||
!collecting ||
(!promisc &&
- !rte_is_multicast_ether_addr(&hdr->d_addr) &&
- !rte_is_same_ether_addr(bond_mac,
- &hdr->d_addr)))) {
+ ((rte_is_unicast_ether_addr(&hdr->dst_addr) &&
+ !rte_is_same_ether_addr(bond_mac,
+ &hdr->dst_addr)) ||
+ (!allmulti &&
+ rte_is_multicast_ether_addr(&hdr->dst_addr)))))) {
if (hdr->ether_type == ether_type_slow_be) {
bond_mode_8023ad_handle_slow_pkt(
idx = 0;
}
- if (++internals->active_slave >= slave_count)
- internals->active_slave = 0;
+ if (++bd_rx_q->active_slave >= slave_count)
+ bd_rx_q->active_slave = 0;
return num_rx_total;
}
+static uint16_t
+bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ return rx_burst_8023ad(queue, bufs, nb_pkts, false);
+}
+
+static uint16_t
+bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_pkts)
+{
+ return rx_burst_8023ad(queue, bufs, nb_pkts, true);
+}
+
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
uint32_t burstnumberRX;
uint32_t burstnumberTX;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
rte_log(RTE_LOG_DEBUG, bond_logtype, \
- "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
- "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
+ "%s port:%d SrcMAC:" RTE_ETHER_ADDR_PRT_FMT " SrcIP:%s " \
+ "DstMAC:" RTE_ETHER_ADDR_PRT_FMT " DstIP:%s %s %d\n", \
info, \
port, \
- eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
- eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
- eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
+ RTE_ETHER_ADDR_BYTES(ð_h->src_addr), \
src_ip, \
- eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
- eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
- eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
+ RTE_ETHER_ADDR_BYTES(ð_h->dst_addr), \
dst_ip, \
arp_op, ++burstnumber)
#endif
static void
-mode6_debug(const char __attribute__((unused)) *info,
+mode6_debug(const char __rte_unused *info,
struct rte_ether_hdr *eth_h, uint16_t port,
- uint32_t __attribute__((unused)) *burstnumber)
+ uint32_t __rte_unused *burstnumber)
{
struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
static uint16_t
bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
- struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
- struct bond_dev_private *internals = bd_tx_q->dev_private;
+ struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+ struct bond_dev_private *internals = bd_rx_q->dev_private;
struct rte_ether_hdr *eth_h;
uint16_t ether_type, offset;
uint16_t nb_recv_pkts;
ether_hash(struct rte_ether_hdr *eth_hdr)
{
unaligned_uint16_t *word_src_addr =
- (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
+ (unaligned_uint16_t *)eth_hdr->src_addr.addr_bytes;
unaligned_uint16_t *word_dst_addr =
- (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;
+ (unaligned_uint16_t *)eth_hdr->dst_addr.addr_bytes;
return (word_src_addr[0] ^ word_dst_addr[0]) ^
(word_src_addr[1] ^ word_dst_addr[1]) ^
struct bwg_slave *bwg_slave)
{
struct rte_eth_link link_status;
+ int ret;
- rte_eth_link_get_nowait(port_id, &link_status);
+ ret = rte_eth_link_get_nowait(port_id, &link_status);
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
+ port_id, rte_strerror(-ret));
+ return;
+ }
uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
if (link_bwg == 0)
return;
ether_hdr = rte_pktmbuf_mtod(bufs[j],
struct rte_ether_hdr *);
- if (rte_is_same_ether_addr(ðer_hdr->s_addr,
+ if (rte_is_same_ether_addr(ðer_hdr->src_addr,
&primary_slave_addr))
rte_ether_addr_copy(&active_slave_addr,
- ðer_hdr->s_addr);
+ ðer_hdr->src_addr);
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
#endif
slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
/* Change src mac in eth header */
- rte_eth_macaddr_get(slave_idx, ð_h->s_addr);
+ rte_eth_macaddr_get(slave_idx, ð_h->src_addr);
/* Add packet to slave tx buffer */
slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
return num_tx_total;
}
-static uint16_t
-bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+static inline uint16_t
+tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+ uint16_t *slave_port_ids, uint16_t slave_count)
{
struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
struct bond_dev_private *internals = bd_tx_q->dev_private;
- uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
- uint16_t slave_count;
-
/* Array to sort mbufs for transmission on each slave into */
struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
/* Number of mbufs for transmission on each slave */
uint16_t i;
- if (unlikely(nb_bufs == 0))
- return 0;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- slave_count = internals->active_slave_count;
- if (unlikely(slave_count < 1))
- return 0;
-
- memcpy(slave_port_ids, internals->active_slaves,
- sizeof(slave_port_ids[0]) * slave_count);
-
/*
* Populate slaves mbuf with the packets which are to be sent on it
* selecting output slave using hash based on xmit policy
}
static uint16_t
-bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
uint16_t nb_bufs)
{
struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
uint16_t slave_count;
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting
+ */
+ slave_count = internals->active_slave_count;
+ if (unlikely(slave_count < 1))
+ return 0;
+
+ memcpy(slave_port_ids, internals->active_slaves,
+ sizeof(slave_port_ids[0]) * slave_count);
+ return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
+ slave_count);
+}
+
+static inline uint16_t
+tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+ bool dedicated_txq)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
+
uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
uint16_t dist_slave_count;
- /* 2-D array to sort mbufs for transmission on each slave into */
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
- /* Number of mbufs for transmission on each slave */
- uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
- /* Mapping array generated by hash function to map mbufs to slaves */
- uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
uint16_t slave_tx_count;
- uint16_t total_tx_count = 0, total_tx_fail_count = 0;
uint16_t i;
memcpy(slave_port_ids, internals->active_slaves,
sizeof(slave_port_ids[0]) * slave_count);
+ if (dedicated_txq)
+ goto skip_tx_ring;
+
/* Check for LACP control packets and send if available */
for (i = 0; i < slave_count; i++) {
struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
}
}
+skip_tx_ring:
if (unlikely(nb_bufs == 0))
return 0;
slave_port_ids[i];
}
- if (likely(dist_slave_count > 0)) {
-
- /*
- * Populate slaves mbuf with the packets which are to be sent
- * on it, selecting output slave using hash based on xmit policy
- */
- internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
- bufs_slave_port_idxs);
-
- for (i = 0; i < nb_bufs; i++) {
- /*
- * Populate slave mbuf arrays with mbufs for that
- * slave
- */
- uint16_t slave_idx = bufs_slave_port_idxs[i];
-
- slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
- bufs[i];
- }
-
-
- /* Send packet burst on each slave device */
- for (i = 0; i < dist_slave_count; i++) {
- if (slave_nb_bufs[i] == 0)
- continue;
-
- slave_tx_count = rte_eth_tx_burst(
- dist_slave_port_ids[i],
- bd_tx_q->queue_id, slave_bufs[i],
- slave_nb_bufs[i]);
-
- total_tx_count += slave_tx_count;
+ if (unlikely(dist_slave_count < 1))
+ return 0;
- /* If tx burst fails move packets to end of bufs */
- if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- int slave_tx_fail_count = slave_nb_bufs[i] -
- slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count;
+ return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
+ dist_slave_count);
+}
- memcpy(&bufs[nb_bufs - total_tx_fail_count],
- &slave_bufs[i][slave_tx_count],
- slave_tx_fail_count * sizeof(bufs[0]));
- }
- }
- }
+static uint16_t
+bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ return tx_burst_8023ad(queue, bufs, nb_bufs, false);
+}
- return total_tx_count;
+static uint16_t
+bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}
static uint16_t
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+ bool set;
int i;
/* Update slave devices MAC addresses */
case BONDING_MODE_TLB:
case BONDING_MODE_ALB:
default:
+ set = true;
for (i = 0; i < internals->slave_count; i++) {
if (internals->slaves[i].port_id ==
internals->current_primary_port) {
if (rte_eth_dev_default_mac_addr_set(
- internals->primary_port,
+ internals->current_primary_port,
bonded_eth_dev->data->mac_addrs)) {
RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
internals->current_primary_port);
- return -1;
+ set = false;
}
} else {
if (rte_eth_dev_default_mac_addr_set(
&internals->slaves[i].persisted_mac_addr)) {
RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
internals->slaves[i].port_id);
- return -1;
}
}
}
+ if (!set)
+ return -1;
}
return 0;
struct rte_eth_dev *slave_eth_dev)
{
int errval = 0;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
if (port->slow_pool == NULL) {
uint16_t q_id;
struct rte_flow_error flow_error;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
/* Stop slave */
- rte_eth_dev_stop(slave_eth_dev->data->port_id);
+ errval = rte_eth_dev_stop(slave_eth_dev->data->port_id);
+ if (errval != 0)
+ RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)",
+ slave_eth_dev->data->port_id, errval);
/* Enable interrupts on slave device if supported */
if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
/* If RSS is enabled for bonding, try to enable it for slaves */
if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
- if (internals->rss_key_len != 0) {
- slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
+ /* rss_key won't be empty if RSS is configured in bonded dev */
+ slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
internals->rss_key_len;
- slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
+ slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
internals->rss_key;
- } else {
- slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
- }
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
slave_eth_dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_VLAN_FILTER;
+ slave_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ bonded_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME)
+ slave_eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ slave_eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
!= 0)
return errval;
- if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
- slave_eth_dev->data->port_id) != 0) {
+ errval = bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
+ slave_eth_dev->data->port_id);
+ if (errval != 0) {
RTE_BOND_LOG(ERR,
- "rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
- slave_eth_dev->data->port_id, q_id, errval);
- return -1;
+ "bond_ethdev_8023ad_flow_verify: port=%d, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return errval;
}
if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
&flow_error);
- bond_ethdev_8023ad_flow_set(bonded_eth_dev,
+ errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
slave_eth_dev->data->port_id);
+ if (errval != 0) {
+ RTE_BOND_LOG(ERR,
+ "bond_ethdev_8023ad_flow_set: port=%d, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return errval;
+ }
}
/* Start device */
internals->slave_count--;
/* force reconfiguration of slave interfaces */
- _rte_eth_dev_reset(slave_eth_dev);
+ rte_eth_dev_internal_reset(slave_eth_dev);
}
static void
}
}
-static void
+static int
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
static int
}
}
- /* If bonded device is configure in promiscuous mode then re-apply config */
- if (internals->promiscuous_en)
- bond_ethdev_promiscuous_enable(eth_dev);
-
if (internals->mode == BONDING_MODE_8023AD) {
if (internals->mode4.dedicated_queues.enabled == 1) {
internals->mode4.dedicated_queues.rx_qid =
}
}
-void
+int
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
uint16_t i;
+ int ret;
if (internals->mode == BONDING_MODE_8023AD) {
struct port *port;
internals->active_slave_count, slave_id) !=
internals->active_slave_count) {
internals->slaves[i].last_link_status = 0;
- rte_eth_dev_stop(slave_id);
+ ret = rte_eth_dev_stop(slave_id);
+ if (ret != 0) {
+ RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
+ slave_id);
+ return ret;
+ }
deactivate_slave(eth_dev, slave_id);
}
}
+
+ return 0;
}
-void
+int
bond_ethdev_close(struct rte_eth_dev *dev)
{
struct bond_dev_private *internals = dev->data->dev_private;
int skipped = 0;
struct rte_flow_error ferror;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
while (internals->slave_count != skipped) {
uint16_t port_id = internals->slaves[skipped].port_id;
- rte_eth_dev_stop(port_id);
+ if (rte_eth_dev_stop(port_id) != 0) {
+ RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
+ port_id);
+ skipped++;
+ }
if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
RTE_BOND_LOG(ERR,
bond_flow_ops.flush(dev, &ferror);
bond_ethdev_free_queues(dev);
rte_bitmap_reset(internals->vlan_filter_bmp);
+ rte_bitmap_free(internals->vlan_filter_bmp);
+ rte_free(internals->vlan_filter_bmpmem);
+
+ /* Try to release mempool used in mode6. If the bond
+ * device is not mode6, free the NULL is not problem.
+ */
+ rte_mempool_free(internals->mode6.mempool);
+
+ if (internals->kvlist != NULL)
+ rte_kvargs_free(internals->kvlist);
+
+ return 0;
}
/* forward declaration */
static int bond_ethdev_configure(struct rte_eth_dev *dev);
-static void
+static int
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bond_dev_private *internals = dev->data->dev_private;
+ struct bond_slave_details slave;
+ int ret;
uint16_t max_nb_rx_queues = UINT16_MAX;
uint16_t max_nb_tx_queues = UINT16_MAX;
uint16_t idx;
for (idx = 0; idx < internals->slave_count; idx++) {
-	rte_eth_dev_info_get(internals->slaves[idx].port_id,
-			&slave_info);
+	slave = internals->slaves[idx];
+	ret = rte_eth_dev_info_get(slave.port_id, &slave_info);
+	if (ret != 0) {
+		/* RTE_BOND_LOG appends '\n' itself; no trailing newline here. */
+		RTE_BOND_LOG(ERR,
+			"%s: Error during getting device (port %u) info: %s",
+			__func__,
+			slave.port_id,
+			strerror(-ret));
+
+		return ret;
+	}
if (slave_info.max_rx_queues < max_nb_rx_queues)
max_nb_rx_queues = slave_info.max_rx_queues;
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
dev_info->reta_size = internals->reta_size;
+ dev_info->hash_key_size = internals->rss_key_len;
+
+ return 0;
}
static int
}
static void
-bond_ethdev_rx_queue_release(void *queue)
+bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ void *queue = dev->data->rx_queues[queue_id];
+
if (queue == NULL)
return;
}
static void
-bond_ethdev_tx_queue_release(void *queue)
+bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+	/* Fix copy-paste bug: a Tx queue must be looked up in
+	 * dev->data->tx_queues, not rx_queues, otherwise the wrong
+	 * queue is freed and the Tx queue leaks.
+	 */
+	void *queue = dev->data->tx_queues[queue_id];
+
 	if (queue == NULL)
 		return;
if (cb_arg == NULL)
return;
- bonded_ethdev = (struct rte_eth_dev *)cb_arg;
- internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
+ bonded_ethdev = cb_arg;
+ internals = bonded_ethdev->data->dev_private;
if (!bonded_ethdev->data->dev_started ||
!internals->link_status_polling_enabled)
static int
bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
{
- void (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
+ int (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
struct bond_dev_private *bond_ctx;
struct rte_eth_link slave_link;
+ bool one_link_update_succeeded;
uint32_t idx;
+ int ret;
bond_ctx = ethdev->data->dev_private;
* packet loss will occur on this slave if transmission at rates
* greater than this are attempted
*/
- for (idx = 1; idx < bond_ctx->active_slave_count; idx++) {
- link_update(bond_ctx->active_slaves[0], &slave_link);
+ for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
+ ret = link_update(bond_ctx->active_slaves[idx],
+ &slave_link);
+ if (ret < 0) {
+ ethdev->data->dev_link.link_speed =
+ ETH_SPEED_NUM_NONE;
+ RTE_BOND_LOG(ERR,
+ "Slave (port %u) link get failed: %s",
+ bond_ctx->active_slaves[idx],
+ rte_strerror(-ret));
+ return 0;
+ }
if (slave_link.link_speed <
ethdev->data->dev_link.link_speed)
break;
case BONDING_MODE_ACTIVE_BACKUP:
/* Current primary slave */
- link_update(bond_ctx->current_primary_port, &slave_link);
+ ret = link_update(bond_ctx->current_primary_port, &slave_link);
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR, "Slave (port %u) link get failed: %s",
+ bond_ctx->current_primary_port,
+ rte_strerror(-ret));
+ return 0;
+ }
ethdev->data->dev_link.link_speed = slave_link.link_speed;
break;
bond_ctx->mode4.slave_link.link_autoneg;
ethdev->data->dev_link.link_duplex =
bond_ctx->mode4.slave_link.link_duplex;
- /* fall through to update link speed */
+ /* fall through */
+ /* to update link speed */
case BONDING_MODE_ROUND_ROBIN:
case BONDING_MODE_BALANCE:
case BONDING_MODE_TLB:
* of all the slaves
*/
ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ one_link_update_succeeded = false;
for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
- link_update(bond_ctx->active_slaves[idx], &slave_link);
+ ret = link_update(bond_ctx->active_slaves[idx],
+ &slave_link);
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR,
+ "Slave (port %u) link get failed: %s",
+ bond_ctx->active_slaves[idx],
+ rte_strerror(-ret));
+ continue;
+ }
+ one_link_update_succeeded = true;
ethdev->data->dev_link.link_speed +=
slave_link.link_speed;
}
+
+ if (!one_link_update_succeeded) {
+ RTE_BOND_LOG(ERR, "All slaves link get failed");
+ return 0;
+ }
}
return 0;
}
-static void
+static int
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
struct bond_dev_private *internals = dev->data->dev_private;
int i;
+ int err;
+ int ret;
- for (i = 0; i < internals->slave_count; i++)
- rte_eth_stats_reset(internals->slaves[i].port_id);
+ for (i = 0, err = 0; i < internals->slave_count; i++) {
+ ret = rte_eth_stats_reset(internals->slaves[i].port_id);
+ if (ret != 0)
+ err = ret;
+ }
+
+ return err;
}
-static void
+static int
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
int i;
-
- internals->promiscuous_en = 1;
+ int ret = 0;
+ uint16_t port_id;
switch (internals->mode) {
/* Promiscuous mode is propagated to all slaves */
case BONDING_MODE_ROUND_ROBIN:
case BONDING_MODE_BALANCE:
case BONDING_MODE_BROADCAST:
- for (i = 0; i < internals->slave_count; i++)
- rte_eth_promiscuous_enable(internals->slaves[i].port_id);
- break;
- /* In mode4 promiscus mode is managed when slave is added/removed */
- case BONDING_MODE_8023AD:
+ case BONDING_MODE_8023AD: {
+ unsigned int slave_ok = 0;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ port_id = internals->slaves[i].port_id;
+
+ ret = rte_eth_promiscuous_enable(port_id);
+ if (ret != 0)
+ RTE_BOND_LOG(ERR,
+ "Failed to enable promiscuous mode for port %u: %s",
+ port_id, rte_strerror(-ret));
+ else
+ slave_ok++;
+ }
+ /*
+ * Report success if operation is successful on at least
+ * on one slave. Otherwise return last error code.
+ */
+ if (slave_ok > 0)
+ ret = 0;
break;
+ }
/* Promiscuous mode is propagated only to primary slave */
case BONDING_MODE_ACTIVE_BACKUP:
case BONDING_MODE_TLB:
/* Do not touch promisc when there cannot be primary ports */
if (internals->slave_count == 0)
break;
- rte_eth_promiscuous_enable(internals->current_primary_port);
+ port_id = internals->current_primary_port;
+ ret = rte_eth_promiscuous_enable(port_id);
+ if (ret != 0)
+ RTE_BOND_LOG(ERR,
+ "Failed to enable promiscuous mode for port %u: %s",
+ port_id, rte_strerror(-ret));
}
+
+ return ret;
}
-static void
+static int
bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct bond_dev_private *internals = dev->data->dev_private;
int i;
-
- internals->promiscuous_en = 0;
+ int ret = 0;
+ uint16_t port_id;
switch (internals->mode) {
/* Promiscuous mode is propagated to all slaves */
case BONDING_MODE_ROUND_ROBIN:
case BONDING_MODE_BALANCE:
case BONDING_MODE_BROADCAST:
- for (i = 0; i < internals->slave_count; i++)
- rte_eth_promiscuous_disable(internals->slaves[i].port_id);
- break;
- /* In mode4 promiscus mode is set managed when slave is added/removed */
- case BONDING_MODE_8023AD:
+ case BONDING_MODE_8023AD: {
+ unsigned int slave_ok = 0;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ port_id = internals->slaves[i].port_id;
+
+ /* 802.3ad slaves forced into promisc must stay
+ * enabled; treat them as successfully handled.
+ */
+ if (internals->mode == BONDING_MODE_8023AD &&
+ bond_mode_8023ad_ports[port_id].forced_rx_flags ==
+ BOND_8023AD_FORCED_PROMISC) {
+ slave_ok++;
+ continue;
+ }
+ ret = rte_eth_promiscuous_disable(port_id);
+ if (ret != 0)
+ RTE_BOND_LOG(ERR,
+ "Failed to disable promiscuous mode for port %u: %s",
+ port_id, rte_strerror(-ret));
+ else
+ slave_ok++;
+ }
+ /*
+ * Report success if operation is successful on at least
+ * one slave. Otherwise return last error code.
+ */
+ if (slave_ok > 0)
+ ret = 0;
break;
+ }
/* Promiscuous mode is propagated only to primary slave */
case BONDING_MODE_ACTIVE_BACKUP:
case BONDING_MODE_TLB:
/* Do not touch promisc when there cannot be primary ports */
if (internals->slave_count == 0)
break;
+ port_id = internals->current_primary_port;
+ ret = rte_eth_promiscuous_disable(port_id);
+ if (ret != 0)
+ RTE_BOND_LOG(ERR,
+ "Failed to disable promiscuous mode for port %u: %s",
+ port_id, rte_strerror(-ret));
+ }
+
+ return ret;
+}
+
+static int
+bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+ int i;
+ int ret = 0;
+ uint16_t port_id;
+
+ switch (internals->mode) {
+ /* allmulti mode is propagated to all slaves */
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+ case BONDING_MODE_BROADCAST:
+ case BONDING_MODE_8023AD: {
+ unsigned int slave_ok = 0;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ port_id = internals->slaves[i].port_id;
+
+ ret = rte_eth_allmulticast_enable(port_id);
+ if (ret != 0)
+ RTE_BOND_LOG(ERR,
+ "Failed to enable allmulti mode for port %u: %s",
+ port_id, rte_strerror(-ret));
+ else
+ slave_ok++;
+ }
+ /*
+ * Report success if operation is successful on at least
+ * one slave. Otherwise return last error code.
+ */
+ if (slave_ok > 0)
+ ret = 0;
+ break;
+ }
+ /* allmulti mode is propagated only to primary slave */
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
+ default:
+ /* Do not touch allmulti when there cannot be primary ports */
+ if (internals->slave_count == 0)
+ break;
+ port_id = internals->current_primary_port;
+ ret = rte_eth_allmulticast_enable(port_id);
+ if (ret != 0)
+ RTE_BOND_LOG(ERR,
+ "Failed to enable allmulti mode for port %u: %s",
+ port_id, rte_strerror(-ret));
+ }
+
+ return ret;
+}
+
+static int
+bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct bond_dev_private *internals = eth_dev->data->dev_private;
+ int i;
+ int ret = 0;
+ uint16_t port_id;
+
+ switch (internals->mode) {
+ /* allmulti mode is propagated to all slaves */
+ case BONDING_MODE_ROUND_ROBIN:
+ case BONDING_MODE_BALANCE:
+ case BONDING_MODE_BROADCAST:
+ case BONDING_MODE_8023AD: {
+ unsigned int slave_ok = 0;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ port_id = internals->slaves[i].port_id;
+
+ /* 802.3ad slaves forced into allmulti must stay
+ * enabled; treat them as successfully handled,
+ * mirroring bond_ethdev_promiscuous_disable().
+ */
+ if (internals->mode == BONDING_MODE_8023AD &&
+ bond_mode_8023ad_ports[port_id].forced_rx_flags ==
+ BOND_8023AD_FORCED_ALLMULTI) {
+ slave_ok++;
+ continue;
+ }
+
+ ret = rte_eth_allmulticast_disable(port_id);
+ if (ret != 0)
+ RTE_BOND_LOG(ERR,
+ "Failed to disable allmulti mode for port %u: %s",
+ port_id, rte_strerror(-ret));
+ else
+ slave_ok++;
+ }
+ /*
+ * Report success if operation is successful on at least
+ * one slave. Otherwise return last error code.
+ */
+ if (slave_ok > 0)
+ ret = 0;
+ break;
+ }
+ /* allmulti mode is propagated only to primary slave */
+ case BONDING_MODE_ACTIVE_BACKUP:
+ case BONDING_MODE_TLB:
+ case BONDING_MODE_ALB:
+ default:
+ /* Do not touch allmulti when there cannot be primary ports */
+ if (internals->slave_count == 0)
+ break;
+ port_id = internals->current_primary_port;
+ ret = rte_eth_allmulticast_disable(port_id);
+ if (ret != 0)
+ RTE_BOND_LOG(ERR,
+ "Failed to disable allmulti mode for port %u: %s",
+ port_id, rte_strerror(-ret));
}
+
+ return ret;
}
static void
if (arg == NULL)
return;
- _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
+ rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
RTE_ETH_EVENT_INTR_LSC, NULL);
}
struct bond_dev_private *internals;
struct rte_eth_link link;
int rc = -1;
+ int ret;
uint8_t lsc_flag = 0;
int valid_slave = 0;
active_pos = find_slave_by_id(internals->active_slaves,
internals->active_slave_count, port_id);
- rte_eth_link_get_nowait(port_id, &link);
- if (link.link_status) {
+ ret = rte_eth_link_get_nowait(port_id, &link);
+ if (ret < 0)
+ RTE_BOND_LOG(ERR, "Slave (port %u) link get failed", port_id);
+
+ if (ret == 0 && link.link_status) {
if (active_pos < internals->active_slave_count)
goto link_update;
internals->active_slaves[0]);
else
internals->current_primary_port = internals->primary_port;
+ mac_address_slaves_update(bonded_eth_dev);
}
}
bond_ethdev_delayed_lsc_propagation,
(void *)bonded_eth_dev);
else
- _rte_eth_dev_callback_process(bonded_eth_dev,
+ rte_eth_dev_callback_process(bonded_eth_dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
bond_ethdev_delayed_lsc_propagation,
(void *)bonded_eth_dev);
else
- _rte_eth_dev_callback_process(bonded_eth_dev,
+ rte_eth_dev_callback_process(bonded_eth_dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
}
return -EINVAL;
/* Copy RETA table */
- reta_count = reta_size / RTE_RETA_GROUP_SIZE;
+ reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
+ RTE_RETA_GROUP_SIZE;
for (i = 0; i < reta_count; i++) {
internals->reta_conf[i].mask = reta_conf[i].mask;
if (bond_rss_conf.rss_hf != 0)
dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
- if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
- sizeof(internals->rss_key)) {
- if (bond_rss_conf.rss_key_len == 0)
- bond_rss_conf.rss_key_len = 40;
- internals->rss_key_len = bond_rss_conf.rss_key_len;
+ if (bond_rss_conf.rss_key) {
+ if (bond_rss_conf.rss_key_len < internals->rss_key_len)
+ return -EINVAL;
+ else if (bond_rss_conf.rss_key_len > internals->rss_key_len)
+ RTE_BOND_LOG(WARNING, "rss_key will be truncated");
+
memcpy(internals->rss_key, bond_rss_conf.rss_key,
internals->rss_key_len);
+ bond_rss_conf.rss_key_len = internals->rss_key_len;
}
for (i = 0; i < internals->slave_count; i++) {
}
static int
-bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
- enum rte_filter_type type, enum rte_filter_op op, void *arg)
+bond_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
- *(const void **)arg = &bond_flow_ops;
- return 0;
- }
- return -ENOTSUP;
+ /* Generic flow API: report the bonding PMD's flow ops table. */
+ *ops = &bond_flow_ops;
+ return 0;
}
static int
.stats_reset = bond_ethdev_stats_reset,
.promiscuous_enable = bond_ethdev_promiscuous_enable,
.promiscuous_disable = bond_ethdev_promiscuous_disable,
+ .allmulticast_enable = bond_ethdev_allmulticast_enable,
+ .allmulticast_disable = bond_ethdev_allmulticast_disable,
.reta_update = bond_ethdev_rss_reta_update,
.reta_query = bond_ethdev_rss_reta_query,
.rss_hash_update = bond_ethdev_rss_hash_update,
.mac_addr_set = bond_ethdev_mac_address_set,
.mac_addr_add = bond_ethdev_mac_addr_add,
.mac_addr_remove = bond_ethdev_mac_addr_remove,
- .filter_ctrl = bond_filter_ctrl
+ .flow_ops_get = bond_flow_ops_get
};
static int
}
eth_dev->dev_ops = &default_dev_ops;
- eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
rte_spinlock_init(&internals->lock);
rte_spinlock_init(&internals->lsc_lock);
const char *name;
struct bond_dev_private *internals;
struct rte_kvargs *kvlist;
- uint8_t bonding_mode, socket_id/*, agg_mode*/;
- int arg_count, port_id;
+ uint8_t bonding_mode;
+ int arg_count, port_id;
+ int socket_id;
uint8_t agg_mode;
struct rte_eth_dev *eth_dev;
struct rte_eth_dev *eth_dev;
struct bond_dev_private *internals;
const char *name;
+ int ret = 0;
if (!dev)
return -EINVAL;
name = rte_vdev_device_name(dev);
RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
- /* now free all data allocation - for eth_dev structure,
- * dummy pci driver and internal (private) data
- */
-
/* find an ethdev entry */
eth_dev = rte_eth_dev_allocated(name);
if (eth_dev == NULL)
- return -ENODEV;
+ return 0; /* port already released */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return rte_eth_dev_release_port(eth_dev);
return -EBUSY;
if (eth_dev->data->dev_started == 1) {
- bond_ethdev_stop(eth_dev);
+ ret = bond_ethdev_stop(eth_dev);
bond_ethdev_close(eth_dev);
}
-
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
-
- internals = eth_dev->data->dev_private;
- /* Try to release mempool used in mode6. If the bond
- * device is not mode6, free the NULL is not problem.
- */
- rte_mempool_free(internals->mode6.mempool);
- rte_bitmap_free(internals->vlan_filter_bmp);
- rte_free(internals->vlan_filter_bmpmem);
-
rte_eth_dev_release_port(eth_dev);
- return 0;
+ return ret;
}
/* this part will resolve the slave portids after all the other pdev and vdev
* Fall back to default RSS key if the key is not specified
*/
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
- internals->rss_key_len =
- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
- memcpy(internals->rss_key,
- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
+ struct rte_eth_rss_conf *rss_conf =
+ &dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if (rss_conf->rss_key != NULL) {
+ if (internals->rss_key_len > rss_conf->rss_key_len) {
+ RTE_BOND_LOG(ERR, "Invalid rss key length(%u)",
+ rss_conf->rss_key_len);
+ return -EINVAL;
+ }
+
+ memcpy(internals->rss_key, rss_conf->rss_key,
internals->rss_key_len);
} else {
- internals->rss_key_len = sizeof(default_rss_key);
+ if (internals->rss_key_len > sizeof(default_rss_key)) {
+ RTE_BOND_LOG(ERR,
+ "There is no suitable default hash key");
+ return -EINVAL;
+ }
+
memcpy(internals->rss_key, default_rss_key,
internals->rss_key_len);
}
"up_delay=<int> "
"down_delay=<int>");
-int bond_logtype;
-
-RTE_INIT(bond_init_log)
-{
- bond_logtype = rte_log_register("pmd.net.bond");
- if (bond_logtype >= 0)
- rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
-}
+/* We can't use RTE_LOG_REGISTER_DEFAULT because of the forced name for
+ * this library, see meson.build.
+ */
+RTE_LOG_REGISTER(bond_logtype, pmd.net.bonding, NOTICE);