* Copyright(c) 2010-2017 Intel Corporation
*/
#include <stdlib.h>
+#include <stdbool.h>
#include <netinet/in.h>
#include <rte_mbuf.h>
uint16_t slave_port) {
struct rte_eth_dev_info slave_info;
struct rte_flow_error error;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- (bond_dev->data->dev_private);
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
const struct rte_flow_action_queue lacp_queue_conf = {
.index = 0,
int
bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
- struct bond_dev_private *internals = (struct bond_dev_private *)
- (bond_dev->data->dev_private);
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
struct rte_eth_dev_info bond_info;
uint16_t idx;
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
struct rte_flow_error error;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- (bond_dev->data->dev_private);
-
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
struct rte_flow_action_queue lacp_queue_conf = {
.index = internals->mode4.dedicated_queues.rx_qid,
};
return num_rx_total;
}
-static uint16_t
-bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
-{
- struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
- struct bond_dev_private *internals = bd_tx_q->dev_private;
-
- uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
- uint16_t slave_count;
-
- uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
- uint16_t dist_slave_count;
-
- /* 2-D array to sort mbufs for transmission on each slave into */
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
- /* Number of mbufs for transmission on each slave */
- uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
- /* Mapping array generated by hash function to map mbufs to slaves */
- uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
- uint16_t slave_tx_count;
- uint16_t total_tx_count = 0, total_tx_fail_count = 0;
-
- uint16_t i;
-
- if (unlikely(nb_bufs == 0))
- return 0;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- slave_count = internals->active_slave_count;
- if (unlikely(slave_count < 1))
- return 0;
-
- memcpy(slave_port_ids, internals->active_slaves,
- sizeof(slave_port_ids[0]) * slave_count);
-
-
- dist_slave_count = 0;
- for (i = 0; i < slave_count; i++) {
- struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
-
- if (ACTOR_STATE(port, DISTRIBUTING))
- dist_slave_port_ids[dist_slave_count++] =
- slave_port_ids[i];
- }
-
- if (unlikely(dist_slave_count < 1))
- return 0;
-
- /*
- * Populate slaves mbuf with the packets which are to be sent on it
- * selecting output slave using hash based on xmit policy
- */
- internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
- bufs_slave_port_idxs);
-
- for (i = 0; i < nb_bufs; i++) {
- /* Populate slave mbuf arrays with mbufs for that slave. */
- uint16_t slave_idx = bufs_slave_port_idxs[i];
-
- slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
- }
-
-
- /* Send packet burst on each slave device */
- for (i = 0; i < dist_slave_count; i++) {
- if (slave_nb_bufs[i] == 0)
- continue;
-
- slave_tx_count = rte_eth_tx_burst(dist_slave_port_ids[i],
- bd_tx_q->queue_id, slave_bufs[i],
- slave_nb_bufs[i]);
-
- total_tx_count += slave_tx_count;
-
- /* If tx burst fails move packets to end of bufs */
- if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- int slave_tx_fail_count = slave_nb_bufs[i] -
- slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count;
- memcpy(&bufs[nb_bufs - total_tx_fail_count],
- &slave_bufs[i][slave_tx_count],
- slave_tx_fail_count * sizeof(bufs[0]));
- }
- }
-
- return total_tx_count;
-}
-
-
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
strlcpy(buf, info, 16);
#endif
- if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
+ if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
bond_mode_alb_arp_recv(eth_h, offset, internals);
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
- else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
+ else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
}
vlan_offset = get_vlan_offset(eth_hdr, &proto);
- if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv4_hash(ipv4_hdr);
- } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
+ } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
size_t vlan_offset;
int i;
- struct udp_hdr *udp_hdr;
+ struct rte_udp_hdr *udp_hdr;
struct rte_tcp_hdr *tcp_hdr;
uint32_t hash, l3hash, l4hash;
l3hash = 0;
l4hash = 0;
- if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
+ if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) == proto) {
struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
size_t ip_hdr_offset;
l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv4_hdr->next_proto_id ==
IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)
+ udp_hdr = (struct rte_udp_hdr *)
((char *)ipv4_hdr +
ip_hdr_offset);
if ((size_t)udp_hdr + sizeof(*udp_hdr)
l4hash = HASH_L4_PORTS(udp_hdr);
}
}
- } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
+ } else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) == proto) {
struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv6_hdr->proto == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
+ udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
l4hash = HASH_L4_PORTS(udp_hdr);
}
}
return num_tx_total;
}
-static uint16_t
-bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+static inline uint16_t
+tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+ uint16_t *slave_port_ids, uint16_t slave_count)
{
struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
struct bond_dev_private *internals = bd_tx_q->dev_private;
- uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
- uint16_t slave_count;
-
/* Array to sort mbufs for transmission on each slave into */
struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
/* Number of mbufs for transmission on each slave */
uint16_t i;
- if (unlikely(nb_bufs == 0))
- return 0;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- slave_count = internals->active_slave_count;
- if (unlikely(slave_count < 1))
- return 0;
-
- memcpy(slave_port_ids, internals->active_slaves,
- sizeof(slave_port_ids[0]) * slave_count);
-
/*
* Populate slaves mbuf with the packets which are to be sent on it
* selecting output slave using hash based on xmit policy
}
static uint16_t
-bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
uint16_t nb_bufs)
{
struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
uint16_t slave_count;
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
+	/* Copy the active slave list to protect against slave up/down
+	 * changes during tx bursting.
+	 */
+ slave_count = internals->active_slave_count;
+ if (unlikely(slave_count < 1))
+ return 0;
+
+ memcpy(slave_port_ids, internals->active_slaves,
+ sizeof(slave_port_ids[0]) * slave_count);
+ return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
+ slave_count);
+}
+
+static inline uint16_t
+tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+ bool dedicated_txq)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
+
uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
uint16_t dist_slave_count;
- /* 2-D array to sort mbufs for transmission on each slave into */
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
- /* Number of mbufs for transmission on each slave */
- uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
- /* Mapping array generated by hash function to map mbufs to slaves */
- uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
uint16_t slave_tx_count;
- uint16_t total_tx_count = 0, total_tx_fail_count = 0;
uint16_t i;
memcpy(slave_port_ids, internals->active_slaves,
sizeof(slave_port_ids[0]) * slave_count);
+ if (dedicated_txq)
+ goto skip_tx_ring;
+
/* Check for LACP control packets and send if available */
for (i = 0; i < slave_count; i++) {
struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
}
}
+skip_tx_ring:
if (unlikely(nb_bufs == 0))
return 0;
slave_port_ids[i];
}
- if (likely(dist_slave_count > 0)) {
-
- /*
- * Populate slaves mbuf with the packets which are to be sent
- * on it, selecting output slave using hash based on xmit policy
- */
- internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
- bufs_slave_port_idxs);
-
- for (i = 0; i < nb_bufs; i++) {
- /*
- * Populate slave mbuf arrays with mbufs for that
- * slave
- */
- uint16_t slave_idx = bufs_slave_port_idxs[i];
-
- slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
- bufs[i];
- }
-
-
- /* Send packet burst on each slave device */
- for (i = 0; i < dist_slave_count; i++) {
- if (slave_nb_bufs[i] == 0)
- continue;
-
- slave_tx_count = rte_eth_tx_burst(
- dist_slave_port_ids[i],
- bd_tx_q->queue_id, slave_bufs[i],
- slave_nb_bufs[i]);
-
- total_tx_count += slave_tx_count;
+ if (unlikely(dist_slave_count < 1))
+ return 0;
- /* If tx burst fails move packets to end of bufs */
- if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- int slave_tx_fail_count = slave_nb_bufs[i] -
- slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count;
+ return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
+ dist_slave_count);
+}
- memcpy(&bufs[nb_bufs - total_tx_fail_count],
- &slave_bufs[i][slave_tx_count],
- slave_tx_fail_count * sizeof(bufs[0]));
- }
- }
- }
+static uint16_t
+bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ return tx_burst_8023ad(queue, bufs, nb_bufs, false);
+}
- return total_tx_count;
+static uint16_t
+bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}
static uint16_t
struct rte_eth_dev *slave_eth_dev)
{
int errval = 0;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
if (port->slow_pool == NULL) {
uint16_t q_id;
struct rte_flow_error flow_error;
- struct bond_dev_private *internals = (struct bond_dev_private *)
- bonded_eth_dev->data->dev_private;
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
/* Stop slave */
rte_eth_dev_stop(slave_eth_dev->data->port_id);
if (cb_arg == NULL)
return;
- bonded_ethdev = (struct rte_eth_dev *)cb_arg;
- internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
+ bonded_ethdev = cb_arg;
+ internals = bonded_ethdev->data->dev_private;
if (!bonded_ethdev->data->dev_started ||
!internals->link_status_polling_enabled)