* Copyright(c) 2010-2017 Intel Corporation
*/
#include <stdlib.h>
+#include <stdbool.h>
#include <netinet/in.h>
#include <rte_mbuf.h>
return num_rx_total;
}
-static uint16_t
-bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
-{
- struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
- struct bond_dev_private *internals = bd_tx_q->dev_private;
-
- uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
- uint16_t slave_count;
-
- uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
- uint16_t dist_slave_count;
-
- /* 2-D array to sort mbufs for transmission on each slave into */
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
- /* Number of mbufs for transmission on each slave */
- uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
- /* Mapping array generated by hash function to map mbufs to slaves */
- uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
- uint16_t slave_tx_count;
- uint16_t total_tx_count = 0, total_tx_fail_count = 0;
-
- uint16_t i;
-
- if (unlikely(nb_bufs == 0))
- return 0;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- slave_count = internals->active_slave_count;
- if (unlikely(slave_count < 1))
- return 0;
-
- memcpy(slave_port_ids, internals->active_slaves,
- sizeof(slave_port_ids[0]) * slave_count);
-
-
- dist_slave_count = 0;
- for (i = 0; i < slave_count; i++) {
- struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
-
- if (ACTOR_STATE(port, DISTRIBUTING))
- dist_slave_port_ids[dist_slave_count++] =
- slave_port_ids[i];
- }
-
- if (unlikely(dist_slave_count < 1))
- return 0;
-
- /*
- * Populate slaves mbuf with the packets which are to be sent on it
- * selecting output slave using hash based on xmit policy
- */
- internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
- bufs_slave_port_idxs);
-
- for (i = 0; i < nb_bufs; i++) {
- /* Populate slave mbuf arrays with mbufs for that slave. */
- uint16_t slave_idx = bufs_slave_port_idxs[i];
-
- slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
- }
-
-
- /* Send packet burst on each slave device */
- for (i = 0; i < dist_slave_count; i++) {
- if (slave_nb_bufs[i] == 0)
- continue;
-
- slave_tx_count = rte_eth_tx_burst(dist_slave_port_ids[i],
- bd_tx_q->queue_id, slave_bufs[i],
- slave_nb_bufs[i]);
-
- total_tx_count += slave_tx_count;
-
- /* If tx burst fails move packets to end of bufs */
- if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- int slave_tx_fail_count = slave_nb_bufs[i] -
- slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count;
- memcpy(&bufs[nb_bufs - total_tx_fail_count],
- &slave_bufs[i][slave_tx_count],
- slave_tx_fail_count * sizeof(bufs[0]));
- }
- }
-
- return total_tx_count;
-}
-
-
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
return num_tx_total;
}
-static uint16_t
-bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
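+/*
+ * Hash nb_bufs packets across the supplied active slave list using the
+ * configured xmit policy and burst each slave's share on its Tx queue.
+ * (Descriptive comment added for the refactored helper.)
+ */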
+static inline uint16_t
+tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+ uint16_t *slave_port_ids, uint16_t slave_count)
{
struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
struct bond_dev_private *internals = bd_tx_q->dev_private;
- uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
- uint16_t slave_count;
-
/* Array to sort mbufs for transmission on each slave into */
struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
/* Number of mbufs for transmission on each slave */
uint16_t i;
- if (unlikely(nb_bufs == 0))
- return 0;
-
- /* Copy slave list to protect against slave up/down changes during tx
- * bursting */
- slave_count = internals->active_slave_count;
- if (unlikely(slave_count < 1))
- return 0;
-
- memcpy(slave_port_ids, internals->active_slaves,
- sizeof(slave_port_ids[0]) * slave_count);
-
/*
* Populate slaves mbuf with the packets which are to be sent on it
* selecting output slave using hash based on xmit policy
}
static uint16_t
-bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
uint16_t nb_bufs)
{
struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
uint16_t slave_count;
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
+ /* Copy slave list to protect against slave up/down changes during tx
+ * bursting
+ */
+ slave_count = internals->active_slave_count;
+ if (unlikely(slave_count < 1))
+ return 0;
+
+ memcpy(slave_port_ids, internals->active_slaves,
+ sizeof(slave_port_ids[0]) * slave_count);
+
+ return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
+ slave_count);
+}
+
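+/*
+ * Common 802.3ad Tx path shared by the default and dedicated-queue
+ * handlers. When dedicated_txq is set the LACP control packet ring is
+ * not drained here, as control traffic is carried on its own queue.
+ */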
+static inline uint16_t
+tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+ bool dedicated_txq)
+{
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+ uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
+
uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
uint16_t dist_slave_count;
- /* 2-D array to sort mbufs for transmission on each slave into */
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
- /* Number of mbufs for transmission on each slave */
- uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
- /* Mapping array generated by hash function to map mbufs to slaves */
- uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
uint16_t slave_tx_count;
- uint16_t total_tx_count = 0, total_tx_fail_count = 0;
uint16_t i;
memcpy(slave_port_ids, internals->active_slaves,
sizeof(slave_port_ids[0]) * slave_count);
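+ /* Control packets go out on the dedicated queue, skip the Tx ring */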
+ if (dedicated_txq)
+ goto skip_tx_ring;
+
/* Check for LACP control packets and send if available */
for (i = 0; i < slave_count; i++) {
struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
}
}
+skip_tx_ring:
if (unlikely(nb_bufs == 0))
return 0;
slave_port_ids[i];
}
- if (likely(dist_slave_count > 0)) {
-
- /*
- * Populate slaves mbuf with the packets which are to be sent
- * on it, selecting output slave using hash based on xmit policy
- */
- internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
- bufs_slave_port_idxs);
-
- for (i = 0; i < nb_bufs; i++) {
- /*
- * Populate slave mbuf arrays with mbufs for that
- * slave
- */
- uint16_t slave_idx = bufs_slave_port_idxs[i];
-
- slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
- bufs[i];
- }
-
-
- /* Send packet burst on each slave device */
- for (i = 0; i < dist_slave_count; i++) {
- if (slave_nb_bufs[i] == 0)
- continue;
-
- slave_tx_count = rte_eth_tx_burst(
- dist_slave_port_ids[i],
- bd_tx_q->queue_id, slave_bufs[i],
- slave_nb_bufs[i]);
-
- total_tx_count += slave_tx_count;
+ if (unlikely(dist_slave_count < 1))
+ return 0;
- /* If tx burst fails move packets to end of bufs */
- if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
- int slave_tx_fail_count = slave_nb_bufs[i] -
- slave_tx_count;
- total_tx_fail_count += slave_tx_fail_count;
+ return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
+ dist_slave_count);
+}
- memcpy(&bufs[nb_bufs - total_tx_fail_count],
- &slave_bufs[i][slave_tx_count],
- slave_tx_fail_count * sizeof(bufs[0]));
- }
- }
- }
+static uint16_t
+bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ return tx_burst_8023ad(queue, bufs, nb_bufs, false);
+}
- return total_tx_count;
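+/* Tx handler installed when a dedicated queue carries LACP control traffic */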
+static uint16_t
+bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ return tx_burst_8023ad(queue, bufs, nb_bufs, true);
}
static uint16_t