/*
* Known limitations:
- * - Multiple RX VLAN filters can be configured, but only the first one
- * works properly.
* - RSS hash key and options cannot be modified.
* - Hardware counters aren't implemented.
*/
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
-#include <linux/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
+#include <fcntl.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
-#include <rte_config.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_atomic.h>
#include <rte_version.h>
#include <rte_log.h>
+#include <rte_alarm.h>
+#include <rte_memory.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
#define WR_ID(o) (((wr_id_t *)&(o))->data)
-/* Compile-time check. */
-static inline void wr_id_t_check(void)
-{
- wr_id_t check[1 + (2 * -!(sizeof(wr_id_t) == sizeof(uint64_t)))];
-
- (void)check;
- (void)wr_id_t_check;
-}
-
-/* If raw send operations are available, use them since they are faster. */
-#ifdef SEND_RAW_WR_SUPPORT
-typedef struct ibv_send_wr_raw mlx4_send_wr_t;
-#define mlx4_post_send ibv_post_send_raw
-#else
-typedef struct ibv_send_wr mlx4_send_wr_t;
-#define mlx4_post_send ibv_post_send
-#endif
+/* Transpose flags. Useful to convert IBV to DPDK flags. */
+#define TRANSPOSE(val, from, to) \
+ (((from) >= (to)) ? \
+ (((val) & (from)) / ((from) / (to))) : \
+ (((val) & (from)) * ((to) / (from))))
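+/*
+ * Example with made-up single-bit masks: TRANSPOSE(val, 0x10, 0x2)
+ * expands to ((val & 0x10) / 8) and moves bit 0x10 down to bit 0x2;
+ * when "to" is the larger mask, the bit is multiplied up instead.
+ * Both masks are expected to be single bits.
+ */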
struct mlx4_rxq_stats {
unsigned int idx; /**< Mapping index. */
struct ibv_mr *mr; /* Memory Region (for mp). */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
+ struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+ struct ibv_exp_cq_family *if_cq; /* CQ interface. */
/*
- * There is exactly one flow configured per MAC address. Each flow
- * may contain several specifications, one per configured VLAN ID.
+ * Each VLAN ID requires a separate flow steering rule.
*/
BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
- struct ibv_exp_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES];
- struct ibv_exp_flow *promisc_flow; /* Promiscuous flow. */
- struct ibv_exp_flow *allmulti_flow; /* Multicast flow. */
+ struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
+ struct ibv_flow *promisc_flow; /* Promiscuous flow. */
+ struct ibv_flow *allmulti_flow; /* Multicast flow. */
unsigned int port_id; /* Port ID for incoming packets. */
unsigned int elts_n; /* (*elts)[] length. */
+ unsigned int elts_head; /* Current index in (*elts)[]. */
union {
struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
struct rxq_elt (*no_sp)[]; /* RX elements. */
} elts;
unsigned int sp:1; /* Use scattered RX elements. */
+ unsigned int csum:1; /* Enable checksum offloading. */
+ unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
uint32_t mb_len; /* Length of a mp-issued mbuf. */
struct mlx4_rxq_stats stats; /* RX queue counters. */
unsigned int socket; /* CPU socket ID for allocations. */
+ struct ibv_exp_res_domain *rd; /* Resource Domain. */
};
/* TX element. */
struct txq_elt {
- mlx4_send_wr_t wr; /* Work Request. */
- struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
- /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
+ struct rte_mbuf *buf;
};
/* Linear buffer type. It is used when transmitting buffers with too many
 * segments that do not fit the hardware queue (see max_send_sge).
 * Extra segments are copied (linearized) in such buffers, replacing the
 * last SGE during TX.
 * The size is arbitrary but large enough to hold a jumbo frame with
 * 8 segments considering mbuf.buf_len is about 2048 bytes. */
typedef uint8_t linear_t[16384];

/* TX queue descriptor. */
struct txq {
struct priv *priv; /* Back pointer to private data. */
struct {
- struct rte_mempool *mp; /* Cached Memory Pool. */
+ const struct rte_mempool *mp; /* Cached Memory Pool. */
struct ibv_mr *mr; /* Memory Region (for mp). */
uint32_t lkey; /* mr->lkey */
} mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
+ struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+ struct ibv_exp_cq_family *if_cq; /* CQ interface. */
#if MLX4_PMD_MAX_INLINE > 0
uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
#endif
unsigned int elts_head; /* Current index in (*elts)[]. */
unsigned int elts_tail; /* First element awaiting completion. */
unsigned int elts_comp; /* Number of completion requests. */
+ unsigned int elts_comp_cd; /* Countdown for next completion request. */
+ unsigned int elts_comp_cd_init; /* Initial value for countdown. */
struct mlx4_txq_stats stats; /* TX queue counters. */
linear_t (*elts_linear)[]; /* Linearized buffers. */
struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
unsigned int socket; /* CPU socket ID for allocations. */
+ struct ibv_exp_res_domain *rd; /* Resource Domain. */
};
struct priv {
struct rte_eth_dev *dev; /* Ethernet device. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr device_attr; /* Device properties. */
- struct ibv_port_attr port_attr; /* Physical port properties. */
struct ibv_pd *pd; /* Protection Domain. */
/*
* MAC addresses array and configuration bit-field.
* An extra entry that cannot be modified by the DPDK is reserved
* for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
*/
struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
/* VLAN filters. */
struct {
unsigned int enabled:1; /* If enabled. */
unsigned int id:12; /* VLAN ID (0-4095). */
} vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
uint8_t port; /* Physical port number. */
unsigned int started:1; /* Device started, flows enabled. */
unsigned int promisc:1; /* Device in promiscuous mode. */
- unsigned int promisc_ok:1; /* Promiscuous flow is supported. */
unsigned int allmulti:1; /* Device receives all multicast packets. */
unsigned int hw_qpg:1; /* QP groups are supported. */
unsigned int hw_tss:1; /* TSS is supported. */
unsigned int hw_rss:1; /* RSS is supported. */
+ unsigned int hw_csum:1; /* Checksum offload is supported. */
+ unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int rss:1; /* RSS is enabled. */
unsigned int vf:1; /* This is a VF device. */
+ unsigned int pending_alarm:1; /* An alarm is pending. */
#ifdef INLINE_RECV
unsigned int inl_recv_size; /* Inline recv size. */
#endif
unsigned int txqs_n; /* TX queues array size. */
struct rxq *(*rxqs)[]; /* RX queues. */
struct txq *(*txqs)[]; /* TX queues. */
+ struct rte_intr_handle intr_handle; /* Interrupt handler. */
rte_spinlock_t lock; /* Lock for control functions. */
};
+/* Local storage for secondary process data. */
+struct mlx4_secondary_data {
+ struct rte_eth_dev_data data; /* Local device data. */
+ struct priv *primary_priv; /* Private structure from primary. */
+ struct rte_eth_dev_data *shared_dev_data; /* Shared device data. */
+ rte_spinlock_t lock; /* Port configuration lock. */
+} mlx4_secondary_data[RTE_MAX_ETHPORTS];
+
+/**
+ * Check if running as a secondary process.
+ *
+ * @return
+ * Nonzero if running as a secondary process.
+ */
+static inline int
+mlx4_is_secondary(void)
+{
+ return rte_eal_process_type() != RTE_PROC_PRIMARY;
+}
+
+/**
+ * Return private structure associated with an Ethernet device.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * Pointer to private structure.
+ */
+static struct priv *
+mlx4_get_priv(struct rte_eth_dev *dev)
+{
+ struct mlx4_secondary_data *sd;
+
+ if (!mlx4_is_secondary())
+ return dev->data->dev_private;
+ sd = &mlx4_secondary_data[dev->data->port_id];
+ return sd->data.dev_private;
+}
+
/**
* Lock private structure to protect it from concurrent access in the
* control path.
static int
priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
{
- int ret = -1;
DIR *dir;
struct dirent *dent;
+ unsigned int dev_type = 0;
+ unsigned int dev_port_prev = ~0u;
+ char match[IF_NAMESIZE] = "";
{
MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);
while ((dent = readdir(dir)) != NULL) {
char *name = dent->d_name;
FILE *file;
- unsigned int dev_id;
+ unsigned int dev_port;
int r;
if ((name[0] == '.') &&
((name[1] == '\0') ||
((name[1] == '.') && (name[2] == '\0'))))
continue;
- MKSTR(path, "%s/device/net/%s/dev_id",
- priv->ctx->device->ibdev_path, name);
+ MKSTR(path, "%s/device/net/%s/%s",
+ priv->ctx->device->ibdev_path, name,
+ (dev_type ? "dev_id" : "dev_port"));
file = fopen(path, "rb");
- if (file == NULL)
+ if (file == NULL) {
+ if (errno != ENOENT)
+ continue;
+ /*
+ * Switch to dev_id when dev_port does not exist as
+ * is the case with Linux kernel versions < 3.15.
+ */
+try_dev_id:
+ match[0] = '\0';
+ if (dev_type)
+ break;
+ dev_type = 1;
+ dev_port_prev = ~0u;
+ rewinddir(dir);
continue;
- r = fscanf(file, "%x", &dev_id);
- fclose(file);
- if ((r == 1) && (dev_id == (priv->port - 1u))) {
- snprintf(*ifname, sizeof(*ifname), "%s", name);
- ret = 0;
- break;
}
+ r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
+ fclose(file);
+ if (r != 1)
+ continue;
+ /*
+ * Switch to dev_id when dev_port returns the same value for
+ * all ports. May happen when using a MOFED release older than
+ * 3.0 with a Linux kernel >= 3.15.
+ */
+ if (dev_port == dev_port_prev)
+ goto try_dev_id;
+ dev_port_prev = dev_port;
+ if (dev_port == (priv->port - 1u))
+ snprintf(match, sizeof(match), "%s", name);
}
closedir(dir);
- return ret;
+ if (match[0] == '\0')
+ return -1;
+ strncpy(*ifname, match, sizeof(*ifname));
+ return 0;
}
/**
/* Device configuration. */
+static int
+txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf);
+
+static void
+txq_cleanup(struct txq *txq);
+
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
+ unsigned int socket, int inactive, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp);
static void
}
if (rxqs_n == priv->rxqs_n)
return 0;
+ if (!rte_is_power_of_2(rxqs_n)) {
+ unsigned n_active;
+
+ n_active = rte_align32pow2(rxqs_n + 1) >> 1;
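+ /* E.g. rxqs_n == 6: rte_align32pow2(7) == 8, so n_active == 4. */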
+ WARN("%p: number of RX queues must be a power"
+ " of 2: %u queues among %u will be active",
+ (void *)dev, n_active, rxqs_n);
+ }
+
INFO("%p: RX queues number update: %u -> %u",
(void *)dev, priv->rxqs_n, rxqs_n);
/* If RSS is enabled, disable it first. */
priv->rss = 1;
tmp = priv->rxqs_n;
priv->rxqs_n = rxqs_n;
- ret = rxq_setup(dev, &priv->rxq_parent, 0, 0, NULL, NULL);
+ ret = rxq_setup(dev, &priv->rxq_parent, 0, 0, 0, NULL, NULL);
if (!ret)
return 0;
/* Failure, rollback. */
struct priv *priv = dev->data->dev_private;
int ret;
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
priv_lock(priv);
ret = dev_configure(dev);
assert(ret >= 0);
return -ret;
}
+static uint16_t mlx4_tx_burst(void *, struct rte_mbuf **, uint16_t);
+static uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
+
+/**
+ * Configure secondary process queues from a private data pointer (primary
+ * or secondary) and update burst callbacks. Can take place only once.
+ *
+ * All queues must have been previously created by the primary process to
+ * avoid undefined behavior.
+ *
+ * @param priv
+ * Private data pointer from either primary or secondary process.
+ *
+ * @return
+ * Private data pointer from secondary process, NULL in case of error.
+ */
+static struct priv *
+mlx4_secondary_data_setup(struct priv *priv)
+{
+ unsigned int port_id = 0;
+ struct mlx4_secondary_data *sd;
+ void **tx_queues;
+ void **rx_queues;
+ unsigned int nb_tx_queues;
+ unsigned int nb_rx_queues;
+ unsigned int i;
+
+ /* priv must be valid at this point. */
+ assert(priv != NULL);
+ /* priv->dev must also be valid but may point to local memory from
+ * another process, possibly with the same address, and must not
+ * be dereferenced yet. */
+ assert(priv->dev != NULL);
+ /* Determine port ID by finding out where priv comes from. */
+ while (1) {
+ sd = &mlx4_secondary_data[port_id];
+ rte_spinlock_lock(&sd->lock);
+ /* Primary process? */
+ if (sd->primary_priv == priv)
+ break;
+ /* Secondary process? */
+ if (sd->data.dev_private == priv)
+ break;
+ rte_spinlock_unlock(&sd->lock);
+ if (++port_id == RTE_DIM(mlx4_secondary_data))
+ port_id = 0;
+ }
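+ /* Here sd->lock is held and sd is the slot whose primary or
+ * secondary private structure matches priv. */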
+ /* Switch to secondary private structure. If private data has already
+ * been updated by another thread, there is nothing else to do. */
+ priv = sd->data.dev_private;
+ if (priv->dev->data == &sd->data)
+ goto end;
+ /* Sanity checks. Secondary private structure is supposed to point
+ * to local eth_dev, itself still pointing to the shared device data
+ * structure allocated by the primary process. */
+ assert(sd->shared_dev_data != &sd->data);
+ assert(sd->data.nb_tx_queues == 0);
+ assert(sd->data.tx_queues == NULL);
+ assert(sd->data.nb_rx_queues == 0);
+ assert(sd->data.rx_queues == NULL);
+ assert(priv != sd->primary_priv);
+ assert(priv->dev->data == sd->shared_dev_data);
+ assert(priv->txqs_n == 0);
+ assert(priv->txqs == NULL);
+ assert(priv->rxqs_n == 0);
+ assert(priv->rxqs == NULL);
+ nb_tx_queues = sd->shared_dev_data->nb_tx_queues;
+ nb_rx_queues = sd->shared_dev_data->nb_rx_queues;
+ /* Allocate local storage for queues. */
+ tx_queues = rte_zmalloc("secondary ethdev->tx_queues",
+ sizeof(sd->data.tx_queues[0]) * nb_tx_queues,
+ RTE_CACHE_LINE_SIZE);
+ rx_queues = rte_zmalloc("secondary ethdev->rx_queues",
+ sizeof(sd->data.rx_queues[0]) * nb_rx_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (tx_queues == NULL || rx_queues == NULL)
+ goto error;
+ /* Lock to prevent control operations during setup. */
+ priv_lock(priv);
+ /* TX queues. */
+ for (i = 0; i != nb_tx_queues; ++i) {
+ struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
+ struct txq *txq;
+
+ if (primary_txq == NULL)
+ continue;
+ txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0,
+ primary_txq->socket);
+ if (txq != NULL) {
+ if (txq_setup(priv->dev,
+ txq,
+ primary_txq->elts_n * MLX4_PMD_SGE_WR_N,
+ primary_txq->socket,
+ NULL) == 0) {
+ txq->stats.idx = primary_txq->stats.idx;
+ tx_queues[i] = txq;
+ continue;
+ }
+ rte_free(txq);
+ }
+ while (i) {
+ txq = tx_queues[--i];
+ txq_cleanup(txq);
+ rte_free(txq);
+ }
+ goto error;
+ }
+ /* RX queues. */
+ for (i = 0; i != nb_rx_queues; ++i) {
+ struct rxq *primary_rxq = (*sd->primary_priv->rxqs)[i];
+
+ if (primary_rxq == NULL)
+ continue;
+ /* Not supported yet. */
+ rx_queues[i] = NULL;
+ }
+ /* Update everything. */
+ priv->txqs = (void *)tx_queues;
+ priv->txqs_n = nb_tx_queues;
+ priv->rxqs = (void *)rx_queues;
+ priv->rxqs_n = nb_rx_queues;
+ sd->data.rx_queues = rx_queues;
+ sd->data.tx_queues = tx_queues;
+ sd->data.nb_rx_queues = nb_rx_queues;
+ sd->data.nb_tx_queues = nb_tx_queues;
+ sd->data.dev_link = sd->shared_dev_data->dev_link;
+ sd->data.mtu = sd->shared_dev_data->mtu;
+ memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state,
+ sizeof(sd->data.rx_queue_state));
+ memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state,
+ sizeof(sd->data.tx_queue_state));
+ sd->data.dev_flags = sd->shared_dev_data->dev_flags;
+ /* Use local data from now on. */
+ rte_mb();
+ priv->dev->data = &sd->data;
+ rte_mb();
+ priv->dev->tx_pkt_burst = mlx4_tx_burst;
+ priv->dev->rx_pkt_burst = removed_rx_burst;
+ priv_unlock(priv);
+end:
+ /* More sanity checks. */
+ assert(priv->dev->tx_pkt_burst == mlx4_tx_burst);
+ assert(priv->dev->rx_pkt_burst == removed_rx_burst);
+ assert(priv->dev->data == &sd->data);
+ rte_spinlock_unlock(&sd->lock);
+ return priv;
+error:
+ priv_unlock(priv);
+ rte_free(tx_queues);
+ rte_free(rx_queues);
+ rte_spinlock_unlock(&sd->lock);
+ return NULL;
+}
+
/* TX queues handling. */
/**
}
mr_linear =
ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear),
- (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
+ IBV_ACCESS_LOCAL_WRITE);
if (mr_linear == NULL) {
ERROR("%p: unable to configure MR, ibv_reg_mr() failed",
(void *)txq);
}
for (i = 0; (i != elts_n); ++i) {
struct txq_elt *elt = &(*elts)[i];
- mlx4_send_wr_t *wr = &elt->wr;
- /* Configure WR. */
- WR_ID(wr->wr_id).id = i;
- WR_ID(wr->wr_id).offset = 0;
- wr->sg_list = &elt->sges[0];
- wr->opcode = IBV_WR_SEND;
- /* Other fields are updated during TX. */
+ elt->buf = NULL;
}
DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n);
txq->elts_n = elts_n;
txq->elts_head = 0;
txq->elts_tail = 0;
txq->elts_comp = 0;
+ /* Request send completion every MLX4_PMD_TX_PER_COMP_REQ packets or
+ * at least 4 times per ring. */
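+ /* E.g. assuming the default MLX4_PMD_TX_PER_COMP_REQ of 64 and a
+ * 128-element ring: min(64, 128 / 4) == 32, so every 32nd packet
+ * requests a completion. */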
+ txq->elts_comp_cd_init =
+ ((MLX4_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ?
+ MLX4_PMD_TX_PER_COMP_REQ : (elts_n / 4));
+ txq->elts_comp_cd = txq->elts_comp_cd_init;
txq->elts_linear = elts_linear;
txq->mr_linear = mr_linear;
assert(ret == 0);
static void
txq_free_elts(struct txq *txq)
{
- unsigned int i;
unsigned int elts_n = txq->elts_n;
+ unsigned int elts_head = txq->elts_head;
+ unsigned int elts_tail = txq->elts_tail;
struct txq_elt (*elts)[elts_n] = txq->elts;
linear_t (*elts_linear)[elts_n] = txq->elts_linear;
struct ibv_mr *mr_linear = txq->mr_linear;
DEBUG("%p: freeing WRs", (void *)txq);
txq->elts_n = 0;
+ txq->elts_head = 0;
+ txq->elts_tail = 0;
+ txq->elts_comp = 0;
+ txq->elts_comp_cd = 0;
+ txq->elts_comp_cd_init = 0;
txq->elts = NULL;
txq->elts_linear = NULL;
txq->mr_linear = NULL;
rte_free(elts_linear);
if (elts == NULL)
return;
- for (i = 0; (i != elemof(*elts)); ++i) {
- struct txq_elt *elt = &(*elts)[i];
+ while (elts_tail != elts_head) {
+ struct txq_elt *elt = &(*elts)[elts_tail];
- if (WR_ID(elt->wr.wr_id).offset == 0)
- continue;
- rte_pktmbuf_free((void *)((uintptr_t)elt->sges[0].addr -
- WR_ID(elt->wr.wr_id).offset));
+ assert(elt->buf != NULL);
+ rte_pktmbuf_free(elt->buf);
+#ifndef NDEBUG
+ /* Poisoning. */
+ memset(elt, 0x77, sizeof(*elt));
+#endif
+ if (++elts_tail == elts_n)
+ elts_tail = 0;
}
rte_free(elts);
}
static void
txq_cleanup(struct txq *txq)
{
+ struct ibv_exp_release_intf_params params;
size_t i;
DEBUG("cleaning up %p", (void *)txq);
txq_free_elts(txq);
+ if (txq->if_qp != NULL) {
+ assert(txq->priv != NULL);
+ assert(txq->priv->ctx != NULL);
+ assert(txq->qp != NULL);
+ params = (struct ibv_exp_release_intf_params){
+ .comp_mask = 0,
+ };
+ claim_zero(ibv_exp_release_intf(txq->priv->ctx,
+ txq->if_qp,
+ ¶ms));
+ }
+ if (txq->if_cq != NULL) {
+ assert(txq->priv != NULL);
+ assert(txq->priv->ctx != NULL);
+ assert(txq->cq != NULL);
+ params = (struct ibv_exp_release_intf_params){
+ .comp_mask = 0,
+ };
+ claim_zero(ibv_exp_release_intf(txq->priv->ctx,
+ txq->if_cq,
+ ¶ms));
+ }
if (txq->qp != NULL)
claim_zero(ibv_destroy_qp(txq->qp));
if (txq->cq != NULL)
claim_zero(ibv_destroy_cq(txq->cq));
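+ /* The resource domain, if any, must be destroyed only after the QP
+ * and CQ that were created on it. */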
+ if (txq->rd != NULL) {
+ struct ibv_exp_destroy_res_domain_attr attr = {
+ .comp_mask = 0,
+ };
+
+ assert(txq->priv != NULL);
+ assert(txq->priv->ctx != NULL);
+ claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx,
+ txq->rd,
+ &attr));
+ }
for (i = 0; (i != elemof(txq->mp2mr)); ++i) {
if (txq->mp2mr[i].mp == NULL)
break;
* Manage TX completions.
*
* When sending a burst, mlx4_tx_burst() posts several WRs.
- * To improve performance, a completion event is only required for the last of
- * them. Doing so discards completion information for other WRs, but this
- * information would not be used anyway.
+ * To improve performance, a completion event is only required once every
+ * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
+ * for other WRs, but this information would not be used anyway.
*
* @param txq
* Pointer to TX queue structure.
txq_complete(struct txq *txq)
{
unsigned int elts_comp = txq->elts_comp;
- unsigned int elts_tail;
+ unsigned int elts_tail = txq->elts_tail;
const unsigned int elts_n = txq->elts_n;
- struct ibv_wc wcs[elts_comp];
int wcs_n;
if (unlikely(elts_comp == 0))
return 0;
#ifdef DEBUG_SEND
DEBUG("%p: processing %u work requests completions",
(void *)txq, elts_comp);
#endif
- wcs_n = ibv_poll_cq(txq->cq, elts_comp, wcs);
+ wcs_n = txq->if_cq->poll_cnt(txq->cq, elts_comp);
if (unlikely(wcs_n == 0))
return 0;
if (unlikely(wcs_n < 0)) {
elts_comp -= wcs_n;
assert(elts_comp <= txq->elts_comp);
/*
- * Work Completion ID contains the associated element index in
- * (*txq->elts)[]. Since WCs are returned in order, we only need to
- * look at the last WC to clear older Work Requests.
- *
* Assume WC status is successful as nothing can be done about it
* anyway.
*/
- elts_tail = WR_ID(wcs[wcs_n - 1].wr_id).id;
- /* Consume the last WC. */
- if (++elts_tail >= elts_n)
- elts_tail = 0;
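+ /*
+ * Each reported completion covers elts_comp_cd_init packets, so the
+ * tail advances by that amount per completion: e.g. two completions
+ * with elts_comp_cd_init == 32 release 64 elements.
+ */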
+ elts_tail += wcs_n * txq->elts_comp_cd_init;
+ if (elts_tail >= elts_n)
+ elts_tail -= elts_n;
txq->elts_tail = elts_tail;
txq->elts_comp = elts_comp;
return 0;
}
+struct mlx4_check_mempool_data {
+ int ret;
+ char *start;
+ char *end;
+};
+
+/* Called by mlx4_check_mempool() when iterating the memory chunks. */
+static void mlx4_check_mempool_cb(struct rte_mempool *mp,
+ void *opaque, struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx)
+{
+ struct mlx4_check_mempool_data *data = opaque;
+
+ (void)mp;
+ (void)mem_idx;
+
+ /* It has already failed, skip the remaining chunks. */
+ if (data->ret != 0)
+ return;
+ /* It is the first chunk. */
+ if (data->start == NULL && data->end == NULL) {
+ data->start = memhdr->addr;
+ data->end = data->start + memhdr->len;
+ return;
+ }
+ if (data->end == memhdr->addr) {
+ data->end += memhdr->len;
+ return;
+ }
+ if (data->start == (char *)memhdr->addr + memhdr->len) {
+ data->start -= memhdr->len;
+ return;
+ }
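+ /*
+ * E.g. chunks [A, A+len1) and [A+len1, A+len1+len2) merge, in either
+ * order, into [A, A+len1+len2); a chunk touching neither end of the
+ * current area falls through to the error below.
+ */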
+ /* Error, mempool is not virtually contiguous. */
+ data->ret = -1;
+}
+
+/**
+ * Check if a mempool can be used: it must be virtually contiguous.
+ *
+ * @param[in] mp
+ * Pointer to memory pool.
+ * @param[out] start
+ * Pointer to the start address of the mempool virtual memory area.
+ * @param[out] end
+ * Pointer to the end address of the mempool virtual memory area.
+ *
+ * @return
+ * 0 on success (mempool is virtually contiguous), -1 on error.
+ */
+static int mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start,
+ uintptr_t *end)
+{
+ struct mlx4_check_mempool_data data;
+
+ memset(&data, 0, sizeof(data));
+ rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
+ *start = (uintptr_t)data.start;
+ *end = (uintptr_t)data.end;
+
+ return data.ret;
+}
+
+/* For best performance, this function should not be inlined. */
+static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)
+ __attribute__((noinline));
+
+/**
+ * Register mempool as a memory region.
+ *
+ * @param pd
+ * Pointer to protection domain.
+ * @param mp
+ * Pointer to memory pool.
+ *
+ * @return
+ * Memory region pointer, NULL in case of error.
+ */
+static struct ibv_mr *
+mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
+{
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ uintptr_t start;
+ uintptr_t end;
+ unsigned int i;
+
+ if (mlx4_check_mempool(mp, &start, &end) != 0) {
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return NULL;
+ }
+
+ DEBUG("mempool %p area start=%p end=%p size=%zu",
+ (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ /* Round start and end to page boundary if found in memory segments. */
+ for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
+ uintptr_t addr = (uintptr_t)ms[i].addr;
+ size_t len = ms[i].len;
+ unsigned int align = ms[i].hugepage_sz;
+
+ if ((start > addr) && (start < addr + len))
+ start = RTE_ALIGN_FLOOR(start, align);
+ if ((end > addr) && (end < addr + len))
+ end = RTE_ALIGN_CEIL(end, align);
+ }
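+ /* E.g. with 2 MiB hugepages, start and end are extended to 2 MiB
+ * boundaries so the MR covers the whole pages backing the pool. */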
+ DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
+ (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ return ibv_reg_mr(pd,
+ (void *)start,
+ end - start,
+ IBV_ACCESS_LOCAL_WRITE);
+}
+
+/**
+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
+ * the cloned mbuf is allocated is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+txq_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
+
/**
* Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
* Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
}
}
/* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool %p", (void *)txq, (void *)mp);
- mr = ibv_reg_mr(txq->priv->pd,
- (void *)mp->elt_va_start,
- (mp->elt_va_end - mp->elt_va_start),
- (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
+ DEBUG("%p: discovered new memory pool \"%s\" (%p)",
+ (void *)txq, mp->name, (void *)mp);
+ mr = mlx4_mp2mr(txq->priv->pd, mp);
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
(void *)txq);
DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
(void *)txq);
--i;
- claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
+ claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
memmove(&txq->mp2mr[0], &txq->mp2mr[1],
(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
}
txq->mp2mr[i].mp = mp;
txq->mp2mr[i].mr = mr;
txq->mp2mr[i].lkey = mr->lkey;
- DEBUG("%p: new MR lkey for MP %p: 0x%08" PRIu32,
- (void *)txq, (void *)mp, txq->mp2mr[i].lkey);
+ DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
+ (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
return txq->mp2mr[i].lkey;
}
+struct txq_mp2mr_mbuf_check_data {
+ int ret;
+};
+
+/**
+ * Callback function for rte_mempool_obj_iter() to check whether a given
+ * mempool object looks like a mbuf.
+ *
+ * @param[in] mp
+ * The mempool pointer.
+ * @param[in] arg
+ * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
+ * return value.
+ * @param[in] obj
+ * Object address.
+ * @param index
+ * Object index, unused.
+ */
+static void
+txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
+ uint32_t index __rte_unused)
+{
+ struct txq_mp2mr_mbuf_check_data *data = arg;
+ struct rte_mbuf *buf = obj;
+
+ /* Check whether mbuf structure fits element size and whether mempool
+ * pointer is valid. */
+ if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
+ data->ret = -1;
+}
+
+/**
+ * Iterator function for rte_mempool_walk() to register existing mempools and
+ * fill the MP to MR cache of a TX queue.
+ *
+ * @param[in] mp
+ * Memory Pool to register.
+ * @param[in] arg
+ * Pointer to TX queue structure.
+ */
+static void
+txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
+{
+ struct txq *txq = arg;
+ struct txq_mp2mr_mbuf_check_data data = {
+ .ret = 0,
+ };
+
+ /* Register mempool only if the first element looks like a mbuf. */
+ if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
+ data.ret == -1)
+ return;
+ txq_mp2mr(txq, mp);
+}
+
+#if MLX4_PMD_SGE_WR_N > 1
+
/**
* Copy scattered mbuf contents to a single linear buffer.
*
return size;
}
+/**
+ * Handle scattered buffers for mlx4_tx_burst().
+ *
+ * @param txq
+ * TX queue structure.
+ * @param segs
+ * Number of segments in buf.
+ * @param elt
+ * TX queue element to fill.
+ * @param[in] buf
+ * Buffer to process.
+ * @param elts_head
+ * Index of the linear buffer to use if necessary (normally txq->elts_head).
+ * @param[out] sges
+ * Array filled with SGEs on success.
+ *
+ * @return
+ * A structure containing the processed packet size in bytes and the
+ * number of SGEs. Both fields are set to (unsigned int)-1 in case of
+ * failure.
+ */
+static struct tx_burst_sg_ret {
+ unsigned int length;
+ unsigned int num;
+}
+tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
+ struct rte_mbuf *buf, unsigned int elts_head,
+ struct ibv_sge (*sges)[MLX4_PMD_SGE_WR_N])
+{
+ unsigned int sent_size = 0;
+ unsigned int j;
+ int linearize = 0;
+
+ /* When there are too many segments, extra segments are
+ * linearized in the last SGE. */
+ if (unlikely(segs > elemof(*sges))) {
+ segs = (elemof(*sges) - 1);
+ linearize = 1;
+ }
+ /* Update element. */
+ elt->buf = buf;
+ /* Register segments as SGEs. */
+ for (j = 0; (j != segs); ++j) {
+ struct ibv_sge *sge = &(*sges)[j];
+ uint32_t lkey;
+
+ /* Retrieve Memory Region key for this memory pool. */
+ lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+ if (unlikely(lkey == (uint32_t)-1)) {
+ /* MR does not exist. */
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ /* Clean up TX element. */
+ elt->buf = NULL;
+ goto stop;
+ }
+ /* Update SGE. */
+ sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ if (txq->priv->vf)
+ rte_prefetch0((volatile void *)
+ (uintptr_t)sge->addr);
+ sge->length = DATA_LEN(buf);
+ sge->lkey = lkey;
+ sent_size += sge->length;
+ buf = NEXT(buf);
+ }
+ /* If buf is not NULL here and is not going to be linearized,
+ * nb_segs is not valid. */
+ assert(j == segs);
+ assert((buf == NULL) || (linearize));
+ /* Linearize extra segments. */
+ if (linearize) {
+ struct ibv_sge *sge = &(*sges)[segs];
+ linear_t *linear = &(*txq->elts_linear)[elts_head];
+ unsigned int size = linearize_mbuf(linear, buf);
+
+ assert(segs == (elemof(*sges) - 1));
+ if (size == 0) {
+ /* Invalid packet. */
+ DEBUG("%p: packet too large to be linearized.",
+ (void *)txq);
+ /* Clean up TX element. */
+ elt->buf = NULL;
+ goto stop;
+ }
+ /* If MLX4_PMD_SGE_WR_N is 1, free mbuf immediately. */
+ if (elemof(*sges) == 1) {
+ do {
+ struct rte_mbuf *next = NEXT(buf);
+
+ rte_pktmbuf_free_seg(buf);
+ buf = next;
+ } while (buf != NULL);
+ elt->buf = NULL;
+ }
+ /* Update SGE. */
+ sge->addr = (uintptr_t)&(*linear)[0];
+ sge->length = size;
+ sge->lkey = txq->mr_linear->lkey;
+ sent_size += size;
+ /* Include last segment. */
+ segs++;
+ }
+ return (struct tx_burst_sg_ret){
+ .length = sent_size,
+ .num = segs,
+ };
+stop:
+ return (struct tx_burst_sg_ret){
+ .length = -1,
+ .num = -1,
+ };
+}
+
+#endif /* MLX4_PMD_SGE_WR_N > 1 */
+
/**
* DPDK callback for TX.
*
mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
- mlx4_send_wr_t head;
- mlx4_send_wr_t **wr_next = &head.next;
- mlx4_send_wr_t *bad_wr;
unsigned int elts_head = txq->elts_head;
- const unsigned int elts_tail = txq->elts_tail;
const unsigned int elts_n = txq->elts_n;
+ unsigned int elts_comp_cd = txq->elts_comp_cd;
+ unsigned int elts_comp = 0;
unsigned int i;
unsigned int max;
int err;
+ assert(elts_comp_cd != 0);
txq_complete(txq);
- max = (elts_n - (elts_head - elts_tail));
+ max = (elts_n - (elts_head - txq->elts_tail));
if (max > elts_n)
max -= elts_n;
assert(max >= 1);
if (max > pkts_n)
max = pkts_n;
for (i = 0; (i != max); ++i) {
struct rte_mbuf *buf = pkts[i];
+ unsigned int elts_head_next =
+ (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
+ struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
struct txq_elt *elt = &(*txq->elts)[elts_head];
- mlx4_send_wr_t *wr = &elt->wr;
unsigned int segs = NB_SEGS(buf);
-#if (MLX4_PMD_MAX_INLINE > 0) || defined(MLX4_PMD_SOFT_COUNTERS)
+#ifdef MLX4_PMD_SOFT_COUNTERS
unsigned int sent_size = 0;
#endif
- unsigned int j;
- int linearize = 0;
+ uint32_t send_flags = 0;
/* Clean up old buffer. */
- if (likely(WR_ID(wr->wr_id).offset != 0)) {
- struct rte_mbuf *tmp = (void *)
- ((uintptr_t)elt->sges[0].addr -
- WR_ID(wr->wr_id).offset);
+ if (likely(elt->buf != NULL)) {
+ struct rte_mbuf *tmp = elt->buf;
+#ifndef NDEBUG
+ /* Poisoning. */
+ memset(elt, 0x66, sizeof(*elt));
+#endif
/* Faster than rte_pktmbuf_free(). */
do {
struct rte_mbuf *next = NEXT(tmp);

rte_pktmbuf_free_seg(tmp);
tmp = next;
} while (tmp != NULL);
}
-#ifndef NDEBUG
- /* For assert(). */
- WR_ID(wr->wr_id).offset = 0;
- for (j = 0; ((int)j < wr->num_sge); ++j) {
- elt->sges[j].addr = 0;
- elt->sges[j].length = 0;
- elt->sges[j].lkey = 0;
+ /* Request TX completion. */
+ if (unlikely(--elts_comp_cd == 0)) {
+ elts_comp_cd = txq->elts_comp_cd_init;
+ ++elts_comp;
+ send_flags |= IBV_EXP_QP_BURST_SIGNALED;
}
- wr->next = NULL;
- wr->num_sge = 0;
-#endif
- /* Sanity checks, most of which are only relevant with
- * debugging enabled. */
- assert(WR_ID(wr->wr_id).id == elts_head);
- assert(WR_ID(wr->wr_id).offset == 0);
- assert(wr->next == NULL);
- assert(wr->sg_list == &elt->sges[0]);
- assert(wr->num_sge == 0);
- assert(wr->opcode == IBV_WR_SEND);
- /* When there are too many segments, extra segments are
- * linearized in the last SGE. */
- if (unlikely(segs > elemof(elt->sges))) {
- segs = (elemof(elt->sges) - 1);
- linearize = 1;
+ /* Should we enable HW CKSUM offload? */
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
+ /* HW does not support checksum offloads at arbitrary
+ * offsets but automatically recognizes the packet
+ * type. For inner L3/L4 checksums, only VXLAN (UDP)
+ * tunnels are currently supported. */
+ if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
+ send_flags |= IBV_EXP_QP_BURST_TUNNEL;
}
- /* Set WR fields. */
- assert((rte_pktmbuf_mtod(buf, uintptr_t) -
- (uintptr_t)buf) <= 0xffff);
- WR_ID(wr->wr_id).offset =
- (rte_pktmbuf_mtod(buf, uintptr_t) -
- (uintptr_t)buf);
- wr->num_sge = segs;
- /* Register segments as SGEs. */
- for (j = 0; (j != segs); ++j) {
- struct ibv_sge *sge = &elt->sges[j];
+ if (likely(segs == 1)) {
+ uintptr_t addr;
+ uint32_t length;
uint32_t lkey;
+ /* Retrieve buffer information. */
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ length = DATA_LEN(buf);
/* Retrieve Memory Region key for this memory pool. */
- lkey = txq_mp2mr(txq, buf->pool);
+ lkey = txq_mp2mr(txq, txq_mb2mp(buf));
if (unlikely(lkey == (uint32_t)-1)) {
/* MR does not exist. */
DEBUG("%p: unable to get MP <-> MR"
" association", (void *)txq);
/* Clean up TX element. */
- WR_ID(elt->wr.wr_id).offset = 0;
-#ifndef NDEBUG
- /* For assert(). */
- while (j) {
- --j;
- --sge;
- sge->addr = 0;
- sge->length = 0;
- sge->lkey = 0;
- }
- wr->num_sge = 0;
-#endif
+ elt->buf = NULL;
goto stop;
}
- /* Sanity checks, only relevant with debugging
- * enabled. */
- assert(sge->addr == 0);
- assert(sge->length == 0);
- assert(sge->lkey == 0);
- /* Update SGE. */
- sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ /* Update element. */
+ elt->buf = buf;
if (txq->priv->vf)
rte_prefetch0((volatile void *)
- (uintptr_t)sge->addr);
- sge->length = DATA_LEN(buf);
- sge->lkey = lkey;
-#if (MLX4_PMD_MAX_INLINE > 0) || defined(MLX4_PMD_SOFT_COUNTERS)
- sent_size += sge->length;
-#endif
- buf = NEXT(buf);
- }
- /* If buf is not NULL here and is not going to be linearized,
- * nb_segs is not valid. */
- assert(j == segs);
- assert((buf == NULL) || (linearize));
- /* Linearize extra segments. */
- if (linearize) {
- struct ibv_sge *sge = &elt->sges[segs];
- linear_t *linear = &(*txq->elts_linear)[elts_head];
- unsigned int size = linearize_mbuf(linear, buf);
-
- assert(segs == (elemof(elt->sges) - 1));
- if (size == 0) {
- /* Invalid packet. */
- DEBUG("%p: packet too large to be linearized.",
- (void *)txq);
- /* Clean up TX element. */
- WR_ID(elt->wr.wr_id).offset = 0;
-#ifndef NDEBUG
- /* For assert(). */
- while (j) {
- --j;
- --sge;
- sge->addr = 0;
- sge->length = 0;
- sge->lkey = 0;
- }
- wr->num_sge = 0;
+ (uintptr_t)addr);
+ RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+ /* Put packet into send queue. */
+#if MLX4_PMD_MAX_INLINE > 0
+ if (length <= txq->max_inline)
+ err = txq->if_qp->send_pending_inline
+ (txq->qp,
+ (void *)addr,
+ length,
+ send_flags);
+ else
#endif
+ err = txq->if_qp->send_pending
+ (txq->qp,
+ addr,
+ length,
+ lkey,
+ send_flags);
+ if (unlikely(err))
goto stop;
- }
- /* If MLX4_PMD_SGE_WR_N is 1, free mbuf immediately
- * and clear offset from WR ID. */
- if (elemof(elt->sges) == 1) {
- do {
- struct rte_mbuf *next = NEXT(buf);
+#ifdef MLX4_PMD_SOFT_COUNTERS
+ sent_size += length;
+#endif
+ } else {
+#if MLX4_PMD_SGE_WR_N > 1
+ struct ibv_sge sges[MLX4_PMD_SGE_WR_N];
+ struct tx_burst_sg_ret ret;
- rte_pktmbuf_free_seg(buf);
- buf = next;
- } while (buf != NULL);
- WR_ID(wr->wr_id).offset = 0;
- }
- /* Set WR fields and fill SGE with linear buffer. */
- ++wr->num_sge;
- /* Sanity checks, only relevant with debugging
- * enabled. */
- assert(sge->addr == 0);
- assert(sge->length == 0);
- assert(sge->lkey == 0);
- /* Update SGE. */
- sge->addr = (uintptr_t)&(*linear)[0];
- sge->length = size;
- sge->lkey = txq->mr_linear->lkey;
-#if (MLX4_PMD_MAX_INLINE > 0) || defined(MLX4_PMD_SOFT_COUNTERS)
- sent_size += size;
+ ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
+ &sges);
+ if (ret.length == (unsigned int)-1)
+ goto stop;
+ RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+ /* Put SG list into send queue. */
+ err = txq->if_qp->send_pending_sg_list
+ (txq->qp,
+ sges,
+ ret.num,
+ send_flags);
+ if (unlikely(err))
+ goto stop;
+#ifdef MLX4_PMD_SOFT_COUNTERS
+ sent_size += ret.length;
#endif
+#else /* MLX4_PMD_SGE_WR_N > 1 */
+ DEBUG("%p: TX scattered buffers support not"
+ " compiled in", (void *)txq);
+ goto stop;
+#endif /* MLX4_PMD_SGE_WR_N > 1 */
}
- /* Link WRs together for ibv_post_send(). */
- *wr_next = wr;
- wr_next = &wr->next;
-#if MLX4_PMD_MAX_INLINE > 0
- if (sent_size <= txq->max_inline)
- wr->send_flags = IBV_SEND_INLINE;
- else
-#endif
- wr->send_flags = 0;
- if (++elts_head >= elts_n)
- elts_head = 0;
+ elts_head = elts_head_next;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += sent_size;
/* Increment sent packets counter. */
txq->stats.opackets += i;
#endif
- *wr_next = NULL;
- /* The last WR is the only one asking for a completion event. */
- containerof(wr_next, mlx4_send_wr_t, next)->
- send_flags |= IBV_SEND_SIGNALED;
- err = mlx4_post_send(txq->qp, head.next, &bad_wr);
+ /* Ring QP doorbell. */
+ err = txq->if_qp->send_flush(txq->qp);
if (unlikely(err)) {
- unsigned int unsent = 0;
-
- /* An error occurred, completion event is lost. Fix counters. */
- while (bad_wr != NULL) {
- struct txq_elt *elt =
- containerof(bad_wr, struct txq_elt, wr);
- mlx4_send_wr_t *wr = &elt->wr;
- mlx4_send_wr_t *next = wr->next;
-#if defined(MLX4_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
- unsigned int j;
-#endif
-
- assert(wr == bad_wr);
- /* Clean up TX element without freeing it, caller
- * should take care of this. */
- WR_ID(elt->wr.wr_id).offset = 0;
-#ifdef MLX4_PMD_SOFT_COUNTERS
- for (j = 0; ((int)j < wr->num_sge); ++j)
- txq->stats.obytes -= wr->sg_list[j].length;
-#endif
- ++unsent;
-#ifndef NDEBUG
- /* For assert(). */
- for (j = 0; ((int)j < wr->num_sge); ++j) {
- elt->sges[j].addr = 0;
- elt->sges[j].length = 0;
- elt->sges[j].lkey = 0;
- }
- wr->next = NULL;
- wr->num_sge = 0;
-#endif
- bad_wr = next;
- }
-#ifdef MLX4_PMD_SOFT_COUNTERS
- txq->stats.opackets -= unsent;
-#endif
- assert(i >= unsent);
- i -= unsent;
- /* "Unsend" remaining packets. */
- elts_head -= unsent;
- if (elts_head >= elts_n)
- elts_head += elts_n;
- assert(elts_head < elts_n);
- DEBUG("%p: mlx4_post_send() failed, %u unprocessed WRs: %s",
- (void *)txq, unsent,
- ((err <= -1) ? "Internal error" : strerror(err)));
- } else
- ++txq->elts_comp;
+ /* A nonzero value is not supposed to be returned.
+ * Nothing can be done about it. */
+ DEBUG("%p: send_flush() failed with error %d",
+ (void *)txq, err);
+ }
txq->elts_head = elts_head;
+ txq->elts_comp += elts_comp;
+ txq->elts_comp_cd = elts_comp_cd;
return i;
}
+/**
+ * DPDK callback for TX in secondary processes.
+ *
+ * This function configures all queues from primary process information
+ * if necessary before reverting to the normal TX burst callback.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+static uint16_t
+mlx4_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ struct txq *txq = dpdk_txq;
+ struct priv *priv = mlx4_secondary_data_setup(txq->priv);
+ struct priv *primary_priv;
+ unsigned int index;
+
+ if (priv == NULL)
+ return 0;
+ primary_priv =
+ mlx4_secondary_data[priv->dev->data->port_id].primary_priv;
+ /* Look for queue index in both private structures. */
+ for (index = 0; index != priv->txqs_n; ++index)
+ if (((*primary_priv->txqs)[index] == txq) ||
+ ((*priv->txqs)[index] == txq))
+ break;
+ if (index == priv->txqs_n)
+ return 0;
+ txq = (*priv->txqs)[index];
+ return priv->dev->tx_pkt_burst(txq, pkts, pkts_n);
+}
+
/**
* Configure a TX queue.
*
txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf)
{
- struct priv *priv = dev->data->dev_private;
+ struct priv *priv = mlx4_get_priv(dev);
struct txq tmpl = {
.priv = priv,
.socket = socket
};
union {
- struct ibv_qp_init_attr init;
+ struct ibv_exp_query_intf_params params;
+ struct ibv_exp_qp_init_attr init;
+ struct ibv_exp_res_domain_init_attr rd;
+ struct ibv_exp_cq_init_attr cq;
struct ibv_exp_qp_attr mod;
} attr;
+ enum ibv_exp_query_intf_status status;
int ret = 0;
(void)conf; /* Thresholds configuration (ignored). */
+ if (priv == NULL)
+ return EINVAL;
if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) {
ERROR("%p: invalid number of TX descriptors (must be a"
- " multiple of %d)", (void *)dev, desc);
+ " multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N);
return EINVAL;
}
desc /= MLX4_PMD_SGE_WR_N;
/* MRs will be registered in mp2mr[] later. */
- tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+ attr.rd = (struct ibv_exp_res_domain_init_attr){
+ .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
+ IBV_EXP_RES_DOMAIN_MSG_MODEL),
+ .thread_model = IBV_EXP_THREAD_SINGLE,
+ .msg_model = IBV_EXP_MSG_HIGH_BW,
+ };
+ tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
+ if (tmpl.rd == NULL) {
+ ret = ENOMEM;
+ ERROR("%p: RD creation failure: %s",
+ (void *)dev, strerror(ret));
+ goto error;
+ }
+ attr.cq = (struct ibv_exp_cq_init_attr){
+ .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
+ .res_domain = tmpl.rd,
+ };
+ tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
ERROR("%p: CQ creation failure: %s",
priv->device_attr.max_qp_wr);
DEBUG("priv->device_attr.max_sge is %d",
priv->device_attr.max_sge);
- attr.init = (struct ibv_qp_init_attr){
+ attr.init = (struct ibv_exp_qp_init_attr){
/* CQ to be associated with the send queue. */
.send_cq = tmpl.cq,
/* CQ to be associated with the receive queue. */
.qp_type = IBV_QPT_RAW_PACKET,
/* Do *NOT* enable this, completions events are managed per
* TX burst. */
- .sq_sig_all = 0
+ .sq_sig_all = 0,
+ .pd = priv->pd,
+ .res_domain = tmpl.rd,
+ .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
};
- tmpl.qp = ibv_create_qp(priv->pd, &attr.init);
+ tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ret = (errno ? errno : EINVAL);
ERROR("%p: QP creation failure: %s",
(void *)dev, strerror(ret));
goto error;
}
+ attr.params = (struct ibv_exp_query_intf_params){
+ .intf_scope = IBV_EXP_INTF_GLOBAL,
+ .intf = IBV_EXP_INTF_CQ,
+ .obj = tmpl.cq,
+ };
+ tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
+ if (tmpl.if_cq == NULL) {
+ ERROR("%p: CQ interface family query failed with status %d",
+ (void *)dev, status);
+ goto error;
+ }
+ attr.params = (struct ibv_exp_query_intf_params){
+ .intf_scope = IBV_EXP_INTF_GLOBAL,
+ .intf = IBV_EXP_INTF_QP_BURST,
+ .obj = tmpl.qp,
+#ifdef HAVE_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK
+ /* MC loopback must be disabled when not using a VF. */
+ .family_flags =
+ (!priv->vf ?
+ IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK :
+ 0),
+#endif
+ };
+ tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
+ if (tmpl.if_qp == NULL) {
+ ERROR("%p: QP interface family query failed with status %d",
+ (void *)dev, status);
+ goto error;
+ }
/* Clean up txq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
txq_cleanup(txq);
*txq = tmpl;
DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
+ /* Pre-register known mempools. */
+ rte_mempool_walk(txq_mp2mr_iter, txq);
assert(ret == 0);
return 0;
error:
struct txq *txq = (*priv->txqs)[idx];
int ret;
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
priv_lock(priv);
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
struct priv *priv;
unsigned int i;
+ if (mlx4_is_secondary())
+ return;
if (txq == NULL)
return;
priv = txq->priv;
DEBUG("%p: allocated and configured %u WRs (%zu segments)",
(void *)rxq, elts_n, (elts_n * elemof((*elts)[0].sges)));
rxq->elts_n = elts_n;
+ rxq->elts_head = 0;
rxq->elts.sp = elts;
assert(ret == 0);
return 0;
DEBUG("%p: allocated and configured %u single-segment WRs",
(void *)rxq, elts_n);
rxq->elts_n = elts_n;
+ rxq->elts_head = 0;
rxq->elts.no_sp = elts;
assert(ret == 0);
return 0;
}
/**
- * Unregister a MAC address from a RX queue.
+ * Delete flow steering rule.
*
* @param rxq
* Pointer to RX queue structure.
* @param mac_index
* MAC address index.
+ * @param vlan_index
+ * VLAN index.
*/
static void
-rxq_mac_addr_del(struct rxq *rxq, unsigned int mac_index)
+rxq_del_flow(struct rxq *rxq, unsigned int mac_index, unsigned int vlan_index)
{
#ifndef NDEBUG
struct priv *priv = rxq->priv;
const uint8_t (*mac)[ETHER_ADDR_LEN] =
(const uint8_t (*)[ETHER_ADDR_LEN])
priv->mac[mac_index].addr_bytes;
#endif
+ assert(rxq->mac_flow[mac_index][vlan_index] != NULL);
+ DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
+ " (VLAN ID %" PRIu16 ")",
+ (void *)rxq,
+ (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
+ mac_index, priv->vlan_filter[vlan_index].id);
+ claim_zero(ibv_destroy_flow(rxq->mac_flow[mac_index][vlan_index]));
+ rxq->mac_flow[mac_index][vlan_index] = NULL;
+}
+
+/**
+ * Unregister a MAC address from a RX queue.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param mac_index
+ * MAC address index.
+ */
+static void
+rxq_mac_addr_del(struct rxq *rxq, unsigned int mac_index)
+{
+ struct priv *priv = rxq->priv;
+ unsigned int i;
+ unsigned int vlans = 0;
assert(mac_index < elemof(priv->mac));
- if (!BITFIELD_ISSET(rxq->mac_configured, mac_index)) {
- assert(rxq->mac_flow[mac_index] == NULL);
+ if (!BITFIELD_ISSET(rxq->mac_configured, mac_index))
return;
+ for (i = 0; (i != elemof(priv->vlan_filter)); ++i) {
+ if (!priv->vlan_filter[i].enabled)
+ continue;
+ rxq_del_flow(rxq, mac_index, i);
+ vlans++;
+ }
+ if (!vlans) {
+ rxq_del_flow(rxq, mac_index, 0);
}
- DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x"
- " index %u",
- (void *)rxq,
- (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
- mac_index);
- assert(rxq->mac_flow[mac_index] != NULL);
- claim_zero(ibv_exp_destroy_flow(rxq->mac_flow[mac_index]));
- rxq->mac_flow[mac_index] = NULL;
BITFIELD_RESET(rxq->mac_configured, mac_index);
}
static void rxq_promiscuous_disable(struct rxq *);
/**
- * Register a MAC address in a RX queue.
+ * Add single flow steering rule.
*
* @param rxq
* Pointer to RX queue structure.
* @param mac_index
* MAC address index to register.
+ * @param vlan_index
+ * VLAN index. Use -1 for a flow without VLAN.
*
* @return
* 0 on success, errno value on failure.
*/
static int
-rxq_mac_addr_add(struct rxq *rxq, unsigned int mac_index)
+rxq_add_flow(struct rxq *rxq, unsigned int mac_index, unsigned int vlan_index)
{
+ struct ibv_flow *flow;
struct priv *priv = rxq->priv;
const uint8_t (*mac)[ETHER_ADDR_LEN] =
- (const uint8_t (*)[ETHER_ADDR_LEN])
- priv->mac[mac_index].addr_bytes;
- unsigned int vlans = 0;
- unsigned int specs = 0;
- unsigned int i, j;
- struct ibv_exp_flow *flow;
-
- assert(mac_index < elemof(priv->mac));
- if (BITFIELD_ISSET(rxq->mac_configured, mac_index))
- rxq_mac_addr_del(rxq, mac_index);
- /* Number of configured VLANs. */
- for (i = 0; (i != elemof(priv->vlan_filter)); ++i)
- if (priv->vlan_filter[i].enabled)
- ++vlans;
- specs = (vlans ? vlans : 1);
+ (const uint8_t (*)[ETHER_ADDR_LEN])
+ priv->mac[mac_index].addr_bytes;
/* Allocate flow specification on the stack. */
- struct ibv_exp_flow_attr data
- [1 +
- (sizeof(struct ibv_exp_flow_spec_eth[specs]) /
- sizeof(struct ibv_exp_flow_attr)) +
- !!(sizeof(struct ibv_exp_flow_spec_eth[specs]) %
- sizeof(struct ibv_exp_flow_attr))];
- struct ibv_exp_flow_attr *attr = (void *)&data[0];
- struct ibv_exp_flow_spec_eth *spec = (void *)&data[1];
+ struct __attribute__((packed)) {
+ struct ibv_flow_attr attr;
+ struct ibv_flow_spec_eth spec;
+ } data;
+ struct ibv_flow_attr *attr = &data.attr;
+ struct ibv_flow_spec_eth *spec = &data.spec;
+ assert(mac_index < elemof(priv->mac));
+ assert((vlan_index < elemof(priv->vlan_filter)) || (vlan_index == -1u));
/*
* No padding must be inserted by the compiler between attr and spec.
* This layout is expected by libibverbs.
*/
assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
- *attr = (struct ibv_exp_flow_attr){
- .type = IBV_EXP_FLOW_ATTR_NORMAL,
- .num_of_specs = specs,
+ *attr = (struct ibv_flow_attr){
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .num_of_specs = 1,
.port = priv->port,
.flags = 0
};
- *spec = (struct ibv_exp_flow_spec_eth){
- .type = IBV_EXP_FLOW_SPEC_ETH,
+ *spec = (struct ibv_flow_spec_eth){
+ .type = IBV_FLOW_SPEC_ETH,
.size = sizeof(*spec),
.val = {
.dst_mac = {
(*mac)[0], (*mac)[1], (*mac)[2],
(*mac)[3], (*mac)[4], (*mac)[5]
- }
+ },
+ .vlan_tag = ((vlan_index != -1u) ?
+ htons(priv->vlan_filter[vlan_index].id) :
+ 0),
},
.mask = {
.dst_mac = "\xff\xff\xff\xff\xff\xff",
- .vlan_tag = (vlans ? htons(0xfff) : 0)
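+ /* htons(0xfff) compares only the 12-bit VLAN ID;
+ * PCP and DEI bits are ignored. */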
+ .vlan_tag = ((vlan_index != -1u) ? htons(0xfff) : 0),
}
};
- /* Fill VLAN specifications. */
- for (i = 0, j = 0; (i != elemof(priv->vlan_filter)); ++i) {
- if (!priv->vlan_filter[i].enabled)
- continue;
- assert(j != vlans);
- if (j)
- spec[j] = spec[0];
- spec[j].val.vlan_tag = htons(priv->vlan_filter[i].id);
- ++j;
- }
DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
- " (%u VLAN(s) configured)",
+ " (VLAN %s %" PRIu16 ")",
(void *)rxq,
(*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
mac_index,
- vlans);
+ ((vlan_index != -1u) ? "ID" : "index"),
+ ((vlan_index != -1u) ? priv->vlan_filter[vlan_index].id : -1u));
/* Create related flow. */
errno = 0;
- flow = ibv_exp_create_flow(rxq->qp, attr);
+ flow = ibv_create_flow(rxq->qp, attr);
if (flow == NULL) {
- int err = errno;
-
- /* Flow creation failure is not fatal when in DMFS A0 mode.
- * Ignore error if promiscuity is already enabled or can be
- * enabled. */
- if (priv->promisc_ok)
- return 0;
- if ((rxq->promisc_flow != NULL) ||
- (rxq_promiscuous_enable(rxq) == 0)) {
- if (rxq->promisc_flow != NULL)
- rxq_promiscuous_disable(rxq);
- WARN("cannot configure normal flow but promiscuous"
- " mode is fine, assuming promiscuous optimization"
- " is enabled"
- " (options mlx4_core log_num_mgm_entry_size=-7)");
- priv->promisc_ok = 1;
- return 0;
- }
- errno = err;
/* It's not clear whether errno is always set in this case. */
ERROR("%p: flow configuration failed, errno=%d: %s",
(void *)rxq, errno,
(errno ? strerror(errno) : "Unknown error"));
if (errno)
return errno;
return EINVAL;
}
- assert(rxq->mac_flow[mac_index] == NULL);
- rxq->mac_flow[mac_index] = flow;
+ if (vlan_index == -1u)
+ vlan_index = 0;
+ assert(rxq->mac_flow[mac_index][vlan_index] == NULL);
+ rxq->mac_flow[mac_index][vlan_index] = flow;
+ return 0;
+}
+
+/**
+ * Register a MAC address in a RX queue.
+ *
+ * @param rxq
+ * Pointer to RX queue structure.
+ * @param mac_index
+ * MAC address index to register.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+rxq_mac_addr_add(struct rxq *rxq, unsigned int mac_index)
+{
+ struct priv *priv = rxq->priv;
+ unsigned int i;
+ unsigned int vlans = 0;
+ int ret;
+
+ assert(mac_index < elemof(priv->mac));
+ if (BITFIELD_ISSET(rxq->mac_configured, mac_index))
+ rxq_mac_addr_del(rxq, mac_index);
+ /* Add one flow per configured VLAN ID. */
+ for (i = 0; (i != elemof(priv->vlan_filter)); ++i) {
+ if (!priv->vlan_filter[i].enabled)
+ continue;
+ /* Create related flow. */
+ ret = rxq_add_flow(rxq, mac_index, i);
+ if (!ret) {
+ vlans++;
+ continue;
+ }
+ /* Failure, rollback. */
+ while (i != 0)
+ if (priv->vlan_filter[--i].enabled)
+ rxq_del_flow(rxq, mac_index, i);
+ assert(ret > 0);
+ return ret;
+ }
+ /* In case there is no VLAN filter. */
+ if (!vlans) {
+ ret = rxq_add_flow(rxq, mac_index, -1);
+ if (ret)
+ return ret;
+ }
BITFIELD_SET(rxq->mac_configured, mac_index);
return 0;
}
static int
rxq_allmulticast_enable(struct rxq *rxq)
{
- struct ibv_exp_flow *flow;
- struct ibv_exp_flow_attr attr = {
- .type = IBV_EXP_FLOW_ATTR_MC_DEFAULT,
+ struct ibv_flow *flow;
+ struct ibv_flow_attr attr = {
+ .type = IBV_FLOW_ATTR_MC_DEFAULT,
.num_of_specs = 0,
.port = rxq->priv->port,
.flags = 0
if (rxq->allmulti_flow != NULL)
return EBUSY;
errno = 0;
- flow = ibv_exp_create_flow(rxq->qp, &attr);
+ flow = ibv_create_flow(rxq->qp, &attr);
if (flow == NULL) {
/* It's not clear whether errno is always set in this case. */
ERROR("%p: flow configuration failed, errno=%d: %s",
DEBUG("%p: disabling allmulticast mode", (void *)rxq);
if (rxq->allmulti_flow == NULL)
return;
- claim_zero(ibv_exp_destroy_flow(rxq->allmulti_flow));
+ claim_zero(ibv_destroy_flow(rxq->allmulti_flow));
rxq->allmulti_flow = NULL;
DEBUG("%p: allmulticast mode disabled", (void *)rxq);
}
static int
rxq_promiscuous_enable(struct rxq *rxq)
{
- struct ibv_exp_flow *flow;
- struct ibv_exp_flow_attr attr = {
- .type = IBV_EXP_FLOW_ATTR_ALL_DEFAULT,
+ struct ibv_flow *flow;
+ struct ibv_flow_attr attr = {
+ .type = IBV_FLOW_ATTR_ALL_DEFAULT,
.num_of_specs = 0,
.port = rxq->priv->port,
.flags = 0
if (rxq->promisc_flow != NULL)
return EBUSY;
errno = 0;
- flow = ibv_exp_create_flow(rxq->qp, &attr);
+ flow = ibv_create_flow(rxq->qp, &attr);
if (flow == NULL) {
/* It's not clear whether errno is always set in this case. */
ERROR("%p: flow configuration failed, errno=%d: %s",
DEBUG("%p: disabling promiscuous mode", (void *)rxq);
if (rxq->promisc_flow == NULL)
return;
- claim_zero(ibv_exp_destroy_flow(rxq->promisc_flow));
+ claim_zero(ibv_destroy_flow(rxq->promisc_flow));
rxq->promisc_flow = NULL;
DEBUG("%p: promiscuous mode disabled", (void *)rxq);
}
static void
rxq_cleanup(struct rxq *rxq)
{
+ struct ibv_exp_release_intf_params params;
+
DEBUG("cleaning up %p", (void *)rxq);
if (rxq->sp)
rxq_free_elts_sp(rxq);
else
rxq_free_elts(rxq);
+ if (rxq->if_qp != NULL) {
+ assert(rxq->priv != NULL);
+ assert(rxq->priv->ctx != NULL);
+ assert(rxq->qp != NULL);
+ params = (struct ibv_exp_release_intf_params){
+ .comp_mask = 0,
+ };
+ claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
+ rxq->if_qp,
+ ¶ms));
+ }
+ if (rxq->if_cq != NULL) {
+ assert(rxq->priv != NULL);
+ assert(rxq->priv->ctx != NULL);
+ assert(rxq->cq != NULL);
+ params = (struct ibv_exp_release_intf_params){
+ .comp_mask = 0,
+ };
+ claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
+ rxq->if_cq,
+ ¶ms));
+ }
if (rxq->qp != NULL) {
rxq_promiscuous_disable(rxq);
rxq_allmulticast_disable(rxq);
rxq_mac_addrs_del(rxq);
claim_zero(ibv_destroy_qp(rxq->qp));
}
if (rxq->cq != NULL)
claim_zero(ibv_destroy_cq(rxq->cq));
+ if (rxq->rd != NULL) {
+ struct ibv_exp_destroy_res_domain_attr attr = {
+ .comp_mask = 0,
+ };
+
+ assert(rxq->priv != NULL);
+ assert(rxq->priv->ctx != NULL);
+ claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx,
+ rxq->rd,
+ &attr));
+ }
if (rxq->mr != NULL)
claim_zero(ibv_dereg_mr(rxq->mr));
memset(rxq, 0, sizeof(*rxq));
}
+/**
+ * Translate RX completion flags to packet type.
+ *
+ * @param flags
+ * RX completion flags returned by poll_length_flags().
+ *
+ * @note: fix mlx4_dev_supported_ptypes_get() if any change here.
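+ *
+ * Example: a VXLAN frame carrying IPv4 over IPv4 reports
+ * IBV_EXP_CQ_RX_TUNNEL_PACKET, IBV_EXP_CQ_RX_OUTER_IPV4_PACKET and
+ * IBV_EXP_CQ_RX_IPV4_PACKET, which translates to
+ * RTE_PTYPE_L3_IPV4 | RTE_PTYPE_INNER_L3_IPV4.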
+ *
+ * @return
+ * Packet type for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_pkt_type(uint32_t flags)
+{
+ uint32_t pkt_type;
+
+ if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
+ pkt_type =
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, RTE_PTYPE_L3_IPV6) |
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_INNER_L3_IPV4) |
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_INNER_L3_IPV6);
+ else
+ pkt_type =
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_L3_IPV6);
+ return pkt_type;
+}
+
+/**
+ * Translate RX completion flags to offload flags.
+ *
+ * @param[in] rxq
+ * Pointer to RX queue structure.
+ * @param flags
+ * RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ * Offload flags (ol_flags) for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
+{
+ uint32_t ol_flags = 0;
+
+ if (rxq->csum)
+ ol_flags |=
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_IP_CSUM_OK,
+ PKT_RX_IP_CKSUM_BAD) |
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
+ PKT_RX_L4_CKSUM_BAD);
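+ /* TRANSPOSE(~flags, OK, BAD) sets the BAD flag exactly when the
+ * corresponding OK bit is absent from flags. */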
+ /*
+ * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
+ * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
+ * (its value is 0).
+ */
+ if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
+ ol_flags |=
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
+ PKT_RX_IP_CKSUM_BAD) |
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
+ PKT_RX_L4_CKSUM_BAD);
+ return ol_flags;
+}
+
static uint16_t
mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
static uint16_t
mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct rxq *rxq = (struct rxq *)dpdk_rxq;
struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
- struct ibv_wc wcs[pkts_n];
+ const unsigned int elts_n = rxq->elts_n;
+ unsigned int elts_head = rxq->elts_head;
struct ibv_recv_wr head;
struct ibv_recv_wr **next = &head.next;
struct ibv_recv_wr *bad_wr;
- int ret = 0;
- int wcs_n;
- int i;
+ unsigned int i;
+ unsigned int pkts_ret = 0;
+ int ret;
if (unlikely(!rxq->sp))
return mlx4_rx_burst(dpdk_rxq, pkts, pkts_n);
if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */
return 0;
- wcs_n = ibv_poll_cq(rxq->cq, pkts_n, wcs);
- if (unlikely(wcs_n == 0))
- return 0;
- if (unlikely(wcs_n < 0)) {
- DEBUG("rxq=%p, ibv_poll_cq() failed (wc_n=%d)",
- (void *)rxq, wcs_n);
- return 0;
- }
- assert(wcs_n <= (int)pkts_n);
- /* For each work completion. */
- for (i = 0; (i != wcs_n); ++i) {
- struct ibv_wc *wc = &wcs[i];
- uint64_t wr_id = wc->wr_id;
- uint32_t len = wc->byte_len;
- struct rxq_elt_sp *elt = &(*elts)[wr_id];
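+	/*
+	 * Work completions are returned in posting order, so elts_head
+	 * can track which element each completion belongs to directly,
+	 * instead of decoding it from a work completion wr_id.
+	 */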
+ for (i = 0; (i != pkts_n); ++i) {
+ struct rxq_elt_sp *elt = &(*elts)[elts_head];
struct ibv_recv_wr *wr = &elt->wr;
+ uint64_t wr_id = wr->wr_id;
+ unsigned int len;
+ unsigned int pkt_buf_len;
struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */
struct rte_mbuf **pkt_buf_next = &pkt_buf;
unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
unsigned int j = 0;
+ uint32_t flags;
/* Sanity checks. */
+#ifdef NDEBUG
+ (void)wr_id;
+#endif
assert(wr_id < rxq->elts_n);
- assert(wr_id == wr->wr_id);
assert(wr->sg_list == elt->sges);
assert(wr->num_sge == elemof(elt->sges));
- /* Link completed WRs together for repost. */
- *next = wr;
- next = &wr->next;
- if (unlikely(wc->status != IBV_WC_SUCCESS)) {
- /* Whatever, just repost the offending WR. */
- DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work completion"
- " status (%d): %s",
- (void *)rxq, wc->wr_id, wc->status,
- ibv_wc_status_str(wc->status));
+ assert(elts_head < rxq->elts_n);
+ assert(rxq->elts_head < rxq->elts_n);
+ ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
+ &flags);
+ if (unlikely(ret < 0)) {
+ struct ibv_wc wc;
+ int wcs_n;
+
+ DEBUG("rxq=%p, poll_length() failed (ret=%d)",
+ (void *)rxq, ret);
+ /* ibv_poll_cq() must be used in case of failure. */
+ wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
+ if (unlikely(wcs_n == 0))
+ break;
+ if (unlikely(wcs_n < 0)) {
+ DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
+ (void *)rxq, wcs_n);
+ break;
+ }
+ assert(wcs_n == 1);
+ if (unlikely(wc.status != IBV_WC_SUCCESS)) {
+ /* Whatever, just repost the offending WR. */
+ DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
+ " completion status (%d): %s",
+ (void *)rxq, wc.wr_id, wc.status,
+ ibv_wc_status_str(wc.status));
#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increase dropped packets counter. */
- ++rxq->stats.idropped;
+ /* Increment dropped packets counter. */
+ ++rxq->stats.idropped;
#endif
- goto repost;
+ /* Link completed WRs together for repost. */
+ *next = wr;
+ next = &wr->next;
+ goto repost;
+ }
+ ret = wc.byte_len;
}
+ if (ret == 0)
+ break;
+ len = ret;
+ pkt_buf_len = len;
+ /* Link completed WRs together for repost. */
+ *next = wr;
+ next = &wr->next;
/*
* Replace spent segments with new ones, concatenate and
* return them as pkt_buf.
* cacheline while allocating rep.
*/
rte_prefetch0(seg);
- rep = __rte_mbuf_raw_alloc(rxq->mp);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(rep == NULL)) {
/*
* Unable to allocate a replacement mbuf,
assert(j != 0);
NB_SEGS(pkt_buf) = j;
PORT(pkt_buf) = rxq->port_id;
- PKT_LEN(pkt_buf) = wc->byte_len;
- pkt_buf->ol_flags = 0;
+ PKT_LEN(pkt_buf) = pkt_buf_len;
+ pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
+ pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
/* Return packet. */
*(pkts++) = pkt_buf;
- ++ret;
+ ++pkts_ret;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase bytes counter. */
- rxq->stats.ibytes += wc->byte_len;
+ rxq->stats.ibytes += pkt_buf_len;
#endif
repost:
+ if (++elts_head >= elts_n)
+ elts_head = 0;
continue;
}
+ if (unlikely(i == 0))
+ return 0;
*next = NULL;
/* Repost WRs. */
#ifdef DEBUG_RECV
- DEBUG("%p: reposting %d WRs starting from %" PRIu64 " (%p)",
- (void *)rxq, wcs_n, wcs[0].wr_id, (void *)head.next);
+ DEBUG("%p: reposting %d WRs", (void *)rxq, i);
#endif
- i = ibv_post_recv(rxq->qp, head.next, &bad_wr);
- if (unlikely(i)) {
+ ret = ibv_post_recv(rxq->qp, head.next, &bad_wr);
+ if (unlikely(ret)) {
/* Inability to repost WRs is fatal. */
DEBUG("%p: ibv_post_recv(): failed for WR %p: %s",
(void *)rxq->priv,
(void *)bad_wr,
- strerror(i));
+ strerror(ret));
abort();
}
+ rxq->elts_head = elts_head;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase packets counter. */
- rxq->stats.ipackets += ret;
+ rxq->stats.ipackets += pkts_ret;
#endif
- return ret;
+ return pkts_ret;
}
/**
{
struct rxq *rxq = (struct rxq *)dpdk_rxq;
struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
- struct ibv_wc wcs[pkts_n];
- struct ibv_recv_wr head;
- struct ibv_recv_wr **next = &head.next;
- struct ibv_recv_wr *bad_wr;
- int ret = 0;
- int wcs_n;
- int i;
+ const unsigned int elts_n = rxq->elts_n;
+ unsigned int elts_head = rxq->elts_head;
+ struct ibv_sge sges[pkts_n];
+ unsigned int i;
+ unsigned int pkts_ret = 0;
+ int ret;
if (unlikely(rxq->sp))
return mlx4_rx_burst_sp(dpdk_rxq, pkts, pkts_n);
- wcs_n = ibv_poll_cq(rxq->cq, pkts_n, wcs);
- if (unlikely(wcs_n == 0))
- return 0;
- if (unlikely(wcs_n < 0)) {
- DEBUG("rxq=%p, ibv_poll_cq() failed (wc_n=%d)",
- (void *)rxq, wcs_n);
- return 0;
- }
- assert(wcs_n <= (int)pkts_n);
- /* For each work completion. */
- for (i = 0; (i != wcs_n); ++i) {
- struct ibv_wc *wc = &wcs[i];
- uint64_t wr_id = wc->wr_id;
- uint32_t len = wc->byte_len;
- struct rxq_elt *elt = &(*elts)[WR_ID(wr_id).id];
+ for (i = 0; (i != pkts_n); ++i) {
+ struct rxq_elt *elt = &(*elts)[elts_head];
struct ibv_recv_wr *wr = &elt->wr;
+ uint64_t wr_id = wr->wr_id;
+ unsigned int len;
struct rte_mbuf *seg = (void *)((uintptr_t)elt->sge.addr -
WR_ID(wr_id).offset);
struct rte_mbuf *rep;
+ uint32_t flags;
/* Sanity checks. */
assert(WR_ID(wr_id).id < rxq->elts_n);
- assert(wr_id == wr->wr_id);
assert(wr->sg_list == &elt->sge);
assert(wr->num_sge == 1);
- /* Link completed WRs together for repost. */
- *next = wr;
- next = &wr->next;
- if (unlikely(wc->status != IBV_WC_SUCCESS)) {
- /* Whatever, just repost the offending WR. */
- DEBUG("rxq=%p, wr_id=%" PRIu32 ": bad work completion"
- " status (%d): %s",
- (void *)rxq, WR_ID(wr_id).id, wc->status,
- ibv_wc_status_str(wc->status));
-#ifdef MLX4_PMD_SOFT_COUNTERS
- /* Increase dropped packets counter. */
- ++rxq->stats.idropped;
-#endif
- goto repost;
- }
+ assert(elts_head < rxq->elts_n);
+ assert(rxq->elts_head < rxq->elts_n);
/*
* Fetch initial bytes of packet descriptor into a
* cacheline while allocating rep.
*/
- rte_prefetch0(seg);
- rep = __rte_mbuf_raw_alloc(rxq->mp);
+ rte_mbuf_prefetch_part1(seg);
+ rte_mbuf_prefetch_part2(seg);
+ ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
+ &flags);
+ if (unlikely(ret < 0)) {
+ struct ibv_wc wc;
+ int wcs_n;
+
+ DEBUG("rxq=%p, poll_length() failed (ret=%d)",
+ (void *)rxq, ret);
+ /* ibv_poll_cq() must be used in case of failure. */
+ wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
+ if (unlikely(wcs_n == 0))
+ break;
+ if (unlikely(wcs_n < 0)) {
+ DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
+ (void *)rxq, wcs_n);
+ break;
+ }
+ assert(wcs_n == 1);
+ if (unlikely(wc.status != IBV_WC_SUCCESS)) {
+ /* Whatever, just repost the offending WR. */
+ DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
+ " completion status (%d): %s",
+ (void *)rxq, wc.wr_id, wc.status,
+ ibv_wc_status_str(wc.status));
+#ifdef MLX4_PMD_SOFT_COUNTERS
+ /* Increment dropped packets counter. */
+ ++rxq->stats.idropped;
+#endif
+ /* Add SGE to array for repost. */
+ sges[i] = elt->sge;
+ goto repost;
+ }
+ ret = wc.byte_len;
+ }
+ if (ret == 0)
+ break;
+ len = ret;
+ rep = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(rep == NULL)) {
/*
* Unable to allocate a replacement mbuf,
(uintptr_t)rep);
assert(WR_ID(wr->wr_id).id == WR_ID(wr_id).id);
+ /* Add SGE to array for repost. */
+ sges[i] = elt->sge;
+
/* Update seg information. */
SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM);
NB_SEGS(seg) = 1;
NEXT(seg) = NULL;
PKT_LEN(seg) = len;
DATA_LEN(seg) = len;
- seg->ol_flags = 0;
+ seg->packet_type = rxq_cq_to_pkt_type(flags);
+ seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
/* Return packet. */
*(pkts++) = seg;
- ++ret;
+ ++pkts_ret;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase bytes counter. */
- rxq->stats.ibytes += wc->byte_len;
+ rxq->stats.ibytes += len;
#endif
repost:
+ if (++elts_head >= elts_n)
+ elts_head = 0;
continue;
}
- *next = NULL;
+ if (unlikely(i == 0))
+ return 0;
/* Repost WRs. */
#ifdef DEBUG_RECV
- DEBUG("%p: reposting %d WRs starting from %" PRIu32 " (%p)",
- (void *)rxq, wcs_n, WR_ID(wcs[0].wr_id).id, (void *)head.next);
+ DEBUG("%p: reposting %u WRs", (void *)rxq, i);
#endif
- i = ibv_post_recv(rxq->qp, head.next, &bad_wr);
- if (unlikely(i)) {
+ ret = rxq->if_qp->recv_burst(rxq->qp, sges, i);
+ if (unlikely(ret)) {
/* Inability to repost WRs is fatal. */
- DEBUG("%p: ibv_post_recv(): failed for WR %p: %s",
+ DEBUG("%p: recv_burst(): failed (ret=%d)",
(void *)rxq->priv,
- (void *)bad_wr,
- strerror(i));
+ ret);
abort();
}
+ rxq->elts_head = elts_head;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase packets counter. */
- rxq->stats.ipackets += ret;
+ rxq->stats.ipackets += pkts_ret;
#endif
- return ret;
+ return pkts_ret;
}
-#ifdef INLINE_RECV
-
/**
- * Allocate a Queue Pair in case inline receive is supported.
+ * DPDK callback for RX in secondary processes.
*
- * @param priv
- * Pointer to private structure.
- * @param cq
- * Completion queue to associate with QP.
- * @param desc
- * Number of descriptors in QP (hint only).
+ * This function configures all queues from primary process information
+ * if necessary before reverting to the normal RX burst callback.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
*
* @return
- * QP pointer or NULL in case of error.
+ * Number of packets successfully received (<= pkts_n).
*/
-static struct ibv_qp *
-rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
+static uint16_t
+mlx4_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
{
- struct ibv_exp_qp_init_attr attr = {
- /* CQ to be associated with the send queue. */
- .send_cq = cq,
- /* CQ to be associated with the receive queue. */
- .recv_cq = cq,
- .max_inl_recv = priv->inl_recv_size,
- .cap = {
- /* Max number of outstanding WRs. */
- .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
- /* Max number of scatter/gather elements in a WR. */
- .max_recv_sge = ((priv->device_attr.max_sge <
- MLX4_PMD_SGE_WR_N) ?
- priv->device_attr.max_sge :
- MLX4_PMD_SGE_WR_N),
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- .pd = priv->pd
- };
-
- attr.comp_mask = IBV_EXP_QP_INIT_ATTR_PD;
- attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
+ struct rxq *rxq = dpdk_rxq;
+ struct priv *priv = mlx4_secondary_data_setup(rxq->priv);
+ struct priv *primary_priv;
+ unsigned int index;
- return ibv_exp_create_qp(priv->ctx, &attr);
+ if (priv == NULL)
+ return 0;
+ primary_priv =
+ mlx4_secondary_data[priv->dev->data->port_id].primary_priv;
+ /* Look for queue index in both private structures. */
+ for (index = 0; index != priv->rxqs_n; ++index)
+ if (((*primary_priv->rxqs)[index] == rxq) ||
+ ((*priv->rxqs)[index] == rxq))
+ break;
+ if (index == priv->rxqs_n)
+ return 0;
+ rxq = (*priv->rxqs)[index];
+ return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n);
}
-#else /* INLINE_RECV */
-
/**
* Allocate a Queue Pair.
+ * Optionally setup inline receive if supported.
*
* @param priv
* Pointer to private structure.
* QP pointer or NULL in case of error.
*/
static struct ibv_qp *
-rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
+rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
+ struct ibv_exp_res_domain *rd)
{
- struct ibv_qp_init_attr attr = {
+ struct ibv_exp_qp_init_attr attr = {
/* CQ to be associated with the send queue. */
.send_cq = cq,
/* CQ to be associated with the receive queue. */
priv->device_attr.max_sge :
MLX4_PMD_SGE_WR_N),
},
- .qp_type = IBV_QPT_RAW_PACKET
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
+ .pd = priv->pd,
+ .res_domain = rd,
};
- return ibv_create_qp(priv->pd, &attr);
+#ifdef INLINE_RECV
+ attr.max_inl_recv = priv->inl_recv_size;
+ attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
+#endif
+ return ibv_exp_create_qp(priv->ctx, &attr);
}
-#endif /* INLINE_RECV */
-
#ifdef RSS_SUPPORT
/**
* Allocate a RSS Queue Pair.
+ * Optionally setup inline receive if supported.
*
* @param priv
* Pointer to private structure.
*/
static struct ibv_qp *
rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
- int parent)
+ int parent, struct ibv_exp_res_domain *rd)
{
struct ibv_exp_qp_init_attr attr = {
/* CQ to be associated with the send queue. */
.send_cq = cq,
/* CQ to be associated with the receive queue. */
.recv_cq = cq,
-#ifdef INLINE_RECV
- .max_inl_recv = priv->inl_recv_size,
-#endif
.cap = {
/* Max number of outstanding WRs. */
.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
},
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_RES_DOMAIN |
IBV_EXP_QP_INIT_ATTR_QPG),
- .pd = priv->pd
+ .pd = priv->pd,
+ .res_domain = rd,
};
#ifdef INLINE_RECV
+ attr.max_inl_recv = priv->inl_recv_size;
attr.comp_mask |= IBV_EXP_QP_INIT_ATTR_INL_RECV;
#endif
if (parent) {
attr.qpg.qpg_type = IBV_EXP_QPG_PARENT;
/* TSS isn't necessary. */
attr.qpg.parent_attrib.tss_child_count = 0;
- attr.qpg.parent_attrib.rss_child_count = priv->rxqs_n;
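+		/*
+		 * The RSS child count must be a power of two; round the
+		 * configured count down, e.g. with 5 RX queues,
+		 * rte_align32pow2(5 + 1) >> 1 == 4 children, the extra
+		 * queues being created as inactive placeholders.
+		 */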
+ attr.qpg.parent_attrib.rss_child_count =
+ rte_align32pow2(priv->rxqs_n + 1) >> 1;
DEBUG("initializing parent RSS queue");
} else {
attr.qpg.qpg_type = IBV_EXP_QPG_CHILD_RX;
/* Number of descriptors and mbufs currently allocated. */
desc_n = (tmpl.elts_n * (tmpl.sp ? MLX4_PMD_SGE_WR_N : 1));
mbuf_n = desc_n;
+ /* Toggle RX checksum offload if hardware supports it. */
+ if (priv->hw_csum) {
+ tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ rxq->csum = tmpl.csum;
+ }
+ if (priv->hw_csum_l2tun) {
+ tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ rxq->csum_l2tun = tmpl.csum_l2tun;
+ }
/* Enable scattered packets support for this queue if necessary. */
if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
(dev->data->dev_conf.rxmode.max_rx_pkt_len >
* Number of descriptors to configure in queue.
* @param socket
* NUMA socket on which memory must be allocated.
+ * @param inactive
+ * If true, the queue is disabled because its index is higher or
+ * equal to the real number of queues, which must be a power of 2.
* @param[in] conf
* Thresholds parameters.
* @param mp
*/
static int
rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
+ unsigned int socket, int inactive, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
struct priv *priv = dev->data->dev_private;
.socket = socket
};
struct ibv_exp_qp_attr mod;
+ union {
+ struct ibv_exp_query_intf_params params;
+ struct ibv_exp_cq_init_attr cq;
+ struct ibv_exp_res_domain_init_attr rd;
+ } attr;
+ enum ibv_exp_query_intf_status status;
struct ibv_recv_wr *bad_wr;
struct rte_mbuf *buf;
int ret = 0;
}
if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) {
ERROR("%p: invalid number of RX descriptors (must be a"
- " multiple of %d)", (void *)dev, desc);
+ " multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N);
return EINVAL;
}
/* Get mbuf length. */
rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
rte_pktmbuf_free(buf);
+ /* Toggle RX checksum offload if hardware supports it. */
+ if (priv->hw_csum)
+ tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ if (priv->hw_csum_l2tun)
+ tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
/* Enable scattered packets support for this queue if necessary. */
if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
(dev->data->dev_conf.rxmode.max_rx_pkt_len >
DEBUG("%p: %s scattered packets support (%u WRs)",
(void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc);
/* Use the entire RX mempool as the memory region. */
- tmpl.mr = ibv_reg_mr(priv->pd,
- (void *)mp->elt_va_start,
- (mp->elt_va_end - mp->elt_va_start),
- (IBV_ACCESS_LOCAL_WRITE |
- IBV_ACCESS_REMOTE_WRITE));
+ tmpl.mr = mlx4_mp2mr(priv->pd, mp);
if (tmpl.mr == NULL) {
ret = EINVAL;
ERROR("%p: MR creation failure: %s",
goto error;
}
skip_mr:
- tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
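+	/*
+	 * Resource domains declare that the CQ and QP created below are
+	 * only ever accessed from a single thread (IBV_EXP_THREAD_SINGLE)
+	 * with a high-bandwidth message profile, so the provider can skip
+	 * internal locking on the data path.
+	 */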
+ attr.rd = (struct ibv_exp_res_domain_init_attr){
+ .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
+ IBV_EXP_RES_DOMAIN_MSG_MODEL),
+ .thread_model = IBV_EXP_THREAD_SINGLE,
+ .msg_model = IBV_EXP_MSG_HIGH_BW,
+ };
+ tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
+ if (tmpl.rd == NULL) {
+ ret = ENOMEM;
+ ERROR("%p: RD creation failure: %s",
+ (void *)dev, strerror(ret));
+ goto error;
+ }
+ attr.cq = (struct ibv_exp_cq_init_attr){
+ .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
+ .res_domain = tmpl.rd,
+ };
+ tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
ERROR("%p: CQ creation failure: %s",
DEBUG("priv->device_attr.max_sge is %d",
priv->device_attr.max_sge);
#ifdef RSS_SUPPORT
- if (priv->rss)
- tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent);
+ if (priv->rss && !inactive)
+ tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent,
+ tmpl.rd);
else
#endif /* RSS_SUPPORT */
- tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc);
+ tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd);
if (tmpl.qp == NULL) {
ret = (errno ? errno : EINVAL);
ERROR("%p: QP creation failure: %s",
/* Save port ID. */
tmpl.port_id = dev->data->port_id;
DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
+ attr.params = (struct ibv_exp_query_intf_params){
+ .intf_scope = IBV_EXP_INTF_GLOBAL,
+ .intf = IBV_EXP_INTF_CQ,
+ .obj = tmpl.cq,
+ };
+ tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
+ if (tmpl.if_cq == NULL) {
+ ERROR("%p: CQ interface family query failed with status %d",
+ (void *)dev, status);
+ goto error;
+ }
+ attr.params = (struct ibv_exp_query_intf_params){
+ .intf_scope = IBV_EXP_INTF_GLOBAL,
+ .intf = IBV_EXP_INTF_QP_BURST,
+ .obj = tmpl.qp,
+ };
+ tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
+ if (tmpl.if_qp == NULL) {
+ ERROR("%p: QP interface family query failed with status %d",
+ (void *)dev, status);
+ goto error;
+ }
/* Clean up rxq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
rxq_cleanup(rxq);
{
struct priv *priv = dev->data->dev_private;
struct rxq *rxq = (*priv->rxqs)[idx];
+ int inactive = 0;
int ret;
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
priv_lock(priv);
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
return -ENOMEM;
}
}
- ret = rxq_setup(dev, rxq, desc, socket, conf, mp);
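+	/* Queues whose index is beyond the power-of-two RSS child count
+	 * cannot be reached by RSS and are set up as inactive. */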
+ if (idx >= rte_align32pow2(priv->rxqs_n + 1) >> 1)
+ inactive = 1;
+ ret = rxq_setup(dev, rxq, desc, socket, inactive, conf, mp);
if (ret)
rte_free(rxq);
else {
struct priv *priv;
unsigned int i;
+ if (mlx4_is_secondary())
+ return;
if (rxq == NULL)
return;
priv = rxq->priv;
priv_unlock(priv);
}
+static void
+priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+
/**
* DPDK callback to start the device.
*
unsigned int r;
struct rxq *rxq;
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
priv_lock(priv);
if (priv->started) {
priv_unlock(priv);
}
}
priv->started = 0;
+ priv_unlock(priv);
return -ret;
} while ((--r) && ((rxq = (*priv->rxqs)[++i]), i));
+ priv_dev_interrupt_handler_install(priv, dev);
priv_unlock(priv);
return 0;
}
unsigned int r;
struct rxq *rxq;
+ if (mlx4_is_secondary())
+ return;
priv_lock(priv);
if (!priv->started) {
priv_unlock(priv);
return;
}
+static void
+priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
+
/**
* DPDK callback to close the device.
*
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct priv *priv = mlx4_get_priv(dev);
void *tmp;
unsigned int i;
+ if (priv == NULL)
+ return;
priv_lock(priv);
DEBUG("%p: closing device \"%s\"",
(void *)dev,
claim_zero(ibv_close_device(priv->ctx));
} else
assert(priv->ctx == NULL);
+ priv_dev_interrupt_handler_uninstall(priv, dev);
priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
static void
mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
- struct priv *priv = dev->data->dev_private;
+ struct priv *priv = mlx4_get_priv(dev);
unsigned int max;
+ char ifname[IF_NAMESIZE];
+ if (priv == NULL)
+ return;
priv_lock(priv);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
max = 65535;
info->max_rx_queues = max;
info->max_tx_queues = max;
- info->max_mac_addrs = elemof(priv->mac);
+ /* Last array entry is reserved for broadcast. */
+ info->max_mac_addrs = (elemof(priv->mac) - 1);
+ info->rx_offload_capa =
+ (priv->hw_csum ?
+ (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM) :
+ 0);
+ info->tx_offload_capa =
+ (priv->hw_csum ?
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM) :
+ 0);
+ if (priv_get_ifname(priv, &ifname) == 0)
+ info->if_index = if_nametoindex(ifname);
+ info->speed_capa =
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_56G;
priv_unlock(priv);
}
+static const uint32_t *
+mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to rxq_cq_to_pkt_type() */
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == mlx4_rx_burst ||
+ dev->rx_pkt_burst == mlx4_rx_burst_sp)
+ return ptypes;
+ return NULL;
+}
+
/**
* DPDK callback to get device statistics.
*
static void
mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct priv *priv = dev->data->dev_private;
+ struct priv *priv = mlx4_get_priv(dev);
struct rte_eth_stats tmp = {0};
unsigned int i;
unsigned int idx;
+ if (priv == NULL)
+ return;
priv_lock(priv);
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
static void
mlx4_stats_reset(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct priv *priv = mlx4_get_priv(dev);
unsigned int i;
unsigned int idx;
+ if (priv == NULL)
+ return;
priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); ++i) {
if ((*priv->rxqs)[i] == NULL)
for (i = 0; (i != priv->txqs_n); ++i) {
if ((*priv->txqs)[i] == NULL)
continue;
- idx = (*priv->rxqs)[i]->stats.idx;
+ idx = (*priv->txqs)[i]->stats.idx;
(*priv->txqs)[i]->stats =
(struct mlx4_txq_stats){ .idx = idx };
}
{
struct priv *priv = dev->data->dev_private;
+ if (mlx4_is_secondary())
+ return;
priv_lock(priv);
DEBUG("%p: removing MAC address from index %" PRIu32,
(void *)dev, index);
- if (index >= MLX4_MAX_MAC_ADDRESSES)
- goto end;
- /* Refuse to remove the broadcast address, this one is special. */
- if (!memcmp(priv->mac[index].addr_bytes, "\xff\xff\xff\xff\xff\xff",
- ETHER_ADDR_LEN))
+ /* Last array entry is reserved for broadcast. */
+ if (index >= (elemof(priv->mac) - 1))
goto end;
priv_mac_addr_del(priv, index);
end:
{
struct priv *priv = dev->data->dev_private;
+ if (mlx4_is_secondary())
+ return;
(void)vmdq;
priv_lock(priv);
DEBUG("%p: adding MAC address at index %" PRIu32,
(void *)dev, index);
- if (index >= MLX4_MAX_MAC_ADDRESSES)
- goto end;
- /* Refuse to add the broadcast address, this one is special. */
- if (!memcmp(mac_addr->addr_bytes, "\xff\xff\xff\xff\xff\xff",
- ETHER_ADDR_LEN))
+ /* Last array entry is reserved for broadcast. */
+ if (index >= (elemof(priv->mac) - 1))
goto end;
priv_mac_addr_add(priv, index,
(const uint8_t (*)[ETHER_ADDR_LEN])
priv_unlock(priv);
}
+/**
+ * DPDK callback to set the primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ */
+static void
+mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ DEBUG("%p: setting primary MAC address", (void *)dev);
+ mlx4_mac_addr_remove(dev, 0);
+ mlx4_mac_addr_add(dev, mac_addr, 0, 0);
+}
+
/**
* DPDK callback to enable promiscuous mode.
*
unsigned int i;
int ret;
+ if (mlx4_is_secondary())
+ return;
priv_lock(priv);
if (priv->promisc) {
priv_unlock(priv);
struct priv *priv = dev->data->dev_private;
unsigned int i;
+ if (mlx4_is_secondary())
+ return;
priv_lock(priv);
if (!priv->promisc) {
priv_unlock(priv);
unsigned int i;
int ret;
+ if (mlx4_is_secondary())
+ return;
priv_lock(priv);
if (priv->allmulti) {
priv_unlock(priv);
struct priv *priv = dev->data->dev_private;
unsigned int i;
+ if (mlx4_is_secondary())
+ return;
priv_lock(priv);
if (!priv->allmulti) {
priv_unlock(priv);
static int
mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = dev->data->dev_private;
- struct ibv_port_attr port_attr;
- static const uint8_t width_mult[] = {
- /* Multiplier values taken from devinfo.c in libibverbs. */
- 0, 1, 4, 0, 8, 0, 0, 0, 12, 0
+ struct priv *priv = mlx4_get_priv(dev);
+ struct ethtool_cmd edata = {
+ .cmd = ETHTOOL_GSET
};
+ struct ifreq ifr;
+ struct rte_eth_link dev_link;
+ int link_speed = 0;
+ if (priv == NULL)
+ return -EINVAL;
(void)wait_to_complete;
- errno = ibv_query_port(priv->ctx, priv->port, &port_attr);
- if (errno) {
- WARN("port query failed: %s", strerror(errno));
+ if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
+ WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
return -1;
}
- dev->data->dev_link = (struct rte_eth_link){
- .link_speed = (ibv_rate_to_mbps(mult_to_ibv_rate
- (port_attr.active_speed)) *
- width_mult[(port_attr.active_width %
- sizeof(width_mult))]),
- .link_duplex = ETH_LINK_FULL_DUPLEX,
- .link_status = (port_attr.state == IBV_PORT_ACTIVE)
- };
- if (memcmp(&port_attr, &priv->port_attr, sizeof(port_attr))) {
+ memset(&dev_link, 0, sizeof(dev_link));
+ dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
+ (ifr.ifr_flags & IFF_RUNNING));
+ ifr.ifr_data = (void *)&edata;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ link_speed = ethtool_cmd_speed(&edata);
+ if (link_speed == -1)
+ dev_link.link_speed = 0;
+ else
+ dev_link.link_speed = link_speed;
+ dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
+ ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
+ dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
/* Link status changed. */
- priv->port_attr = port_attr;
+ dev->data->dev_link = dev_link;
return 0;
}
/* Link status is still the same. */
static int
mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = dev->data->dev_private;
+ struct priv *priv = mlx4_get_priv(dev);
int ret;
+ if (priv == NULL)
+ return -EINVAL;
priv_lock(priv);
ret = mlx4_link_update_unlocked(dev, wait_to_complete);
priv_unlock(priv);
uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
mlx4_rx_burst;
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
priv_lock(priv);
/* Set kernel interface MTU first. */
if (priv_set_mtu(priv, mtu)) {
};
int ret;
- ifr.ifr_data = &ethpause;
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
+ ifr.ifr_data = (void *)&ethpause;
priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
ret = errno;
};
int ret;
- ifr.ifr_data = &ethpause;
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
+ ifr.ifr_data = (void *)&ethpause;
ethpause.autoneg = fc_conf->autoneg;
if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
(fc_conf->mode & RTE_FC_RX_PAUSE))
struct priv *priv = dev->data->dev_private;
int ret;
+ if (mlx4_is_secondary())
+ return -E_RTE_SECONDARY;
priv_lock(priv);
ret = vlan_filter_set(dev, vlan_id, on);
priv_unlock(priv);
.stats_reset = mlx4_stats_reset,
.queue_stats_mapping_set = NULL,
.dev_infos_get = mlx4_dev_infos_get,
+ .dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
.vlan_filter_set = mlx4_vlan_filter_set,
.vlan_tpid_set = NULL,
.vlan_strip_queue_set = NULL,
.priority_flow_ctrl_set = NULL,
.mac_addr_remove = mlx4_mac_addr_remove,
.mac_addr_add = mlx4_mac_addr_add,
+ .mac_addr_set = mlx4_mac_addr_set,
.mtu_set = mlx4_dev_set_mtu,
- .fdir_add_signature_filter = NULL,
- .fdir_update_signature_filter = NULL,
- .fdir_remove_signature_filter = NULL,
- .fdir_add_perfect_filter = NULL,
- .fdir_update_perfect_filter = NULL,
- .fdir_remove_perfect_filter = NULL,
- .fdir_set_masks = NULL
};
/**
}
/**
- * Derive MAC address from port GID.
+ * Get MAC address by querying netdevice.
*
+ * @param[in] priv
+ * struct priv for the requested device.
* @param[out] mac
* MAC address output buffer.
- * @param port
- * Physical port number.
- * @param[in] gid
- * Port GID.
+ *
+ * @return
+ * 0 on success, -1 on failure and errno is set.
*/
-static void
-mac_from_gid(uint8_t (*mac)[ETHER_ADDR_LEN], uint32_t port, uint8_t *gid)
+static int
+priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
{
- memcpy(&(*mac)[0], gid + 8, 3);
- memcpy(&(*mac)[3], gid + 13, 3);
- if (port == 1)
- (*mac)[0] ^= 2;
+ struct ifreq request;
+
+ if (priv_ifreq(priv, SIOCGIFHWADDR, &request))
+ return -1;
+ memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ return 0;
}
/* Support up to 32 adapters. */
return atoi(val);
}
+static void
+mlx4_dev_link_status_handler(void *);
+static void
+mlx4_dev_interrupt_handler(struct rte_intr_handle *, void *);
+
+/**
+ * Link status handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ *
+ * @return
+ * Nonzero if the callback process can be called immediately.
+ */
+static int
+priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
+{
+ struct ibv_async_event event;
+ int port_change = 0;
+ int ret = 0;
+
+ /* Read all messages and acknowledge them. */
+ for (;;) {
+ if (ibv_get_async_event(priv->ctx, &event))
+ break;
+
+ if (event.event_type == IBV_EVENT_PORT_ACTIVE ||
+ event.event_type == IBV_EVENT_PORT_ERR)
+ port_change = 1;
+ else
+ DEBUG("event type %d on port %d not handled",
+ event.event_type, event.element.port_num);
+ ibv_ack_async_event(&event);
+ }
+
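+	/*
+	 * Handle the change either when a port event was just read
+	 * (interrupt path, pending_alarm == 0) or when invoked from the
+	 * delayed alarm with no new event (pending_alarm == 1); the XOR
+	 * skips the case where an event arrives while an alarm is
+	 * already pending, since the alarm callback will handle it.
+	 */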
+ if (port_change ^ priv->pending_alarm) {
+ struct rte_eth_link *link = &dev->data->dev_link;
+
+ priv->pending_alarm = 0;
+ mlx4_link_update_unlocked(dev, 0);
+ if (((link->link_speed == 0) && link->link_status) ||
+ ((link->link_speed != 0) && !link->link_status)) {
+ /* Inconsistent status, check again later. */
+ priv->pending_alarm = 1;
+ rte_eal_alarm_set(MLX4_ALARM_TIMEOUT_US,
+ mlx4_dev_link_status_handler,
+ dev);
+ } else
+ ret = 1;
+ }
+ return ret;
+}
+
+/**
+ * Handle delayed link status event.
+ *
+ * @param arg
+ * Registered argument.
+ */
+static void
+mlx4_dev_link_status_handler(void *arg)
+{
+ struct rte_eth_dev *dev = arg;
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ priv_lock(priv);
+ assert(priv->pending_alarm == 1);
+ ret = priv_dev_link_status_handler(priv, dev);
+ priv_unlock(priv);
+ if (ret)
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+}
+
+/**
+ * Handle interrupts from the NIC.
+ *
+ * @param[in] intr_handle
+ * Interrupt handler.
+ * @param cb_arg
+ * Callback argument.
+ */
+static void
+mlx4_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ (void)intr_handle;
+ priv_lock(priv);
+ ret = priv_dev_link_status_handler(priv, dev);
+ priv_unlock(priv);
+ if (ret)
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+}
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ */
+static void
+priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
+{
+ if (!dev->data->dev_conf.intr_conf.lsc)
+ return;
+ rte_intr_callback_unregister(&priv->intr_handle,
+ mlx4_dev_interrupt_handler,
+ dev);
+ if (priv->pending_alarm)
+ rte_eal_alarm_cancel(mlx4_dev_link_status_handler, dev);
+ priv->pending_alarm = 0;
+ priv->intr_handle.fd = 0;
+ priv->intr_handle.type = 0;
+}
+
+/**
+ * Install interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ */
+static void
+priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
+{
+ int rc, flags;
+
+ if (!dev->data->dev_conf.intr_conf.lsc)
+ return;
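+	/*
+	 * Make the verbs async event queue non-blocking so that
+	 * ibv_get_async_event() can be drained in a loop by the
+	 * interrupt handler without stalling the interrupt thread.
+	 */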
+ assert(priv->ctx->async_fd > 0);
+ flags = fcntl(priv->ctx->async_fd, F_GETFL);
+ rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ if (rc < 0) {
+ INFO("failed to change file descriptor async event queue");
+ dev->data->dev_conf.intr_conf.lsc = 0;
+ } else {
+ priv->intr_handle.fd = priv->ctx->async_fd;
+ priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
+ rte_intr_callback_register(&priv->intr_handle,
+ mlx4_dev_interrupt_handler,
+ dev);
+ }
+}
+
static struct eth_driver mlx4_driver;
/**
struct ibv_port_attr port_attr;
struct ibv_pd *pd = NULL;
struct priv *priv = NULL;
- struct rte_eth_dev *eth_dev;
-#if defined(INLINE_RECV) || defined(RSS_SUPPORT)
+ struct rte_eth_dev *eth_dev = NULL;
+#ifdef HAVE_EXP_QUERY_DEVICE
struct ibv_exp_device_attr exp_device_attr;
-#endif
+#endif /* HAVE_EXP_QUERY_DEVICE */
struct ether_addr mac;
- union ibv_gid temp_gid;
+#ifdef HAVE_EXP_QUERY_DEVICE
+ exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS;
#ifdef RSS_SUPPORT
- exp_device_attr.comp_mask =
- (IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
- IBV_EXP_DEVICE_ATTR_RSS_TBL_SZ);
+ exp_device_attr.comp_mask |= IBV_EXP_DEVICE_ATTR_RSS_TBL_SZ;
#endif /* RSS_SUPPORT */
+#endif /* HAVE_EXP_QUERY_DEVICE */
DEBUG("using port %u (%08" PRIx32 ")", port, test);
ERROR("port query failed: %s", strerror(err));
goto port_error;
}
+
+ if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
+ ERROR("port %d is not configured in Ethernet mode",
+ port);
+ goto port_error;
+ }
+
if (port_attr.state != IBV_PORT_ACTIVE)
- WARN("bad state for port %d: \"%s\" (%d)",
- port, ibv_port_state_str(port_attr.state),
- port_attr.state);
+ DEBUG("port %d is not active: \"%s\" (%d)",
+ port, ibv_port_state_str(port_attr.state),
+ port_attr.state);
/* Allocate protection domain. */
pd = ibv_alloc_pd(ctx);
priv->ctx = ctx;
priv->device_attr = device_attr;
- priv->port_attr = port_attr;
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
-#ifdef RSS_SUPPORT
+#ifdef HAVE_EXP_QUERY_DEVICE
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
- INFO("experimental ibv_exp_query_device");
+ ERROR("ibv_exp_query_device() failed");
goto port_error;
}
+#ifdef RSS_SUPPORT
if ((exp_device_attr.exp_device_cap_flags &
IBV_EXP_DEVICE_QPG) &&
(exp_device_attr.exp_device_cap_flags &
exp_device_attr.max_rss_tbl_sz);
#endif /* RSS_SUPPORT */
+ priv->hw_csum =
+ ((exp_device_attr.exp_device_cap_flags &
+ IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&
+ (exp_device_attr.exp_device_cap_flags &
+ IBV_EXP_DEVICE_RX_CSUM_IP_PKT));
+ DEBUG("checksum offloading is %ssupported",
+ (priv->hw_csum ? "" : "not "));
+
+ priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
+ IBV_EXP_DEVICE_VXLAN_SUPPORT);
+ DEBUG("L2 tunnel checksum offloads are %ssupported",
+ (priv->hw_csum_l2tun ? "" : "not "));
+
#ifdef INLINE_RECV
priv->inl_recv_size = mlx4_getenv_int("MLX4_INLINE_RECV_SIZE");
priv->inl_recv_size);
}
#endif /* INLINE_RECV */
+#endif /* HAVE_EXP_QUERY_DEVICE */
(void)mlx4_getenv_int;
priv->vf = vf;
- if (ibv_query_gid(ctx, port, 0, &temp_gid)) {
- ERROR("ibv_query_gid() failure");
+ /* Configure the first MAC address by default. */
+ if (priv_get_mac(priv, &mac.addr_bytes)) {
+ ERROR("cannot get MAC address, is mlx4_en loaded?"
+ " (errno: %s)", strerror(errno));
goto port_error;
}
- /* Configure the first MAC address by default. */
- mac_from_gid(&mac.addr_bytes, port, temp_gid.raw);
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
priv->port,
mac.addr_bytes[0], mac.addr_bytes[1],
claim_zero(priv_mac_addr_add(priv, 0,
(const uint8_t (*)[ETHER_ADDR_LEN])
mac.addr_bytes));
- claim_zero(priv_mac_addr_add(priv, 1,
+ claim_zero(priv_mac_addr_add(priv, (elemof(priv->mac) - 1),
&(const uint8_t [ETHER_ADDR_LEN])
{ "\xff\xff\xff\xff\xff\xff" }));
#ifndef NDEBUG
goto port_error;
}
- eth_dev->data->dev_private = priv;
+ /* Secondary processes have to use local storage for their
+ * private data as well as a copy of eth_dev->data, but this
+ * pointer must not be modified before burst functions are
+ * actually called. */
+ if (mlx4_is_secondary()) {
+ struct mlx4_secondary_data *sd =
+ &mlx4_secondary_data[eth_dev->data->port_id];
+
+ sd->primary_priv = eth_dev->data->dev_private;
+ if (sd->primary_priv == NULL) {
+ ERROR("no private data for port %u",
+ eth_dev->data->port_id);
+ err = EINVAL;
+ goto port_error;
+ }
+ sd->shared_dev_data = eth_dev->data;
+ rte_spinlock_init(&sd->lock);
+ memcpy(sd->data.name, sd->shared_dev_data->name,
+ sizeof(sd->data.name));
+ sd->data.dev_private = priv;
+ sd->data.rx_mbuf_alloc_failed = 0;
+ sd->data.mtu = ETHER_MTU;
+ sd->data.port_id = sd->shared_dev_data->port_id;
+ sd->data.mac_addrs = priv->mac;
+ eth_dev->tx_pkt_burst = mlx4_tx_burst_secondary_setup;
+ eth_dev->rx_pkt_burst = mlx4_rx_burst_secondary_setup;
+ } else {
+ eth_dev->data->dev_private = priv;
+ eth_dev->data->rx_mbuf_alloc_failed = 0;
+ eth_dev->data->mtu = ETHER_MTU;
+ eth_dev->data->mac_addrs = priv->mac;
+ }
eth_dev->pci_dev = pci_dev;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
eth_dev->driver = &mlx4_driver;
- eth_dev->data->rx_mbuf_alloc_failed = 0;
- eth_dev->data->mtu = ETHER_MTU;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx4_dev_ops;
- eth_dev->data->mac_addrs = priv->mac;
+ TAILQ_INIT(&eth_dev->link_intr_cbs);
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
claim_zero(ibv_dealloc_pd(pd));
if (ctx)
claim_zero(ibv_close_device(ctx));
+ if (eth_dev)
+ rte_eth_dev_release_port(eth_dev);
break;
}
.name = MLX4_DRIVER_NAME,
.id_table = mlx4_pci_id_map,
.devinit = mlx4_pci_devinit,
+ .drv_flags = RTE_PCI_DRV_INTR_LSC,
},
.dev_private_size = sizeof(struct priv)
};
{
(void)name;
(void)args;
+
+ RTE_BUILD_BUG_ON(sizeof(wr_id_t) != sizeof(uint64_t));
+ /*
+ * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
+ * huge pages. Calling ibv_fork_init() during init allows
+ * applications to use fork() safely for purposes other than
+ * using this PMD, which is not supported in forked processes.
+ */
+ setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
+ ibv_fork_init();
rte_eal_pci_register(&mlx4_driver.pci_drv);
return 0;
}