* Rx queues configuration for mlx4 driver.
*/
-#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_errno.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
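/*
 * As of DPDK 21.02 the driver-only ethdev headers are no longer installed
 * with the rte_ prefix, hence rte_ethdev_driver.h -> ethdev_driver.h.
 * <assert.h> is dropped together with the last bare assert(): the driver
 * now checks through MLX4_ASSERT() (see below).
 */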
void
mlx4_rss_put(struct mlx4_rss *rss)
{
- assert(rss->refcnt);
+ MLX4_ASSERT(rss->refcnt);
if (--rss->refcnt)
return;
- assert(!rss->usecnt);
- assert(!rss->qp);
- assert(!rss->ind);
+ MLX4_ASSERT(!rss->usecnt);
+ MLX4_ASSERT(!rss->qp);
+ MLX4_ASSERT(!rss->ind);
LIST_REMOVE(rss, next);
rte_free(rss);
}
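/*
 * MLX4_ASSERT() ties assertions to the driver's own debug switch instead
 * of the global NDEBUG. A minimal sketch of the mlx4_utils.h definition,
 * assuming the upstream layout:
 *
 *   #ifdef RTE_LIBRTE_MLX4_DEBUG
 *   #define MLX4_ASSERT(exp) RTE_VERIFY(exp)   (always checked)
 *   #else
 *   #define MLX4_ASSERT(exp) RTE_ASSERT(exp)   (needs RTE_ENABLE_ASSERT)
 *   #endif
 */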
int
mlx4_rss_attach(struct mlx4_rss *rss)
{
- assert(rss->refcnt);
+ MLX4_ASSERT(rss->refcnt);
if (rss->usecnt++) {
- assert(rss->qp);
- assert(rss->ind);
+ MLX4_ASSERT(rss->qp);
+ MLX4_ASSERT(rss->ind);
return 0;
}
struct ibv_wq *ind_tbl[rss->queues];
struct mlx4_priv *priv = rss->priv;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
const char *msg;
unsigned int i = 0;
int ret;
uint16_t id = rss->queue_id[i];
struct rxq *rxq = NULL;
- if (id < priv->dev->data->nb_rx_queues)
- rxq = priv->dev->data->rx_queues[id];
+ if (id < dev->data->nb_rx_queues)
+ rxq = dev->data->rx_queues[id];
if (!rxq) {
ret = EINVAL;
msg = "RSS target queue is not configured";
rss->ind = NULL;
}
while (i--)
- mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
ERROR("mlx4: %s", msg);
--rss->usecnt;
rte_errno = ret;
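/*
 * priv->dev is gone throughout this patch: caching an rte_eth_dev pointer
 * in shared private data breaks secondary processes, which map
 * rte_eth_devices[] at their own addresses. ETH_DEV() re-derives the
 * device per process; assumed definition from mlx4.h:
 *
 *   #define ETH_DEV(priv) (&rte_eth_devices[(priv)->port_id])
 */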
mlx4_rss_detach(struct mlx4_rss *rss)
{
struct mlx4_priv *priv = rss->priv;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
unsigned int i;
- assert(rss->refcnt);
- assert(rss->qp);
- assert(rss->ind);
+ MLX4_ASSERT(rss->refcnt);
+ MLX4_ASSERT(rss->qp);
+ MLX4_ASSERT(rss->ind);
if (--rss->usecnt)
return;
claim_zero(mlx4_glue->destroy_qp(rss->qp));
claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
rss->ind = NULL;
for (i = 0; i != rss->queues; ++i)
- mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
}
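/*
 * claim_zero() wraps teardown calls whose only acceptable result is 0;
 * assumed definition from mlx4_utils.h after the assert conversion:
 *
 *   #define claim_zero(...) MLX4_ASSERT((__VA_ARGS__) == 0)
 *
 * The Verbs call itself is always made; only the check can compile out.
 */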
/**
int
mlx4_rss_init(struct mlx4_priv *priv)
{
- struct rte_eth_dev *dev = priv->dev;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
uint32_t wq_num_prev = 0;
const char *msg;
if (priv->rss_init)
return 0;
- if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
+ if (ETH_DEV(priv)->data->nb_rx_queues > priv->hw_rss_max_qps) {
ERROR("RSS does not support more than %d queues",
priv->hw_rss_max_qps);
rte_errno = EINVAL;
rte_errno = ret;
return -ret;
}
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
struct ibv_cq *cq;
struct ibv_wq *wq;
uint32_t wq_num;
/* Attach the configured Rx queues. */
if (rxq) {
- assert(!rxq->usecnt);
+ MLX4_ASSERT(!rxq->usecnt);
ret = mlx4_rxq_attach(rxq);
if (!ret) {
wq_num = rxq->wq->wq_num;
ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
i, msg, strerror(ret));
while (i--) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq)
mlx4_rxq_detach(rxq);
if (!priv->rss_init)
return;
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq) {
- assert(rxq->usecnt == 1);
+ MLX4_ASSERT(rxq->usecnt == 1);
mlx4_rxq_detach(rxq);
}
}
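/*
 * Queue attachment is reference counted: mlx4_rss_init() takes exactly
 * one reference on every configured Rx queue to build the common RSS
 * resources (note the MLX4_ASSERT(!rxq->usecnt) before attaching), which
 * is why mlx4_rss_deinit() can assert usecnt == 1 before dropping it.
 */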
mlx4_rxq_attach(struct rxq *rxq)
{
if (rxq->usecnt++) {
- assert(rxq->cq);
- assert(rxq->wq);
- assert(rxq->wqes);
- assert(rxq->rq_db);
+ MLX4_ASSERT(rxq->cq);
+ MLX4_ASSERT(rxq->wq);
+ MLX4_ASSERT(rxq->wqes);
+ MLX4_ASSERT(rxq->rq_db);
return 0;
}
struct mlx4_priv *priv = rxq->priv;
- struct rte_eth_dev *dev = priv->dev;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
const uint32_t elts_n = 1 << rxq->elts_n;
const uint32_t sges_n = 1 << rxq->sges_n;
struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
unsigned int i;
int ret;
- assert(rte_is_power_of_2(elts_n));
+ MLX4_ASSERT(rte_is_power_of_2(elts_n));
+ priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_RX_QUEUE;
+ priv->verbs_alloc_ctx.obj = rxq;
cq = mlx4_glue->create_cq(priv->ctx, elts_n / sges_n, NULL,
rxq->channel, 0);
if (!cq) {
}
/* Pre-register Rx mempool. */
DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
- priv->dev->data->port_id, rxq->stats.idx,
+ ETH_DEV(priv)->data->port_id, rxq->stats.idx,
rxq->mp->name, rxq->mp->nb_mem_chunks);
mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
wqes = (volatile struct mlx4_wqe_data_seg (*)[])
goto error;
}
/* Headroom is reserved by rte_pktmbuf_alloc(). */
- assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
+ MLX4_ASSERT(buf->data_off == RTE_PKTMBUF_HEADROOM);
/* Buffer is supposed to be empty. */
- assert(rte_pktmbuf_data_len(buf) == 0);
- assert(rte_pktmbuf_pkt_len(buf) == 0);
+ MLX4_ASSERT(rte_pktmbuf_data_len(buf) == 0);
+ MLX4_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
/* Only the first segment keeps headroom. */
if (i % sges_n)
buf->data_off = 0;
rxq->rq_ci = elts_n / sges_n;
rte_wmb();
*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
return 0;
error:
if (wq)
rte_errno = ret;
ERROR("error while attaching Rx queue %p: %s: %s",
(void *)rxq, msg, strerror(ret));
+ priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
return -ret;
}
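/*
 * The verbs_alloc_ctx updates bracket Verbs object creation so that the
 * external buffer allocator registered on the Verbs context can tell
 * which DPDK object (and therefore which NUMA socket) an allocation
 * belongs to. A sketch of the context, assuming the mlx4.h layout:
 *
 *   enum mlx4_verbs_alloc_type {
 *       MLX4_VERBS_ALLOC_TYPE_NONE,
 *       MLX4_VERBS_ALLOC_TYPE_TX_QUEUE,
 *       MLX4_VERBS_ALLOC_TYPE_RX_QUEUE,
 *   };
 *   struct mlx4_verbs_alloc_ctx {
 *       enum mlx4_verbs_alloc_type type;  (object kind being created)
 *       const void *obj;                  (owning object, the rxq here)
 *   };
 *
 * Resetting type to MLX4_VERBS_ALLOC_TYPE_NONE on both the success and
 * error paths keeps later allocations from being attributed to a stale
 * queue.
 */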
uint64_t
mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
{
- uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ uint64_t offloads = RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_KEEP_CRC |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (priv->hw_csum)
- offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return offloads;
}
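/*
 * DPDK 21.11 renamed all ethdev offload flags with an RTE_ETH_ prefix;
 * the old spellings were kept temporarily as compatibility aliases,
 * roughly:
 *
 *   #define DEV_RX_OFFLOAD_SCATTER RTE_ETH_RX_OFFLOAD_SCATTER
 *
 * There is no RTE_ETH_RX_OFFLOAD_JUMBO_FRAME: jumbo support is implied by
 * the configured MTU, so the flag simply drops out of the set here, while
 * RSS hash delivery (RTE_ETH_RX_OFFLOAD_RSS_HASH) is now advertised.
 */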
uint64_t
mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
{
- uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+ uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
(void)priv;
return offloads;
int ret;
uint32_t crc_present;
uint64_t offloads;
+ uint32_t max_rx_pktlen;
offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
}
/* By default, FCS (CRC) is stripped by hardware. */
crc_present = 0;
- if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
if (priv->hw_fcs_strip) {
crc_present = 1;
} else {
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
.csum = priv->hw_csum &&
- (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
.csum_l2tun = priv->hw_csum_l2tun &&
- (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM),
.crc_present = crc_present,
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
},
.socket = socket,
};
+ dev->data->rx_queues[idx] = rxq;
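/*
 * The queue is published in dev->data->rx_queues[] up front and no longer
 * re-assigned on success or nulled on failure: the reworked
 * mlx4_rx_queue_release(dev, idx) below finds the queue by index and
 * clears the slot itself, so the error path can simply call it.
 */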
/* Enable scattered packets support for this queue if necessary. */
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
+ max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
;
- } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
- uint32_t size =
- RTE_PKTMBUF_HEADROOM +
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
+ uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
uint32_t sges_n;
/*
/* Make sure sges_n did not overflow. */
size = mb_len * (1 << rxq->sges_n);
size -= RTE_PKTMBUF_HEADROOM;
- if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ if (size < max_rx_pktlen) {
rte_errno = EOVERFLOW;
ERROR("%p: too many SGEs (%u) needed to handle"
" requested maximum packet size %u",
(void *)dev,
- 1 << sges_n,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ 1 << sges_n, max_rx_pktlen);
goto error;
}
} else {
WARN("%p: the requested maximum Rx packet size (%u) is"
" larger than a single mbuf (%u) and scattered"
" mode has not been requested",
- (void *)dev,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ (void *)dev, max_rx_pktlen,
mb_len - RTE_PKTMBUF_HEADROOM);
}
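/*
 * rxmode.max_rx_pkt_len is gone from the ethdev API; the frame limit is
 * now derived from the MTU. Worked example, assuming a standard
 * 1500-byte MTU:
 *
 *   max_rx_pktlen = 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4)
 *                 = 1518 bytes
 *
 * which fits the default 2048-byte mbuf data room minus 128 bytes of
 * headroom, so the scattered branch above only triggers for larger MTUs.
 */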
DEBUG("%p: maximum number of segments per packet: %u",
}
}
DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
- dev->data->rx_queues[idx] = rxq;
return 0;
error:
- dev->data->rx_queues[idx] = NULL;
ret = rte_errno;
- mlx4_rx_queue_release(rxq);
+ mlx4_rx_queue_release(dev, idx);
rte_errno = ret;
- assert(rte_errno > 0);
+ MLX4_ASSERT(rte_errno > 0);
return -rte_errno;
}
/**
* DPDK callback to release a Rx queue.
*
- * @param dpdk_rxq
- * Generic Rx queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Receive queue index.
*/
void
-mlx4_rx_queue_release(void *dpdk_rxq)
+mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx)
{
- struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct mlx4_priv *priv;
- unsigned int i;
+ struct rxq *rxq = dev->data->rx_queues[idx];
if (rxq == NULL)
return;
- priv = rxq->priv;
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
- if (priv->dev->data->rx_queues[i] == rxq) {
- DEBUG("%p: removing Rx queue %p from list",
- (void *)priv->dev, (void *)rxq);
- priv->dev->data->rx_queues[i] = NULL;
- break;
- }
- assert(!rxq->cq);
- assert(!rxq->wq);
- assert(!rxq->wqes);
- assert(!rxq->rq_db);
+ dev->data->rx_queues[idx] = NULL;
+ DEBUG("%p: removing Rx queue %hu from list", (void *)dev, idx);
+ MLX4_ASSERT(!rxq->cq);
+ MLX4_ASSERT(!rxq->wq);
+ MLX4_ASSERT(!rxq->wqes);
+ MLX4_ASSERT(!rxq->rq_db);
if (rxq->channel)
claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh);
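/*
 * The release callback signature changed in DPDK 21.11 from a bare queue
 * pointer to (device, index); a sketch of the ethdev_driver.h typedef:
 *
 *   typedef void (*eth_queue_release_t)(struct rte_eth_dev *dev,
 *                                       uint16_t queue_id);
 *
 * With the index supplied by ethdev, the old linear search over
 * rx_queues[] to locate and clear the slot is no longer needed.
 */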