#include <mlx5_glue.h>
#include <mlx5_malloc.h>
+#include <mlx5_common_mr.h>
#include "mlx5_defs.h"
#include "mlx5.h"
mbuf_init->nb_segs = 1;
mbuf_init->port = rxq->port_id;
if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
- mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
+ mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
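+ /*
+ * RTE_MBUF_F_EXTERNAL is the DPDK 21.11 name for EXT_ATTACHED_MBUF.
+ * Mbufs from a pool with pinned external buffers never detach the
+ * buffer, so the flag can sit permanently in the rearm template.
+ */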
/*
* prevent compiler reordering:
* rearm_data covers previous fields.
*/
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
- uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_RSS_HASH);
+ uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH);
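+ /*
+ * DEV_RX_OFFLOAD_JUMBO_FRAME was removed in DPDK 21.11: jumbo
+ * frames are now governed by the configured MTU rather than a
+ * separate Rx offload flag, so it is dropped from this mask.
+ */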
if (!config->mprq.enabled)
offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
if (config->hw_fcs_strip)
- offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
if (config->hw_csum)
- offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
+ offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
if (config->hw_vlan_strip)
- offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (MLX5_LRO_SUPPORTED(dev))
- offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+ offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
return offloads;
}
uint64_t
mlx5_get_rx_port_offloads(void)
{
- uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+ uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
return offloads;
}
dev->data->dev_conf.rxmode.offloads;
/* The offloads should be checked on rte_eth_dev layer. */
- MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+ MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
DRV_LOG(ERR, "port %u queue index %u split "
"offload not configured",
if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
mlx5_rx_intr_vec_disable(dev);
- intr_handle->intr_vec = mlx5_malloc(0,
- n * sizeof(intr_handle->intr_vec[0]),
- 0, SOCKET_ID_ANY);
- if (intr_handle->intr_vec == NULL) {
+ if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
DRV_LOG(ERR,
"port %u failed to allocate memory for interrupt"
" vector, Rx interrupts will not be supported",
rte_errno = ENOMEM;
return -rte_errno;
}
- intr_handle->type = RTE_INTR_HANDLE_EXT;
+
+ if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
+ return -rte_errno;
+
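+ /*
+ * struct rte_intr_handle is opaque since DPDK 21.11, so type,
+ * intr_vec[], efds[] and nb_efd are reached through the
+ * rte_intr_* get/set helpers instead of direct member access.
+ */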
for (i = 0; i != n; ++i) {
/* This rxq obj must not be released in this function. */
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
if (!rxq_obj || (!rxq_obj->ibv_channel &&
!rxq_obj->devx_channel)) {
/* Use invalid intr_vec[] index to disable entry. */
- intr_handle->intr_vec[i] =
- RTE_INTR_VEC_RXTX_OFFSET +
- RTE_MAX_RXTX_INTR_VEC_ID;
+ if (rte_intr_vec_list_index_set(intr_handle, i,
+ RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
+ return -rte_errno;
/* Decrease the rxq_ctrl's refcnt */
if (rxq_ctrl)
mlx5_rxq_release(dev, i);
mlx5_rx_intr_vec_disable(dev);
return -rte_errno;
}
- intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
- intr_handle->efds[count] = rxq_obj->fd;
+
+ if (rte_intr_vec_list_index_set(intr_handle, i,
+ RTE_INTR_VEC_RXTX_OFFSET + count))
+ return -rte_errno;
+ if (rte_intr_efds_index_set(intr_handle, count,
+ rxq_obj->fd))
+ return -rte_errno;
count++;
}
if (!count)
mlx5_rx_intr_vec_disable(dev);
- else
- intr_handle->nb_efd = count;
+ else if (rte_intr_nb_efd_set(intr_handle, count))
+ return -rte_errno;
return 0;
}
if (!dev->data->dev_conf.intr_conf.rxq)
return;
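+ /*
+ * A negative return from rte_intr_vec_list_index_get() means the
+ * vector list was never allocated, replacing the old NULL test on
+ * intr_handle->intr_vec.
+ */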
- if (!intr_handle->intr_vec)
+ if (rte_intr_vec_list_index_get(intr_handle, 0) < 0)
goto free;
for (i = 0; i != n; ++i) {
- if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
- RTE_MAX_RXTX_INTR_VEC_ID)
+ if (rte_intr_vec_list_index_get(intr_handle, i) ==
+ RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID)
continue;
/**
* Need to access directly the queue to release the reference
* kept in mlx5_rx_intr_vec_enable().
*/
}
free:
rte_intr_free_epoll_fd(intr_handle);
- if (intr_handle->intr_vec)
- mlx5_free(intr_handle->intr_vec);
- intr_handle->nb_efd = 0;
- intr_handle->intr_vec = NULL;
+
+ rte_intr_vec_list_free(intr_handle);
+
+ rte_intr_nb_efd_set(intr_handle, 0);
}
/**
rte_errno = ENOMEM;
return -rte_errno;
}
- ret = mlx5_mr_mempool_register(&priv->sh->share_cache, priv->sh->pd,
- mp, &priv->mp_id);
+ ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
+ priv->sh->cdev->pd, mp, &priv->mp_id);
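+ /*
+ * The MR cache and protection domain now live on the shared common
+ * device (sh->cdev). EEXIST only means another queue has already
+ * registered this mempool there, which is not an error.
+ */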
if (ret < 0 && rte_errno != EEXIST) {
ret = rte_errno;
DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
- unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
+ unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
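+ /*
+ * rxmode.max_rx_pkt_len is gone in DPDK 21.11; the maximum Rx
+ * packet length is derived from dev->data->mtu, or from
+ * max_lro_pkt_size when LRO is enabled on the queue.
+ */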
unsigned int max_rx_pktlen = lro_on_queue ?
dev->data->dev_conf.rxmode.max_lro_pkt_size :
dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
MLX5_ASSERT(tmpl->rxq.rxseg_n &&
tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
- if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
" configured and no enough mbuf space(%u) to contain "
"the maximum RX packet length(%u) with head-room(%u)",
goto error;
}
tmpl->type = MLX5_RXQ_TYPE_STANDARD;
- if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
- MLX5_MR_BTREE_CACHE_N, socket)) {
+ if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
+ &priv->sh->cdev->mr_scache.dev_gen, socket)) {
/* rte_errno is already set. */
goto error;
}
- /* Rx queues don't use this pointer, but we want a valid structure. */
- tmpl->rxq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
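+ /*
+ * mlx5_mr_ctrl_init() sets up the per-queue MR btree cache and
+ * records the global cache generation pointer in a single call,
+ * replacing the mlx5_mr_btree_init() plus manual dev_gen_ptr pair.
+ */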
tmpl->socket = socket;
if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
config->mprq.stride_size_n : mprq_stride_size;
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
tmpl->rxq.strd_scatter_en =
- !!(offloads & DEV_RX_OFFLOAD_SCATTER);
+ !!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
config->mprq.max_memcpy_len);
max_lro_size = RTE_MIN(max_rx_pktlen,
MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
tmpl->rxq.sges_n = 0;
max_lro_size = max_rx_pktlen;
- } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
+ } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
unsigned int sges_n;
if (lro_on_queue && first_mb_free_size <
}
mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
/* Toggle RX checksum offload if hardware supports it. */
- tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
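+ /* RTE_ETH_RX_OFFLOAD_CHECKSUM combines the IPv4, UDP and TCP bits. */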
/* Configure Rx timestamp. */
- tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
tmpl->rxq.timestamp_rx_flag = 0;
if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
&tmpl->rxq.timestamp_offset,
goto error;
}
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
tmpl->rxq.crc_present = 0;
tmpl->rxq.lro = lro_on_queue;
- if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
if (config->hw_fcs_strip) {
/*
* RQs used for LRO-enabled TIRs should not be
* configured to scatter the FCS.
*/
tmpl->rxq.crc_present << 2);
/* Save port ID. */
tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
- (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+ (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
tmpl->priv = priv;
tmpl->rxq.mp = rx_seg[0].mp;