const unsigned int cqe_n = (1 << rxq->cqe_n);
const unsigned int sges_n = (1 << rxq->sges_n);
const unsigned int elts_n = (1 << rxq->elts_n);
- const unsigned int strd_n = (1 << rxq->strd_num_n);
+ const unsigned int strd_n = RTE_BIT32(rxq->log_strd_num);
const unsigned int cqe_cnt = cqe_n - 1;
unsigned int cq_ci, used;
mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
struct rte_eth_rxq_info *qinfo)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, rx_queue_id);
+ struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, rx_queue_id);
if (!rxq)
return;
qinfo->conf.rx_thresh.wthresh = 0;
qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
qinfo->conf.rx_drop_en = 1;
- qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
+ if (rxq_ctrl == NULL || rxq_ctrl->obj == NULL)
+ qinfo->conf.rx_deferred_start = 0;
+ else
+ qinfo->conf.rx_deferred_start = 1;
qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
- (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
- (1 << rxq->elts_n);
+ RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
+ RTE_BIT32(rxq->elts_n);
}
/**
* Pointer to the device structure.
*
* @param rx_queue_id
- * Rx queue identificatior.
+ * Rx queue identification.
*
* @param mode
 * Pointer to the burst mode information.
struct rte_eth_burst_mode *mode)
{
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
- rxq = (*priv->rxqs)[rx_queue_id];
if (!rxq) {
rte_errno = EINVAL;
return -rte_errno;
dev = &rte_eth_devices[rxq->port_id];
if (dev->rx_pkt_burst == NULL ||
- dev->rx_pkt_burst == removed_rx_burst) {
+ dev->rx_pkt_burst == rte_eth_pkt_burst_dummy) {
rte_errno = ENOTSUP;
return -rte_errno;
}
volatile struct mlx5_wqe_data_seg *scat;
uintptr_t addr;
uint32_t byte_count;
+ uint32_t lkey;
if (mlx5_rxq_mprq_enabled(rxq)) {
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
scat = &((volatile struct mlx5_wqe_mprq *)
rxq->wqes)[i].dseg;
- addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
- 1 << rxq->strd_num_n);
- byte_count = (1 << rxq->strd_sz_n) *
- (1 << rxq->strd_num_n);
+ addr = (uintptr_t)mlx5_mprq_buf_addr
+ (buf, RTE_BIT32(rxq->log_strd_num));
+ byte_count = RTE_BIT32(rxq->log_strd_sz) *
+ RTE_BIT32(rxq->log_strd_num);
+ lkey = mlx5_rx_addr2mr(rxq, addr);
} else {
struct rte_mbuf *buf = (*rxq->elts)[i];
rxq->wqes)[i];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
byte_count = DATA_LEN(buf);
+ lkey = mlx5_rx_mb2mr(rxq, buf);
}
/* scat->addr must be able to store a pointer. */
MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
.addr = rte_cpu_to_be_64(addr),
.byte_count = rte_cpu_to_be_32(byte_count),
- .lkey = mlx5_rx_addr2mr(rxq, addr),
+ .lkey = lkey,
};
}
rxq->consumed_strd = 0;
.ai = 0,
};
rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
- (wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
+ (wqe_n >> rxq->sges_n) * RTE_BIT32(rxq->log_strd_num) : 0;
/* Update doorbell counter. */
rxq->rq_ci = wqe_n >> rxq->sges_n;
rte_io_wmb();
const uint16_t cqe_n = 1 << rxq->cqe_n;
const uint16_t cqe_mask = cqe_n - 1;
const uint16_t wqe_n = 1 << rxq->elts_n;
- const uint16_t strd_n = 1 << rxq->strd_num_n;
+ const uint16_t strd_n = RTE_BIT32(rxq->log_strd_num);
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
union {
{
/* Update packet information. */
pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
+ pkt->port = unlikely(rxq->shared) ? cqe->user_index_low : rxq->port_id;
if (rxq->rss_hash) {
uint32_t rss_hash_res = 0;
mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct mlx5_rxq_data *rxq = dpdk_rxq;
- const uint32_t strd_n = 1 << rxq->strd_num_n;
- const uint32_t strd_sz = 1 << rxq->strd_sz_n;
+ const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
+ const uint32_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
return i;
}
-/**
- * Dummy DPDK callback for RX.
- *
- * This function is used to temporarily replace the real callback during
- * unsafe control operations on the queue, or in case of error.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-uint16_t
-removed_rx_burst(void *dpdk_rxq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused)
-{
- rte_mb();
- return 0;
-}
-
/*
* Vectorized Rx routines are not compiled in when required vector instructions
* are not supported on a target architecture.
{
return -ENOTSUP;
}
-