#include <mlx5_prm.h>
#include <mlx5_common.h>
+#include <mlx5_common_mr.h>
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
-#include "mlx5_mr.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
const unsigned int cqe_n = (1 << rxq->cqe_n);
const unsigned int sges_n = (1 << rxq->sges_n);
const unsigned int elts_n = (1 << rxq->elts_n);
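+ /* RTE_BIT32(n) is UINT32_C(1) << (n); strd_n is the per-element MPRQ
+  * stride count, kept as its log2 in log_strd_num.
+  */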
- const unsigned int strd_n = (1 << rxq->strd_num_n);
+ const unsigned int strd_n = RTE_BIT32(rxq->log_strd_num);
const unsigned int cqe_cnt = cqe_n - 1;
unsigned int cq_ci, used;
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
struct mlx5_rxq_data *rxq = rx_queue;
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
- if (dev->rx_pkt_burst == NULL ||
- dev->rx_pkt_burst == removed_rx_burst) {
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
if (offset >= (1 << rxq->cqe_n)) {
rte_errno = EINVAL;
return -rte_errno;
mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
struct rte_eth_rxq_info *qinfo)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, rx_queue_id);
+ struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, rx_queue_id);
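+ /* Both lookup helpers return NULL for an unconfigured queue index;
+  * rxq_ctrl may also be NULL and is re-checked before dereference.
+  */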
if (!rxq)
return;
qinfo->conf.rx_thresh.wthresh = 0;
qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
qinfo->conf.rx_drop_en = 1;
- qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
+ if (rxq_ctrl == NULL || rxq_ctrl->obj == NULL)
+ qinfo->conf.rx_deferred_start = 0;
+ else
+ qinfo->conf.rx_deferred_start = 1;
qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
qinfo->scattered_rx = dev->data->scattered_rx;
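+ /* In MPRQ mode each Rx element carries 2^log_strd_num strides, so the
+  * descriptor count reported to the application scales accordingly.
+  */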
qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
- (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
- (1 << rxq->elts_n);
+ RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
+ RTE_BIT32(rxq->elts_n);
}
/**
* Pointer to the device structure.
*
* @param rx_queue_id
- * Rx queue identificatior.
+ Rx queue identifier.
*
* @param mode
* Pointer to the burst mode information.
struct rte_eth_burst_mode *mode)
{
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
- rxq = (*priv->rxqs)[rx_queue_id];
if (!rxq) {
rte_errno = EINVAL;
return -rte_errno;
/**
* DPDK callback to get the number of used descriptors in an Rx queue.
*
- * @param dev
- * Pointer to the device structure.
- *
- * @param rx_queue_id
- * The Rx queue.
+ * @param rx_queue
+ * The Rx queue pointer.
*
* @return
* The number of used Rx descriptors.
* -EINVAL if the queue is invalid.
*/
uint32_t
-mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+mlx5_rx_queue_count(void *rx_queue)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq;
+ struct mlx5_rxq_data *rxq = rx_queue;
+ struct rte_eth_dev *dev;
- if (dev->rx_pkt_burst == NULL ||
- dev->rx_pkt_burst == removed_rx_burst) {
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
- rxq = (*priv->rxqs)[rx_queue_id];
if (!rxq) {
rte_errno = EINVAL;
return -rte_errno;
}
+
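+ /* The queue data records its port; use it to reach the owning ethdev
+  * and reject ports whose burst function is the dummy placeholder.
+  */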
+ dev = &rte_eth_devices[rxq->port_id];
+
+ if (dev->rx_pkt_burst == NULL ||
+ dev->rx_pkt_burst == rte_eth_pkt_burst_dummy) {
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+
return rx_queue_count(rxq);
}
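+/*
+ * Minimal usage sketch (illustrative only): with the new callback
+ * signature the ethdev layer hands the queue pointer in directly:
+ *
+ *   void *q = dev->data->rx_queues[queue_id];
+ *   uint32_t used = mlx5_rx_queue_count(q);
+ *
+ * which is roughly what rte_eth_rx_queue_count(port_id, queue_id)
+ * resolves to for this PMD.
+ */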
volatile struct mlx5_wqe_data_seg *scat;
uintptr_t addr;
uint32_t byte_count;
+ uint32_t lkey;
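+ /* Resolve the MR lkey up front in each branch: MPRQ buffers are looked
+  * up by address, regular mbufs via the mbuf-based helper, so the WQE
+  * data segment below can be built with a single initializer.
+  */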
if (mlx5_rxq_mprq_enabled(rxq)) {
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
scat = &((volatile struct mlx5_wqe_mprq *)
rxq->wqes)[i].dseg;
- addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
- 1 << rxq->strd_num_n);
- byte_count = (1 << rxq->strd_sz_n) *
- (1 << rxq->strd_num_n);
+ addr = (uintptr_t)mlx5_mprq_buf_addr(buf, RTE_BIT32(rxq->log_strd_num));
+ byte_count = RTE_BIT32(rxq->log_strd_sz) *
+ RTE_BIT32(rxq->log_strd_num);
+ lkey = mlx5_rx_addr2mr(rxq, addr);
} else {
struct rte_mbuf *buf = (*rxq->elts)[i];
rxq->wqes)[i];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
byte_count = DATA_LEN(buf);
+ lkey = mlx5_rx_mb2mr(rxq, buf);
}
/* scat->addr must be able to store a pointer. */
MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
.addr = rte_cpu_to_be_64(addr),
.byte_count = rte_cpu_to_be_32(byte_count),
- .lkey = mlx5_rx_addr2mr(rxq, addr),
+ .lkey = lkey,
};
}
rxq->consumed_strd = 0;
.ai = 0,
};
rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
- (wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
+ (wqe_n >> rxq->sges_n) * RTE_BIT32(rxq->log_strd_num) : 0;
/* Update doorbell counter. */
rxq->rq_ci = wqe_n >> rxq->sges_n;
rte_io_wmb();
const uint16_t cqe_n = 1 << rxq->cqe_n;
const uint16_t cqe_mask = cqe_n - 1;
const uint16_t wqe_n = 1 << rxq->elts_n;
- const uint16_t strd_n = 1 << rxq->strd_num_n;
+ const uint16_t strd_n = RTE_BIT32(rxq->log_strd_num);
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
union {
sm.is_wq = 1;
sm.queue_id = rxq->idx;
sm.state = IBV_WQS_RESET;
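+ /* RXQ_DEV() yields the ethdev owning this (possibly shared) Rx queue. */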
- if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
+ if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
return -1;
if (rxq_ctrl->dump_file_n <
- rxq_ctrl->priv->config.max_dump_files_num) {
+ RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) {
MKSTR(err_str, "Unexpected CQE error syndrome "
"0x%02x CQN = %u RQN = %u wqe_counter = %u"
" rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
sm.is_wq = 1;
sm.queue_id = rxq->idx;
sm.state = IBV_WQS_RDY;
- if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
- &sm))
+ if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
return -1;
if (vec) {
const uint32_t elts_n =
ol_flags =
TRANSPOSE(flags,
MLX5_CQE_RX_L3_HDR_VALID,
- PKT_RX_IP_CKSUM_GOOD) |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
TRANSPOSE(flags,
MLX5_CQE_RX_L4_HDR_VALID,
- PKT_RX_L4_CKSUM_GOOD);
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return ol_flags;
}
{
/* Update packet information. */
pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
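+ /* With a shared Rx queue, the receiving port comes from the CQE
+  * user index instead of the queue's own port_id.
+  */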
+ pkt->port = unlikely(rxq->shared) ? cqe->user_index_low : rxq->port_id;
if (rxq->rss_hash) {
uint32_t rss_hash_res = 0;
rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
if (rss_hash_res) {
pkt->hash.rss = rss_hash_res;
- pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
}
if (rxq->mark) {
mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
(mcqe->flow_tag_high << 16);
if (MLX5_FLOW_MARK_IS_VALID(mark)) {
- pkt->ol_flags |= PKT_RX_FDIR;
+ pkt->ol_flags |= RTE_MBUF_F_RX_FDIR;
if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
- pkt->ol_flags |= PKT_RX_FDIR_ID;
+ pkt->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
}
}
vlan_strip = mcqe->hdr_type &
RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
if (vlan_strip) {
- pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ pkt->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
}
}
}
pkt = seg;
MLX5_ASSERT(len >= (rxq->crc_present << 2));
- pkt->ol_flags &= EXT_ATTACHED_MBUF;
+ pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
mlx5_lro_update_hdr
(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
mcqe, rxq, len);
- pkt->ol_flags |= PKT_RX_LRO;
+ pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
pkt->tso_segsz = len / cqe->lro_num_seg;
}
}
mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
}
-void
-mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
-{
- struct mlx5_mprq_buf *buf = opaque;
-
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
- rte_mempool_put(buf->mp, buf);
- } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
- __ATOMIC_RELAXED) == 0)) {
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
- rte_mempool_put(buf->mp, buf);
- }
-}
-
void
mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
{
mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct mlx5_rxq_data *rxq = dpdk_rxq;
- const uint32_t strd_n = 1 << rxq->strd_num_n;
- const uint32_t strd_sz = 1 << rxq->strd_sz_n;
+ const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
+ const uint32_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
if (cqe->lro_num_seg > 1) {
mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
cqe, mcqe, rxq, len);
- pkt->ol_flags |= PKT_RX_LRO;
+ pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
pkt->tso_segsz = len / cqe->lro_num_seg;
}
PKT_LEN(pkt) = len;
return i;
}
-/**
- * Dummy DPDK callback for RX.
- *
- * This function is used to temporarily replace the real callback during
- * unsafe control operations on the queue, or in case of error.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-uint16_t
-removed_rx_burst(void *dpdk_rxq __rte_unused,
- struct rte_mbuf **pkts __rte_unused,
- uint16_t pkts_n __rte_unused)
-{
- rte_mb();
- return 0;
-}
-
/*
* Vectorized Rx routines are not compiled in when required vector instructions
* are not supported on a target architecture.
{
return -ENOTSUP;
}
-