Prefix struct rxq, struct rxq_ctrl and their associated functions with mlx5 (struct rxq becomes struct mlx5_rxq_data, struct rxq_ctrl becomes struct mlx5_rxq_ctrl).
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
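Note: the renamed control structure still embeds the data-path structure, so a data-path pointer handed to a PMD callback is mapped back to its control block with container_of(), exactly as in the hunks below. A minimal standalone sketch of that pattern (the real structures carry many more fields such as the CQ, WQ, MR and completion channel; the stand-ins here are illustrative only):

#include <stddef.h>
#include <stdio.h>

/* Reduced stand-ins for the renamed structures. */
struct mlx5_rxq_data {
	unsigned int elts_n; /* Log 2 of the number of Rx descriptors. */
	unsigned int sges_n; /* Log 2 of SGEs per descriptor. */
};

struct mlx5_rxq_ctrl {
	void *priv;               /* Back pointer to private data. */
	unsigned int socket;      /* CPU socket ID for allocations. */
	struct mlx5_rxq_data rxq; /* Data path structure (embedded). */
};

/* Generic container_of(), equivalent to the DPDK/kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct mlx5_rxq_ctrl ctrl = { .socket = 0, .rxq = { .elts_n = 9 } };
	/* The data path only ever sees &ctrl.rxq; the control block is
	 * recovered from it the same way mlx5_rx_queue_release() does. */
	struct mlx5_rxq_data *rxq = &ctrl.rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	printf("control block recovered: %s\n",
	       rxq_ctrl == &ctrl ? "yes" : "no");
	return 0;
}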
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- struct rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl;
if (rxq == NULL)
continue;
- rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
(*priv->rxqs)[i] = NULL;
- rxq_cleanup(rxq_ctrl);
+ mlx5_rxq_cleanup(rxq_ctrl);
rte_free(rxq_ctrl);
}
priv->rxqs_n = 0;
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
- struct rxq *(*rxqs)[]; /* RX queues. */
+ struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
struct txq *(*txqs)[]; /* TX queues. */
/* Indirection tables referencing all RX WQs. */
struct ibv_rwq_ind_table *(*ind_tables)[];
int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
int priv_flow_start(struct priv *);
void priv_flow_stop(struct priv *);
-int priv_flow_rxq_in_use(struct priv *, struct rxq *);
+int priv_flow_rxq_in_use(struct priv *, struct mlx5_rxq_data *);
/* mlx5_socket.c */
uint32_t mark:1; /**< Set if the flow is marked. */
uint32_t drop:1; /**< Drop queue. */
uint64_t hash_fields; /**< Fields that participate in the hash. */
- struct rxq *rxqs[]; /**< Pointer to the queues array. */
+ struct mlx5_rxq_data *rxqs[]; /**< Pointer to the queues array. */
};
/** Static initializer for items. */
return NULL;
}
for (i = 0; i < flow->actions.queues_n; ++i) {
- struct rxq_ctrl *rxq;
+ struct mlx5_rxq_ctrl *rxq;
rxq = container_of((*priv->rxqs)[flow->actions.queues[i]],
- struct rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl, rxq);
wqs[i] = rxq->wq;
rte_flow->rxqs[i] = &rxq->rxq;
++rte_flow->rxqs_n;
claim_zero(ibv_destroy_rwq_ind_table(flow->ind_table));
if (flow->mark) {
struct rte_flow *tmp;
- struct rxq *rxq;
+ struct mlx5_rxq_data *rxq;
uint32_t mark_n = 0;
uint32_t queue_n;
for (tqueue_n = 0;
tqueue_n < tmp->rxqs_n;
++tqueue_n) {
- struct rxq *trxq;
+ struct mlx5_rxq_data *trxq;
trxq = tmp->rxqs[tqueue_n];
if (rxq == trxq)
* Nonzero if the queue is used by a flow.
*/
int
-priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
+priv_flow_rxq_in_use(struct priv *priv, struct mlx5_rxq_data *rxq)
{
struct rte_flow *flow;
priv->reta_idx_n);
}
for (i = 0; (i != priv->reta_idx_n); ++i) {
- struct rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
- struct rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl, rxq);
wqs[i] = rxq_ctrl->wq;
}
/* Get number of hash RX queues to configure. */
* 0 on success, errno value on failure.
*/
static int
-rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n)
+rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl, unsigned int elts_n)
{
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int i;
(*rxq_ctrl->rxq.elts)[i] = buf;
}
if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
- struct rxq *rxq = &rxq_ctrl->rxq;
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
assert(rxq->elts_n == rxq->cqe_n);
* Pointer to RX queue structure.
*/
static void
-rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
+rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- struct rxq *rxq = &rxq_ctrl->rxq;
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
const uint16_t q_n = (1 << rxq->elts_n);
const uint16_t q_mask = q_n - 1;
uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
* Pointer to RX queue structure.
*/
void
-rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
+mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
{
DEBUG("cleaning up %p", (void *)rxq_ctrl);
rxq_free_elts(rxq_ctrl);
* 0 on success, errno value on failure.
*/
static inline int
-rxq_setup(struct rxq_ctrl *tmpl)
+rxq_setup(struct mlx5_rxq_ctrl *tmpl)
{
struct ibv_cq *ibcq = tmpl->cq;
struct mlx5dv_cq cq_info;
* 0 on success, errno value on failure.
*/
static int
-rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
+rxq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
struct priv *priv = dev->data->dev_private;
- struct rxq_ctrl tmpl = {
+ struct mlx5_rxq_ctrl tmpl = {
.priv = priv,
.socket = socket,
.rxq = {
}
/* Clean up rxq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl);
- rxq_cleanup(rxq_ctrl);
+ mlx5_rxq_cleanup(rxq_ctrl);
/* Move mbuf pointers to dedicated storage area in RX queue. */
elts = (void *)(rxq_ctrl + 1);
rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));
return 0;
error:
elts = tmpl.rxq.elts;
- rxq_cleanup(&tmpl);
+ mlx5_rxq_cleanup(&tmpl);
rte_free(elts);
assert(ret > 0);
return ret;
struct rte_mempool *mp)
{
struct priv *priv = dev->data->dev_private;
- struct rxq *rxq = (*priv->rxqs)[idx];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
const uint16_t desc_n =
desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
int ret;
return -EEXIST;
}
(*priv->rxqs)[idx] = NULL;
- rxq_cleanup(rxq_ctrl);
+ mlx5_rxq_cleanup(rxq_ctrl);
/* Resize if rxq size is changed. */
if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
rxq_ctrl = rte_realloc(rxq_ctrl,
void
mlx5_rx_queue_release(void *dpdk_rxq)
{
- struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
struct priv *priv;
unsigned int i;
if (rxq == NULL)
return;
- rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
priv_lock(priv);
if (priv_flow_rxq_in_use(priv, rxq))
(*priv->rxqs)[i] = NULL;
break;
}
- rxq_cleanup(rxq_ctrl);
+ mlx5_rxq_cleanup(rxq_ctrl);
rte_free(rxq_ctrl);
priv_unlock(priv);
}
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- struct rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct rxq_ctrl, rxq);
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int fd;
int flags;
int rc;
* Sequence number per receive queue.
*/
static inline void
-mlx5_arm_cq(struct rxq *rxq, int sq_n_rxq)
+mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
{
int sq_n = 0;
uint32_t doorbell_hi;
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int ret = 0;
if (!rxq || !rxq_ctrl->channel) {
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct priv *priv = mlx5_get_priv(dev);
- struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct ibv_cq *ev_cq;
void *ev_ctx;
int ret;
rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
static __rte_always_inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
uint16_t cqe_cnt, uint32_t *rss_hash);
static __rte_always_inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
int
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
- struct rxq *rxq = rx_queue;
+ struct mlx5_rxq_data *rxq = rx_queue;
struct rxq_zip *zip = &rxq->zip;
volatile struct mlx5_cqe *cqe;
const unsigned int cqe_n = (1 << rxq->cqe_n);
* with error.
*/
static inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
uint16_t cqe_cnt, uint32_t *rss_hash)
{
struct rxq_zip *zip = &rxq->zip;
* Offload flags (ol_flags) for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
+rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
{
uint32_t ol_flags = 0;
uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct rxq *rxq = dpdk_rxq;
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
const unsigned int sges_n = rxq->sges_n;
}
int __attribute__((weak))
-rxq_check_vec_support(struct rxq *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
(void)rxq;
return -ENOTSUP;
};
/* RX queue descriptor. */
-struct rxq {
+struct mlx5_rxq_data {
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
} __rte_cache_aligned;
/* RX queue control descriptor. */
-struct rxq_ctrl {
+struct mlx5_rxq_ctrl {
struct priv *priv; /* Back pointer to private data. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_wq *wq; /* Work Queue. */
struct ibv_mr *mr; /* Memory Region (for mp). */
struct ibv_comp_channel *channel;
unsigned int socket; /* CPU socket ID for allocations. */
- struct rxq rxq; /* Data path structure. */
+ struct mlx5_rxq_data rxq; /* Data path structure. */
};
/* Hash RX queue types. */
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
-void rxq_cleanup(struct rxq_ctrl *);
+void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
/* Vectorized version of mlx5_rxtx.c */
int priv_check_raw_vec_tx_support(struct priv *);
int priv_check_vec_tx_support(struct priv *);
-int rxq_check_vec_support(struct rxq *);
+int rxq_check_vec_support(struct mlx5_rxq_data *);
int priv_check_vec_rx_support(struct priv *);
uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
* Number of packets to be stored.
*/
static inline void
-rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
+rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
const uint16_t q_mask = (1 << rxq->elts_n) - 1;
struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
* Number of buffers to be replenished.
*/
static inline void
-rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
+rxq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
const uint16_t q_n = 1 << rxq->elts_n;
const uint16_t q_mask = q_n - 1;
* the title completion descriptor to be copied to the rest of mbufs.
*/
static inline void
-rxq_cq_decompress_v(struct rxq *rxq,
+rxq_cq_decompress_v(struct mlx5_rxq_data *rxq,
volatile struct mlx5_cqe *cq,
struct rte_mbuf **elts)
{
* Pointer to array of packets to be filled.
*/
static inline void
-rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
- struct rte_mbuf **pkts)
+rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
+ __m128i op_err, struct rte_mbuf **pkts)
{
__m128i pinfo0, pinfo1;
__m128i pinfo, ptype;
* Number of packets successfully received (<= pkts_n).
*/
static uint16_t
-rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
+rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
uint16_t pkts_n)
{
uint16_t n = 0;
* Number of packets received including errors (<= pkts_n).
*/
static inline uint16_t
-rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct rxq *rxq = dpdk_rxq;
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
uint16_t nb_rx;
nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-rxq_check_vec_support(struct rxq *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
- struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
if (rxq_check_vec_support(rxq) < 0)
break;
priv_lock(priv);
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
if (rxq == NULL)
continue;
static void
priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
{
- struct rxq *rxq = (*priv->rxqs)[idx];
- struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct ibv_wq_attr mod;
uint16_t vlan_offloads =
(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |