X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=f1d8373079dbc5b444079192870b3a0c00a00581;hb=952ebacce4f2;hp=99b32f6e298fa783475a95a695e800d8884084a0;hpb=5eaf882e94d99d2ffc19ca84e8db1bc6fa0dc37a;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 99b32f6e29..f1d8373079 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -484,11 +484,11 @@ rxq_sync_cq(struct mlx5_rxq_data *rxq)
 		cqe->op_own = MLX5_CQE_INVALIDATE;
 	}
 	/* Resync CQE and WQE (WQ in RESET state). */
-	rte_cio_wmb();
+	rte_io_wmb();
 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
-	rte_cio_wmb();
+	rte_io_wmb();
 	*rxq->rq_db = rte_cpu_to_be_32(0);
-	rte_cio_wmb();
+	rte_io_wmb();
 }
 
 /**
@@ -513,7 +513,7 @@ mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
 	int ret;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
-	ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, false);
+	ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
 	if (ret) {
 		DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
 			strerror(errno));
@@ -547,7 +547,7 @@ mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
 	int ret;
 
-	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
+	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
 		DRV_LOG(ERR, "Hairpin queue can't be stopped");
 		rte_errno = EINVAL;
 		return -EINVAL;
@@ -562,7 +562,7 @@ mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
 	 * The routine pointer depends on the process
 	 * type, should perform check there.
 	 */
-	if (pkt_burst == mlx5_rx_burst) {
+	if (pkt_burst == mlx5_rx_burst_vec) {
 		DRV_LOG(ERR, "Rx queue stop is not supported "
 			"for vectorized Rx");
 		rte_errno = EINVAL;
@@ -606,13 +606,13 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
 		rte_errno = errno;
 		return ret;
 	}
-	rte_cio_wmb();
+	rte_io_wmb();
 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
-	rte_cio_wmb();
-	/* Reset RQ consumer before moving queue to READY state. */
+	rte_io_wmb();
+	/* Reset RQ consumer before moving queue to READY state. */
 	*rxq->rq_db = rte_cpu_to_be_32(0);
-	rte_cio_wmb();
-	ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, true);
+	rte_io_wmb();
+	ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
 	if (ret) {
 		DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
 			strerror(errno));
@@ -644,7 +644,7 @@ mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
 {
 	int ret;
 
-	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
+	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
 		DRV_LOG(ERR, "Hairpin queue can't be started");
 		rte_errno = EINVAL;
 		return -EINVAL;
@@ -1090,7 +1090,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
 
 	memset(_m, 0, sizeof(*buf));
 	buf->mp = mp;
-	rte_atomic16_set(&buf->refcnt, 1);
+	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
 	for (j = 0; j != strd_n; ++j) {
 		shinfo = &buf->shinfos[j];
 		shinfo->free_cb = mlx5_mprq_buf_free_cb;
@@ -2005,6 +2005,78 @@ error:
 	return 0;
 }
 
+/**
+ * Create a drop Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_drop_action_create(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq = NULL;
+	int ret;
+
+	if (priv->drop_queue.hrxq) {
+		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+		return priv->drop_queue.hrxq;
+	}
+	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
+	if (!hrxq) {
+		DRV_LOG(WARNING,
+			"Port %u cannot allocate memory for drop queue.",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	priv->drop_queue.hrxq = hrxq;
+	hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
+				      0, SOCKET_ID_ANY);
+	if (!hrxq->ind_table) {
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	ret = priv->obj_ops.drop_action_create(dev);
+	if (ret < 0)
+		goto error;
+	rte_atomic32_set(&hrxq->refcnt, 1);
+	return hrxq;
+error:
+	if (hrxq) {
+		if (hrxq->ind_table)
+			mlx5_free(hrxq->ind_table);
+		priv->drop_queue.hrxq = NULL;
+		mlx5_free(hrxq);
+	}
+	return NULL;
+}
+
+/**
+ * Release a drop hash Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_drop_action_destroy(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+
+	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+		priv->obj_ops.drop_action_destroy(dev);
+		mlx5_free(priv->drop_queue.rxq);
+		mlx5_free(hrxq->ind_table);
+		mlx5_free(hrxq);
+		priv->drop_queue.rxq = NULL;
+		priv->drop_queue.hrxq = NULL;
+	}
+}
+
 /**
  * Verify the Rx Queue list is empty
  *