diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d95a573095..bb9a908087 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -12,7 +12,7 @@
 
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_common.h>
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -24,7 +24,8 @@
 #include <mlx5_malloc.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_tx.h"
+#include "mlx5_rx.h"
 #include "mlx5_utils.h"
 #include "mlx5_autoconf.h"
@@ -48,77 +49,6 @@ static_assert(MLX5_RSS_HASH_KEY_LEN ==
 (unsigned int)sizeof(rss_hash_default_key),
 "wrong RSS default key size.");
-/**
- * Check whether Multi-Packet RQ can be enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 1 if supported, negative errno value if not.
- */
-inline int
-mlx5_check_mprq_support(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (priv->config.mprq.enabled &&
- priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
- return 1;
- return -ENOTSUP;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the Rx queue.
- *
- * @param rxq
- * Pointer to receive queue structure.
- *
- * @return
- * 0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
-{
- return rxq->strd_num_n > 0;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_mprq_enabled(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t i;
- uint16_t n = 0;
- uint16_t n_ibv = 0;
-
- if (mlx5_check_mprq_support(dev) < 0)
- return 0;
- /* All the configured queues should be enabled. */
- for (i = 0; i < priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
-
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
- continue;
- n_ibv++;
- if (mlx5_rxq_mprq_enabled(rxq))
- ++n;
- }
- /* Multi-Packet RQ can't be partially configured. */
- MLX5_ASSERT(n == 0 || n == n_ibv);
- return n == n_ibv;
-}
-
 /**
 * Calculate the number of CQEs in CQ for the Rx queue.
 *
@@ -346,7 +276,9 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
 (1 << rxq->elts_n);
 const uint16_t q_mask = q_n - 1;
- uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
+ uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq->elts_ci : rxq->rq_ci;
+ uint16_t used = q_n - (elts_ci - rxq->rq_pi);
 uint16_t i;
 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
@@ -359,8 +291,8 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 */
 if (mlx5_rxq_check_vec_support(rxq) > 0) {
 for (i = 0; i < used; ++i)
- (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
- rxq->rq_pi = rxq->rq_ci;
+ (*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
+ rxq->rq_pi = elts_ci;
 }
 for (i = 0; i != q_n; ++i) {
 if ((*rxq->elts)[i] != NULL)
@@ -402,14 +334,14 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 struct mlx5_priv *priv = dev->data->dev_private;
 struct mlx5_dev_config *config = &priv->config;
 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_BUFFER_SPLIT |
 DEV_RX_OFFLOAD_TIMESTAMP |
 DEV_RX_OFFLOAD_JUMBO_FRAME |
 DEV_RX_OFFLOAD_RSS_HASH);
+ if (!config->mprq.enabled)
+ offloads |= DEV_RX_OFFLOAD_BUFFER_SPLIT;
 if (config->hw_fcs_strip)
 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
-
 if (config->hw_csum)
 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
 DEV_RX_OFFLOAD_UDP_CKSUM |
@@ -622,7 +554,7 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
 rte_io_wmb();
 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 rte_io_wmb();
- /* Reset RQ consumer before moving queue ro READY state. */
+ /* Reset RQ consumer before moving queue to READY state. */
 *rxq->rq_db = rte_cpu_to_be_32(0);
 rte_io_wmb();
 ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
@@ -902,6 +834,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 unsigned int count = 0;
 struct rte_intr_handle *intr_handle = dev->intr_handle;
+ /* Representor shares dev->intr_handle with PF. */
+ if (priv->representor)
+ return 0;
 if (!dev->data->dev_conf.intr_conf.rxq)
 return 0;
 mlx5_rx_intr_vec_disable(dev);
@@ -982,6 +917,9 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
 unsigned int rxqs_n = priv->rxqs_n;
 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ /* Representor shares dev->intr_handle with PF. */
+ if (priv->representor)
+ return;
 if (!dev->data->dev_conf.intr_conf.rxq)
 return;
 if (!intr_handle->intr_vec)
@@ -1506,7 +1444,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
 RTE_PKTMBUF_HEADROOM);
 rte_errno = ENOSPC;
- return NULL;
+ goto error;
 }
 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
@@ -1683,6 +1621,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 return tmpl;
 error:
+ mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
 mlx5_free(tmpl);
 return NULL;
 }
@@ -1994,6 +1933,51 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
 return ret;
 }
+/**
+ * Set up the fields of an indirection table structure.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_tbl
+ * Indirection table to set up.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t queues_n = ind_tbl->queues_n;
+ uint16_t *queues = ind_tbl->queues;
+ unsigned int i, j;
+ int ret = 0, err;
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_get(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
+ }
+ ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
+ if (ret)
+ goto error;
+ __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ return 0;
+error:
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ rte_errno = err;
+ DRV_LOG(DEBUG, "Port %u cannot set up indirection table.",
+ dev->data->port_id);
+ return ret;
+}
+
 /**
 * Create an indirection table.
 *
@@ -2015,10 +1999,6 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 {
 struct mlx5_priv *priv = dev->data->dev_private;
 struct mlx5_ind_table_obj *ind_tbl;
- const unsigned int n = rte_is_power_of_2(queues_n) ?
- log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
- unsigned int i, j;
 int ret;
 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
@@ -2028,27 +2008,85 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 return NULL;
 }
 ind_tbl->queues_n = queues_n;
- for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
- if (!rxq)
- goto error;
- ind_tbl->queues[i] = queues[i];
+ ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
+ memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
+ ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+ if (ret < 0) {
+ mlx5_free(ind_tbl);
+ return NULL;
 }
- ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
- if (ret < 0)
- goto error;
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
 if (!standalone)
 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
 return ind_tbl;
+}
+
+/**
+ * Modify an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_tbl
+ * Indirection table to modify.
+ * @param queues
+ * Replacement queues for the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ * @param standalone
+ * Indirection table for a standalone queue.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl,
+ uint16_t *queues, const uint32_t queues_n,
+ bool standalone)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i, j;
+ int ret = 0, err;
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+
+ MLX5_ASSERT(standalone);
+ RTE_SET_USED(standalone);
+ if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
+ /*
+ * Modification of indirection tables having more than one
+ * reference is unsupported. Intended for standalone
+ * indirection tables only.
+ */
+ DRV_LOG(DEBUG,
+ "Port %u cannot modify indirection table (refcnt > 1).",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_get(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
+ }
+ MLX5_ASSERT(priv->obj_ops.ind_table_modify);
+ ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
+ if (ret)
+ goto error;
+ for (j = 0; j < ind_tbl->queues_n; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ ind_tbl->queues_n = queues_n;
+ ind_tbl->queues = queues;
+ return 0;
 error:
- ret = rte_errno;
+ err = rte_errno;
 for (j = 0; j < i; j++)
 mlx5_rxq_release(dev, ind_tbl->queues[j]);
- rte_errno = ret;
- mlx5_free(ind_tbl);
- DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
- return NULL;
+ rte_errno = err;
+ DRV_LOG(DEBUG, "Port %u cannot modify indirection table.",
+ dev->data->port_id);
+ return ret;
 }
 /**
@@ -2136,6 +2174,14 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 queues, queues_n)) {
 ind_tbl = hrxq->ind_table;
 } else {
+ if (hrxq->standalone) {
+ /*
+ * Replacement of the indirection table is unsupported
+ * for standalone hrxq objects (used by shared RSS).
+ */
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 if (!ind_tbl)
 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
@@ -2153,6 +2199,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 goto error;
 }
 if (ind_tbl != hrxq->ind_table) {
+ MLX5_ASSERT(!hrxq->standalone);
 mlx5_ind_table_obj_release(dev, hrxq->ind_table,
 hrxq->standalone);
 hrxq->ind_table = ind_tbl;
@@ -2162,8 +2209,10 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 return 0;
 error:
 err = rte_errno;
- if (ind_tbl != hrxq->ind_table)
+ if (ind_tbl != hrxq->ind_table) {
+ MLX5_ASSERT(!hrxq->standalone);
 mlx5_ind_table_obj_release(dev, ind_tbl,
 hrxq->standalone);
+ }
 rte_errno = err;
 return -rte_errno;
 }
@@ -2177,7 +2226,10 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
 priv->obj_ops.hrxq_destroy(hrxq);
- mlx5_ind_table_obj_release(dev, hrxq->ind_table, hrxq->standalone);
+ if (!hrxq->standalone) {
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+ hrxq->standalone);
+ }
 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
 }
@@ -2211,25 +2263,27 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
 struct mlx5_priv *priv = dev->data->dev_private;
 const uint8_t *rss_key = rss_desc->key;
 uint32_t rss_key_len = rss_desc->key_len;
+ bool standalone = !!rss_desc->shared_rss;
 const uint16_t *queues =
- rss_desc->standalone ? rss_desc->const_q : rss_desc->queue;
+ standalone ? rss_desc->const_q : rss_desc->queue;
 uint32_t queues_n = rss_desc->queue_num;
 struct mlx5_hrxq *hrxq = NULL;
 uint32_t hrxq_idx = 0;
- struct mlx5_ind_table_obj *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
 int ret;
 queues_n = rss_desc->hash_fields ?
queues_n : 1; - ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); + if (!ind_tbl) + ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); if (!ind_tbl) ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, - rss_desc->standalone); + standalone); if (!ind_tbl) return NULL; hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx); if (!hrxq) goto error; - hrxq->standalone = rss_desc->standalone; + hrxq->standalone = standalone; hrxq->idx = hrxq_idx; hrxq->ind_table = ind_tbl; hrxq->rss_key_len = rss_key_len; @@ -2240,7 +2294,8 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev, goto error; return hrxq; error: - mlx5_ind_table_obj_release(dev, ind_tbl, rss_desc->standalone); + if (!rss_desc->ind_tbl) + mlx5_ind_table_obj_release(dev, ind_tbl, standalone); if (hrxq) mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); return NULL; @@ -2294,7 +2349,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev, .data = rss_desc, }; - if (rss_desc->standalone) { + if (rss_desc->shared_rss) { hrxq = __mlx5_hrxq_create(dev, rss_desc); } else { entry = mlx5_cache_register(&priv->hrxqs, &ctx); @@ -2302,7 +2357,9 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev, return 0; hrxq = container_of(entry, typeof(*hrxq), entry); } - return hrxq->idx; + if (hrxq) + return hrxq->idx; + return 0; } /** @@ -2322,6 +2379,8 @@ int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx) struct mlx5_hrxq *hrxq; hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); + if (!hrxq) + return 0; if (!hrxq->standalone) return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry); __mlx5_hrxq_remove(dev, hrxq); @@ -2344,11 +2403,8 @@ mlx5_drop_action_create(struct rte_eth_dev *dev) struct mlx5_hrxq *hrxq = NULL; int ret; - if (priv->drop_queue.hrxq) { - __atomic_fetch_add(&priv->drop_queue.hrxq->refcnt, 1, - __ATOMIC_RELAXED); + if (priv->drop_queue.hrxq) return priv->drop_queue.hrxq; - } hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY); if (!hrxq) { DRV_LOG(WARNING, @@ -2367,7 +2423,6 @@ mlx5_drop_action_create(struct rte_eth_dev *dev) ret = priv->obj_ops.drop_action_create(dev); if (ret < 0) goto error; - __atomic_store_n(&hrxq->refcnt, 1, __ATOMIC_RELAXED); return hrxq; error: if (hrxq) { @@ -2391,14 +2446,14 @@ mlx5_drop_action_destroy(struct rte_eth_dev *dev) struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq; - if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) { - priv->obj_ops.drop_action_destroy(dev); - mlx5_free(priv->drop_queue.rxq); - mlx5_free(hrxq->ind_table); - mlx5_free(hrxq); - priv->drop_queue.rxq = NULL; - priv->drop_queue.hrxq = NULL; - } + if (!priv->drop_queue.hrxq) + return; + priv->obj_ops.drop_action_destroy(dev); + mlx5_free(priv->drop_queue.rxq); + mlx5_free(hrxq->ind_table); + mlx5_free(hrxq); + priv->drop_queue.rxq = NULL; + priv->drop_queue.hrxq = NULL; } /**
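---
Note (not part of the patch): a minimal sketch of how the new
mlx5_ind_table_obj_modify() added above is intended to be called. The
signature is taken from the patch; the caller and its name are
hypothetical.

/*
 * Hypothetical caller: replace the queue set of a standalone indirection
 * table owned by a shared RSS action. The call fails with EINVAL when the
 * table has more than one reference (see the refcnt check in the patch).
 */
static int
shared_rss_update_queues(struct rte_eth_dev *dev,
			 struct mlx5_ind_table_obj *ind_tbl,
			 uint16_t *queues, uint32_t queues_n)
{
	/* Only standalone tables (single reference) may be modified. */
	return mlx5_ind_table_obj_modify(dev, ind_tbl, queues, queues_n,
					 true /* standalone */);
}

On success the table keeps the caller's "queues" pointer rather than
copying the array, so the array must outlive the indirection table.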
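Similarly, a sketch of the reworked drop-action lifecycle: with reference
counting removed, mlx5_drop_action_create() behaves as a per-port singleton
getter, and mlx5_drop_action_destroy() releases the object unconditionally
(it is a no-op when nothing was created). The caller below is hypothetical.

/* Hypothetical usage: repeated create calls return the same drop hrxq. */
static int
port_drop_action_example(struct rte_eth_dev *dev)
{
	struct mlx5_hrxq *a = mlx5_drop_action_create(dev);
	struct mlx5_hrxq *b = mlx5_drop_action_create(dev);

	if (a == NULL || b != a)
		return -1;
	/* Safe even when the drop queue was never created. */
	mlx5_drop_action_destroy(dev);
	return 0;
}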