X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=38fde939e14bfa4625e3182f5d8025f81014da99;hb=1573b07284caf92cad5479b539213c508d5583a6;hp=0ede46aa43c3c4baa710511164a144e4149e455d;hpb=538205614f9c4bcf0073fa51192cae0ce2e1ca60;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 0ede46aa43..38fde939e1 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -30,6 +30,7 @@ #include "mlx5_utils.h" #include "mlx5_autoconf.h" #include "mlx5_devx.h" +#include "rte_pmd_mlx5.h" /* Default RSS hash key also used for ConnectX-3. */ @@ -359,22 +360,21 @@ uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER | RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_RSS_HASH); - if (!config->mprq.enabled) + if (!priv->config.mprq.enabled) offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT; - if (config->hw_fcs_strip) + if (priv->sh->config.hw_fcs_strip) offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; - if (config->hw_csum) + if (priv->sh->dev_cap.hw_csum) offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM); - if (config->hw_vlan_strip) + if (priv->sh->dev_cap.hw_vlan_strip) offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; - if (MLX5_LRO_SUPPORTED(dev)) + if (priv->sh->dev_cap.lro_supported) offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO; return offloads; } @@ -840,6 +840,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, int res; uint64_t offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; + bool is_extmem = false; if (mp) { /* @@ -849,6 +850,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, */ rx_seg = &rx_single; n_seg = 1; + is_extmem = rte_pktmbuf_priv_flags(mp) & + RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF; } if (n_seg > 1) { /* The offloads should be checked on rte_eth_dev layer. */ @@ -910,25 +913,23 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, rte_errno = ENOMEM; return -rte_errno; } - rxq->priv = priv; - rxq->idx = idx; - (*priv->rxq_privs)[idx] = rxq; - if (rxq_ctrl != NULL) { - /* Join owner list. */ - LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry); - rxq->ctrl = rxq_ctrl; - } else { - rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, - n_seg); + if (rxq_ctrl == NULL) { + rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, + n_seg, is_extmem); if (rxq_ctrl == NULL) { DRV_LOG(ERR, "port %u unable to allocate rx queue index %u", dev->data->port_id, idx); mlx5_free(rxq); - (*priv->rxq_privs)[idx] = NULL; rte_errno = ENOMEM; return -rte_errno; } } + rxq->priv = priv; + rxq->idx = idx; + (*priv->rxq_privs)[idx] = rxq; + /* Join owner list. 
*/ + LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry); + rxq->ctrl = rxq_ctrl; mlx5_rxq_ref(dev, idx); DRV_LOG(DEBUG, "port %u adding Rx queue %u to list", dev->data->port_id, idx); @@ -1394,8 +1395,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i); struct mlx5_rxq_data *rxq; - if (rxq_ctrl == NULL || - rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD) + if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin) continue; rxq = &rxq_ctrl->rxq; n_ibv++; @@ -1483,8 +1483,7 @@ exit: for (i = 0; i != priv->rxqs_n; ++i) { struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i); - if (rxq_ctrl == NULL || - rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD) + if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin) continue; rxq_ctrl->rxq.mprq_mp = mp; } @@ -1552,7 +1551,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx, * Log number of strides to configure for this queue. * @param actual_log_stride_size * Log stride size to configure for this queue. - * + * @param is_extmem + * Is external pinned memory pool used. * @return * 0 if Multi-Packet RQ is supported, otherwise -1. */ @@ -1560,25 +1560,27 @@ static int mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, bool rx_seg_en, uint32_t min_mbuf_size, uint32_t *actual_log_stride_num, - uint32_t *actual_log_stride_size) + uint32_t *actual_log_stride_size, + bool is_extmem) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_dev_config *config = &priv->config; - uint32_t log_min_stride_num = config->mprq.log_min_stride_num; - uint32_t log_max_stride_num = config->mprq.log_max_stride_num; + struct mlx5_port_config *config = &priv->config; + struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap; + uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num; + uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num; uint32_t log_def_stride_num = RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM, log_min_stride_num), log_max_stride_num); - uint32_t log_min_stride_size = config->mprq.log_min_stride_size; - uint32_t log_max_stride_size = config->mprq.log_max_stride_size; + uint32_t log_min_stride_size = dev_cap->mprq.log_min_stride_size; + uint32_t log_max_stride_size = dev_cap->mprq.log_max_stride_size; uint32_t log_def_stride_size = RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE, log_min_stride_size), log_max_stride_size); uint32_t log_stride_wqe_size; - if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en) + if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem) goto unsupport; /* Checks if chosen number of strides is in supported range. */ if (config->mprq.log_stride_num > log_max_stride_num || @@ -1610,7 +1612,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, } log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size; /* Check if WQE buffer size is supported by hardware. 
*/ - if (log_stride_wqe_size < config->mprq.log_min_stride_wqe_size) { + if (log_stride_wqe_size < dev_cap->mprq.log_min_stride_wqe_size) { *actual_log_stride_num = log_def_stride_num; *actual_log_stride_size = log_def_stride_size; DRV_LOG(WARNING, @@ -1619,7 +1621,8 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, RTE_BIT32(log_def_stride_size)); log_stride_wqe_size = log_def_stride_num + log_def_stride_size; } - MLX5_ASSERT(log_stride_wqe_size >= config->mprq.log_min_stride_wqe_size); + MLX5_ASSERT(log_stride_wqe_size >= + dev_cap->mprq.log_min_stride_wqe_size); if (desc <= RTE_BIT32(*actual_log_stride_num)) goto unsupport; if (min_mbuf_size > RTE_BIT32(log_stride_wqe_size)) { @@ -1643,15 +1646,15 @@ unsupport: " rxq_num = %u, stride_sz = %u, stride_num = %u\n" " supported: min_rxqs_num = %u, min_buf_wqe_sz = %u" " min_stride_sz = %u, max_stride_sz = %u).\n" - "Rx segment is %senable.", + "Rx segment is %senabled. External mempool is %sused.", dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n, RTE_BIT32(config->mprq.log_stride_size), RTE_BIT32(config->mprq.log_stride_num), config->mprq.min_rxqs_num, - RTE_BIT32(config->mprq.log_min_stride_wqe_size), - RTE_BIT32(config->mprq.log_min_stride_size), - RTE_BIT32(config->mprq.log_max_stride_size), - rx_seg_en ? "" : "not "); + RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size), + RTE_BIT32(dev_cap->mprq.log_min_stride_size), + RTE_BIT32(dev_cap->mprq.log_max_stride_size), + rx_seg_en ? "" : "not ", is_extmem ? "" : "not "); return -1; } @@ -1660,8 +1663,8 @@ unsupport: * * @param dev * Pointer to Ethernet device. - * @param rxq - * RX queue private data. + * @param idx + * RX queue index. * @param desc * Number of descriptors to configure in queue. * @param socket @@ -1671,16 +1674,15 @@ unsupport: * A DPDK queue object on success, NULL otherwise and rte_errno is set. */ struct mlx5_rxq_ctrl * -mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, - uint16_t desc, +mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, - const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg) + const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg, + bool is_extmem) { - uint16_t idx = rxq->idx; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp); - struct mlx5_dev_config *config = &priv->config; + struct mlx5_port_config *config = &priv->config; uint64_t offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO); @@ -1698,7 +1700,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en, non_scatter_min_mbuf_size, &mprq_log_actual_stride_num, - &mprq_log_actual_stride_size); + &mprq_log_actual_stride_size, + is_extmem); /* * Always allocate extra slots, even if eventually * the vector Rx will not be used. 
@@ -1719,14 +1722,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, return NULL; } LIST_INIT(&tmpl->owners); - if (conf->share_group > 0) { - tmpl->rxq.shared = 1; - tmpl->share_group = conf->share_group; - tmpl->share_qid = conf->share_qid; - LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry); - } - rxq->ctrl = tmpl; - LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry); MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG); /* * Save the original segment configuration in the shared queue @@ -1809,7 +1804,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, rte_errno = ENOSPC; goto error; } - tmpl->type = MLX5_RXQ_TYPE_STANDARD; + tmpl->is_hairpin = false; if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl, &priv->sh->cdev->mr_scache.dev_gen, socket)) { /* rte_errno is already set. */ @@ -1894,7 +1889,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, tmpl->rxq.crc_present = 0; tmpl->rxq.lro = lro_on_queue; if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) { - if (config->hw_fcs_strip) { + if (priv->sh->config.hw_fcs_strip) { /* * RQs used for LRO-enabled TIRs should not be * configured to scatter the FCS. @@ -1933,6 +1928,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, tmpl->rxq.mprq_bufs = (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n); tmpl->rxq.idx = idx; + if (conf->share_group > 0) { + tmpl->rxq.shared = 1; + tmpl->share_group = conf->share_group; + tmpl->share_qid = conf->share_qid; + LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry); + } LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; error: @@ -1974,7 +1975,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, LIST_INIT(&tmpl->owners); rxq->ctrl = tmpl; LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry); - tmpl->type = MLX5_RXQ_TYPE_HAIRPIN; + tmpl->is_hairpin = true; tmpl->socket = SOCKET_ID_ANY; tmpl->rxq.rss_hash = 0; tmpl->rxq.port_id = dev->data->port_id; @@ -2048,6 +2049,8 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) { struct mlx5_priv *priv = dev->data->dev_private; + if (idx >= priv->rxqs_n) + return NULL; MLX5_ASSERT(priv->rxq_privs != NULL); return (*priv->rxq_privs)[idx]; } @@ -2090,6 +2093,124 @@ mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx) return rxq == NULL ? NULL : &rxq->ctrl->rxq; } +/** + * Increase an external Rx queue reference count. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * External RX queue index. + * + * @return + * A pointer to the queue if it exists, NULL otherwise. + */ +struct mlx5_external_rxq * +mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx); + + __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED); + return rxq; +} + +/** + * Decrease an external Rx queue reference count. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * External RX queue index. + * + * @return + * Updated reference count. + */ +uint32_t +mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx); + + return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED); +} + +/** + * Get an external Rx queue. + * + * @param dev + * Pointer to Ethernet device. + * @param idx + * External Rx queue index. + * + * @return + * A pointer to the queue if it exists, NULL otherwise. 
+ */ +struct mlx5_external_rxq * +mlx5_ext_rxq_get(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + MLX5_ASSERT(mlx5_is_external_rxq(dev, idx)); + return &priv->ext_rxqs[idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN]; +} + +/** + * Dereference a list of Rx queues. + * + * @param dev + * Pointer to Ethernet device. + * @param queues + * List of Rx queues to deref. + * @param queues_n + * Number of queues in the array. + */ +static void +mlx5_rxqs_deref(struct rte_eth_dev *dev, uint16_t *queues, + const uint32_t queues_n) +{ + uint32_t i; + + for (i = 0; i < queues_n; i++) { + if (mlx5_is_external_rxq(dev, queues[i])) + claim_nonzero(mlx5_ext_rxq_deref(dev, queues[i])); + else + claim_nonzero(mlx5_rxq_deref(dev, queues[i])); + } +} + +/** + * Increase reference count for list of Rx queues. + * + * @param dev + * Pointer to Ethernet device. + * @param queues + * List of Rx queues to ref. + * @param queues_n + * Number of queues in the array. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_rxqs_ref(struct rte_eth_dev *dev, uint16_t *queues, + const uint32_t queues_n) +{ + uint32_t i; + + for (i = 0; i != queues_n; ++i) { + if (mlx5_is_external_rxq(dev, queues[i])) { + if (mlx5_ext_rxq_ref(dev, queues[i]) == NULL) + goto error; + } else { + if (mlx5_rxq_ref(dev, queues[i]) == NULL) + goto error; + } + } + return 0; +error: + mlx5_rxqs_deref(dev, queues, i); + rte_errno = EINVAL; + return -rte_errno; +} + /** * Release a Rx queue. * @@ -2125,7 +2246,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) mlx5_free(rxq_ctrl->obj); rxq_ctrl->obj = NULL; } - if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) { + if (!rxq_ctrl->is_hairpin) { if (!rxq_ctrl->started) rxq_free_elts(rxq_ctrl); dev->data->rx_queue_state[idx] = @@ -2134,7 +2255,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) } else { /* Refcnt zero, closing device. */ LIST_REMOVE(rxq, owner_entry); if (LIST_EMPTY(&rxq_ctrl->owners)) { - if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) + if (!rxq_ctrl->is_hairpin) mlx5_mr_btree_free (&rxq_ctrl->rxq.mr_ctrl.cache_bh); if (rxq_ctrl->rxq.shared) @@ -2174,7 +2295,38 @@ mlx5_rxq_verify(struct rte_eth_dev *dev) } /** - * Get a Rx queue type. + * Verify the external Rx Queue list is empty. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The number of object not released. + */ +int +mlx5_ext_rxq_verify(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_external_rxq *rxq; + uint32_t i; + int ret = 0; + + if (priv->ext_rxqs == NULL) + return 0; + + for (i = MLX5_EXTERNAL_RX_QUEUE_ID_MIN; i <= UINT16_MAX ; ++i) { + rxq = mlx5_ext_rxq_get(dev, i); + if (rxq->refcnt < 2) + continue; + DRV_LOG(DEBUG, "Port %u external RxQ %u still referenced.", + dev->data->port_id, i); + ++ret; + } + return ret; +} + +/** + * Check whether RxQ type is Hairpin. * * @param dev * Pointer to Ethernet device. @@ -2182,17 +2334,18 @@ mlx5_rxq_verify(struct rte_eth_dev *dev) * Rx queue index. * * @return - * The Rx queue type. + * True if Rx queue type is Hairpin, otherwise False. 
*/ -enum mlx5_rxq_type -mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx) +bool +mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + struct mlx5_rxq_ctrl *rxq_ctrl; - if (idx < priv->rxqs_n && rxq_ctrl != NULL) - return rxq_ctrl->type; - return MLX5_RXQ_TYPE_UNDEFINED; + if (mlx5_is_external_rxq(dev, idx)) + return false; + rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx); + return (idx < priv->rxqs_n && rxq_ctrl != NULL && rxq_ctrl->is_hairpin); } /* @@ -2209,14 +2362,9 @@ mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx) const struct rte_eth_hairpin_conf * mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx) { - struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx); - if (idx < priv->rxqs_n && rxq != NULL) { - if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) - return &rxq->hairpin_conf; - } - return NULL; + return mlx5_rxq_is_hairpin(dev, idx) ? &rxq->hairpin_conf : NULL; } /** @@ -2284,8 +2432,6 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues, * Pointer to Ethernet device. * @param ind_table * Indirection table to release. - * @param standalone - * Indirection table for Standalone queue. * @param deref_rxqs * If true, then dereference RX queues related to indirection table. * Otherwise, no additional action will be taken. @@ -2296,24 +2442,21 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues, int mlx5_ind_table_obj_release(struct rte_eth_dev *dev, struct mlx5_ind_table_obj *ind_tbl, - bool standalone, bool deref_rxqs) { struct mlx5_priv *priv = dev->data->dev_private; - unsigned int i, ret; + unsigned int ret; rte_rwlock_write_lock(&priv->ind_tbls_lock); ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED); - if (!ret && !standalone) + if (!ret) LIST_REMOVE(ind_tbl, next); rte_rwlock_write_unlock(&priv->ind_tbls_lock); if (ret) return 1; priv->obj_ops.ind_table_destroy(ind_tbl); - if (deref_rxqs) { - for (i = 0; i != ind_tbl->queues_n; ++i) - claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i])); - } + if (deref_rxqs) + mlx5_rxqs_deref(dev, ind_tbl->queues, ind_tbl->queues_n); mlx5_free(ind_tbl); return 0; } @@ -2365,35 +2508,30 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; uint32_t queues_n = ind_tbl->queues_n; - uint16_t *queues = ind_tbl->queues; - unsigned int i = 0, j; - int ret = 0, err; + int ret; const unsigned int n = rte_is_power_of_2(queues_n) ? 
log2above(queues_n) :
-		  log2above(priv->config.ind_table_max_size);
+		  log2above(priv->sh->dev_cap.ind_table_max_size);
 
-	if (ref_qs)
-		for (i = 0; i != queues_n; ++i) {
-			if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
-				ret = -rte_errno;
-				goto error;
-			}
-		}
+	if (ref_qs && mlx5_rxqs_ref(dev, ind_tbl->queues, queues_n) < 0) {
+		DRV_LOG(DEBUG, "Port %u invalid indirection table queues.",
+			dev->data->port_id);
+		return -rte_errno;
+	}
 	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
-	if (ret)
-		goto error;
+	if (ret) {
+		DRV_LOG(DEBUG, "Port %u cannot create a new indirection table.",
+			dev->data->port_id);
+		if (ref_qs) {
+			int err = rte_errno;
+
+			mlx5_rxqs_deref(dev, ind_tbl->queues, queues_n);
+			rte_errno = err;
+		}
+		return ret;
+	}
 	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
 	return 0;
-error:
-	if (ref_qs) {
-		err = rte_errno;
-		for (j = 0; j < i; j++)
-			mlx5_rxq_deref(dev, queues[j]);
-		rte_errno = err;
-	}
-	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
-		dev->data->port_id);
-	return ret;
 }
 
 /**
@@ -2413,7 +2551,7 @@ error:
  * @return
  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
  */
-static struct mlx5_ind_table_obj *
+struct mlx5_ind_table_obj *
 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 		       uint32_t queues_n, bool standalone, bool ref_qs)
 {
@@ -2421,8 +2559,13 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 	struct mlx5_ind_table_obj *ind_tbl;
 	int ret;
 
+	/*
+	 * Allocate maximum queues for shared action as the queue number
+	 * may be modified later.
+	 */
 	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
-			      queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
+			      (standalone ? priv->rxqs_n : queues_n) *
+			      sizeof(uint16_t), 0, SOCKET_ID_ANY);
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		return NULL;
@@ -2435,11 +2578,13 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 		mlx5_free(ind_tbl);
 		return NULL;
 	}
-	if (!standalone) {
-		rte_rwlock_write_lock(&priv->ind_tbls_lock);
+	rte_rwlock_write_lock(&priv->ind_tbls_lock);
+	if (!standalone)
 		LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
-		rte_rwlock_write_unlock(&priv->ind_tbls_lock);
-	}
+	else
+		LIST_INSERT_HEAD(&priv->standalone_ind_tbls, ind_tbl, next);
+	rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+
 	return ind_tbl;
}

@@ -2491,43 +2636,38 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
 			  bool standalone, bool ref_new_qs, bool deref_old_qs)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	unsigned int i = 0, j;
-	int ret = 0, err;
+	int ret;
 	const unsigned int n = rte_is_power_of_2(queues_n) ? 
log2above(queues_n) : - log2above(priv->config.ind_table_max_size); + log2above(priv->sh->dev_cap.ind_table_max_size); MLX5_ASSERT(standalone); RTE_SET_USED(standalone); if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0) return -rte_errno; - if (ref_new_qs) - for (i = 0; i != queues_n; ++i) { - if (!mlx5_rxq_ref(dev, queues[i])) { - ret = -rte_errno; - goto error; - } - } + if (ref_new_qs && mlx5_rxqs_ref(dev, queues, queues_n) < 0) { + DRV_LOG(DEBUG, "Port %u invalid indirection table queues.", + dev->data->port_id); + return -rte_errno; + } MLX5_ASSERT(priv->obj_ops.ind_table_modify); ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl); - if (ret) - goto error; + if (ret) { + DRV_LOG(DEBUG, "Port %u cannot modify indirection table.", + dev->data->port_id); + if (ref_new_qs) { + int err = rte_errno; + + mlx5_rxqs_deref(dev, queues, queues_n); + rte_errno = err; + } + return ret; + } if (deref_old_qs) - for (i = 0; i < ind_tbl->queues_n; i++) - claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i])); + mlx5_rxqs_deref(dev, ind_tbl->queues, ind_tbl->queues_n); ind_tbl->queues_n = queues_n; ind_tbl->queues = queues; return 0; -error: - if (ref_new_qs) { - err = rte_errno; - for (j = 0; j < i; j++) - mlx5_rxq_deref(dev, queues[j]); - rte_errno = err; - } - DRV_LOG(DEBUG, "Port %u cannot setup indirection table.", - dev->data->port_id); - return ret; } /** @@ -2576,7 +2716,7 @@ mlx5_ind_table_obj_detach(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ? log2above(ind_tbl->queues_n) : - log2above(priv->config.ind_table_max_size); + log2above(priv->sh->dev_cap.ind_table_max_size); unsigned int i; int ret; @@ -2605,6 +2745,7 @@ mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry, return (hrxq->rss_key_len != rss_desc->key_len || memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) || + hrxq->hws_flags != rss_desc->hws_flags || hrxq->hash_fields != rss_desc->hash_fields || hrxq->ind_table->queues_n != rss_desc->queue_num || memcmp(hrxq->ind_table->queues, rss_desc->queue, @@ -2689,8 +2830,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx, } if (ind_tbl != hrxq->ind_table) { MLX5_ASSERT(!hrxq->standalone); - mlx5_ind_table_obj_release(dev, hrxq->ind_table, - hrxq->standalone, true); + mlx5_ind_table_obj_release(dev, hrxq->ind_table, true); hrxq->ind_table = ind_tbl; } hrxq->hash_fields = hash_fields; @@ -2700,8 +2840,7 @@ error: err = rte_errno; if (ind_tbl != hrxq->ind_table) { MLX5_ASSERT(!hrxq->standalone); - mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone, - true); + mlx5_ind_table_obj_release(dev, ind_tbl, true); } rte_errno = err; return -rte_errno; @@ -2713,12 +2852,16 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) struct mlx5_priv *priv = dev->data->dev_private; #ifdef HAVE_IBV_FLOW_DV_SUPPORT - mlx5_glue->destroy_flow_action(hrxq->action); + if (hrxq->hws_flags) + mlx5dr_action_destroy(hrxq->action); + else + mlx5_glue->destroy_flow_action(hrxq->action); #endif priv->obj_ops.hrxq_destroy(hrxq); if (!hrxq->standalone) { mlx5_ind_table_obj_release(dev, hrxq->ind_table, - hrxq->standalone, true); + hrxq->hws_flags ? + (!!dev->data->dev_started) : true); } mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx); } @@ -2762,11 +2905,12 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev, int ret; queues_n = rss_desc->hash_fields ? 
queues_n : 1;
-	if (!ind_tbl)
+	if (!ind_tbl && !rss_desc->hws_flags)
 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
-						 standalone,
+						 standalone ||
+						 rss_desc->hws_flags,
 						 !!dev->data->dev_started);
 	if (!ind_tbl)
 		return NULL;
@@ -2778,6 +2922,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
 	hrxq->ind_table = ind_tbl;
 	hrxq->rss_key_len = rss_key_len;
 	hrxq->hash_fields = rss_desc->hash_fields;
+	hrxq->hws_flags = rss_desc->hws_flags;
 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
 	ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
 	if (ret < 0)
@@ -2785,7 +2930,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
 	return hrxq;
 error:
 	if (!rss_desc->ind_tbl)
-		mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
+		mlx5_ind_table_obj_release(dev, ind_tbl, true);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 	return NULL;
@@ -2839,13 +2984,13 @@ mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
  *   RSS configuration for the Rx hash queue.
  *
  * @return
- *   An hash Rx queue index on success.
+ *   A hash Rx queue on success.
  */
-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
 		       struct mlx5_flow_rss_desc *rss_desc)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
+	struct mlx5_hrxq *hrxq = NULL;
 	struct mlx5_list_entry *entry;
 	struct mlx5_flow_cb_ctx ctx = {
 		.data = rss_desc,
@@ -2856,12 +3001,10 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 	} else {
 		entry = mlx5_list_register(priv->hrxqs, &ctx);
 		if (!entry)
-			return 0;
+			return NULL;
 		hrxq = container_of(entry, typeof(*hrxq), entry);
 	}
-	if (hrxq)
-		return hrxq->idx;
-	return 0;
+	return hrxq;
 }
 
 /**
@@ -2870,17 +3013,15 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
  * @param dev
  *   Pointer to Ethernet device.
  * @param hrxq_idx
- *   Index to Hash Rx queue to release.
+ *   Hash Rx queue to release.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
-int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
 
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 	if (!hrxq)
 		return 0;
 	if (!hrxq->standalone)
@@ -2889,6 +3030,26 @@ int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
 	return 0;
 }
 
+/**
+ * Release the hash Rx queue by index.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq_idx
+ *   Index to Hash Rx queue to release.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq;
+
+	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	return mlx5_hrxq_obj_release(dev, hrxq);
+}
+
 /**
  * Create a drop Rx Hash queue.
  *
@@ -2994,6 +3155,122 @@ mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
 		if (data == NULL)
 			continue;
 		data->sh = sh;
-		data->rt_timestamp = priv->config.rt_timestamp;
+		data->rt_timestamp = sh->dev_cap.rt_timestamp;
+	}
+}
+
+/**
+ * Validate given external RxQ rte_flow index, and get pointer to concurrent
+ * external RxQ object to map/unmap.
+ *
+ * @param[in] port_id
+ *   The port identifier of the Ethernet device.
+ * @param[in] dpdk_idx
+ *   Queue index in rte_flow. 
+ * + * @return + * Pointer to concurrent external RxQ on success, + * NULL otherwise and rte_errno is set. + */ +static struct mlx5_external_rxq * +mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx) +{ + struct rte_eth_dev *dev; + struct mlx5_priv *priv; + + if (dpdk_idx < MLX5_EXTERNAL_RX_QUEUE_ID_MIN) { + DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].", + dpdk_idx, MLX5_EXTERNAL_RX_QUEUE_ID_MIN, UINT16_MAX); + rte_errno = EINVAL; + return NULL; + } + if (rte_eth_dev_is_valid_port(port_id) < 0) { + DRV_LOG(ERR, "There is no Ethernet device for port %u.", + port_id); + rte_errno = ENODEV; + return NULL; } + dev = &rte_eth_devices[port_id]; + priv = dev->data->dev_private; + if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) { + DRV_LOG(ERR, "Port %u " + "external RxQ isn't supported on local PD and CTX.", + port_id); + rte_errno = ENOTSUP; + return NULL; + } + if (!mlx5_devx_obj_ops_en(priv->sh)) { + DRV_LOG(ERR, + "Port %u external RxQ isn't supported by Verbs API.", + port_id); + rte_errno = ENOTSUP; + return NULL; + } + /* + * When user configures remote PD and CTX and device creates RxQ by + * DevX, external RxQs array is allocated. + */ + MLX5_ASSERT(priv->ext_rxqs != NULL); + return &priv->ext_rxqs[dpdk_idx - MLX5_EXTERNAL_RX_QUEUE_ID_MIN]; +} + +int +rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx, + uint32_t hw_idx) +{ + struct mlx5_external_rxq *ext_rxq; + uint32_t unmapped = 0; + + ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx); + if (ext_rxq == NULL) + return -rte_errno; + if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false, + __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { + if (ext_rxq->hw_id != hw_idx) { + DRV_LOG(ERR, "Port %u external RxQ index %u " + "is already mapped to HW index (requesting is " + "%u, existing is %u).", + port_id, dpdk_idx, hw_idx, ext_rxq->hw_id); + rte_errno = EEXIST; + return -rte_errno; + } + DRV_LOG(WARNING, "Port %u external RxQ index %u " + "is already mapped to the requested HW index (%u)", + port_id, dpdk_idx, hw_idx); + + } else { + ext_rxq->hw_id = hw_idx; + DRV_LOG(DEBUG, "Port %u external RxQ index %u " + "is successfully mapped to the requested HW index (%u)", + port_id, dpdk_idx, hw_idx); + } + return 0; +} + +int +rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx) +{ + struct mlx5_external_rxq *ext_rxq; + uint32_t mapped = 1; + + ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx); + if (ext_rxq == NULL) + return -rte_errno; + if (ext_rxq->refcnt > 1) { + DRV_LOG(ERR, "Port %u external RxQ index %u still referenced.", + port_id, dpdk_idx); + rte_errno = EINVAL; + return -rte_errno; + } + if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false, + __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { + DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.", + port_id, dpdk_idx); + rte_errno = EINVAL; + return -rte_errno; + } + DRV_LOG(DEBUG, + "Port %u external RxQ index %u is successfully unmapped.", + port_id, dpdk_idx); + return 0; }
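

The map/unmap calls added above are the application-facing part of this
change. The following is a minimal usage sketch, not part of the patch: the
helper names and the hardware RQ number (hw_rxq_id, obtained by the
application from its own DevX/rdma-core setup) are assumptions for
illustration; only rte_pmd_mlx5_external_rx_queue_id_map()/_unmap() and the
errno values come from the code above.

#include <rte_errno.h>
#include <rte_pmd_mlx5.h>

/*
 * Map an application-managed hardware RQ to a DPDK-level queue index usable
 * in rte_flow. dpdk_idx must be at or above the external-queue minimum
 * exposed by rte_pmd_mlx5.h; hw_rxq_id is a placeholder for the DevX RQ
 * number owned by the application.
 */
static int
attach_external_rxq(uint16_t port_id, uint16_t dpdk_idx, uint32_t hw_rxq_id)
{
	int ret;

	ret = rte_pmd_mlx5_external_rx_queue_id_map(port_id, dpdk_idx,
						    hw_rxq_id);
	if (ret != 0)
		return ret; /* rte_errno: EINVAL, ENODEV, ENOTSUP or EEXIST. */
	/*
	 * The queue now holds a single "mapped" reference (refcnt == 1).
	 * Each flow rule using dpdk_idx takes an extra reference, so all
	 * such rules must be destroyed before the queue can be unmapped.
	 */
	return 0;
}

/* Unmapping fails with EINVAL while any flow still references the queue. */
static int
detach_external_rxq(uint16_t port_id, uint16_t dpdk_idx)
{
	return rte_pmd_mlx5_external_rx_queue_id_unmap(port_id, dpdk_idx);
}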
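
Once mapped, the external index is meaningful only inside rte_flow; it does
not correspond to a queue created through rte_eth_rx_queue_setup() and is
not serviced by rte_eth_rx_burst(). A sketch of steering traffic to it with
a plain QUEUE action, under the same assumptions as above (the bare Ethernet
pattern is a placeholder):

#include <rte_flow.h>

static struct rte_flow *
flow_to_external_rxq(uint16_t port_id, uint16_t dpdk_idx,
		     struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Target the mapped external queue by its DPDK-level index. */
	const struct rte_flow_action_queue queue = { .index = dpdk_idx };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}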