diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d960daa43e..ed993ea6d9 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -52,7 +52,9 @@ uint8_t rss_hash_default_key[] = {
 };
 
 /* Length of the default RSS hash key. */
-const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
+static_assert(MLX5_RSS_HASH_KEY_LEN ==
+	      (unsigned int)sizeof(rss_hash_default_key),
+	      "wrong RSS default key size.");
 
 /**
  * Check whether Multi-Packet RQ can be enabled for the device.
@@ -386,7 +388,6 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 			     DEV_RX_OFFLOAD_TIMESTAMP |
 			     DEV_RX_OFFLOAD_JUMBO_FRAME);
 
-	offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	if (config->hw_fcs_strip)
 		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
 
@@ -645,7 +646,8 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
 	doorbell = (uint64_t)doorbell_hi << 32;
 	doorbell |= rxq->cqn;
 	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
-	rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
+	mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
+			 cq_db_reg, rxq->uar_lock_cq);
 }
 
 /**
@@ -1231,6 +1233,13 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 	 */
 	desc *= 4;
 	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
+	/*
+	 * rte_mempool_create_empty() has sanity check to refuse large cache
+	 * size compared to the number of elements.
+	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
+	 * constant number 2 instead.
+	 */
+	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
 	/* Check a mempool is already allocated and if it can be reused. */
 	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
 		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
@@ -1346,7 +1355,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		sizeof(struct rte_mbuf_ext_shared_info) +
 		RTE_PKTMBUF_HEADROOM;
 	if (mprq_en &&
-	    desc >= (1U << config->mprq.stride_num_n) &&
+	    desc > (1U << config->mprq.stride_num_n) &&
 	    mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
 		/* TODO: Rx scatter isn't supported yet. */
 		tmpl->rxq.sges_n = 0;
@@ -1401,6 +1410,14 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len,
 			mb_len - RTE_PKTMBUF_HEADROOM);
 	}
+	if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
+		DRV_LOG(WARNING,
+			"port %u MPRQ is requested but cannot be enabled"
+			" (requested: desc = %u, stride_sz = %u,"
+			" supported: min_stride_num = %u, max_stride_sz = %u).",
+			dev->data->port_id, desc, mprq_stride_size,
+			(1 << config->mprq.stride_num_n),
+			(1 << config->mprq.max_stride_size_n));
 	DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
 		dev->data->port_id, 1 << tmpl->rxq.sges_n);
 	if (desc % (1 << tmpl->rxq.sges_n)) {
@@ -1420,7 +1437,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
-	if (rte_eth_dev_must_keep_crc(offloads)) {
+	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			tmpl->rxq.crc_present = 1;
 		} else {
@@ -1447,6 +1464,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.elts_n = log2above(desc);
 	tmpl->rxq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+#ifndef RTE_ARCH_64
+	tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
+#endif
 	tmpl->idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
@@ -1738,10 +1758,6 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
  *   first queue index will be taken for the indirection table.
  * @param queues_n
  *   Number of queues.
- * @param tunnel
- *   Tunnel type, implies tunnel offloading like inner checksum if available.
- * @param rss_level
- *   RSS hash on tunnel level.
  *
  * @return
  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
@@ -1751,16 +1767,13 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
 	      const uint16_t *queues, uint32_t queues_n,
-	      uint32_t tunnel, uint32_t rss_level)
+	      int tunnel __rte_unused)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
 	struct mlx5_ind_table_ibv *ind_tbl;
 	struct ibv_qp *qp;
 	int err;
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	struct mlx5dv_qp_init_attr qp_init_attr = {0};
-#endif
 
 	queues_n = hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
@@ -1771,15 +1784,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 		return NULL;
 	}
 	if (!rss_key_len) {
-		rss_key_len = rss_hash_default_key_len;
+		rss_key_len = MLX5_RSS_HASH_KEY_LEN;
 		rss_key = rss_hash_default_key;
 	}
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	if (tunnel) {
-		qp_init_attr.comp_mask =
-			MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
-		qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
-	}
 	qp = mlx5_glue->dv_create_qp
 		(priv->ctx,
 		 &(struct ibv_qp_init_attr_ex){
@@ -1791,18 +1799,20 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 			.rx_hash_conf = (struct ibv_rx_hash_conf){
 				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
 				.rx_hash_key_len = rss_key_len ? rss_key_len :
-						   rss_hash_default_key_len,
+						   MLX5_RSS_HASH_KEY_LEN,
 				.rx_hash_key = rss_key ?
					       (void *)(uintptr_t)rss_key :
					       rss_hash_default_key,
-				.rx_hash_fields_mask = hash_fields |
-					(tunnel && rss_level > 1 ?
-					 (uint32_t)IBV_RX_HASH_INNER : 0),
+				.rx_hash_fields_mask = hash_fields,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
 			.pd = priv->pd,
 		 },
-		 &qp_init_attr);
+		 &(struct mlx5dv_qp_init_attr){
+			.comp_mask = tunnel ?
+				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS : 0,
+			.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS,
+		 });
 #else
 	qp = mlx5_glue->create_qp_ex
 		(priv->ctx,
@@ -1815,7 +1825,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 			.rx_hash_conf = (struct ibv_rx_hash_conf){
 				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
 				.rx_hash_key_len = rss_key_len ? rss_key_len :
-						   rss_hash_default_key_len,
+						   MLX5_RSS_HASH_KEY_LEN,
 				.rx_hash_key = rss_key ?
					       (void *)(uintptr_t)rss_key :
					       rss_hash_default_key,
@@ -1836,8 +1846,6 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 	hrxq->qp = qp;
 	hrxq->rss_key_len = rss_key_len;
 	hrxq->hash_fields = hash_fields;
-	hrxq->tunnel = tunnel;
-	hrxq->rss_level = rss_level;
 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
 	rte_atomic32_inc(&hrxq->refcnt);
 	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
@@ -1863,10 +1871,6 @@ error:
  *   first queue index will be taken for the indirection table.
  * @param queues_n
  *   Number of queues.
- * @param tunnel
- *   Tunnel type, implies tunnel offloading like inner checksum if available.
- * @param rss_level
- *   RSS hash on tunnel level
  *
  * @return
  *   A hash Rx queue on success.
@@ -1875,8 +1879,7 @@ struct mlx5_hrxq *
 mlx5_hrxq_get(struct rte_eth_dev *dev,
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n,
-	      uint32_t tunnel, uint32_t rss_level)
+	      const uint16_t *queues, uint32_t queues_n)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
@@ -1891,10 +1894,6 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
 			continue;
 		if (hrxq->hash_fields != hash_fields)
 			continue;
-		if (hrxq->tunnel != tunnel)
-			continue;
-		if (hrxq->rss_level != rss_level)
-			continue;
 		ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
 		if (!ind_tbl)
 			continue;
@@ -2138,7 +2137,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
 			 .rx_hash_conf = (struct ibv_rx_hash_conf){
 				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
-				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
 				.rx_hash_key = rss_hash_default_key,
 				.rx_hash_fields_mask = 0,
 			 },
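
Note on the rss_hash_default_key hunk: dropping the exported
rss_hash_default_key_len variable means every user now refers to the
MLX5_RSS_HASH_KEY_LEN constant, and the new static_assert pins the default
key array to that length at compile time, so the array and its advertised
size cannot silently drift apart. A self-contained sketch of the pattern
(KEY_LEN and key are illustrative stand-ins; the mlx5 key is 40 bytes):

#include <assert.h>	/* C11 static_assert macro */
#include <stdint.h>

#define KEY_LEN 4U	/* stand-in for MLX5_RSS_HASH_KEY_LEN */

static const uint8_t key[] = { 0x2c, 0xc6, 0x81, 0xd1 };

/* The build fails if someone edits the array without updating KEY_LEN. */
static_assert(sizeof(key) == KEY_LEN, "wrong RSS default key size.");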
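
Note on the mlx5_arm_cq() hunk: arming the CQ composes a 64-bit doorbell
value (sequence counter in the high word, CQ number in the low word) and
stores it to a UAR register. A 64-bit CPU performs that as one atomic
store, but a 32-bit CPU must issue two 32-bit stores, which concurrent
threads could interleave; hence rte_write64() is replaced by
mlx5_uar_write64() taking the uar_lock_cq that the mlx5_rxq_new() hunk
wires up only under #ifndef RTE_ARCH_64. A minimal sketch of the idea,
with a hypothetical uar_write64() helper and a plain pthread mutex
standing in for the driver's actual primitives:

#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t uar_lock = PTHREAD_MUTEX_INITIALIZER;

static inline void
uar_write64(uint64_t val, volatile void *addr)
{
#if UINTPTR_MAX > 0xffffffffu	/* 64-bit host */
	*(volatile uint64_t *)addr = val;	/* single atomic store */
#else	/* 32-bit host: the store is split, writers must serialize */
	pthread_mutex_lock(&uar_lock);
	((volatile uint32_t *)addr)[0] = (uint32_t)val;
	((volatile uint32_t *)addr)[1] = (uint32_t)(val >> 32);
	pthread_mutex_unlock(&uar_lock);
#endif
}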
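
Note on the mlx5_mprq_alloc_mp() hunk: rte_mempool_create_empty() refuses
a cache whose flush threshold (cache_size scaled by
CACHE_FLUSHTHRESH_MULTIPLIER, 1.5 in rte_mempool.c at the time) exceeds
the number of elements, and since that multiplier is private to the C
file, the patch rounds it up to the constant 2 and clamps the element
count instead. A standalone sketch of the arithmetic (MP_CACHE_SZ and MAX
are stand-ins for MLX5_MPRQ_MP_CACHE_SZ and RTE_MAX):

#include <stdio.h>

#define MP_CACHE_SZ 32U
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int
main(void)
{
	unsigned int rxqs_n = 1;	/* number of Rx queues */
	unsigned int desc = 2;		/* tiny descriptor ring */
	unsigned int obj_num;

	desc *= 4;	/* the driver oversizes the pool 4x */
	obj_num = desc + MP_CACHE_SZ * rxqs_n;	/* 8 + 32 = 40 elements */
	/* 40 < 32 * 1.5 = 48, so the mempool sanity check would fail;
	 * clamping to 2 * cache size keeps it safely above threshold. */
	obj_num = MAX(obj_num, MP_CACHE_SZ * 2);	/* -> 64 */
	printf("%u elements, cache %u\n", obj_num, MP_CACHE_SZ);
	return 0;
}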