net/mlx5: optimize Rx queue creation
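
Summary of the change (as shown by the hunks below): the per-queue private
structure in mlx5_rx_queue_setup() is now filled in and linked to its owner
list only after the control structure is known to exist, so the failure path
only has to free the private structure instead of also rolling back
(*priv->rxq_privs)[idx]. mlx5_rxq_new() takes the queue index instead of the
private structure and no longer touches per-queue private data; the shared
Rx queue fields are set together with the rest of the queue fields at the end
of that function. FCS stripping capability is read from the shared device
context (priv->sh->config.hw_fcs_strip), and the per-port configuration is
referenced through struct mlx5_port_config.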
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index bcb0401..809006f 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -359,14 +359,13 @@ uint64_t
 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
        uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
                             RTE_ETH_RX_OFFLOAD_TIMESTAMP |
                             RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
-       if (!config->mprq.enabled)
+       if (!priv->config.mprq.enabled)
                offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
-       if (config->hw_fcs_strip)
+       if (priv->sh->config.hw_fcs_strip)
                offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
        if (priv->sh->dev_cap.hw_csum)
                offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
@@ -910,25 +909,23 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                rte_errno = ENOMEM;
                return -rte_errno;
        }
-       rxq->priv = priv;
-       rxq->idx = idx;
-       (*priv->rxq_privs)[idx] = rxq;
-       if (rxq_ctrl != NULL) {
-               /* Join owner list. */
-               LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
-               rxq->ctrl = rxq_ctrl;
-       } else {
-               rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,
+       if (rxq_ctrl == NULL) {
+               rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
                                        n_seg);
                if (rxq_ctrl == NULL) {
                        DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
                                dev->data->port_id, idx);
                        mlx5_free(rxq);
-                       (*priv->rxq_privs)[idx] = NULL;
                        rte_errno = ENOMEM;
                        return -rte_errno;
                }
        }
+       rxq->priv = priv;
+       rxq->idx = idx;
+       (*priv->rxq_privs)[idx] = rxq;
+       /* Join owner list. */
+       LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
+       rxq->ctrl = rxq_ctrl;
        mlx5_rxq_ref(dev, idx);
        DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
                dev->data->port_id, idx);
@@ -1563,7 +1560,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                  uint32_t *actual_log_stride_size)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_port_config *config = &priv->config;
        struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
        uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num;
        uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num;
@@ -1662,8 +1659,8 @@ unsupport:
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param rxq
- *   RX queue private data.
+ * @param idx
+ *   RX queue index.
  * @param desc
  *   Number of descriptors to configure in queue.
  * @param socket
@@ -1673,16 +1670,14 @@ unsupport:
  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
  */
 struct mlx5_rxq_ctrl *
-mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
-            uint16_t desc,
+mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
             unsigned int socket, const struct rte_eth_rxconf *conf,
             const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
 {
-       uint16_t idx = rxq->idx;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *tmpl;
        unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
-       struct mlx5_dev_config *config = &priv->config;
+       struct mlx5_port_config *config = &priv->config;
        uint64_t offloads = conf->offloads |
                           dev->data->dev_conf.rxmode.offloads;
        unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
@@ -1721,14 +1716,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
                return NULL;
        }
        LIST_INIT(&tmpl->owners);
-       if (conf->share_group > 0) {
-               tmpl->rxq.shared = 1;
-               tmpl->share_group = conf->share_group;
-               tmpl->share_qid = conf->share_qid;
-               LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
-       }
-       rxq->ctrl = tmpl;
-       LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
        MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
        /*
         * Save the original segment configuration in the shared queue
@@ -1896,7 +1883,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
        tmpl->rxq.crc_present = 0;
        tmpl->rxq.lro = lro_on_queue;
        if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
-               if (config->hw_fcs_strip) {
+               if (priv->sh->config.hw_fcs_strip) {
                        /*
                         * RQs used for LRO-enabled TIRs should not be
                         * configured to scatter the FCS.
@@ -1935,6 +1922,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
        tmpl->rxq.mprq_bufs =
                (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
        tmpl->rxq.idx = idx;
+       if (conf->share_group > 0) {
+               tmpl->rxq.shared = 1;
+               tmpl->share_group = conf->share_group;
+               tmpl->share_qid = conf->share_qid;
+               LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+       }
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
 error:
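
For reference, the reworked setup order in mlx5_rx_queue_setup() can be read
as the following simplified fragment, assembled from the hunks above. It is
illustrative only, not the literal driver code: the allocation of 'rxq' and
the lookup of an existing 'rxq_ctrl' happen earlier in the function and are
elided here.

	if (rxq_ctrl == NULL) {
		/* No control structure exists yet for this index: create one. */
		rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
					n_seg);
		if (rxq_ctrl == NULL) {
			DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
				dev->data->port_id, idx);
			/* Only the private structure needs to be rolled back now. */
			mlx5_free(rxq);
			rte_errno = ENOMEM;
			return -rte_errno;
		}
	}
	/* Attach the private data once the control structure is guaranteed. */
	rxq->priv = priv;
	rxq->idx = idx;
	(*priv->rxq_privs)[idx] = rxq;
	/* Join owner list. */
	LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
	rxq->ctrl = rxq_ctrl;
	mlx5_rxq_ref(dev, idx);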