net/qede: refactoring multi-queue implementation
[dpdk.git] / drivers / net / qede / qede_ethdev.c
index f8d9f7c..6380c2b 100644 (file)
@@ -294,7 +294,6 @@ static void
 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
 {
        rte_memcpy(&qdev->dev_info, info, sizeof(*info));
-       qdev->num_tc = qdev->dev_info.num_tc;
        qdev->ops = qed_ops;
 }
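(The removal of num_tc above reflects the wider theme of this patch: the per-queue fastpath element no longer carries a per-traffic-class txqs[] array or QEDE_FASTPATH_RX/TX type flags. A minimal sketch of the reworked element, assuming the actual definition in qede_ethdev.h, which is not part of these hunks:

	struct qede_fastpath {
		struct ecore_sb_info *sb_info;  /* status block for this queue pair */
		struct qede_rx_queue *rxq;      /* single RX queue per index */
		struct qede_tx_queue *txq;      /* single TX queue, was txqs[MAX_NUM_TC] */
	};

One rxq/txq pair per index is what lets the stats hunks further down iterate with plain for_each_rss()/for_each_tss() instead of filtering on a fastpath type.)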
 
@@ -509,6 +508,43 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
        /* ucast->assert_on_error = true; - For debug */
 }
 
+static int
+qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+                            enum qed_filter_rx_mode_type type)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_filter_accept_flags flags;
+
+       memset(&flags, 0, sizeof(flags));
+
+       flags.update_rx_mode_config = 1;
+       flags.update_tx_mode_config = 1;
+       flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+               ECORE_ACCEPT_MCAST_MATCHED |
+               ECORE_ACCEPT_BCAST;
+
+       flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+               ECORE_ACCEPT_MCAST_MATCHED |
+               ECORE_ACCEPT_BCAST;
+
+       if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+               flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+               if (IS_VF(edev)) {
+                       flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+                       DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+               }
+       } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+               flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+       } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+                               QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+               flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+                       ECORE_ACCEPT_MCAST_UNMATCHED;
+       }
+
+       return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+                       ECORE_SPQ_MODE_CB, NULL);
+}
+
 static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
                                    uint8_t clss, bool mode, bool mask)
 {
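(The new qed_configure_filter_rx_mode() helper above becomes the single entry point for RX-mode changes. As a usage sketch, the ethdev promiscuous callback elsewhere in this file is expected to call it roughly like this; the exact body is an assumption following the PMD's existing naming, not part of this diff:

	static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
	{
		enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

		/* Keep multicast-promisc if allmulticast is already enabled */
		if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
			type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

		qed_configure_filter_rx_mode(eth_dev, type);
	}

Callers OR'ing the two mode types together is why the helper explicitly handles the combined (MULTI_PROMISC | PROMISC) case.)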
@@ -971,66 +1007,151 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+static void qede_fastpath_start(struct ecore_dev *edev)
+{
+       struct ecore_hwfn *p_hwfn;
+       int i;
+
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               ecore_hw_start_fastpath(p_hwfn);
+       }
+}
+
+static int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+       PMD_INIT_FUNC_TRACE(edev);
+
+       /* Update MTU only if it has changed */
+       if (qdev->mtu != qdev->new_mtu) {
+               if (qede_update_mtu(eth_dev, qdev->new_mtu))
+                       goto err;
+               qdev->mtu = qdev->new_mtu;
+               /* If MTU has changed then update TPA too */
+               if (qdev->enable_lro)
+                       if (qede_enable_tpa(eth_dev, true))
+                               goto err;
+       }
+
+       /* Start queues */
+       if (qede_start_queues(eth_dev))
+               goto err;
+
+       /* Newer SR-IOV PF drivers expect RX/TX queues to be started before
+        * enabling RSS, so RSS configuration is deferred up to this point.
+        * We would like to retain similar behavior in the PF case, so no
+        * PF/VF-specific check is done here.
+        */
+       if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+               if (qede_config_rss(eth_dev))
+                       goto err;
+
+       /* Enable vport */
+       if (qede_activate_vport(eth_dev, true))
+               goto err;
+
+       /* Bring-up the link */
+       qede_dev_set_link_state(eth_dev, true);
+
+       /* Start/resume traffic */
+       qede_fastpath_start(edev);
+
+       DP_INFO(edev, "Device started\n");
+
+       return 0;
+err:
+       DP_ERR(edev, "Device start failed\n");
+       return -1; /* common error code is < 0 */
+}
+
+static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+       PMD_INIT_FUNC_TRACE(edev);
+
+       /* Disable vport */
+       if (qede_activate_vport(eth_dev, false))
+               return;
+
+       if (qdev->enable_lro)
+               qede_enable_tpa(eth_dev, false);
+
+       /* TODO: Do we need to disable LRO or RSS? */
+       /* Stop queues */
+       qede_stop_queues(eth_dev);
+
+       /* Disable traffic */
+       ecore_hw_stop_fastpath(edev); /* TBD - loop */
+
+       /* Bring the link down */
+       qede_dev_set_link_state(eth_dev, false);
+
+       DP_INFO(edev, "Device is stopped\n");
+}
+
 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 {
-       struct qede_dev *qdev = eth_dev->data->dev_private;
-       struct ecore_dev *edev = &qdev->edev;
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
-       int rc;
 
        PMD_INIT_FUNC_TRACE(edev);
 
        /* Check requirements for 100G mode */
        if (edev->num_hwfns > 1) {
                if (eth_dev->data->nb_rx_queues < 2 ||
-                   eth_dev->data->nb_tx_queues < 2) {
+                               eth_dev->data->nb_tx_queues < 2) {
                        DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
                        return -EINVAL;
                }
 
                if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
-                   (eth_dev->data->nb_tx_queues % 2 != 0)) {
+                               (eth_dev->data->nb_tx_queues % 2 != 0)) {
                        DP_ERR(edev,
-                                 "100G mode needs even no. of RX/TX queues\n");
+                                       "100G mode needs even no. of RX/TX queues\n");
                        return -EINVAL;
                }
        }
 
        /* Sanity checks and throw warnings */
-       if (rxmode->enable_scatter == 1)
+       if (rxmode->enable_scatter)
                eth_dev->data->scattered_rx = 1;
-
        if (!rxmode->hw_strip_crc)
                DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
-
        if (!rxmode->hw_ip_checksum)
                DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
-                             "in hw\n");
-
-       if (rxmode->enable_lro) {
-               qdev->enable_lro = true;
-               /* Enable scatter mode for LRO */
-               if (!rxmode->enable_scatter)
-                       eth_dev->data->scattered_rx = 1;
+                               "in hw\n");
+       if (rxmode->header_split)
+               DP_INFO(edev, "Header split enable is not supported\n");
+       if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
+                               ETH_MQ_RX_RSS)) {
+               DP_ERR(edev, "Unsupported multi-queue mode\n");
+               return -ENOTSUP;
        }
+       /* Flow director mode check */
+       if (qede_check_fdir_support(eth_dev))
+               return -ENOTSUP;
 
-       /* Check for the port restart case */
-       if (qdev->state != QEDE_DEV_INIT) {
-               qede_stop_vport(edev);
+       /* Deallocate resources if held previously. This is needed only if
+        * the queue count has changed from the previous configuration. If
+        * it is going to change, RX/TX queue setup will be called again
+        * and the fastpath pointers will be reinitialized there.
+        */
+       if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
+                       qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
                qede_dealloc_fp_resc(eth_dev);
+               /* Proceed with updated queue count */
+               qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+               qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+               if (qede_alloc_fp_resc(qdev))
+                       return -ENOMEM;
        }
 
-       qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
-       qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
-       qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
-
-       /* Fastpath status block should be initialized before sending
-        * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
-        */
-       rc = qede_alloc_fp_resc(qdev);
-       if (rc != 0)
-               return rc;
-
        /* VF's MTU has to be set using vport-start whereas
         * PF's MTU can be updated via vport-update.
         */
@@ -1045,35 +1166,23 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
        qdev->mtu = rxmode->max_rx_pkt_len;
        qdev->new_mtu = qdev->mtu;
 
-       if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
-           rxmode->mq_mode == ETH_MQ_RX_NONE)) {
-               DP_ERR(edev, "Unsupported RSS mode\n");
-               qede_stop_vport(edev);
-               qede_dealloc_fp_resc(eth_dev);
-               return -EINVAL;
-       }
-
-       /* Flow director mode check */
-       rc = qede_check_fdir_support(eth_dev);
-       if (rc) {
-               qede_stop_vport(edev);
-               qede_dealloc_fp_resc(eth_dev);
-               return -EINVAL;
+       /* Configure TPA parameters */
+       if (rxmode->enable_lro) {
+               if (qede_enable_tpa(eth_dev, true))
+                       return -EINVAL;
+               /* Enable scatter mode for LRO */
+               if (!rxmode->enable_scatter)
+                       eth_dev->data->scattered_rx = 1;
        }
-       SLIST_INIT(&qdev->fdir_info.fdir_list_head);
-
-       SLIST_INIT(&qdev->vlan_list_head);
+       qdev->enable_lro = rxmode->enable_lro;
 
        /* Enable VLAN offloads by default */
        qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-                                      ETH_VLAN_FILTER_MASK |
-                                      ETH_VLAN_EXTEND_MASK);
-
-       qdev->state = QEDE_DEV_CONFIG;
+                       ETH_VLAN_FILTER_MASK |
+                       ETH_VLAN_EXTEND_MASK);
 
-       DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
-               (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
-               qdev->num_tc);
+       DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
+                       QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
 
        return 0;
 }
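(With the guard above, fastpath resources now survive a reconfigure unless the queue counts actually change. From the application side, a sketch assuming a valid port_id and a populated struct rte_eth_conf conf:

	rte_eth_dev_configure(port_id, 4, 4, &conf); /* first call: fp resources allocated for 4+4 queues */
	rte_eth_dev_configure(port_id, 4, 4, &conf); /* same counts: existing fp resources are kept */
	rte_eth_dev_configure(port_id, 8, 8, &conf); /* counts changed: dealloc, then realloc for 8+8 */
)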
@@ -1264,38 +1373,38 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       uint8_t i;
 
        PMD_INIT_FUNC_TRACE(edev);
 
-       qede_fdir_dealloc_resc(eth_dev);
-
        /* dev_stop() shall clean up fp resources in hw but without releasing
         * dma memories and sw structures so that dev_start() can be called
         * by the app without reconfiguration. However, in dev_close() we
         * can release all the resources and the device can be brought up anew
         */
-       if (qdev->state != QEDE_DEV_STOP)
+       if (eth_dev->data->dev_started)
                qede_dev_stop(eth_dev);
-       else
-               DP_INFO(edev, "Device is already stopped\n");
 
        qede_stop_vport(edev);
-
+       qede_fdir_dealloc_resc(eth_dev);
        qede_dealloc_fp_resc(eth_dev);
 
-       qdev->ops->common->slowpath_stop(edev);
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+               if (eth_dev->data->rx_queues[i])
+                       eth_dev->data->rx_queues[i] = NULL;
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+               if (eth_dev->data->tx_queues[i])
+                       eth_dev->data->tx_queues[i] = NULL;
+       eth_dev->data->nb_rx_queues = 0;
+       eth_dev->data->nb_tx_queues = 0;
 
+       qdev->ops->common->slowpath_stop(edev);
        qdev->ops->common->remove(edev);
-
        rte_intr_disable(&pci_dev->intr_handle);
-
        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                     qede_interrupt_handler, (void *)eth_dev);
-
        if (edev->num_hwfns > 1)
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
-
-       qdev->state = QEDE_DEV_INIT; /* Go back to init state */
 }
 
 static void
@@ -1308,7 +1417,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
        unsigned int rxq_stat_cntrs, txq_stat_cntrs;
        struct qede_tx_queue *txq;
 
-       qdev->ops->get_vport_stats(edev, &stats);
+       ecore_get_vport_stats(edev, &stats);
 
        /* RX Stats */
        eth_stats->ipackets = stats.rx_ucast_pkts +
@@ -1350,38 +1459,34 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
                       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
                       " appropriately and retry.\n");
 
-       for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
-               if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
-                       eth_stats->q_ipackets[i] =
-                               *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[(qid)].rxq)) +
-                                       offsetof(struct qede_rx_queue,
-                                       rcv_pkts));
-                       eth_stats->q_errors[i] =
-                               *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[(qid)].rxq)) +
-                                       offsetof(struct qede_rx_queue,
-                                       rx_hw_errors)) +
-                               *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[(qid)].rxq)) +
-                                       offsetof(struct qede_rx_queue,
-                                       rx_alloc_errors));
-                       i++;
-               }
+       for_each_rss(qid) {
+               eth_stats->q_ipackets[i] =
+                       *(uint64_t *)(
+                               ((char *)(qdev->fp_array[qid].rxq)) +
+                               offsetof(struct qede_rx_queue,
+                               rcv_pkts));
+               eth_stats->q_errors[i] =
+                       *(uint64_t *)(
+                               ((char *)(qdev->fp_array[qid].rxq)) +
+                               offsetof(struct qede_rx_queue,
+                               rx_hw_errors)) +
+                       *(uint64_t *)(
+                               ((char *)(qdev->fp_array[qid].rxq)) +
+                               offsetof(struct qede_rx_queue,
+                               rx_alloc_errors));
+               i++;
                if (i == rxq_stat_cntrs)
                        break;
        }
 
-       for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
-               if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
-                       txq = qdev->fp_array[(qid)].txqs[0];
-                       eth_stats->q_opackets[j] =
-                               *((uint64_t *)(uintptr_t)
-                                       (((uint64_t)(uintptr_t)(txq)) +
-                                        offsetof(struct qede_tx_queue,
-                                                 xmit_pkts)));
-                       j++;
-               }
+       for_each_tss(qid) {
+               txq = qdev->fp_array[qid].txq;
+               eth_stats->q_opackets[j] =
+                       *((uint64_t *)(uintptr_t)
+                               (((uint64_t)(uintptr_t)(txq)) +
+                                offsetof(struct qede_tx_queue,
+                                         xmit_pkts)));
+               j++;
                if (j == txq_stat_cntrs)
                        break;
        }
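(The for_each_rss()/for_each_tss() iterators used in the loops above are introduced by this patch in qede_ethdev.h, which is not shown in these hunks. A sketch of what they are assumed to expand to, given the num_rx_queues/num_tx_queues counters set up in qede_dev_configure():

	#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
	#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)

Like the existing for_each_queue()/for_each_hwfn() helpers, they rely on a suitably named device pointer being in scope at the call site.)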
@@ -1445,7 +1550,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        if (n < num)
                return num;
 
-       qdev->ops->get_vport_stats(edev, &stats);
+       ecore_get_vport_stats(edev, &stats);
 
        for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
                xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
@@ -1457,10 +1562,10 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (qid = 0; qid < rxq_stat_cntrs; qid++) {
-               if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+               if (qdev->fp_array[qid].rxq) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                                xstats[stat_idx].value = *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[(qid)].rxq)) +
+                                       ((char *)(qdev->fp_array[qid].rxq)) +
                                         qede_rxq_xstats_strings[i].offset);
                                xstats[stat_idx].id = stat_idx;
                                stat_idx++;
@@ -1878,6 +1983,8 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
@@ -1911,19 +2018,17 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        rte_delay_ms(1000);
        qdev->mtu = mtu;
        /* Fix up RX buf size for all queues of the port */
-       for_each_queue(i) {
+       for_each_rss(i) {
                fp = &qdev->fp_array[i];
-               if (fp->type & QEDE_FASTPATH_RX) {
-                       bufsz = (uint16_t)rte_pktmbuf_data_room_size(
-                               fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
-                       if (dev->data->scattered_rx)
-                               rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
-                       else
-                               rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
-                       rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
-                       fp->rxq->rx_buf_size = rx_buf_size;
-                       DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
-               }
+               bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+                       fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+               if (dev->data->scattered_rx)
+                       rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+               else
+                       rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+               rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+               fp->rxq->rx_buf_size = rx_buf_size;
+               DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
        }
        qede_dev_start(dev);
        if (frame_size > ETHER_MAX_LEN)
@@ -2350,20 +2455,15 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        }
 
        DP_INFO(edev, "Starting qede probe\n");
-
-       rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
-                                   dp_module, dp_level, is_vf);
-
+       rc = qed_ops->common->probe(edev, pci_dev, dp_module,
+                                   dp_level, is_vf);
        if (rc != 0) {
                DP_ERR(edev, "qede probe failed rc %d\n", rc);
                return -ENODEV;
        }
-
        qede_update_pf_params(edev);
-
        rte_intr_callback_register(&pci_dev->intr_handle,
                                   qede_interrupt_handler, (void *)eth_dev);
-
        if (rte_intr_enable(&pci_dev->intr_handle)) {
                DP_ERR(edev, "rte_intr_enable() failed\n");
                return -ENODEV;
@@ -2476,7 +2576,11 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                do_once = false;
        }
 
-       adapter->state = QEDE_DEV_INIT;
+       adapter->num_tx_queues = 0;
+       adapter->num_rx_queues = 0;
+       SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+       SLIST_INIT(&adapter->vlan_list_head);
+       SLIST_INIT(&adapter->uc_list_head);
        adapter->mtu = ETHER_MTU;
        adapter->new_mtu = ETHER_MTU;
        if (!is_vf)
@@ -2491,7 +2595,9 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                  adapter->primary_mac.addr_bytes[4],
                  adapter->primary_mac.addr_bytes[5]);
 
-       return rc;
+       DP_INFO(edev, "Device initialized\n");
+
+       return 0;
 }
 
 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)