net/dpaa: fix event queue detach
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index e49f765..e5a072c 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -195,6 +195,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint64_t tx_offloads = eth_conf->txmode.offloads;
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct rte_device *rdev = dev->device;
        struct rte_eth_link *link = &dev->data->dev_link;
        struct rte_dpaa_device *dpaa_dev;
@@ -203,7 +204,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
        struct rte_intr_handle *intr_handle;
        uint32_t max_rx_pktlen;
        int speed, duplex;
-       int ret;
+       int ret, rx_status;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -211,6 +212,16 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
        intr_handle = dpaa_dev->intr_handle;
        __fif = container_of(fif, struct __fman_if, __if);
 
+       /* Check if interface is enabled in case of shared MAC */
+       if (fif->is_shared_mac) {
+               rx_status = fman_if_get_rx_status(fif);
+               if (!rx_status) {
+                       DPAA_PMD_ERR("%s Interface not enabled in kernel!",
+                                    dpaa_intf->name);
+                       return -EHOSTDOWN;
+               }
+       }
+
        /* Rx offloads which are enabled by default */
        if (dev_rx_offloads_nodis & ~rx_offloads) {
                DPAA_PMD_INFO(
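With a shared MAC the kernel administers the interface, so dev_configure() now fails with -EHOSTDOWN until the Linux interface is brought up. A rough caller-side sketch of how that surfaces to an application (configure_port() and the queue counts are illustrative, not part of this patch):

#include <rte_ethdev.h>
#include <rte_log.h>

/* Illustrative only: translate the new -EHOSTDOWN return into a hint
 * that the shared-MAC interface must be enabled on the kernel side first.
 */
static int
configure_port(uint16_t port_id, const struct rte_eth_conf *conf)
{
        int ret = rte_eth_dev_configure(port_id, 1, 1, conf);

        if (ret == -EHOSTDOWN)
                RTE_LOG(ERR, EAL, "port %u: kernel interface not enabled\n",
                        port_id);
        return ret;
}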
@@ -463,8 +474,7 @@ static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
        }
 
        /* release configuration memory */
-       if (dpaa_intf->fc_conf)
-               rte_free(dpaa_intf->fc_conf);
+       rte_free(dpaa_intf->fc_conf);
 
        /* Release RX congestion Groups */
        if (dpaa_intf->cgr_rx) {
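The NULL check dropped above is redundant because rte_free(), like free(), is a no-op on a NULL pointer; a tiny illustration (release_conf() is a hypothetical helper):

#include <rte_malloc.h>

/* Hypothetical helper: rte_free() does nothing when passed NULL, so no
 * guard is needed before releasing optional configuration memory.
 */
static void
release_conf(void **cfg)
{
        rte_free(*cfg); /* safe even if *cfg == NULL */
        *cfg = NULL;
}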
@@ -1030,7 +1040,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                   QM_FQCTRL_CTXASTASHING |
                                   QM_FQCTRL_PREFERINCACHE;
                opts.fqd.context_a.stashing.exclusive = 0;
-               /* In muticore scenario stashing becomes a bottleneck on LS1046.
+               /* In multicore scenario stashing becomes a bottleneck on LS1046.
                 * So do not enable stashing in this case
                 */
                if (dpaa_svr_family != SVR_LS1046A_FAMILY)
@@ -1201,23 +1211,17 @@ int
 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
                int eth_rx_queue_id)
 {
-       struct qm_mcc_initfq opts;
+       struct qm_mcc_initfq opts = {0};
        int ret;
        u32 flags = 0;
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
 
-       dpaa_poll_queue_default_config(&opts);
-
-       if (dpaa_intf->cgr_rx) {
-               opts.we_mask |= QM_INITFQ_WE_CGID;
-               opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
-               opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
-       }
-
+       qman_retire_fq(rxq, NULL);
+       qman_oos_fq(rxq);
        ret = qman_init_fq(rxq, flags, &opts);
        if (ret) {
-               DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
+               DPAA_PMD_ERR("detach rx fqid %d failed with ret: %d",
                             rxq->fqid, ret);
        }
 
@@ -1744,6 +1748,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
        /* no tx-confirmation */
        opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
        opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+       if (fman_ip_rev >= FMAN_V3) {
+               /* Set B0V bit in contextA to set ASPID to 0 */
+               opts.fqd.context_a.hi |= 0x04000000;
+       }
        DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
 
        if (cgr_tx) {
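The FMan v3 branch only adds one bit on top of the existing contextA setup; a restatement with the bit meanings spelled out (set_tx_context_a() is hypothetical; the constants and their meaning are taken from the hunk and its comments):

#include <stdbool.h>
#include <stdint.h>
#include <fsl_qman.h>

/* Hypothetical restatement of the Tx FQD contextA setup above. */
static void
set_tx_context_a(struct qm_mcc_initfq *opts, uint32_t dealloc_hi,
                 uint32_t dealloc_lo, bool is_fman_v3)
{
        opts->fqd.context_a.hi = 0x80000000 | dealloc_hi; /* no tx-confirmation */
        opts->fqd.context_a.lo = dealloc_lo;
        if (is_fman_v3)
                opts->fqd.context_a.hi |= 0x04000000; /* B0V: ASPID forced to 0 */
}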
@@ -1866,7 +1874,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 
        dpaa_intf->name = dpaa_device->name;
 
-       /* save fman_if & cfg in the interface struture */
+       /* save fman_if & cfg in the interface structure */
        eth_dev->process_private = fman_intf;
        dpaa_intf->ifid = dev_id;
        dpaa_intf->cfg = cfg;
@@ -2169,7 +2177,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
                if (dpaa_svr_family == SVR_LS1043A_FAMILY)
                        dpaa_push_mode_max_queue = 0;
 
-               /* if push mode queues to be enabled. Currenly we are allowing
+               /* if push mode queues to be enabled. Currently we are allowing
                 * only one queue per thread.
                 */
                if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
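DPAA_PUSH_QUEUES_NUMBER caps how many Rx queues use push mode, at most one per thread. The body of the guard is not shown in this hunk, so the following is only a generic illustration of the env-override pattern (function name and clamping bound are assumptions, not the elided code):

#include <stdlib.h>

/* Hypothetical illustration: read an optional env override and clamp it
 * to a driver limit; not the actual body elided from the hunk above.
 */
static int
push_queue_limit_from_env(int driver_limit)
{
        const char *val = getenv("DPAA_PUSH_QUEUES_NUMBER");
        int n = val ? atoi(val) : driver_limit;

        return (n < 0 || n > driver_limit) ? driver_limit : n;
}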