net/dpaa: report error on using deferred start
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 6c94fd3..ea178f4 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -45,6 +45,7 @@
 #include <fsl_qman.h>
 #include <fsl_bman.h>
 #include <fsl_fman.h>
+#include <process.h>
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
@@ -131,6 +132,11 @@ static struct rte_dpaa_driver rte_dpaa_pmd;
 static int
 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
 
+static int dpaa_eth_link_update(struct rte_eth_dev *dev,
+                               int wait_to_complete __rte_unused);
+
+static void dpaa_interrupt_handler(void *param);
+
 static inline void
 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
 {
@@ -195,9 +201,19 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint64_t tx_offloads = eth_conf->txmode.offloads;
+       struct rte_device *rdev = dev->device;
+       struct rte_dpaa_device *dpaa_dev;
+       struct fman_if *fif = dev->process_private;
+       struct __fman_if *__fif;
+       struct rte_intr_handle *intr_handle;
+       int ret;
 
        PMD_INIT_FUNC_TRACE();
 
+       dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
+       intr_handle = &dpaa_dev->intr_handle;
+       __fif = container_of(fif, struct __fman_if, __if);
+
        /* Rx offloads which are enabled by default */
        if (dev_rx_offloads_nodis & ~rx_offloads) {
                DPAA_PMD_INFO(
@@ -241,6 +257,28 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
                dev->data->scattered_rx = 1;
        }
 
+       /* if interrupts were configured on this device */
+       if (intr_handle && intr_handle->fd) {
+               if (dev->data->dev_conf.intr_conf.lsc != 0)
+                       rte_intr_callback_register(intr_handle,
+                                          dpaa_interrupt_handler,
+                                          (void *)dev);
+
+               ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
+               if (ret) {
+                       if (dev->data->dev_conf.intr_conf.lsc != 0) {
+                               rte_intr_callback_unregister(intr_handle,
+                                       dpaa_interrupt_handler,
+                                       (void *)dev);
+                               if (ret == EINVAL)
+                                       DPAA_PMD_ERR("Failed to enable interrupt: Not Supported");
+                               else
+                                       DPAA_PMD_ERR("Failed to enable interrupt");
+                       }
+                       dev->data->dev_conf.intr_conf.lsc = 0;
+                       dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+               }
+       }
        return 0;
 }
 
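With this hunk, dpaa_eth_dev_configure() registers an interrupt handler and
enables the FMan link interrupt whenever the application asked for
link-status-change (LSC) events. A minimal sketch of the application side
that exercises this path (a fragment, assuming rte_ethdev.h, an initialized
EAL, and a placeholder port_id; queue counts are illustrative):

    struct rte_eth_conf port_conf;

    memset(&port_conf, 0, sizeof(port_conf));
    port_conf.intr_conf.lsc = 1;    /* request LSC interrupts from the PMD */
    if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
        rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);

Note the fallback when dpaa_intr_enable() fails: the driver clears
intr_conf.lsc and the RTE_ETH_DEV_INTR_LSC flag, so the rest of the driver
behaves as if interrupts were never requested.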
@@ -269,6 +307,25 @@ dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
        return NULL;
 }
 
+static void dpaa_interrupt_handler(void *param)
+{
+       struct rte_eth_dev *dev = param;
+       struct rte_device *rdev = dev->device;
+       struct rte_dpaa_device *dpaa_dev;
+       struct rte_intr_handle *intr_handle;
+       uint64_t buf;
+       int bytes_read;
+
+       dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
+       intr_handle = &dpaa_dev->intr_handle;
+
+       bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
+       if (bytes_read < 0)
+               DPAA_PMD_ERR("Error reading eventfd");
+       dpaa_eth_link_update(dev, 0);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
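dpaa_interrupt_handler() drains the kernel eventfd, refreshes the cached
link state, and fires the standard LSC callback. On the application side
this surfaces through rte_eth_dev_callback_register(); a hedged sketch,
assuming a configured port_id:

    static int
    lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
                 void *cb_arg __rte_unused, void *ret_param __rte_unused)
    {
        struct rte_eth_link link;

        if (event == RTE_ETH_EVENT_INTR_LSC) {
            rte_eth_link_get_nowait(port_id, &link);
            printf("Port %u link is %s\n", port_id,
                   link.link_status ? "up" : "down");
        }
        return 0;
    }

    /* after rte_eth_dev_configure(): */
    rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                  lsc_event_cb, NULL);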
@@ -298,9 +355,27 @@ static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 
 static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
 {
+       struct fman_if *fif = dev->process_private;
+       struct __fman_if *__fif;
+       struct rte_device *rdev = dev->device;
+       struct rte_dpaa_device *dpaa_dev;
+       struct rte_intr_handle *intr_handle;
+
        PMD_INIT_FUNC_TRACE();
 
+       dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
+       intr_handle = &dpaa_dev->intr_handle;
+       __fif = container_of(fif, struct __fman_if, __if);
+
        dpaa_eth_dev_stop(dev);
+
+       if (intr_handle && intr_handle->fd &&
+           dev->data->dev_conf.intr_conf.lsc != 0) {
+               dpaa_intr_disable(__fif->node_name);
+               rte_intr_callback_unregister(intr_handle,
+                                            dpaa_interrupt_handler,
+                                            (void *)dev);
+       }
 }
 
 static int
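The close path mirrors configure: it disables the FMan interrupt and
unregisters the handler before the device goes away, so the usual shutdown
sequence is enough on the application side:

    rte_eth_dev_stop(port_id);
    rte_eth_dev_close(port_id);   /* also tears down the LSC interrupt */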
@@ -382,12 +457,81 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int
+dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
+                       __rte_unused uint16_t queue_id,
+                       struct rte_eth_burst_mode *mode)
+{
+       struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+       int ret = -EINVAL;
+       unsigned int i;
+       const struct burst_info {
+               uint64_t flags;
+               const char *output;
+       } rx_offload_map[] = {
+                       {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
+                       {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
+                       {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+                       {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+                       {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+                       {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+       };
+
+       /* Update Rx offload info */
+       for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
+               if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
+                       snprintf(mode->info, sizeof(mode->info), "%s",
+                               rx_offload_map[i].output);
+                       ret = 0;
+                       break;
+               }
+       }
+       return ret;
+}
+
+static int
+dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
+                       __rte_unused uint16_t queue_id,
+                       struct rte_eth_burst_mode *mode)
+{
+       struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+       int ret = -EINVAL;
+       unsigned int i;
+       const struct burst_info {
+               uint64_t flags;
+               const char *output;
+       } tx_offload_map[] = {
+                       {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+                       {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+                       {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+                       {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+                       {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+                       {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+                       {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+       };
+
+       /* Update Tx offload info */
+       for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
+               if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
+                       snprintf(mode->info, sizeof(mode->info), "%s",
+                               tx_offload_map[i].output);
+                       ret = 0;
+                       break;
+               }
+       }
+       return ret;
+}
+
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete __rte_unused)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct rte_eth_link *link = &dev->data->dev_link;
        struct fman_if *fif = dev->process_private;
+       struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
+       int ret;
 
        PMD_INIT_FUNC_TRACE();
 
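The two new burst-mode callbacks report the first enabled Rx/Tx offload
through the generic query API (note the early break: only one flag is
reported per query, mirroring other NXP PMDs). Roughly how an application
reads it back, assuming a configured port_id:

    struct rte_eth_burst_mode mode;

    if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
        printf("Rx burst mode:%s\n", mode.info);
    if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
        printf("Tx burst mode:%s\n", mode.info);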
@@ -401,9 +545,20 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
                DPAA_PMD_ERR("invalid link_speed: %s, %d",
                             dpaa_intf->name, fif->mac_type);
 
-       link->link_status = dpaa_intf->valid;
+       if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+               ret = dpaa_get_link_status(__fif->node_name);
+               if (ret < 0)
+                       return ret;
+               link->link_status = ret;
+       } else {
+               link->link_status = dpaa_intf->valid;
+       }
+
        link->link_duplex = ETH_LINK_FULL_DUPLEX;
        link->link_autoneg = ETH_LINK_AUTONEG;
+
+       DPAA_PMD_INFO("Port %d Link is %s", dev->data->port_id,
+                     link->link_status ? "Up" : "Down");
        return 0;
 }
 
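With LSC enabled, link update now asks the kernel for the real carrier state
via dpaa_get_link_status() instead of reporting the static "interface valid"
flag. Applications keep using the usual query; a sketch:

    struct rte_eth_link link;

    memset(&link, 0, sizeof(link));
    rte_eth_link_get_nowait(port_id, &link);  /* lands in dpaa_eth_link_update() */
    printf("speed %u Mbps, %s\n", link.link_speed,
           link.link_status ? "up" : "down");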
@@ -568,7 +723,7 @@ static
 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                            uint16_t nb_desc,
                            unsigned int socket_id __rte_unused,
-                           const struct rte_eth_rxconf *rx_conf __rte_unused,
+                           const struct rte_eth_rxconf *rx_conf,
                            struct rte_mempool *mp)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
@@ -588,6 +743,12 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                return -rte_errno;
        }
 
+       /* Rx deferred start is not supported */
+       if (rx_conf->rx_deferred_start) {
+               DPAA_PMD_ERR("%p: Rx deferred start not supported", (void *)dev);
+               return -EINVAL;
+       }
+
        DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
                        queue_idx, rxq->fqid);
 
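This hunk (and its Tx twin below) is the behaviour change named in the
subject: a deferred-start request is now rejected at queue setup time rather
than silently ignored, since this PMD does not support starting queues after
device start. A minimal sketch of the now-failing call, assuming a
configured port_id and a mempool mp:

    struct rte_eth_rxconf rxconf;

    memset(&rxconf, 0, sizeof(rxconf));
    rxconf.rx_deferred_start = 1;   /* not supported by the dpaa PMD */
    ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
                                 &rxconf, mp);
    /* ret == -EINVAL; previously the flag was accepted and ignored */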
@@ -851,12 +1012,17 @@ static
 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                            uint16_t nb_desc __rte_unused,
                unsigned int socket_id __rte_unused,
-               const struct rte_eth_txconf *tx_conf __rte_unused)
+               const struct rte_eth_txconf *tx_conf)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
        PMD_INIT_FUNC_TRACE();
 
+       /* Tx deferred start is not supported */
+       if (tx_conf->tx_deferred_start) {
+               DPAA_PMD_ERR("%p: Tx deferred start not supported", (void *)dev);
+               return -EINVAL;
+       }
        if (queue_idx >= dev->data->nb_tx_queues) {
                rte_errno = EOVERFLOW;
                DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
@@ -894,17 +1060,33 @@ dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 static int dpaa_link_down(struct rte_eth_dev *dev)
 {
+       struct fman_if *fif = dev->process_private;
+       struct __fman_if *__fif;
+
        PMD_INIT_FUNC_TRACE();
 
-       dpaa_eth_dev_stop(dev);
+       __fif = container_of(fif, struct __fman_if, __if);
+
+       if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+               dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+       else
+               dpaa_eth_dev_stop(dev);
        return 0;
 }
 
 static int dpaa_link_up(struct rte_eth_dev *dev)
 {
+       struct fman_if *fif = dev->process_private;
+       struct __fman_if *__fif;
+
        PMD_INIT_FUNC_TRACE();
 
-       dpaa_eth_dev_start(dev);
+       __fif = container_of(fif, struct __fman_if, __if);
+
+       if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+               dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+       else
+               dpaa_eth_dev_start(dev);
        return 0;
 }
 
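When the LSC flag is set, set-link-down/up becomes a true administrative
carrier toggle through dpaa_update_link_status() instead of a heavyweight
stop/start of the whole port. The application-facing calls are unchanged:

    rte_eth_dev_set_link_down(port_id);  /* carrier off, queues keep their config */
    rte_eth_dev_set_link_up(port_id);    /* carrier back on */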
@@ -1078,7 +1260,8 @@ static struct eth_dev_ops dpaa_devops = {
        .rx_queue_release         = dpaa_eth_rx_queue_release,
        .tx_queue_release         = dpaa_eth_tx_queue_release,
        .rx_queue_count           = dpaa_dev_rx_queue_count,
-
+       .rx_burst_mode_get        = dpaa_dev_rx_burst_mode_get,
+       .tx_burst_mode_get        = dpaa_dev_tx_burst_mode_get,
        .flow_ctrl_get            = dpaa_flow_ctrl_get,
        .flow_ctrl_set            = dpaa_flow_ctrl_set,
 
@@ -1707,7 +1890,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
                is_global_init = 1;
        }
 
-       if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+       if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
                ret = rte_dpaa_portal_init((void *)1);
                if (ret) {
                        DPAA_PMD_ERR("Unable to initialize portal");
@@ -1734,6 +1917,9 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
 
        qman_ern_register_cb(dpaa_free_mbuf);
 
+       if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
+               eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+
        /* Invoke PMD device initialization function */
        diag = dpaa_dev_init(eth_dev);
        if (diag == 0) {
@@ -1761,6 +1947,7 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
 }
 
 static struct rte_dpaa_driver rte_dpaa_pmd = {
+       .drv_flags = RTE_DPAA_DRV_INTR_LSC,
        .drv_type = FSL_DPAA_ETH,
        .probe = rte_dpaa_probe,
        .remove = rte_dpaa_remove,