net/bnxt: fix flow check for null spec and mask
[dpdk.git] / drivers / net / axgbe / axgbe_ethdev.c
index b269a5e..d1f160e 100644 (file)
 
 static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int  axgbe_dev_configure(struct rte_eth_dev *dev);
+static int  axgbe_dev_start(struct rte_eth_dev *dev);
+static void axgbe_dev_stop(struct rte_eth_dev *dev);
 static void axgbe_dev_interrupt_handler(void *param);
 static void axgbe_dev_close(struct rte_eth_dev *dev);
-static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int axgbe_dev_link_update(struct rte_eth_dev *dev,
+                                int wait_to_complete);
+static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
+                               struct rte_eth_stats *stats);
+static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static int  axgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
 
 /* The set of PCI devices this driver supports */
@@ -38,6 +50,7 @@ static struct axgbe_version_data axgbe_v2a = {
        .tx_tstamp_workaround           = 1,
        .ecc_support                    = 1,
        .i2c_support                    = 1,
+       .an_cdr_workaround              = 1,
 };
 
 static struct axgbe_version_data axgbe_v2b = {
@@ -49,6 +62,7 @@ static struct axgbe_version_data axgbe_v2b = {
        .tx_tstamp_workaround           = 1,
        .ecc_support                    = 1,
        .i2c_support                    = 1,
+       .an_cdr_workaround              = 1,
 };
 
 static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -64,7 +78,17 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
 };
 
 static const struct eth_dev_ops axgbe_eth_dev_ops = {
+       .dev_configure        = axgbe_dev_configure,
+       .dev_start            = axgbe_dev_start,
+       .dev_stop             = axgbe_dev_stop,
        .dev_close            = axgbe_dev_close,
+       .promiscuous_enable   = axgbe_dev_promiscuous_enable,
+       .promiscuous_disable  = axgbe_dev_promiscuous_disable,
+       .allmulticast_enable  = axgbe_dev_allmulticast_enable,
+       .allmulticast_disable = axgbe_dev_allmulticast_disable,
+       .link_update          = axgbe_dev_link_update,
+       .stats_get            = axgbe_dev_stats_get,
+       .stats_reset          = axgbe_dev_stats_reset,
        .dev_infos_get        = axgbe_dev_info_get,
        .rx_queue_setup       = axgbe_dev_rx_queue_setup,
        .rx_queue_release     = axgbe_dev_rx_queue_release,
@@ -72,6 +96,13 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
        .tx_queue_release     = axgbe_dev_tx_queue_release,
 };
 
+/* Reset the PHY and invalidate the cached link state.
+ * phy_link/phy_speed are cleared so the next phy_status() poll
+ * re-evaluates them; returns the phy_if.phy_reset() result.
+ */
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+       pdata->phy_link = -1;
+       pdata->phy_speed = SPEED_UNKNOWN;
+       return pdata->phy_if.phy_reset(pdata);
+}
+
 /*
  * Interrupt handler triggered by NIC  for handling
  * specific interrupt.
@@ -89,11 +120,113 @@ axgbe_dev_interrupt_handler(void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct axgbe_port *pdata = dev->data->dev_private;
+       unsigned int dma_isr, dma_ch_isr;
 
+       /* Service the PHY autonegotiation interrupt first */
        pdata->phy_if.an_isr(pdata);
+       /* DMA related interrupts */
+       dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
+       if (dma_isr) {
+               /* Only DMA channel 0 (Rx queue 0) is serviced here:
+                * read its status register and write the value back,
+                * which clears the asserted bits.
+                */
+               if (dma_isr & 1) {
+                       dma_ch_isr =
+                               AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
+                                                 pdata->rx_queues[0],
+                                                 DMA_CH_SR);
+                       AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
+                                          pdata->rx_queues[0],
+                                          DMA_CH_SR, dma_ch_isr);
+               }
+       }
+       /* Unmask interrupts since disabled after generation */
+       rte_intr_ack(&pdata->pci_dev->intr_handle);
+}
+
+/*
+ * Configure device.
+ * It returns 0 on success.
+ *
+ * Only records whether Rx checksum offload was requested; the flag
+ * is consumed later when the hardware is actually programmed.
+ */
+static int
+axgbe_dev_configure(struct rte_eth_dev *dev)
+{
+       struct axgbe_port *pdata =  dev->data->dev_private;
+       /* Checksum offload to hardware — stores the masked offload
+        * bits, not a 0/1 value; presumably only tested for non-zero
+        * downstream (TODO confirm).
+        */
+       pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
+                               DEV_RX_OFFLOAD_CHECKSUM;
+       return 0;
+}
+
+/* Record the requested Rx multi-queue mode.
+ * RSS sets pdata->rss_enable, NONE clears it; any other mode is
+ * unsupported and rejected with -1.
+ */
+static int
+axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+
+       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+               pdata->rss_enable = 1;
+       else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+               pdata->rss_enable = 0;
+       else
+               return  -1;
+       return 0;
+}
+
+/*
+ * Start the device: configure the Rx multi-queue mode, reset the
+ * PHY, initialise the hardware, enable interrupts and bring up
+ * Tx/Rx. Returns 0 on success or the first failing step's error.
+ */
+static int
+axgbe_dev_start(struct rte_eth_dev *dev)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Multiqueue RSS */
+       ret = axgbe_dev_rx_mq_config(dev);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
+               return ret;
+       }
+       ret = axgbe_phy_reset(pdata);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "phy reset failed\n");
+               return ret;
+       }
+       ret = pdata->hw_if.init(pdata);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "dev_init failed\n");
+               return ret;
+       }
 
-       /* Enable interrupts since disabled after generation*/
+       /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+       /* phy start — NOTE(review): result (if any) is not checked */
+       pdata->phy_if.phy_start(pdata);
+       axgbe_dev_enable_tx(dev);
+       axgbe_dev_enable_rx(dev);
+
+       /* Mark the port as up and running */
+       axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
+       axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+       return 0;
+}
+
+/* Stop device: disable rx and tx functions to allow for reconfiguring. */
+static void
+axgbe_dev_stop(struct rte_eth_dev *dev)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Interrupts are disabled before the already-stopped check,
+        * so a redundant stop still masks device interrupts.
+        */
+       rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+       if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
+               return;
+
+       axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+       axgbe_dev_disable_tx(dev);
+       axgbe_dev_disable_rx(dev);
+
+       /* Quiesce the PHY and tear down hardware state */
+       pdata->phy_if.phy_stop(pdata);
+       pdata->hw_if.exit(pdata);
+       /* Report link down until the next start */
+       memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+       axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+}
 
 /* Clear all resources like TX/RX queues. */
@@ -103,13 +236,139 @@ axgbe_dev_close(struct rte_eth_dev *dev)
        axgbe_dev_clear_queues(dev);
 }
 
-static void
-axgbe_dev_info_get(struct rte_eth_dev *dev,
-                  struct rte_eth_dev_info *dev_info)
+/* Enable promiscuous mode by setting the PR (promiscuous) bit in
+ * the MAC packet filter register. Always returns 0.
+ */
+static int
+axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
+
+       return 0;
+}
+
+/* Disable promiscuous mode by clearing the PR bit in the MAC
+ * packet filter register. Always returns 0.
+ */
+static int
+axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
+
+       return 0;
+}
+
+/* Enable all-multicast via the PM (pass-all-multicast) bit in the
+ * MAC packet filter register; the register write is skipped when
+ * the bit is already set. Always returns 0.
+ */
+static int
+axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+               return 0;
+       AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
+
+       return 0;
+}
+
+/* Disable all-multicast by clearing the PM bit in the MAC packet
+ * filter register; the write is skipped when the bit is already
+ * clear. Always returns 0.
+ */
+static int
+axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+               return 0;
+       AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
+
+       return 0;
+}
+
+/* return 0 means link status changed, -1 means not changed
+ * (matches rte_eth_linkstatus_set() semantics).
+ */
+static int
+axgbe_dev_link_update(struct rte_eth_dev *dev,
+                     int wait_to_complete __rte_unused)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+       struct rte_eth_link link;
+       int ret = 0;
+
+       PMD_INIT_FUNC_TRACE();
+       /* Fixed settle delay — presumably gives autoneg/PHY state
+        * time to stabilise before polling; TODO confirm 800 ms.
+        */
+       rte_delay_ms(800);
+
+       /* Refresh pdata->phy_link / pdata->phy_speed from the PHY */
+       pdata->phy_if.phy_status(pdata);
+
+       memset(&link, 0, sizeof(struct rte_eth_link));
+       link.link_duplex = pdata->phy.duplex;
+       link.link_status = pdata->phy_link;
+       link.link_speed = pdata->phy_speed;
+       link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+                             ETH_LINK_SPEED_FIXED);
+       ret = rte_eth_linkstatus_set(dev, &link);
+       if (ret == -1)
+               PMD_DRV_LOG(ERR, "No change in link status\n");
+
+       return ret;
+}
+
+/*
+ * Aggregate the software per-queue Rx/Tx packet and byte counters
+ * into @stats (per-queue slots plus device totals). Always
+ * returns 0.
+ *
+ * NOTE(review): q_ipackets[]/q_opackets[] are indexed by queue id
+ * without bounding against RTE_ETHDEV_QUEUE_STAT_CNTRS — verify
+ * nb_rx/tx_queues cannot exceed that array size.
+ */
+static int
+axgbe_dev_stats_get(struct rte_eth_dev *dev,
+                   struct rte_eth_stats *stats)
+{
+       struct axgbe_rx_queue *rxq;
+       struct axgbe_tx_queue *txq;
+       unsigned int i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               stats->q_ipackets[i] = rxq->pkts;
+               stats->ipackets += rxq->pkts;
+               stats->q_ibytes[i] = rxq->bytes;
+               stats->ibytes += rxq->bytes;
+       }
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               stats->q_opackets[i] = txq->pkts;
+               stats->opackets += txq->pkts;
+               stats->q_obytes[i] = txq->bytes;
+               stats->obytes += txq->bytes;
+       }
+
+       return 0;
+}
+
+/* Zero the software packet/byte/error counters of every Rx and Tx
+ * queue. Always returns 0.
+ */
+static int
+axgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+       struct axgbe_rx_queue *rxq;
+       struct axgbe_tx_queue *txq;
+       unsigned int i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               rxq->pkts = 0;
+               rxq->bytes = 0;
+               rxq->errors = 0;
+       }
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               txq->pkts = 0;
+               txq->bytes = 0;
+               txq->errors = 0;
+       }
+
+       return 0;
+}
+
+static int
+axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        struct axgbe_port *pdata = dev->data->dev_private;
 
-       dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        dev_info->max_rx_queues = pdata->rx_ring_count;
        dev_info->max_tx_queues = pdata->tx_ring_count;
        dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
@@ -120,7 +379,8 @@ axgbe_dev_info_get(struct rte_eth_dev *dev,
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
-               DEV_RX_OFFLOAD_TCP_CKSUM;
+               DEV_RX_OFFLOAD_TCP_CKSUM  |
+               DEV_RX_OFFLOAD_KEEP_CRC;
 
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
@@ -142,9 +402,9 @@ axgbe_dev_info_get(struct rte_eth_dev *dev,
 
        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_free_thresh = AXGBE_TX_FREE_THRESH,
-               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-                               ETH_TXQ_FLAGS_NOOFFLOADS,
        };
+
+       return 0;
 }
 
 static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
@@ -327,6 +587,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
        int ret;
 
        eth_dev->dev_ops = &axgbe_eth_dev_ops;
+       eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
 
        /*
         * For secondary processes, we don't initialise any further as primary
@@ -335,7 +596,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
-       pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+       pdata = eth_dev->data->dev_private;
        /* initial state */
        axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
        axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
@@ -345,10 +606,12 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
        pdata->pci_dev = pci_dev;
 
        pdata->xgmac_regs =
-               (uint64_t)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
-       pdata->xprop_regs = pdata->xgmac_regs + AXGBE_MAC_PROP_OFFSET;
-       pdata->xi2c_regs = pdata->xgmac_regs + AXGBE_I2C_CTRL_OFFSET;
-       pdata->xpcs_regs = (uint64_t)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
+               (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
+       pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
+                                    + AXGBE_MAC_PROP_OFFSET);
+       pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
+                                   + AXGBE_I2C_CTRL_OFFSET);
+       pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
 
        /* version specific driver data*/
        if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
@@ -381,19 +644,19 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
        pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8)  &  0xff;
 
        eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
-                                              ETHER_ADDR_LEN, 0);
+                                              RTE_ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR,
                             "Failed to alloc %u bytes needed to store MAC addr tbl",
-                            ETHER_ADDR_LEN);
+                            RTE_ETHER_ADDR_LEN);
                return -ENOMEM;
        }
 
-       if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
-               eth_random_addr(pdata->mac_addr.addr_bytes);
+       if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
+               rte_eth_random_addr(pdata->mac_addr.addr_bytes);
 
        /* Copy the permanent MAC address */
-       ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
+       rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
 
        /* Clock settings */
        pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
@@ -449,6 +712,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
        ret = pdata->phy_if.phy_init(pdata);
        if (ret) {
                rte_free(eth_dev->data->mac_addrs);
+               eth_dev->data->mac_addrs = NULL;
                return ret;
        }
 
@@ -473,10 +737,9 @@ eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
                return 0;
 
        pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
-       /*Free macaddres*/
-       rte_free(eth_dev->data->mac_addrs);
-       eth_dev->data->mac_addrs = NULL;
        eth_dev->dev_ops = NULL;
+       eth_dev->rx_pkt_burst = NULL;
+       eth_dev->tx_pkt_burst = NULL;
        axgbe_dev_clear_queues(eth_dev);
 
        /* disable uio intr before callback unregister */
@@ -511,9 +774,7 @@ RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 
-RTE_INIT(axgbe_init_log);
-static void
-axgbe_init_log(void)
+RTE_INIT(axgbe_init_log)
 {
        axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
        if (axgbe_logtype_init >= 0)