mbuf: rename deprecated VLAN flags
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index bb23287..dac6a3a 100644
@@ -61,6 +61,7 @@
 #include <rte_random.h>
 #include <rte_dev.h>
 #include <rte_hash_crc.h>
+#include <rte_security_driver.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -176,7 +177,7 @@ static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
-static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstat *xstats, unsigned n);
@@ -269,7 +270,7 @@ static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
@@ -1167,8 +1168,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       /* Initialize security_ctx only for the primary process */
+       eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
+       if (eth_dev->security_ctx == NULL)
+               return -ENOMEM;
+
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
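
The context created above is what applications later retrieve through the
ethdev layer. A minimal consumer-side sketch, assuming the standard
rte_security flow this series introduces (port_id, sess_conf and sess_pool
are the application's own):

    #include <rte_ethdev.h>
    #include <rte_security.h>

    /* fetch the per-port security context registered by the PMD */
    struct rte_security_ctx *ctx =
            (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(port_id);
    struct rte_security_session *sess;

    if (ctx != NULL)
            /* e.g. create an inline IPsec session bound to this port */
            sess = rte_security_session_create(ctx, &sess_conf, sess_pool);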
@@ -1336,12 +1341,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        /* initialize l2 tunnel filter list & hash */
        ixgbe_l2_tn_filter_init(eth_dev);
 
-       TAILQ_INIT(&filter_ntuple_list);
-       TAILQ_INIT(&filter_ethertype_list);
-       TAILQ_INIT(&filter_syn_list);
-       TAILQ_INIT(&filter_fdir_list);
-       TAILQ_INIT(&filter_l2_tunnel_list);
-       TAILQ_INIT(&ixgbe_flow_list);
+       /* initialize flow filter lists */
+       ixgbe_filterlist_init();
 
        /* initialize bandwidth configuration info */
        memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
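
A plausible shape for the new helper (it lives in ixgbe_flow.c; the sketch
below simply assumes it gathers the six TAILQ_INIT calls removed above):

    void
    ixgbe_filterlist_init(void)
    {
            TAILQ_INIT(&filter_ntuple_list);
            TAILQ_INIT(&filter_ethertype_list);
            TAILQ_INIT(&filter_syn_list);
            TAILQ_INIT(&filter_fdir_list);
            TAILQ_INIT(&filter_l2_tunnel_list);
            TAILQ_INIT(&ixgbe_flow_list);
    }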
@@ -1405,6 +1406,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
        /* Remove all Traffic Manager configuration */
        ixgbe_tm_conf_uninit(eth_dev);
 
+       rte_free(eth_dev->security_ctx);
+
        return 0;
 }
 
@@ -1631,7 +1634,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -1785,7 +1787,8 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ixgbe_pmd = {
        .id_table = pci_id_ixgbe_map,
-       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+                    RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_ixgbe_pci_probe,
        .remove = eth_ixgbe_pci_remove,
 };
@@ -1807,7 +1810,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
  */
 static struct rte_pci_driver rte_ixgbevf_pmd = {
        .id_table = pci_id_ixgbevf_map,
-       .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_ixgbevf_pci_probe,
        .remove = eth_ixgbevf_pci_remove,
 };
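
Both PF and VF drivers now declare they can run with I/O virtual addresses
equal to process virtual addresses. A hedged sketch of how code can check
which mode EAL actually selected, using the rte_eal_iova_mode() accessor
from the same release:

    #include <stdio.h>
    #include <rte_eal.h>

    if (rte_eal_iova_mode() == RTE_IOVA_VA)
            printf("EAL selected IOVA-as-VA; no physical addresses needed\n");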
@@ -1963,9 +1966,9 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
        rxq = dev->data->rx_queues[queue];
 
        if (on)
-               rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+               rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
        else
-               rxq->vlan_flags = PKT_RX_VLAN_PKT;
+               rxq->vlan_flags = PKT_RX_VLAN;
 }
 
 static void
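
This hunk is what the commit subject refers to: PKT_RX_VLAN_PKT is the
deprecated name, PKT_RX_VLAN its replacement. The cached rxq->vlan_flags is
ORed into each received mbuf; a simplified sketch of that consumer (the real
code lives in the driver's Rx burst path, descriptor field names as used
there):

    /* per packet, when the descriptor carries a VLAN tag */
    if (rx_status & IXGBE_RXD_STAT_VP) {
            mb->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
            /* PKT_RX_VLAN, plus PKT_RX_VLAN_STRIPPED when stripping is on */
            mb->ol_flags |= rxq->vlan_flags;
    }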
@@ -2508,8 +2511,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        *    - fixed speed: TODO implement
        */
        if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
-               PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported",
-                            dev->data->port_id);
+               PMD_INIT_LOG(ERR,
+               "Invalid link_speeds for port %u, fix speed not supported",
+                               dev->data->port_id);
                return -EINVAL;
        }
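
For context, a sketch of the application configuration this check rejects
(port_id is the application's):

    struct rte_eth_conf conf = { 0 };

    /* request a fixed 10G link instead of autonegotiation */
    conf.link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_FIXED;
    rte_eth_dev_configure(port_id, 1, 1, &conf);
    /* ixgbe_dev_start() logs the error above and returns -EINVAL */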
 
@@ -3107,7 +3111,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 /*
  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
  */
-static void
+static int
 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        struct ixgbe_hw *hw =
@@ -3129,7 +3133,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                        &total_qbrc, &total_qprc, &total_qprdc);
 
        if (stats == NULL)
-               return;
+               return -EINVAL;
 
        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = total_qprc;
@@ -3160,6 +3164,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        /* Tx Errors */
        stats->oerrors  = 0;
+       return 0;
 }
 
 static void
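
With the void-to-int conversion the failure finally propagates to
rte_eth_stats_get(), whose own return type changed to int in the same
release. Caller-side sketch:

    #include <inttypes.h>
    #include <rte_ethdev.h>

    struct rte_eth_stats stats;

    if (rte_eth_stats_get(port_id, &stats) == 0)
            printf("port %u: %" PRIu64 " rx, %" PRIu64 " tx packets\n",
                   port_id, stats.ipackets, stats.opackets);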
@@ -3571,7 +3576,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        return IXGBEVF_NB_XSTATS;
 }
 
-static void
+static int
 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -3580,12 +3585,13 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        ixgbevf_update_stats(dev);
 
        if (stats == NULL)
-               return;
+               return -EINVAL;
 
        stats->ipackets = hw_stats->vfgprc;
        stats->ibytes = hw_stats->vfgorc;
        stats->opackets = hw_stats->vfgptc;
        stats->obytes = hw_stats->vfgotc;
+       return 0;
 }
 
 static void
@@ -3695,6 +3701,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
            hw->mac.type == ixgbe_mac_X550EM_a)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
+       dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+       dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
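
Applications discover the new capability the usual way, by testing the
offload masks reported here; use_inline_ipsec below is a hypothetical
application flag:

    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(port_id, &dev_info);
    if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) &&
        (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY))
            use_inline_ipsec = 1;   /* inline crypto in both directions */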
@@ -3957,6 +3966,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = 0;
        link.link_duplex = ETH_LINK_HALF_DUPLEX;
+       link.link_autoneg = ETH_LINK_AUTONEG;
        memset(&old, 0, sizeof(old));
        rte_ixgbe_dev_atomic_read_link_status(dev, &old);
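
Initialising link_autoneg here keeps the field from reaching callers of the
link API uninitialised. Reader-side sketch:

    struct rte_eth_link link;

    rte_eth_link_get_nowait(port_id, &link);
    printf("link %s, %u Mbps, autoneg %s\n",
           link.link_status == ETH_LINK_UP ? "up" : "down",
           link.link_speed,
           link.link_autoneg == ETH_LINK_AUTONEG ? "on" : "off");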
 
@@ -5029,7 +5039,10 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
        /* check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
-               intr_vector = dev->data->nb_rx_queues;
+               /* According to the datasheet, only vectors 0/1/2 can be
+                * used; for now a single vector serves the Rx queues.
+                */
+               intr_vector = 1;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }
@@ -5046,6 +5059,15 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        }
        ixgbevf_configure_msix(dev);
 
+       /* When a VF port is bound to VFIO-PCI, only the miscellaneous
+        * interrupt is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
+        * If that earlier VFIO interrupt mapping is not cleared, the
+        * rte_intr_enable() below fails when it tries to map Rx queue
+        * interrupts to other VFIO vectors.
+        * So clear the uio/vfio intr/eventfd first to avoid failure.
+        */
+       rte_intr_disable(intr_handle);
+
        rte_intr_enable(intr_handle);
 
        /* Re-enable interrupt for VF */
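
The application side that exercises this path: per-queue Rx interrupts must
be requested at configure time, after which the single VFIO vector set up
above serves the Rx queues. A sketch with one queue pair for brevity:

    struct rte_eth_conf conf = { 0 };

    conf.intr_conf.rxq = 1;                 /* ask for Rx queue interrupts */
    rte_eth_dev_configure(port_id, 1, 1, &conf);
    /* ... rx/tx queue setup ... */
    rte_eth_dev_start(port_id);
    rte_eth_dev_rx_intr_enable(port_id, 0); /* unmask queue 0 */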
@@ -5556,9 +5578,12 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t vec = IXGBE_MISC_VEC_ID;
 
        mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-       mask |= (1 << IXGBE_MISC_VEC_ID);
+       if (rte_intr_allow_others(intr_handle))
+               vec = IXGBE_RX_VEC_START;
+       mask |= (1 << vec);
        RTE_SET_USED(queue_id);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
@@ -5573,9 +5598,14 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       uint32_t vec = IXGBE_MISC_VEC_ID;
 
        mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-       mask &= ~(1 << IXGBE_MISC_VEC_ID);
+       if (rte_intr_allow_others(intr_handle))
+               vec = IXGBE_RX_VEC_START;
+       mask &= ~(1 << vec);
        RTE_SET_USED(queue_id);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
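
These VTEIMS updates arm and disarm the queue interrupt around an
application's sleep cycle. A sketch of the canonical loop (the pattern used
by examples/l3fwd-power), assuming the queue is registered with the
per-thread epoll set:

    struct rte_epoll_event ev;

    rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
                              RTE_INTR_EVENT_ADD, NULL);
    rte_eth_dev_rx_intr_enable(port_id, queue_id);  /* set the EIMS bit */
    rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
    rte_eth_dev_rx_intr_disable(port_id, queue_id); /* clear it again */
    /* ... rte_eth_rx_burst() until drained, then re-enable ... */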
 
@@ -5717,6 +5747,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t q_idx;
        uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+       uint32_t base = IXGBE_MISC_VEC_ID;
 
        /* Configure VF other cause ivar */
        ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
@@ -5727,6 +5758,11 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
        if (!rte_intr_dp_is_en(intr_handle))
                return;
 
+       if (rte_intr_allow_others(intr_handle)) {
+               base = IXGBE_RX_VEC_START;
+               vector_idx = IXGBE_RX_VEC_START;
+       }
+
        /* Configure all RX queues of VF */
        for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
                /* Force all queue use vector 0,
@@ -5734,6 +5770,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
                 */
                ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
                intr_handle->intr_vec[q_idx] = vector_idx;
+               if (vector_idx < base + intr_handle->nb_efd - 1)
+                       vector_idx++;
        }
 }
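
The new increment spreads queues across the available event fds instead of
piling everything on one vector, with the last vector absorbing any
overflow. A standalone illustration, assuming base = IXGBE_RX_VEC_START = 1
and nb_efd = 2 (values hypothetical):

    uint32_t vec = 1, base = 1, nb_efd = 2, q;

    for (q = 0; q < 4; q++) {
            printf("rxq %u -> MSI-X vector %u\n", q, vec);
            if (vec < base + nb_efd - 1)
                    vec++;
    }
    /* prints: rxq 0 -> 1, rxq 1 -> 2, rxq 2 -> 2, rxq 3 -> 2 */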
 
@@ -7200,6 +7238,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        struct ixgbe_dcb_config *dcb_config =
                        IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
        struct ixgbe_dcb_tc_config *tc;
+       struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+       uint8_t nb_tcs;
        uint8_t i, j;
 
        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
@@ -7207,19 +7247,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        else
                dcb_info->nb_tcs = 1;
 
+       tc_queue = &dcb_info->tc_queue;
+       nb_tcs = dcb_info->nb_tcs;
+
        if (dcb_config->vt_mode) { /* vt is enabled */
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-               for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
-                       for (j = 0; j < dcb_info->nb_tcs; j++) {
-                               dcb_info->tc_queue.tc_rxq[i][j].base =
-                                               i * dcb_info->nb_tcs + j;
-                               dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
-                               dcb_info->tc_queue.tc_txq[i][j].base =
-                                               i * dcb_info->nb_tcs + j;
-                               dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+               if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+                       for (j = 0; j < nb_tcs; j++) {
+                               tc_queue->tc_rxq[0][j].base = j;
+                               tc_queue->tc_rxq[0][j].nb_queue = 1;
+                               tc_queue->tc_txq[0][j].base = j;
+                               tc_queue->tc_txq[0][j].nb_queue = 1;
+                       }
+               } else {
+                       for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+                               for (j = 0; j < nb_tcs; j++) {
+                                       tc_queue->tc_rxq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_rxq[i][j].nb_queue = 1;
+                                       tc_queue->tc_txq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_txq[i][j].nb_queue = 1;
+                               }
                        }
                }
        } else { /* vt is disabled */