mbuf: rename deprecated VLAN flags
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index a0588ab..dac6a3a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -61,6 +61,7 @@
 #include <rte_random.h>
 #include <rte_dev.h>
 #include <rte_hash_crc.h>
+#include <rte_security_driver.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -1167,8 +1168,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       /* Initialize security_ctx only for primary process */
+       eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
+       if (eth_dev->security_ctx == NULL)
+               return -ENOMEM;
+
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
@@ -1401,6 +1406,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
        /* Remove all Traffic Manager configuration */
        ixgbe_tm_conf_uninit(eth_dev);
 
+       rte_free(eth_dev->security_ctx);
+
        return 0;
 }
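The security context created in eth_ixgbe_dev_init() above is released with a plain rte_free() here, which assumes ixgbe_ipsec_ctx_create() obtains the context from the rte_malloc heap. A minimal sketch of that assumed pairing; example_ipsec_ctx_create() and the way it fills the struct are illustrative, not the driver's actual code:

#include <rte_malloc.h>
#include <rte_security_driver.h>

/* Hypothetical allocator mirroring the assumed behaviour of
 * ixgbe_ipsec_ctx_create(): zero an rte_security_ctx from the rte_malloc
 * heap so that rte_free() in dev_uninit is the matching release. */
static struct rte_security_ctx *
example_ipsec_ctx_create(void *device)
{
        struct rte_security_ctx *ctx;

        ctx = rte_zmalloc("example_sec_ctx", sizeof(*ctx), 0);
        if (ctx == NULL)
                return NULL;
        ctx->device = device;  /* back-pointer used by the rte_security API */
        /* ctx->ops and ctx->sess_cnt would be set by the real driver */
        return ctx;
}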
 
@@ -1627,7 +1634,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -1781,7 +1787,8 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ixgbe_pmd = {
        .id_table = pci_id_ixgbe_map,
-       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+                    RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_ixgbe_pci_probe,
        .remove = eth_ixgbe_pci_remove,
 };
@@ -1803,7 +1810,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
  */
 static struct rte_pci_driver rte_ixgbevf_pmd = {
        .id_table = pci_id_ixgbevf_map,
-       .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_ixgbevf_pci_probe,
        .remove = eth_ixgbevf_pci_remove,
 };
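RTE_PCI_DRV_IOVA_AS_VA advertises that both the PF and VF PMDs can operate with IOVAs equal to virtual addresses, letting the EAL choose IOVA-as-VA mode when every attached bus and driver supports it. A small sketch of how an application can check which mode the EAL settled on, assuming a DPDK release that provides rte_eal_iova_mode():

#include <stdio.h>
#include <rte_eal.h>

/* Report whether the EAL chose IOVA-as-VA or IOVA-as-PA addressing. */
static void
print_iova_mode(void)
{
        if (rte_eal_iova_mode() == RTE_IOVA_VA)
                printf("IOVA mode: VA\n");
        else
                printf("IOVA mode: PA\n");
}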
@@ -1959,9 +1966,9 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
        rxq = dev->data->rx_queues[queue];
 
        if (on)
-               rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+               rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
        else
-               rxq->vlan_flags = PKT_RX_VLAN_PKT;
+               rxq->vlan_flags = PKT_RX_VLAN;
 }
 
 static void
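PKT_RX_VLAN_PKT is the deprecated name renamed to PKT_RX_VLAN (the change named in the commit title); the per-queue rxq->vlan_flags value set here is applied to m->ol_flags by the receive path. A hedged sketch of how an application consumes those flags, assuming VLAN stripping is enabled on the queue:

#include <rte_mbuf.h>

/* Return the stripped VLAN TCI, or 0 if no stripped tag is present.
 * PKT_RX_VLAN: a VLAN tag was detected; PKT_RX_VLAN_STRIPPED: the tag was
 * removed from the packet data and saved in m->vlan_tci. */
static inline uint16_t
example_rx_vlan_tci(const struct rte_mbuf *m)
{
        const uint64_t mask = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;

        if ((m->ol_flags & mask) == mask)
                return m->vlan_tci;
        return 0;
}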
@@ -3694,6 +3701,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
            hw->mac.type == ixgbe_mac_X550EM_a)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
+       dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+       dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
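DEV_RX_OFFLOAD_SECURITY and DEV_TX_OFFLOAD_SECURITY advertise the new inline-security support to applications through the device info. A sketch of the capability check an application might perform; note that in the DPDK release this diff targets, rte_eth_dev_info_get() returns void:

#include <stdbool.h>
#include <rte_ethdev.h>

/* True if the port reports inline-security offload in both directions. */
static bool
port_has_inline_security(uint16_t port_id)
{
        struct rte_eth_dev_info info;

        rte_eth_dev_info_get(port_id, &info);
        return (info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) &&
               (info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY);
}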
@@ -3956,6 +3966,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = 0;
        link.link_duplex = ETH_LINK_HALF_DUPLEX;
+       link.link_autoneg = ETH_LINK_AUTONEG;
        memset(&old, 0, sizeof(old));
        rte_ixgbe_dev_atomic_read_link_status(dev, &old);
 
@@ -7227,6 +7238,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        struct ixgbe_dcb_config *dcb_config =
                        IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
        struct ixgbe_dcb_tc_config *tc;
+       struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+       uint8_t nb_tcs;
        uint8_t i, j;
 
        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
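The tc_queue and nb_tcs locals introduced above shorten the mapping code in the next hunk, where a new SR-IOV branch reports queues for pool 0 only. A hedged sketch of how an application retrieves that mapping through the ethdev API:

#include <stdio.h>
#include <rte_ethdev.h>

/* Dump the RX queue range assigned to each traffic class; with SR-IOV
 * active only pool 0 is populated by the branch added below. */
static void
dump_dcb_queue_map(uint16_t port_id)
{
        struct rte_eth_dcb_info info;
        uint8_t tc;

        if (rte_eth_dev_get_dcb_info(port_id, &info) != 0)
                return;
        for (tc = 0; tc < info.nb_tcs; tc++)
                printf("TC%u: rxq base %u, %u queue(s)\n", tc,
                       info.tc_queue.tc_rxq[0][tc].base,
                       info.tc_queue.tc_rxq[0][tc].nb_queue);
}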
@@ -7234,19 +7247,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        else
                dcb_info->nb_tcs = 1;
 
+       tc_queue = &dcb_info->tc_queue;
+       nb_tcs = dcb_info->nb_tcs;
+
        if (dcb_config->vt_mode) { /* vt is enabled*/
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-               for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
-                       for (j = 0; j < dcb_info->nb_tcs; j++) {
-                               dcb_info->tc_queue.tc_rxq[i][j].base =
-                                               i * dcb_info->nb_tcs + j;
-                               dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
-                               dcb_info->tc_queue.tc_txq[i][j].base =
-                                               i * dcb_info->nb_tcs + j;
-                               dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+               if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+                       for (j = 0; j < nb_tcs; j++) {
+                               tc_queue->tc_rxq[0][j].base = j;
+                               tc_queue->tc_rxq[0][j].nb_queue = 1;
+                               tc_queue->tc_txq[0][j].base = j;
+                               tc_queue->tc_txq[0][j].nb_queue = 1;
+                       }
+               } else {
+                       for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+                               for (j = 0; j < nb_tcs; j++) {
+                                       tc_queue->tc_rxq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_rxq[i][j].nb_queue = 1;
+                                       tc_queue->tc_txq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_txq[i][j].nb_queue = 1;
+                               }
                        }
                }
        } else { /* vt is disabled*/