net/hns3: adopt RTE_ETH_* namespace and opaque interrupt handle (VF)
[dpdk.git] / drivers / net / hns3 / hns3_ethdev_vf.c
index 7e01691..8739249 100644 (file)
@@ -807,15 +807,15 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
        }
 
        hw->adapter_state = HNS3_NIC_CONFIGURING;
-       if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+       if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
                hns3_err(hw, "setting link speed/duplex not supported");
                ret = -EINVAL;
                goto cfg_err;
        }
 
        /* When RSS is not configured, redirect the packet queue 0 */
-       if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
-               conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+               conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
                hw->rss_dis_flag = false;
                rss_conf = conf->rx_adv_conf.rss_conf;
                ret = hns3_dev_rss_hash_update(dev, &rss_conf);
@@ -832,7 +832,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
                goto cfg_err;
 
        /* config hardware GRO */
-       gro_en = conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false;
+       gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
        ret = hns3_config_gro(hw, gro_en);
        if (ret)
                goto cfg_err;
@@ -935,33 +935,32 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
        info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
        info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
 
-       info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
-                                DEV_RX_OFFLOAD_UDP_CKSUM |
-                                DEV_RX_OFFLOAD_TCP_CKSUM |
-                                DEV_RX_OFFLOAD_SCTP_CKSUM |
-                                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-                                DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-                                DEV_RX_OFFLOAD_SCATTER |
-                                DEV_RX_OFFLOAD_VLAN_STRIP |
-                                DEV_RX_OFFLOAD_VLAN_FILTER |
-                                DEV_RX_OFFLOAD_JUMBO_FRAME |
-                                DEV_RX_OFFLOAD_RSS_HASH |
-                                DEV_RX_OFFLOAD_TCP_LRO);
-       info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-                                DEV_TX_OFFLOAD_IPV4_CKSUM |
-                                DEV_TX_OFFLOAD_TCP_CKSUM |
-                                DEV_TX_OFFLOAD_UDP_CKSUM |
-                                DEV_TX_OFFLOAD_SCTP_CKSUM |
-                                DEV_TX_OFFLOAD_MULTI_SEGS |
-                                DEV_TX_OFFLOAD_TCP_TSO |
-                                DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                                DEV_TX_OFFLOAD_GRE_TNL_TSO |
-                                DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-                                DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+       info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+                                RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+                                RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+                                RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+                                RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+                                RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+                                RTE_ETH_RX_OFFLOAD_SCATTER |
+                                RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+                                RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+                                RTE_ETH_RX_OFFLOAD_RSS_HASH |
+                                RTE_ETH_RX_OFFLOAD_TCP_LRO);
+       info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                                RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+                                RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+                                RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+                                RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+                                RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+                                RTE_ETH_TX_OFFLOAD_TCP_TSO |
+                                RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+                                RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
+                                RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
+                                RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
                                 hns3_txvlan_cap_get(hw));
 
        if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
-               info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+               info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
        if (hns3_dev_get_support(hw, INDEP_TXRX))
                info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
@@ -1641,10 +1640,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
        tmp_mask = (unsigned int)mask;
 
-       if (tmp_mask & ETH_VLAN_FILTER_MASK) {
+       if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
                rte_spinlock_lock(&hw->lock);
                /* Enable or disable VLAN filter */
-               if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+               if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ret = hns3vf_en_vlan_filter(hw, true);
                else
                        ret = hns3vf_en_vlan_filter(hw, false);
@@ -1654,10 +1653,10 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        }
 
        /* Vlan stripping setting */
-       if (tmp_mask & ETH_VLAN_STRIP_MASK) {
+       if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
                rte_spinlock_lock(&hw->lock);
                /* Enable or disable VLAN stripping */
-               if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+               if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        ret = hns3vf_en_hw_strip_rxvtag(hw, true);
                else
                        ret = hns3vf_en_hw_strip_rxvtag(hw, false);
@@ -1725,7 +1724,7 @@ hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
        int ret;
 
        dev_conf = &hw->data->dev_conf;
-       en = dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP ? true
+       en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
                                                                   : false;
        ret = hns3vf_en_hw_strip_rxvtag(hw, en);
        if (ret)
@@ -1750,8 +1749,8 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
        }
 
        /* Apply vlan offload setting */
-       ret = hns3vf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK |
-                                       ETH_VLAN_FILTER_MASK);
+       ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
+                                       RTE_ETH_VLAN_FILTER_MASK);
        if (ret)
                hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
 
@@ -1957,7 +1956,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 
        hns3vf_clear_event_cause(hw, 0);
 
-       ret = rte_intr_callback_register(&pci_dev->intr_handle,
+       ret = rte_intr_callback_register(pci_dev->intr_handle,
                                         hns3vf_interrupt_handler, eth_dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
@@ -1965,7 +1964,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
        }
 
        /* Enable interrupt */
-       rte_intr_enable(&pci_dev->intr_handle);
+       rte_intr_enable(pci_dev->intr_handle);
        hns3vf_enable_irq0(hw);
 
        /* Get configuration from PF */
@@ -2017,8 +2016,8 @@ err_set_tc_queue:
 
 err_get_config:
        hns3vf_disable_irq0(hw);
-       rte_intr_disable(&pci_dev->intr_handle);
-       hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
+       rte_intr_disable(pci_dev->intr_handle);
+       hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
                             eth_dev);
 err_intr_callback_register:
 err_cmd_init:
@@ -2046,8 +2045,8 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
        hns3_flow_uninit(eth_dev);
        hns3_tqp_stats_uninit(hw);
        hns3vf_disable_irq0(hw);
-       rte_intr_disable(&pci_dev->intr_handle);
-       hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler,
+       rte_intr_disable(pci_dev->intr_handle);
+       hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
                             eth_dev);
        hns3_cmd_uninit(hw);
        hns3_cmd_destroy_queue(hw);
@@ -2060,7 +2059,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)
        struct hns3_hw *hw = &hns->hw;
        int ret;
 
-       hw->mac.link_status = ETH_LINK_DOWN;
+       hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
        /*
         * The "hns3vf_do_stop" function will also be called by .stop_service to
@@ -2090,7 +2089,7 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
        uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
        uint16_t q_id;
@@ -2108,16 +2107,16 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
                        (void)hns3vf_bind_ring_with_vector(hw, vec, false,
                                                           HNS3_RING_TYPE_RX,
                                                           q_id);
-                       if (vec < base + intr_handle->nb_efd - 1)
+                       if (vec < base + rte_intr_nb_efd_get(intr_handle)
+                           - 1)
                                vec++;
                }
        }
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+
+       /* Cleanup vector list */
+       rte_intr_vec_list_free(intr_handle);
 }
 
 static int
@@ -2219,31 +2218,31 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
        memset(&new_link, 0, sizeof(new_link));
        switch (mac->link_speed) {
-       case ETH_SPEED_NUM_10M:
-       case ETH_SPEED_NUM_100M:
-       case ETH_SPEED_NUM_1G:
-       case ETH_SPEED_NUM_10G:
-       case ETH_SPEED_NUM_25G:
-       case ETH_SPEED_NUM_40G:
-       case ETH_SPEED_NUM_50G:
-       case ETH_SPEED_NUM_100G:
-       case ETH_SPEED_NUM_200G:
+       case RTE_ETH_SPEED_NUM_10M:
+       case RTE_ETH_SPEED_NUM_100M:
+       case RTE_ETH_SPEED_NUM_1G:
+       case RTE_ETH_SPEED_NUM_10G:
+       case RTE_ETH_SPEED_NUM_25G:
+       case RTE_ETH_SPEED_NUM_40G:
+       case RTE_ETH_SPEED_NUM_50G:
+       case RTE_ETH_SPEED_NUM_100G:
+       case RTE_ETH_SPEED_NUM_200G:
                if (mac->link_status)
                        new_link.link_speed = mac->link_speed;
                break;
        default:
                if (mac->link_status)
-                       new_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
+                       new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
                break;
        }
 
        if (!mac->link_status)
-               new_link.link_speed = ETH_SPEED_NUM_NONE;
+               new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
        new_link.link_duplex = mac->link_duplex;
-       new_link.link_status = mac->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+       new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
        new_link.link_autoneg =
-           !(eth_dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
+           !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
 
        return rte_eth_linkstatus_set(eth_dev, &new_link);
 }
@@ -2273,7 +2272,7 @@ static int
 hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
        uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
@@ -2296,16 +2295,13 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
        if (rte_intr_efd_enable(intr_handle, intr_vector))
                return -EINVAL;
 
-       if (intr_handle->intr_vec == NULL) {
-               intr_handle->intr_vec =
-                       rte_zmalloc("intr_vec",
-                                   hw->used_rx_queues * sizeof(int), 0);
-               if (intr_handle->intr_vec == NULL) {
-                       hns3_err(hw, "Failed to allocate %u rx_queues"
-                                    " intr_vec", hw->used_rx_queues);
-                       ret = -ENOMEM;
-                       goto vf_alloc_intr_vec_error;
-               }
+       /* Allocate vector list */
+       if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+                                   hw->used_rx_queues)) {
+               hns3_err(hw, "Failed to allocate %u rx_queues"
+                        " intr_vec", hw->used_rx_queues);
+               ret = -ENOMEM;
+               goto vf_alloc_intr_vec_error;
        }
 
        if (rte_intr_allow_others(intr_handle)) {
@@ -2318,20 +2314,22 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
                                                   HNS3_RING_TYPE_RX, q_id);
                if (ret)
                        goto vf_bind_vector_error;
-               intr_handle->intr_vec[q_id] = vec;
+
+               if (rte_intr_vec_list_index_set(intr_handle, q_id, vec))
+                       goto vf_bind_vector_error;
+
                /*
                 * If there are not enough efds (e.g. not enough interrupt),
                 * remaining queues will be bond to the last interrupt.
                 */
-               if (vec < base + intr_handle->nb_efd - 1)
+               if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
                        vec++;
        }
        rte_intr_enable(intr_handle);
        return 0;
 
 vf_bind_vector_error:
-       rte_free(intr_handle->intr_vec);
-       intr_handle->intr_vec = NULL;
+       rte_intr_vec_list_free(intr_handle);
 vf_alloc_intr_vec_error:
        rte_intr_efd_disable(intr_handle);
        return ret;
@@ -2342,7 +2340,7 @@ hns3vf_restore_rx_interrupt(struct hns3_hw *hw)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint16_t q_id;
        int ret;
 
@@ -2352,8 +2350,9 @@ hns3vf_restore_rx_interrupt(struct hns3_hw *hw)
        if (rte_intr_dp_is_en(intr_handle)) {
                for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
                        ret = hns3vf_bind_ring_with_vector(hw,
-                                       intr_handle->intr_vec[q_id], true,
-                                       HNS3_RING_TYPE_RX, q_id);
+                               rte_intr_vec_list_index_get(intr_handle,
+                                                                  q_id),
+                               true, HNS3_RING_TYPE_RX, q_id);
                        if (ret)
                                return ret;
                }
@@ -2571,11 +2570,11 @@ hns3vf_stop_service(struct hns3_adapter *hns)
                 * Make sure call update link status before hns3vf_stop_poll_job
                 * because update link status depend on polling job exist.
                 */
-               hns3vf_update_link_status(hw, ETH_LINK_DOWN, hw->mac.link_speed,
+               hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
                                          hw->mac.link_duplex);
                hns3vf_stop_poll_job(eth_dev);
        }
-       hw->mac.link_status = ETH_LINK_DOWN;
+       hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
        hns3_set_rxtx_function(eth_dev);
        rte_wmb();
@@ -2817,7 +2816,7 @@ hns3vf_reinit_dev(struct hns3_adapter *hns)
        int ret;
 
        if (hw->reset.level == HNS3_VF_FULL_RESET) {
-               rte_intr_disable(&pci_dev->intr_handle);
+               rte_intr_disable(pci_dev->intr_handle);
                ret = hns3vf_set_bus_master(pci_dev, true);
                if (ret < 0) {
                        hns3_err(hw, "failed to set pci bus, ret = %d", ret);
@@ -2843,7 +2842,7 @@ hns3vf_reinit_dev(struct hns3_adapter *hns)
                                hns3_err(hw, "Failed to enable msix");
                }
 
-               rte_intr_enable(&pci_dev->intr_handle);
+               rte_intr_enable(pci_dev->intr_handle);
        }
 
        ret = hns3_reset_all_tqps(hns);
@@ -3087,4 +3086,5 @@ RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
                HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
                HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
-               HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> ");
+               HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
+               HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16_t> ");