+static void
+ice_dev_close(struct rte_eth_dev *dev)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Stopping the device brings the link down, which triggers a link
+ * event. Disable the IRQ first so that deallocating resources such
+ * as port_info does not crash the interrupt service thread.
+ */
+ ice_pf_disable_irq0(hw);
+
+ ice_dev_stop(dev);
+
+ if (!ad->is_safe_mode)
+ ice_flow_uninit(ad);
+
+ /* release all queue resources */
+ ice_free_queues(dev);
+
+ ice_res_pool_destroy(&pf->msix_pool);
+ ice_release_vsi(pf->main_vsi);
+ ice_sched_cleanup_all(hw);
+ ice_free_hw_tbls(hw);
+ rte_free(hw->port_info);
+ hw->port_info = NULL;
+ ice_shutdown_all_ctrlq(hw);
+ rte_free(pf->proto_xtr);
+ pf->proto_xtr = NULL;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ /* disable uio intr before unregistering the callback */
+ rte_intr_disable(intr_handle);
+
+ /* unregister the interrupt callback from the EAL */
+ rte_intr_callback_unregister(intr_handle,
+ ice_interrupt_handler, dev);
+}
+
+static int
+ice_dev_uninit(struct rte_eth_dev *dev)
+{
+ ice_dev_close(dev);
+
+ return 0;
+}
+
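+/* Record a newly added GTPU extension-header RSS rule in the PF's GTPU
+ * hash context. For GTPU uplink/downlink rules, re-add the outer
+ * IPv4/IPv6 rule that ice_add_rss_cfg_pre() rotated out while the new
+ * rule was being programmed.
+ */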
+static int
+ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_vsi *vsi = pf->main_vsi;
+
+ if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv4_udp.symm = symm;
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv6_udp.symm = symm;
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv4_tcp.symm = symm;
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv6_tcp.symm = symm;
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv4.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv4.symm = symm;
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv6.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv6.symm = symm;
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ }
+ }
+
+ if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
+ ICE_FLOW_SEG_HDR_GTPU_UP)) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv4.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv6.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv4.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv6.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
+ }
+ }
+ }
+
+ return 0;
+}
+
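+/* Before a GTPU uplink/downlink RSS rule is programmed, remove the
+ * conflicting rules recorded in the GTPU hash context. Outer IPv4/IPv6
+ * rules removed for the UDP/TCP cases are marked as rotating so that
+ * ice_add_rss_cfg_post() can restore them afterwards.
+ */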
+static int
+ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_vsi *vsi = pf->main_vsi;
+
+ if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
+ ICE_FLOW_SEG_HDR_GTPU_UP)) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
+ pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr);
+ ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
+ pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr);
+ ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
+ pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr);
+ ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
+ pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr);
+ ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
+ }
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
+ pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
+ pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ }
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
+ pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
+ pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ }
+ }
+ }
+
+ return 0;
+}
+
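+/* After a GTPU extension-header RSS rule has been removed, clear the
+ * matching entry from the GTPU hash context so it is not restored later.
+ */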
+static int
+ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
+{
+ if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
+ }
+ }
+
+ return 0;
+}
+
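+/* Wrapper around ice_rem_rss_cfg() that keeps the GTPU hash context in
+ * sync. Failures are logged but not propagated to the caller.
+ */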
+int
+ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
+ uint64_t fld, uint32_t hdr)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret;
+
+ ret = ice_rem_rss_cfg(hw, vsi_id, fld, hdr);
+ if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
+ PMD_DRV_LOG(ERR, "remove rss cfg failed");
+
+ ret = ice_rem_rss_cfg_post(pf, hdr);
+ if (ret)
+ PMD_DRV_LOG(ERR, "remove rss cfg post failed");
+
+ return 0;
+}
+
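+/* Wrapper around ice_add_rss_cfg() that applies the GTPU pre/post
+ * fix-ups. Failures are logged but not propagated to the caller.
+ */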
+int
+ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
+ uint64_t fld, uint32_t hdr, bool symm)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret;
+
+ ret = ice_add_rss_cfg_pre(pf, hdr);
+ if (ret)
+ PMD_DRV_LOG(ERR, "add rss cfg pre failed");
+
+ ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm);
+ if (ret)
+ PMD_DRV_LOG(ERR, "add rss cfg failed");
+
+ ret = ice_add_rss_cfg_post(pf, hdr, fld, symm);
+ if (ret)
+ PMD_DRV_LOG(ERR, "add rss cfg post failed");
+
+ return 0;
+}
+
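+/* Enable one RSS configuration on the main VSI for each hash type
+ * requested in rss_hf.
+ */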
+static void
+ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
+{
+ struct ice_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ /* Configure RSS for IPv4 with src/dst addr as input set */
+ if (rss_hf & ETH_RSS_IPV4) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
+ __func__, ret);
+ }
+
+ /* Configure RSS for IPv6 with src/dst addr as input set */
+ if (rss_hf & ETH_RSS_IPV6) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
+ __func__, ret);
+ }