+ return ret;
+}
+
+int
+ice_release_vsi(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw;
+ struct ice_vsi_ctx vsi_ctx;
+ enum ice_status ret;
+
+ if (!vsi)
+ return 0;
+
+ hw = ICE_VSI_TO_HW(vsi);
+
+ ice_remove_all_mac_vlan_filters(vsi);
+
+ memset(&vsi_ctx, 0, sizeof(vsi_ctx));
+
+ vsi_ctx.vsi_num = vsi->vsi_id;
+ vsi_ctx.info = vsi->info;
+ ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
+ if (ret != ICE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
+ rte_free(vsi);
+ return -1;
+ }
+
+ rte_free(vsi);
+ return 0;
+}
+
+void
+ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint16_t msix_intr, i;
+
+ /* disable interrupts and also clear all the existing config */
+ for (i = 0; i < vsi->nb_qps; i++) {
+ ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
+ ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
+ rte_wmb();
+ }
+
+ if (rte_intr_allow_others(intr_handle))
+ /* vfio-pci */
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
+ GLINT_DYN_CTL_WB_ON_ITR_M);
+ }
+ else
+ /* igb_uio */
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
+}
+
+static void
+ice_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *main_vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint16_t i;
+
+ /* avoid stopping again */
+ if (pf->adapter_stopped)
+ return;
+
+ /* stop and clear all Rx queues */
+ for (i = 0; i < data->nb_rx_queues; i++)
+ ice_rx_queue_stop(dev, i);
+
+ /* stop and clear all Tx queues */
+ for (i = 0; i < data->nb_tx_queues; i++)
+ ice_tx_queue_stop(dev, i);
+
+ /* disable all queue interrupts */
+ ice_vsi_disable_queues_intr(main_vsi);
+
+ if (pf->init_link_up)
+ ice_dev_set_link_up(dev);
+ else
+ ice_dev_set_link_down(dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ pf->adapter_stopped = true;
+}
+
+static void
+ice_dev_close(struct rte_eth_dev *dev)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Stopping the port brings the link down, which triggers a link
+ * event. Disable IRQ0 first so that deallocating resources such
+ * as port_info cannot race with the interrupt service thread and
+ * crash it.
+ */
+ ice_pf_disable_irq0(hw);
+
+ ice_dev_stop(dev);
+
+ if (!ad->is_safe_mode)
+ ice_flow_uninit(ad);
+
+ /* release all queue resources */
+ ice_free_queues(dev);
+
+ ice_res_pool_destroy(&pf->msix_pool);
+ ice_release_vsi(pf->main_vsi);
+ ice_sched_cleanup_all(hw);
+ ice_free_hw_tbls(hw);
+ rte_free(hw->port_info);
+ hw->port_info = NULL;
+ ice_shutdown_all_ctrlq(hw);
+ rte_free(pf->proto_xtr);
+ pf->proto_xtr = NULL;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ ice_interrupt_handler, dev);
+}
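+
+/* Illustrative sketch (not part of this patch): from the application
+ * side, the stop/close paths above are driven through the generic
+ * ethdev API; port_id is assumed to be a started port.
+ */
+static __rte_unused void
+example_port_teardown(uint16_t port_id)
+{
+ /* reaches ice_dev_stop(): queues stopped, queue IRQs disabled */
+ rte_eth_dev_stop(port_id);
+ /* reaches ice_dev_close(): VSI, scheduler and HW tables freed */
+ rte_eth_dev_close(port_id);
+}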
+
+static int
+ice_dev_uninit(struct rte_eth_dev *dev)
+{
+ ice_dev_close(dev);
+
+ return 0;
+}
+
+static int
+ice_dev_configure(struct rte_eth_dev *dev)
+{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Initialize to TRUE. If any Rx queue fails to meet the bulk
+ * allocation or vector Rx preconditions, these flags will be
+ * reset.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->tx_simple_allowed = true;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+ return 0;
+}
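+
+/* Illustrative sketch (not part of this patch): an application enables
+ * an RSS multi-queue mode like this, which is what makes
+ * ice_dev_configure() above turn on DEV_RX_OFFLOAD_RSS_HASH implicitly.
+ */
+static __rte_unused int
+example_configure_rss_port(uint16_t port_id)
+{
+ struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL, /* let the PMD pick a random key */
+ .rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
+ },
+ },
+ };
+
+ /* 4 Rx / 4 Tx queues; RSS spreads flows across the Rx queues */
+ return rte_eth_dev_configure(port_id, 4, 4, &port_conf);
+}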
+
+static int ice_init_rss(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_vsi *vsi = pf->main_vsi;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_rss_conf *rss_conf;
+ struct ice_aqc_get_set_rss_keys key;
+ uint16_t i, nb_q;
+ int ret = 0;
+ bool is_safe_mode = pf->adapter->is_safe_mode;
+ uint32_t reg;
+
+ rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = dev->data->nb_rx_queues;
+ vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
+ vsi->rss_lut_size = pf->hash_lut_size;
+
+ if (is_safe_mode) {
+ PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode");
+ return 0;
+ }
+
+ if (!vsi->rss_key)
+ vsi->rss_key = rte_zmalloc(NULL,
+ vsi->rss_key_size, 0);
+ if (!vsi->rss_lut)
+ vsi->rss_lut = rte_zmalloc(NULL,
+ vsi->rss_lut_size, 0);
+ if (!vsi->rss_key || !vsi->rss_lut)
+ return -ENOMEM;
+
+ /* configure RSS key */
+ if (!rss_conf->rss_key) {
+ /* Generate a random default hash key */
+ for (i = 0; i < vsi->rss_key_size; i++)
+ vsi->rss_key[i] = (uint8_t)rte_rand();
+ } else {
+ rte_memcpy(vsi->rss_key, rss_conf->rss_key,
+ RTE_MIN(rss_conf->rss_key_len,
+ vsi->rss_key_size));
+ }
+ rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
+ ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
+ if (ret)
+ return -EINVAL;
+
+ /* init RSS LUT table */
+ for (i = 0; i < vsi->rss_lut_size; i++)
+ vsi->rss_lut[i] = i % nb_q;
+
+ ret = ice_aq_set_rss_lut(hw, vsi->idx,
+ ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
+ vsi->rss_lut, vsi->rss_lut_size);
+ if (ret)
+ return -EINVAL;
+
+ /* Select the symmetric Toeplitz hash scheme for this VSI. */
+ reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
+ reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
+ (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
+ ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
+
+ /* configure RSS for IPv4 with input set IPv4 src/dst */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret);
+
+ /* configure RSS for IPv6 with input set IPv6 src/dst */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", __func__, ret);
+
+ /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
+
+ /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
+
+ /* configure RSS for sctp6 with input set IPv6 src/dst */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
+ __func__, ret);
+
+ /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
+
+ /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
+
+ /* configure RSS for sctp4 with input set IP src/dst */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
+ __func__, ret);
+
+ /* configure RSS for gtpu with input set TEID */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_GTP_U_IPV4_TEID,
+ ICE_FLOW_SEG_HDR_GTPU_IP, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_TEID rss flow fail %d",
+ __func__, ret);
+
+ /* configure RSS for PPPoE/PPPoD with input set
+ * source MAC and session ID
+ */
+ ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_PPPOE_SESS_ID_ETH,
+ ICE_FLOW_SEG_HDR_PPPOE, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d",
+ __func__, ret);
+
+ return 0;
+}
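+
+/* Illustrative sketch (not part of this patch): once the key and LUT
+ * programmed by ice_init_rss() are in place, the hardware conceptually
+ * selects an Rx queue as below, where hash is the (symmetric) Toeplitz
+ * result over the configured input set.
+ */
+static __rte_unused uint16_t
+example_rss_queue_lookup(const uint8_t *lut, uint16_t lut_size,
+ uint32_t hash)
+{
+ /* the LUT was filled round-robin with queue ids (i % nb_q) */
+ return lut[hash % lut_size];
+}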
+
+static void
+__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
+ int base_queue, int nb_queue)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint32_t val, val_tx;
+ int i;
+
+ for (i = 0; i < nb_queue; i++) {
+ /* do actual bind */
+ val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
+ (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
+ val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
+ (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
+
+ PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
+ base_queue + i, msix_vect);
+ /* set ITR0 value */
+ ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
+ ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
+ ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
+ }
+}
+
+void
+ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+ uint16_t queue_idx = 0;
+ int record = 0;
+ int i;
+
+ /* clear Rx/Tx queue interrupt */
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
+ ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
+ }
+
+ /* PF bind interrupt */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ queue_idx = 0;
+ record = 1;
+ }
+
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ if (nb_msix <= 1) {
+ if (!rte_intr_allow_others(intr_handle))
+ msix_vect = ICE_MISC_VEC_ID;
+
+ /* uio: map all queues to one msix_vect */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i,
+ vsi->nb_used_qps - i);
+
+ for (; !!record && i < vsi->nb_used_qps; i++)
+ intr_handle->intr_vec[queue_idx + i] =
+ msix_vect;
+ break;
+ }
+
+ /* vfio 1:1 queue/msix_vect mapping */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i, 1);
+
+ if (!!record)
+ intr_handle->intr_vec[queue_idx + i] = msix_vect;
+
+ msix_vect++;
+ nb_msix--;
+ }
+}
+
+void
+ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint16_t msix_intr, i;
+
+ if (rte_intr_allow_others(intr_handle))
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ msix_intr = vsi->msix_intr + i;
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
+ GLINT_DYN_CTL_INTENA_M |
+ GLINT_DYN_CTL_CLEARPBA_M |
+ GLINT_DYN_CTL_ITR_INDX_M |
+ GLINT_DYN_CTL_WB_ON_ITR_M);
+ }
+ else
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
+ GLINT_DYN_CTL_INTENA_M |
+ GLINT_DYN_CTL_CLEARPBA_M |
+ GLINT_DYN_CTL_ITR_INDX_M |
+ GLINT_DYN_CTL_WB_ON_ITR_M);
+}
+
+static int
+ice_rxq_intr_setup(struct rte_eth_dev *dev)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_vsi *vsi = pf->main_vsi;
+ uint32_t intr_vector = 0;
+
+ rte_intr_disable(intr_handle);
+
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
+ PMD_DRV_LOG(ERR, "At most %d intr queues supported",
+ ICE_MAX_INTR_QUEUE_NUM);
+ return -ENOTSUP;
+ }
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
+ 0);
+ if (!intr_handle->intr_vec) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %d rx_queues intr_vec",
+ dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* Map queues with MSIX interrupt */
+ vsi->nb_used_qps = dev->data->nb_rx_queues;
+ ice_vsi_queues_bind_intr(vsi);
+
+ /* Enable interrupts for all the queues */
+ ice_vsi_enable_queues_intr(vsi);
+
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
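+
+/* Illustrative sketch (not part of this patch): the queue/vector
+ * mapping built by ice_rxq_intr_setup() only takes effect when the
+ * application asked for per-queue Rx interrupts before starting the
+ * port, e.g.:
+ */
+static __rte_unused int
+example_enable_rxq_intr(uint16_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_conf conf = {
+ .intr_conf = {
+ .rxq = 1, /* request a vector per Rx queue */
+ },
+ };
+ int ret;
+
+ ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
+ if (ret < 0)
+ return ret;
+
+ /* queue setup and rte_eth_dev_start() omitted; afterwards,
+ * arm the interrupt for one queue
+ */
+ return rte_eth_dev_rx_intr_enable(port_id, queue_id);
+}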
+
+static void
+ice_get_init_link_status(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+ struct ice_link_status link_status;
+ int ret;
+
+ ret = ice_aq_get_link_info(hw->port_info, enable_lse,
+ &link_status, NULL);
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get link info");
+ pf->init_link_up = false;
+ return;
+ }
+
+ if (link_status.link_info & ICE_AQ_LINK_UP)
+ pf->init_link_up = true;
+}
+
+static int
+ice_dev_start(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ uint16_t nb_rxq = 0;
+ uint16_t nb_txq, i;
+ uint16_t max_frame_size;
+ int mask, ret;
+
+ /* program Tx queues' context in hardware */
+ for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
+ ret = ice_tx_queue_start(dev, nb_txq);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
+ goto tx_err;
+ }
+ }
+
+ /* program Rx queues' context in hardware */
+ for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
+ ret = ice_rx_queue_start(dev, nb_rxq);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
+ goto rx_err;
+ }
+ }
+
+ ret = ice_init_rss(pf);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
+ goto rx_err;
+ }
+
+ ice_set_rx_function(dev);
+ ice_set_tx_function(dev);
+
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = ice_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+ goto rx_err;
+ }
+
+ /* enable Rx interrupts and map Rx queues to interrupt vectors */
+ if (ice_rxq_intr_setup(dev))
+ return -EIO;
+
+ /* Enable receiving broadcast packets and transmitting packets */
+ ret = ice_set_vsi_promisc(hw, vsi->idx,
+ ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
+ ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
+ 0);
+ if (ret != ICE_SUCCESS)
+ PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+
+ ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
+ ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
+ ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
+ ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
+ ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
+ ICE_AQ_LINK_EVENT_AN_COMPLETED |
+ ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
+ NULL);
+ if (ret != ICE_SUCCESS)
+ PMD_DRV_LOG(WARNING, "Fail to set phy mask");
+
+ ice_get_init_link_status(dev);
+
+ ice_dev_set_link_up(dev);
+
+ /* Call get_link_info aq command to enable/disable LSE */
+ ice_link_update(dev, 0);
+
+ pf->adapter_stopped = false;
+
+ /* Set the max frame size to the default value */
+ max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
+ pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
+ ICE_FRAME_SIZE_MAX;
+
+ /* Program the max frame size into HW */
+ ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
+
+ return 0;
+
+ /* stop the started queues if we failed to start them all */
+rx_err:
+ for (i = 0; i < nb_rxq; i++)
+ ice_rx_queue_stop(dev, i);
+tx_err:
+ for (i = 0; i < nb_txq; i++)
+ ice_tx_queue_stop(dev, i);
+
+ return -EIO;
+}
+
+static int
+ice_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = ice_dev_uninit(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
+ return -ENXIO;
+ }
+
+ ret = ice_dev_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
+ return -ENXIO;
+ }
+
+ return 0;
+}
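+
+/* Illustrative sketch (not part of this patch): ice_dev_reset() is the
+ * backend of rte_eth_dev_reset(), which an application typically calls
+ * from its RTE_ETH_EVENT_INTR_RESET handler.
+ */
+static __rte_unused int
+example_reset_port(uint16_t port_id)
+{
+ int ret;
+
+ ret = rte_eth_dev_reset(port_id);
+ if (ret < 0)
+ return ret;
+
+ /* the port is back to the uninitialized state: reconfigure it,
+ * set up the queues again and restart it here
+ */
+ return 0;
+}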
+
+static int
+ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ bool is_safe_mode = pf->adapter->is_safe_mode;
+ u64 phy_type_low;
+ u64 phy_type_high;
+
+ dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
+ dev_info->max_rx_queues = vsi->nb_qps;
+ dev_info->max_tx_queues = vsi->nb_qps;
+ dev_info->max_mac_addrs = vsi->max_macaddrs;
+ dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->flow_type_rss_offloads = 0;
+
+ if (!is_safe_mode) {
+ dev_info->rx_offload_capa |=
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_RSS_HASH;
+ dev_info->tx_offload_capa |=
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
+ }
+
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->tx_queue_offload_capa = 0;
+
+ dev_info->reta_size = pf->hash_lut_size;
+ dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = ICE_DEFAULT_RX_PTHRESH,
+ .hthresh = ICE_DEFAULT_RX_HTHRESH,
+ .wthresh = ICE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = ICE_DEFAULT_TX_PTHRESH,
+ .hthresh = ICE_DEFAULT_TX_HTHRESH,
+ .wthresh = ICE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ICE_MAX_RING_DESC,
+ .nb_min = ICE_MIN_RING_DESC,
+ .nb_align = ICE_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ICE_MAX_RING_DESC,
+ .nb_min = ICE_MIN_RING_DESC,
+ .nb_align = ICE_ALIGN_RING_DESC,
+ };
+
+ dev_info->speed_capa = ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_5G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_25G;
+
+ phy_type_low = hw->port_info->phy.phy_type_low;
+ phy_type_high = hw->port_info->phy.phy_type_high;
+
+ if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
+ dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+
+ if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
+ ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
+ dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+
+ dev_info->nb_rx_queues = dev->data->nb_rx_queues;
+ dev_info->nb_tx_queues = dev->data->nb_tx_queues;
+
+ dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
+ dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
+ dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
+
+ return 0;
+}
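+
+/* Illustrative sketch (not part of this patch): how an application
+ * consumes the capabilities reported by ice_dev_info_get(), e.g. to
+ * test whether the port can run at 100G.
+ */
+static __rte_unused int
+example_port_supports_100g(uint16_t port_id)
+{
+ struct rte_eth_dev_info dev_info;
+
+ if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
+ return 0;
+
+ return (dev_info.speed_capa & ETH_LINK_SPEED_100G) != 0;
+}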
+
+static inline int
+ice_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &dev->data->dev_link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+ice_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
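+
+/* Note on the two helpers above: they treat struct rte_eth_link as a
+ * single 64-bit word for rte_atomic64_cmpset(), so they rely on the
+ * structure fitting exactly in 8 bytes. A build-time check making that
+ * assumption explicit could look like this (illustrative, not part of
+ * this patch); newer ethdev offers rte_eth_linkstatus_get()/_set() for
+ * the same purpose.
+ */
+static __rte_unused void
+example_link_size_check(void)
+{
+ RTE_BUILD_BUG_ON(sizeof(struct rte_eth_link) != sizeof(uint64_t));
+}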
+
+static int
+ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_link_status link_status;
+ struct rte_eth_link link, old;
+ int status;
+ unsigned int rep_cnt = MAX_REPEAT_TIME;
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+
+ memset(&link, 0, sizeof(link));
+ memset(&old, 0, sizeof(old));
+ memset(&link_status, 0, sizeof(link_status));
+ ice_atomic_read_link_status(dev, &old);
+
+ do {
+ /* Get link status information from hardware */
+ status = ice_aq_get_link_info(hw->port_info, enable_lse,
+ &link_status, NULL);
+ if (status != ICE_SUCCESS) {
+ link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_DRV_LOG(ERR, "Failed to get link info");
+ goto out;
+ }
+
+ link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
+ if (!wait_to_complete || link.link_status)
+ break;
+
+ rte_delay_ms(CHECK_INTERVAL);
+ } while (--rep_cnt);
+
+ if (!link.link_status)
+ goto out;
+
+ /* Full-duplex operation at all supported speeds */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ /* Parse the link status */
+ switch (link_status.link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ link.link_speed = ETH_SPEED_NUM_10M;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ link.link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ link.link_speed = ETH_SPEED_NUM_5G;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ link.link_speed = ETH_SPEED_NUM_50G;
+ break;
+ case ICE_AQ_LINK_SPEED_100GB:
+ link.link_speed = ETH_SPEED_NUM_100G;
+ break;
+ case ICE_AQ_LINK_SPEED_UNKNOWN:
+ default:
+ PMD_DRV_LOG(ERR, "Unknown link speed");
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ break;
+ }
+
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+
+out:
+ ice_atomic_write_link_status(dev, &link);
+ if (link.link_status == old.link_status)
+ return -1;
+
+ return 0;
+}
+
+/* Force the physical link state by getting the current PHY capabilities from
+ * hardware and setting the PHY config based on the determined capabilities. If
+ * link changes, link event will be triggered because both the Enable Automatic
+ * Link Update and LESM Enable bits are set when setting the PHY capabilities.
+ */
+static enum ice_status
+ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
+{
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_port_info *pi;
+ enum ice_status status;
+
+ if (!hw || !hw->port_info)
+ return ICE_ERR_PARAM;
+
+ pi = hw->port_info;
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status)
+ goto out;
+
+ /* No change in link */
+ if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
+ link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
+ goto out;
+
+ cfg.phy_type_low = pcaps->phy_type_low;
+ cfg.phy_type_high = pcaps->phy_type_high;
+ cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ cfg.low_power_ctrl = pcaps->low_power_ctrl;
+ cfg.eee_cap = pcaps->eee_cap;
+ cfg.eeer_value = pcaps->eeer_value;
+ cfg.link_fec_opt = pcaps->link_fec_options;
+ if (link_up)
+ cfg.caps |= ICE_AQ_PHY_ENA_LINK;
+ else
+ cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
+
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
+
+out:
+ ice_free(hw, pcaps);
+ return status;
+}
+
+static int
+ice_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return ice_force_phys_link_state(hw, true);
+}
+
+static int
+ice_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return ice_force_phys_link_state(hw, false);
+}
+
+static int
+ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
+
+ /* check if mtu is within the allowed range */
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
+ return -EINVAL;
+
+ /* MTU setting is forbidden if the port is started */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR,
+ "port %d must be stopped before configuration",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return 0;
+}
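+
+/* Illustrative sketch (not part of this patch): since ice_mtu_set()
+ * rejects a running port with -EBUSY, an application changes the MTU
+ * only while the port is stopped.
+ */
+static __rte_unused int
+example_change_mtu(uint16_t port_id, uint16_t mtu)
+{
+ int ret;
+
+ rte_eth_dev_stop(port_id);
+ ret = rte_eth_dev_set_mtu(port_id, mtu);
+ if (ret < 0)
+ return ret;
+
+ return rte_eth_dev_start(port_id);
+}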
+
+static int ice_macaddr_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ struct ice_mac_filter *f;
+ uint8_t flags = 0;
+ int ret;
+
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
+ return -EINVAL;
+ }
+
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+ break;
+ }
+
+ if (!f) {
+ PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+ return -EIO;
+ }
+
+ ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+ return -EIO;
+ }
+ ret = ice_add_mac_filter(vsi, mac_addr);
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add mac filter");
+ return -EIO;
+ }
+ rte_ether_addr_copy(mac_addr, &pf->dev_addr);
+
+ flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
+ ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
+ if (ret != ICE_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to set manage mac");
+
+ return 0;
+}
+
+/* Add a MAC address, and update filters */
+static int
+ice_macaddr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ ret = ice_add_mac_filter(vsi, mac_addr);
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MAC filter");
+ return -EINVAL;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/* Remove a MAC address, and update filters */
+static void
+ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_ether_addr *macaddr;
+ int ret;
+
+ macaddr = &data->mac_addrs[index];
+ ret = ice_remove_mac_filter(vsi, macaddr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
+ return;
+ }
+}
+
+static int
+ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (on) {
+ ret = ice_add_vlan_filter(vsi, vlan_id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add vlan filter");
+ return -EINVAL;
+ }
+ } else {
+ ret = ice_remove_vlan_filter(vsi, vlan_id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Configure vlan filter on or off */
+static int
+ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_vsi_ctx ctxt;
+ uint8_t sec_flags, sw_flags2;
+ int ret = 0;
+
+ sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ if (on) {
+ vsi->info.sec_flags |= sec_flags;
+ vsi->info.sw_flags2 |= sw_flags2;
+ } else {
+ vsi->info.sec_flags &= ~sec_flags;
+ vsi->info.sw_flags2 &= ~sw_flags2;
+ }
+ vsi->info.sw_id = hw->port_info->sw_id;
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info.valid_sections =
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
+ ICE_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt.vsi_num = vsi->vsi_id;
+
+ ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
+ on ? "enable" : "disable");
+ return -EINVAL;
+ } else {
+ vsi->info.valid_sections |=
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
+ ICE_AQ_VSI_PROP_SECURITY_VALID);
+ }
+
+ /* To be consistent with other drivers, allow untagged packets
+ * when VLAN filtering is on.
+ */
+ if (on)
+ ret = ice_add_vlan_filter(vsi, 0);
+ else
+ ret = ice_remove_vlan_filter(vsi, 0);
+
+ return ret;
+}
+
+static int
+ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_vsi_ctx ctxt;
+ uint8_t vlan_flags;
+ int ret = 0;
+
+ /* Check whether it is already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
+ if (on) {
+ if ((vsi->info.vlan_flags &
+ ICE_AQ_VSI_VLAN_EMOD_M) ==
+ ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.vlan_flags &
+ ICE_AQ_VSI_VLAN_EMOD_M) ==
+ ICE_AQ_VSI_VLAN_EMOD_NOTHING)
+ return 0; /* already off */
+ }
+ }
+
+ if (on)
+ vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
+ else
+ vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
+ vsi->info.vlan_flags |= vlan_flags;
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info.valid_sections =
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.vsi_num = vsi->vsi_id;
+ ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
+ on ? "enable" : "disable");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections |=
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ return ret;
+}
+
+static int
+ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ ice_vsi_config_vlan_filter(vsi, true);
+ else
+ ice_vsi_config_vlan_filter(vsi, false);
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ ice_vsi_config_vlan_stripping(vsi, true);
+ else
+ ice_vsi_config_vlan_stripping(vsi, false);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ ice_vsi_config_double_vlan(vsi, true);
+ else
+ ice_vsi_config_double_vlan(vsi, false);
+ }
+
+ return 0;
+}
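+
+/* Illustrative sketch (not part of this patch): the mask handled by
+ * ice_vlan_offload_set() is derived by the ethdev layer from calls
+ * such as this one.
+ */
+static __rte_unused int
+example_enable_vlan_strip_and_filter(uint16_t port_id)
+{
+ /* results in ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK changes */
+ return rte_eth_dev_set_vlan_offload(port_id,
+ ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
+}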
+
+static int
+ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!lut)
+ return -EINVAL;
+
+ if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
+ ret = ice_aq_get_rss_lut(hw, vsi->idx,
+ ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
+ return -EINVAL;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
+ }
+
+ return 0;
+}
+
+static int
+ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
+{
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int ret;
+
+ if (!vsi || !lut)
+ return -EINVAL;
+
+ pf = ICE_VSI_TO_PF(vsi);
+ hw = ICE_VSI_TO_HW(vsi);
+
+ if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
+ ret = ice_aq_set_rss_lut(hw, vsi->idx,
+ ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
+ return -EINVAL;
+ }
+ } else {
+ uint32_t *lut_dw = (uint32_t *)lut;
+ uint16_t i, lut_size_dw = lut_size / 4;
+
+ for (i = 0; i < lut_size_dw; i++)
+ ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
+
+ ice_flush(hw);
+ }
+
+ return 0;
+}
+
+static int
+ice_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint16_t i, lut_size = pf->hash_lut_size;
+ uint16_t idx, shift;
+ uint8_t *lut;
+ int ret;
+
+ if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
+ reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
+ reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d)"
+ "doesn't match the number hardware can "
+ "supported (128, 512, 2048)",
+ reta_size);
+ return -EINVAL;
+ }
+
+ /* It MUST use the current LUT size to get the RSS lookup table,
+ * otherwise it will fail with error code -100.
+ */
+ lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
+ }
+ ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
+ if (ret == 0 && lut_size != reta_size) {
+ PMD_DRV_LOG(INFO,
+ "The size of hash lookup table is changed from (%d) to (%d)",
+ lut_size, reta_size);
+ pf->hash_lut_size = reta_size;
+ }
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
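+
+/* Illustrative sketch (not part of this patch): an application updates
+ * the redirection table handled above through the generic API, e.g. to
+ * steer every flow to queue 0.
+ */
+static __rte_unused int
+example_reta_all_to_queue0(uint16_t port_id, uint16_t reta_size)
+{
+ /* enough groups for the largest ice LUT (2048 entries) */
+ struct rte_eth_rss_reta_entry64 reta_conf[2048 / RTE_RETA_GROUP_SIZE];
+ uint16_t i;
+
+ memset(reta_conf, 0, sizeof(reta_conf));
+ for (i = 0; i < reta_size; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
+ 1ULL << (i % RTE_RETA_GROUP_SIZE);
+ /* reta[] entries stay zero-initialized, i.e. queue 0 */
+
+ return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
+}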
+
+static int
+ice_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ uint16_t i, lut_size = pf->hash_lut_size;
+ uint16_t idx, shift;
+ uint8_t *lut;
+ int ret;
+
+ if (reta_size != lut_size) {
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d)"
+ "doesn't match the number hardware can "
+ "supported (%d)",
+ reta_size, lut_size);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc(NULL, reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+
+ ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = lut[i];
+ }
+
+out:
+ rte_free(lut);
+
+ return ret;
+}
+
+static int
+ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ int ret = 0;
+
+ if (!key || key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
+ return -EINVAL;
+ }
+
+ struct ice_aqc_get_set_rss_keys *key_dw =
+ (struct ice_aqc_get_set_rss_keys *)key;
+
+ ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!key || !key_len)
+ return -EINVAL;
+
+ ret = ice_aq_get_rss_key
+ (hw, vsi->idx,
+ (struct ice_aqc_get_set_rss_keys *)key);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
+ return -EINVAL;
+ }
+ *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+
+ return 0;
+}
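+
+/* Illustrative sketch (not part of this patch): key accessors like the
+ * one above sit behind the generic RSS hash API; an application reads
+ * the active key as follows.
+ */
+static __rte_unused int
+example_read_rss_key(uint16_t port_id)
+{
+ uint8_t key[(VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = key,
+ .rss_key_len = sizeof(key),
+ };
+
+ return rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
+}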