+ /* Add vlan into vlan list */
+ f = rte_zmalloc(NULL, sizeof(*f), 0);
+ if (!f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ ret = -ENOMEM;
+ goto DONE;
+ }
+ f->vlan_info.vlan_id = vlan_id;
+ TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
+ vsi->vlan_num++;
+
+ ret = 0;
+
+DONE:
+ rte_free(v_list_itr);
+ return ret;
+}
+
+static int
+ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
+{
+ struct ice_fltr_list_entry *v_list_itr = NULL;
+ struct ice_vlan_filter *f;
+ struct LIST_HEAD_TYPE list_head;
+ struct ice_hw *hw;
+ int ret = 0;
+
+ /**
+ * Vlan 0 is the generic filter for untagged packets
+ * and can't be removed.
+ */
+ if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
+ return -EINVAL;
+
+ hw = ICE_VSI_TO_HW(vsi);
+
+ /* If we can't find it, return an error */
+ f = ice_find_vlan_filter(vsi, vlan_id);
+ if (!f)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&list_head);
+
+ v_list_itr = (struct ice_fltr_list_entry *)
+ ice_malloc(hw, sizeof(*v_list_itr));
+ if (!v_list_itr) {
+ ret = -ENOMEM;
+ goto DONE;
+ }
+
+ v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
+ v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
+ v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ v_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ v_list_itr->fltr_info.vsi_handle = vsi->idx;
+
+ LIST_ADD(&v_list_itr->list_entry, &list_head);
+
+ /* remove the vlan filter */
+ ret = ice_remove_vlan(hw, &list_head);
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
+ ret = -EINVAL;
+ goto DONE;
+ }
+
+ /* Remove the vlan id from vlan list */
+ TAILQ_REMOVE(&vsi->vlan_list, f, next);
+ rte_free(f);
+ vsi->vlan_num--;
+
+ ret = 0;
+DONE:
+ rte_free(v_list_itr);
+ return ret;
+}
+
+static int
+ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
+{
+ struct ice_mac_filter *m_f;
+ struct ice_vlan_filter *v_f;
+ int ret = 0;
+
+ if (!vsi || !vsi->mac_num)
+ return -EINVAL;
+
+ TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
+ ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
+ if (ret != ICE_SUCCESS) {
+ ret = -EINVAL;
+ goto DONE;
+ }
+ }
+
+ if (vsi->vlan_num == 0)
+ return 0;
+
+ TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
+ ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
+ if (ret != ICE_SUCCESS) {
+ ret = -EINVAL;
+ goto DONE;
+ }
+ }
+
+DONE:
+ return ret;
+}
+
+static int
+ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_vsi_ctx ctxt;
+ uint8_t qinq_flags;
+ int ret = 0;
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
+ if (on) {
+ if ((vsi->info.outer_tag_flags &
+ ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
+ ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
+ return 0; /* already on */
+ } else {
+ if (!(vsi->info.outer_tag_flags &
+ ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
+ return 0; /* already off */
+ }
+ }
+
+ if (on)
+ qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
+ else
+ qinq_flags = 0;
+ /* clear global insertion and use per packet insertion */
+ vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
+ vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
+ vsi->info.outer_tag_flags |= qinq_flags;
+ /* use default vlan type 0x8100 */
+ vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
+ ICE_AQ_VSI_OUTER_TAG_TYPE_S;
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info.valid_sections =
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ ctxt.vsi_num = vsi->vsi_id;
+ ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(INFO,
+ "Update VSI failed to %s qinq stripping",
+ on ? "enable" : "disable");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections |=
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+
+ return ret;
+}
+
+static int
+ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_vsi_ctx ctxt;
+ uint8_t qinq_flags;
+ int ret = 0;
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
+ if (on) {
+ if ((vsi->info.outer_tag_flags &
+ ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
+ ICE_AQ_VSI_OUTER_TAG_COPY)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.outer_tag_flags &
+ ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
+ ICE_AQ_VSI_OUTER_TAG_NOTHING)
+ return 0; /* already off */
+ }
+ }
+
+ if (on)
+ qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
+ else
+ qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
+ vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
+ vsi->info.outer_tag_flags |= qinq_flags;
+ /* use default vlan type 0x8100 */
+ vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
+ ICE_AQ_VSI_OUTER_TAG_TYPE_S;
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info.valid_sections =
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ ctxt.vsi_num = vsi->vsi_id;
+ ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(INFO,
+ "Update VSI failed to %s qinq stripping",
+ on ? "enable" : "disable");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections |=
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+
+ return ret;
+}
+
+static int
+ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
+{
+ int ret;
+
+ ret = ice_vsi_config_qinq_stripping(vsi, on);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
+
+ ret = ice_vsi_config_qinq_insertion(vsi, on);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
+
+ return ret;
+}
+
+/* Enable IRQ0 */
+static void
+ice_pf_enable_irq0(struct ice_hw *hw)
+{
+ /* reset the registers */
+ ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
+ ICE_READ_REG(hw, PFINT_OICR);
+
+#ifdef ICE_LSE_SPT
+ ICE_WRITE_REG(hw, PFINT_OICR_ENA,
+ (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
+ (~PFINT_OICR_LINK_STAT_CHANGE_M)));
+
+ ICE_WRITE_REG(hw, PFINT_OICR_CTL,
+ (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
+ ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
+ PFINT_OICR_CTL_ITR_INDX_M) |
+ PFINT_OICR_CTL_CAUSE_ENA_M);
+
+ ICE_WRITE_REG(hw, PFINT_FW_CTL,
+ (0 & PFINT_FW_CTL_MSIX_INDX_M) |
+ ((0 << PFINT_FW_CTL_ITR_INDX_S) &
+ PFINT_FW_CTL_ITR_INDX_M) |
+ PFINT_FW_CTL_CAUSE_ENA_M);
+#else
+ ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
+#endif
+
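+ /* enable the misc/admin interrupt on vector 0; writing the full
+ * ITR index mask selects index 3, i.e. no interrupt throttling
+ */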
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
+ GLINT_DYN_CTL_INTENA_M |
+ GLINT_DYN_CTL_CLEARPBA_M |
+ GLINT_DYN_CTL_ITR_INDX_M);
+
+ ice_flush(hw);
+}
+
+/* Disable IRQ0 */
+static void
+ice_pf_disable_irq0(struct ice_hw *hw)
+{
+ /* Disable all interrupt types */
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
+ ice_flush(hw);
+}
+
+#ifdef ICE_LSE_SPT
+static void
+ice_handle_aq_msg(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_ctl_q_info *cq = &hw->adminq;
+ struct ice_rq_event_info event;
+ uint16_t pending, opcode;
+ int ret;
+
+ event.buf_len = ICE_AQ_MAX_BUF_LEN;
+ event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
+ if (!event.msg_buf) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mem");
+ return;
+ }
+
+ pending = 1;
+ while (pending) {
+ ret = ice_clean_rq_elem(hw, cq, &event, &pending);
+
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(INFO,
+ "Failed to read msg from AdminQ, "
+ "adminq_err: %u",
+ hw->adminq.sq_last_status);
+ break;
+ }
+ opcode = rte_le_to_cpu_16(event.desc.opcode);
+
+ switch (opcode) {
+ case ice_aqc_opc_get_link_status:
+ ret = ice_link_update(dev, 0);
+ if (!ret)
+ _rte_eth_dev_callback_process
+ (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
+ opcode);
+ break;
+ }
+ }
+ rte_free(event.msg_buf);
+}
+#endif
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+ice_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t oicr;
+ uint32_t reg;
+ uint8_t pf_num;
+ uint8_t event;
+ uint16_t queue;
+ int ret;
+#ifdef ICE_LSE_SPT
+ uint32_t int_fw_ctl;
+#endif
+
+ /* Disable interrupt */
+ ice_pf_disable_irq0(hw);
+
+ /* read out interrupt causes */
+ oicr = ICE_READ_REG(hw, PFINT_OICR);
+#ifdef ICE_LSE_SPT
+ int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
+#endif
+
+ /* No interrupt event indicated */
+ if (!(oicr & PFINT_OICR_INTEVENT_M)) {
+ PMD_DRV_LOG(INFO, "No interrupt event");
+ goto done;
+ }
+
+#ifdef ICE_LSE_SPT
+ if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
+ PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
+ ice_handle_aq_msg(dev);
+ }
+#else
+ if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
+ PMD_DRV_LOG(INFO, "OICR: link state change event");
+ ret = ice_link_update(dev, 0);
+ if (!ret)
+ _rte_eth_dev_callback_process
+ (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+#endif
+
+ if (oicr & PFINT_OICR_MAL_DETECT_M) {
+ PMD_DRV_LOG(WARNING, "OICR: MDD event");
+ reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
+ if (reg & GL_MDET_TX_PQM_VALID_M) {
+ pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
+ GL_MDET_TX_PQM_PF_NUM_S;
+ event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
+ GL_MDET_TX_PQM_MAL_TYPE_S;
+ queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
+ GL_MDET_TX_PQM_QNUM_S;
+
+ PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
+ "%d by PQM on TX queue %d PF# %d",
+ event, queue, pf_num);
+ }
+
+ reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
+ if (reg & GL_MDET_TX_TCLAN_VALID_M) {
+ pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
+ GL_MDET_TX_TCLAN_PF_NUM_S;
+ event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
+ GL_MDET_TX_TCLAN_MAL_TYPE_S;
+ queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
+ GL_MDET_TX_TCLAN_QNUM_S;
+
+ PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
+ "%d by TCLAN on TX queue %d PF# %d",
+ event, queue, pf_num);
+ }
+ }
+done:
+ /* Enable interrupt */
+ ice_pf_enable_irq0(hw);
+ rte_intr_ack(dev->intr_handle);
+}
+
+static void
+ice_init_proto_xtr(struct rte_eth_dev *dev)
+{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ const struct proto_xtr_ol_flag *ol_flag;
+ bool proto_xtr_enable = false;
+ int offset;
+ uint16_t i;
+
+ if (!ice_proto_xtr_support(hw)) {
+ PMD_DRV_LOG(NOTICE, "Protocol extraction is not supported");
+ return;
+ }
+
+ pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
+ if (unlikely(pf->proto_xtr == NULL)) {
+ PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
+ return;
+ }
+
+ for (i = 0; i < pf->lan_nb_qps; i++) {
+ pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
+ ad->devargs.proto_xtr[i] :
+ ad->devargs.proto_xtr_dflt;
+
+ if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
+ uint8_t type = pf->proto_xtr[i];
+
+ ice_proto_xtr_ol_flag_params[type].required = true;
+ proto_xtr_enable = true;
+ }
+ }
+
+ if (likely(!proto_xtr_enable))
+ return;
+
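+ /* Register the mbuf dynamic field that carries the extracted
+ * protocol metadata; the per-protocol offload flags are
+ * registered below.
+ */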
+ offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
+ if (unlikely(offset == -1)) {
+ PMD_DRV_LOG(ERR,
+ "Protocol extraction metadata is disabled in mbuf with error %d",
+ -rte_errno);
+ return;
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "Protocol extraction metadata offset in mbuf is : %d",
+ offset);
+ rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
+
+ for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
+ ol_flag = &ice_proto_xtr_ol_flag_params[i];
+
+ if (!ol_flag->required)
+ continue;
+
+ offset = rte_mbuf_dynflag_register(&ol_flag->param);
+ if (unlikely(offset == -1)) {
+ PMD_DRV_LOG(ERR,
+ "Protocol extraction offload '%s' failed to register with error %d",
+ ol_flag->param.name, -rte_errno);
+
+ rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
+ break;
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "Protocol extraction offload '%s' offset in mbuf is : %d",
+ ol_flag->param.name, offset);
+ *ol_flag->ol_flag = 1ULL << offset;
+ }
+}
+
+/* Initialize SW parameters of PF */
+static int
+ice_pf_sw_init(struct rte_eth_dev *dev)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ pf->lan_nb_qp_max =
+ (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
+ hw->func_caps.common_cap.num_rxq);
+
+ pf->lan_nb_qps = pf->lan_nb_qp_max;
+
+ ice_init_proto_xtr(dev);
+
+ if (hw->func_caps.fd_fltr_guar > 0 ||
+ hw->func_caps.fd_fltr_best_effort > 0) {
+ pf->flags |= ICE_FLAG_FDIR;
+ pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
+ pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
+ } else {
+ pf->fdir_nb_qps = 0;
+ }
+ pf->fdir_qp_offset = 0;
+
+ return 0;
+}
+
+struct ice_vsi *
+ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_vsi *vsi = NULL;
+ struct ice_vsi_ctx vsi_ctx;
+ int ret;
+ struct rte_ether_addr broadcast = {
+ .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
+ struct rte_ether_addr mac_addr;
+ uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ uint8_t tc_bitmap = 0x1;
+ uint16_t cfg;
+
+ /* hw->num_lports = 1 in NIC mode */
+ vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
+ if (!vsi)
+ return NULL;
+
+ vsi->idx = pf->next_vsi_idx;
+ pf->next_vsi_idx++;
+ vsi->type = type;
+ vsi->adapter = ICE_PF_TO_ADAPTER(pf);
+ vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
+ vsi->vlan_anti_spoof_on = 0;
+ vsi->vlan_filter_on = 1;
+ TAILQ_INIT(&vsi->mac_list);
+ TAILQ_INIT(&vsi->vlan_list);
+
+ /* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
+ pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
+ ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
+ hw->func_caps.common_cap.rss_table_size;
+ pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
+
+ memset(&vsi_ctx, 0, sizeof(vsi_ctx));
+ switch (type) {
+ case ICE_VSI_PF:
+ vsi->nb_qps = pf->lan_nb_qps;
+ vsi->base_queue = 1;
+ ice_vsi_config_default_rss(&vsi_ctx.info);
+ vsi_ctx.alloc_from_pool = true;
+ vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
+ /* switch_id is queried by get_switch_config aq, which is done
+ * by ice_init_hw
+ */
+ vsi_ctx.info.sw_id = hw->port_info->sw_id;
+ vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
+ /* Allow all untagged or tagged packets */
+ vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+ vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
+ ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+
+ /* FDIR */
+ cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
+ vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
+ cfg = ICE_AQ_VSI_FD_ENABLE;
+ vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
+ vsi_ctx.info.max_fd_fltr_dedicated =
+ rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
+ vsi_ctx.info.max_fd_fltr_shared =
+ rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
+
+ /* Enable VLAN/UP trip */
+ ret = ice_vsi_config_tc_queue_mapping(vsi,
+ &vsi_ctx.info,
+ ICE_DEFAULT_TCMAP);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "tc queue mapping with vsi failed, "
+ "err = %d",
+ ret);
+ goto fail_mem;
+ }
+
+ break;
+ case ICE_VSI_CTRL:
+ vsi->nb_qps = pf->fdir_nb_qps;
+ vsi->base_queue = ICE_FDIR_QUEUE_ID;
+ vsi_ctx.alloc_from_pool = true;
+ vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
+
+ cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
+ vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
+ cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
+ vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
+ vsi_ctx.info.sw_id = hw->port_info->sw_id;
+ vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
+ ret = ice_vsi_config_tc_queue_mapping(vsi,
+ &vsi_ctx.info,
+ ICE_DEFAULT_TCMAP);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "tc queue mapping with vsi failed, "
+ "err = %d",
+ ret);
+ goto fail_mem;
+ }
+ break;
+ default:
+ /* for other types of VSI */
+ PMD_INIT_LOG(ERR, "other types of VSI not supported");
+ goto fail_mem;
+ }
+
+ /* VF has MSIX interrupt in VF range, don't allocate here */
+ if (type == ICE_VSI_PF) {
+ ret = ice_res_pool_alloc(&pf->msix_pool,
+ RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID));
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
+ vsi->vsi_id, ret);
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
+ } else if (type == ICE_VSI_CTRL) {
+ ret = ice_res_pool_alloc(&pf->msix_pool, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
+ vsi->vsi_id, ret);
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = 1;
+ } else {
+ vsi->msix_intr = 0;
+ vsi->nb_msix = 0;
+ }
+ ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
+ if (ret != ICE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
+ goto fail_mem;
+ }
+ /* store vsi information in SW structure */
+ vsi->vsi_id = vsi_ctx.vsi_num;
+ vsi->info = vsi_ctx.info;
+ pf->vsis_allocated = vsi_ctx.vsis_allocd;
+ pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
+
+ if (type == ICE_VSI_PF) {
+ /* MAC configuration */
+ rte_ether_addr_copy((struct rte_ether_addr *)
+ hw->port_info->mac.perm_addr,
+ &pf->dev_addr);
+
+ rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
+ ret = ice_add_mac_filter(vsi, &mac_addr);
+ if (ret != ICE_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
+
+ rte_ether_addr_copy(&broadcast, &mac_addr);
+ ret = ice_add_mac_filter(vsi, &mac_addr);
+ if (ret != ICE_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to add MAC filter");
+ }
+
+ /* At the beginning, only TC0. */
+ /* What we need here is the maximum number of Tx queues.
+ * Currently vsi->nb_qps holds it; correct this if that changes.
+ */
+ max_txqs[0] = vsi->nb_qps;
+ ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
+ tc_bitmap, max_txqs);
+ if (ret != ICE_SUCCESS)
+ PMD_INIT_LOG(ERR, "Failed to config vsi sched");
+
+ return vsi;
+fail_mem:
+ rte_free(vsi);
+ pf->next_vsi_idx--;
+ return NULL;
+}
+
+static int
+ice_send_driver_ver(struct ice_hw *hw)
+{
+ struct ice_driver_ver dv;
+
+ /* we don't have a driver version yet, use 0 as a dummy */
+ dv.major_ver = 0;
+ dv.minor_ver = 0;
+ dv.build_ver = 0;
+ dv.subbuild_ver = 0;
+ strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
+
+ return ice_aq_send_driver_ver(hw, &dv, NULL);
+}
+
+static int
+ice_pf_setup(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_vsi *vsi;
+ uint16_t unused;
+
+ /* Clear all stats counters */
+ pf->offset_loaded = false;
+ memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
+ memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
+ memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
+ memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
+
+ /* force guaranteed filter pool for PF */
+ ice_alloc_fd_guar_item(hw, &unused,
+ hw->func_caps.fd_fltr_guar);
+ /* force shared filter pool for PF */
+ ice_alloc_fd_shrd_item(hw, &unused,
+ hw->func_caps.fd_fltr_best_effort);
+
+ vsi = ice_setup_vsi(pf, ICE_VSI_PF);
+ if (!vsi) {
+ PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
+ return -EINVAL;
+ }
+
+ pf->main_vsi = vsi;
+
+ return 0;
+}
+
+/* PCIe configuration space setting */
+#define PCI_CFG_SPACE_SIZE 256
+#define PCI_CFG_SPACE_EXP_SIZE 4096
+#define PCI_EXT_CAP_ID(header) (int)((header) & 0x0000ffff)
+#define PCI_EXT_CAP_NEXT(header) (((header) >> 20) & 0xffc)
+#define PCI_EXT_CAP_ID_DSN 0x03
+
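+/*
+ * Walk the PCIe extended capability list, which starts right after the
+ * 256-byte standard config space. Each capability header packs the
+ * capability ID in bits 15:0 and the next capability offset in bits
+ * 31:20 (masked with 0xffc to stay DWORD-aligned).
+ */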
+static int
+ice_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
+{
+ uint32_t header;
+ int ttl;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+ if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
+ PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
+ return -1;
+ }
+
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
+ PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Extract device serial number from PCIe Configuration Space and
+ * determine the pkg file path according to the DSN.
+ */
+static int
+ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
+{
+ int pos;
+ char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
+ uint32_t dsn_low, dsn_high;
+ memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
+
+ pos = ice_pci_find_next_ext_capability(pci_dev, PCI_EXT_CAP_ID_DSN);
+
+ if (pos) {
+ rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4);
+ rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8);
+ snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
+ "ice-%08x%08x.pkg", dsn_high, dsn_low);
+ } else {
+ PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
+ goto fail_dsn;
+ }
+
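+ /* Try the DSN-named package in the updates directory first, then
+ * in the default directory; fall back to the generic package files
+ * below if neither exists.
+ */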
+ strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
+ ICE_MAX_PKG_FILENAME_SIZE);
+ if (!access(strcat(pkg_file, opt_ddp_filename), 0))
+ return 0;
+
+ strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
+ ICE_MAX_PKG_FILENAME_SIZE);
+ if (!access(strcat(pkg_file, opt_ddp_filename), 0))
+ return 0;
+
+fail_dsn:
+ strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
+ if (!access(pkg_file, 0))
+ return 0;
+ strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
+ return 0;
+}
+
+enum ice_pkg_type
+ice_load_pkg_type(struct ice_hw *hw)
+{
+ enum ice_pkg_type package_type;
+
+ /* store the activated package type (OS default or Comms) */
+ if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
+ ICE_PKG_NAME_SIZE))
+ package_type = ICE_PKG_TYPE_OS_DEFAULT;
+ else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
+ ICE_PKG_NAME_SIZE))
+ package_type = ICE_PKG_TYPE_COMMS;
+ else
+ package_type = ICE_PKG_TYPE_UNKNOWN;
+
+ PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s",
+ hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
+ hw->active_pkg_name);
+
+ return package_type;
+}
+
+static int ice_load_pkg(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
+ int err;
+ uint8_t *buf;
+ int buf_len;
+ FILE *file;
+ struct stat fstat;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ ice_pkg_file_search_path(pci_dev, pkg_file);
+
+ file = fopen(pkg_file, "rb");
+ if (!file) {
+ PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
+ return -1;
+ }
+
+ err = stat(pkg_file, &fstat);
+ if (err) {
+ PMD_INIT_LOG(ERR, "failed to get file stats\n");
+ fclose(file);
+ return err;
+ }
+
+ buf_len = fstat.st_size;
+ buf = rte_malloc(NULL, buf_len, 0);
+
+ if (!buf) {
+ PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
+ buf_len);
+ fclose(file);
+ return -1;
+ }
+
+ err = fread(buf, buf_len, 1, file);
+ if (err != 1) {
+ PMD_INIT_LOG(ERR, "failed to read package data\n");
+ fclose(file);
+ err = -1;
+ goto fail_exit;
+ }
+
+ fclose(file);
+
+ err = ice_copy_and_init_pkg(hw, buf, buf_len);
+ if (err) {
+ PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
+ goto fail_exit;
+ }
+
+ /* store the loaded pkg type info */
+ ad->active_pkg_type = ice_load_pkg_type(hw);
+
+ err = ice_init_hw_tbls(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
+ goto fail_init_tbls;
+ }
+
+ return 0;
+
+fail_init_tbls:
+ rte_free(hw->pkg_copy);
+fail_exit:
+ rte_free(buf);
+ return err;
+}
+
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+ uint32_t reg;
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+ reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+ if (reg & PFLAN_RX_QALLOC_VALID_M) {
+ pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+ } else {
+ PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+ " index");
+ }
+}
+
+static int
+parse_bool(const char *key, const char *value, void *args)
+{
+ int *i = (int *)args;
+ char *end;
+ int num;
+
+ num = strtoul(value, &end, 10);
+
+ if (num != 0 && num != 1) {
+ PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
+ "value must be 0 or 1",
+ value, key);
+ return -1;
+ }
+
+ *i = num;
+ return 0;
+}
+
+static int ice_parse_devargs(struct rte_eth_dev *dev)
+{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_devargs *devargs = dev->device->devargs;
+ struct rte_kvargs *kvlist;
+ int ret;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
+ if (kvlist == NULL) {
+ PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
+ return -EINVAL;
+ }
+
+ ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
+ memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
+ sizeof(ad->devargs.proto_xtr));
+
+ ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
+ &handle_proto_xtr_arg, &ad->devargs);
+ if (ret)
+ goto bail;
+
+ ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
+ &parse_bool, &ad->devargs.safe_mode_support);
+ if (ret)
+ goto bail;
+
+ ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
+ &parse_bool, &ad->devargs.pipe_mode_support);
+ if (ret)
+ goto bail;
+
+ ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
+ &parse_bool, &ad->devargs.flow_mark_support);
+ if (ret)
+ goto bail;
+
+bail:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+/* Forward LLDP packets to the default VSI by setting switch rules */
+static int
+ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_fltr_list_entry *s_list_itr = NULL;
+ struct LIST_HEAD_TYPE list_head;
+ int ret = 0;
+
+ INIT_LIST_HEAD(&list_head);
+
+ s_list_itr = (struct ice_fltr_list_entry *)
+ ice_malloc(hw, sizeof(*s_list_itr));
+ if (!s_list_itr)
+ return -ENOMEM;
+ s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ s_list_itr->fltr_info.vsi_handle = vsi->idx;
+ s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
+ RTE_ETHER_TYPE_LLDP;
+ s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ s_list_itr->fltr_info.flag = ICE_FLTR_RX;
+ s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
+ LIST_ADD(&s_list_itr->list_entry, &list_head);
+ if (on)
+ ret = ice_add_eth_mac(hw, &list_head);
+ else
+ ret = ice_remove_eth_mac(hw, &list_head);
+
+ rte_free(s_list_itr);
+ return ret;
+}
+
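+/*
+ * Query the descriptor IDs currently allocated for the given resource
+ * type via the get_allocd_res_desc admin queue command, so that stale
+ * profiles (e.g. left over from a previous driver instance) can be
+ * freed during initialization.
+ */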
+static enum ice_status
+ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
+ uint16_t num, uint16_t desc_id,
+ uint16_t *prof_buf, uint16_t *num_prof)
+{
+ struct ice_aqc_get_allocd_res_desc_resp *resp_buf;
+ int ret;
+ uint16_t buf_len;
+ bool res_shared = true;
+ struct ice_aq_desc aq_desc;
+ struct ice_sq_cd *cd = NULL;
+ struct ice_aqc_get_allocd_res_desc *cmd =
+ &aq_desc.params.get_res_desc;
+
+ buf_len = sizeof(resp_buf->elem) * num;
+ resp_buf = ice_malloc(hw, buf_len);
+ if (!resp_buf)
+ return -ENOMEM;
+
+ ice_fill_dflt_direct_cmd_desc(&aq_desc,
+ ice_aqc_opc_get_allocd_res_desc);
+
+ cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | (res_shared ?
+ ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
+ cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
+
+ ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
+ if (!ret)
+ *num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
+ else
+ goto exit;
+
+ ice_memcpy(prof_buf, resp_buf->elem, sizeof(resp_buf->elem) *
+ (*num_prof), ICE_NONDMA_TO_NONDMA);
+
+exit:
+ rte_free(resp_buf);
+ return ret;
+}
+
+static int
+ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
+{
+ int ret;
+ uint16_t prof_id;
+ uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
+ uint16_t first_desc = 1;
+ uint16_t num_prof = 0;
+
+ ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
+ first_desc, prof_buf, &num_prof);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get fxp resource");
+ return ret;
+ }
+
+ for (prof_id = 0; prof_id < num_prof; prof_id++) {
+ ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to free fxp resource");
+ return ret;
+ }
+ }
+ return 0;
+}
+
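+/*
+ * Free any flow director and RSS profile descriptors still allocated
+ * in hardware so the flexible pipeline starts from a clean state.
+ */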
+static int
+ice_reset_fxp_resource(struct ice_hw *hw)
+{
+ int ret;
+
+ ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to clearup fdir resource");
+ return ret;
+ }
+
+ ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to clearup rss resource");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+ice_rss_ctx_init(struct ice_pf *pf)
+{
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
+
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+}
+
+static int
+ice_dev_init(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct ice_vsi *vsi;
+ int ret;
+
+ dev->dev_ops = &ice_eth_dev_ops;
+ dev->rx_pkt_burst = ice_recv_pkts;
+ dev->tx_pkt_burst = ice_xmit_pkts;
+ dev->tx_pkt_prepare = ice_prep_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ice_set_rx_function(dev);
+ ice_set_tx_function(dev);
+ return 0;
+ }
+
+ ice_set_default_ptype_table(dev);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+ intr_handle = &pci_dev->intr_handle;
+
+ pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ pf->adapter->eth_dev = dev;
+ pf->dev_data = dev->data;
+ hw->back = pf->adapter;
+ hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+
+ ret = ice_parse_devargs(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to parse devargs");
+ return -EINVAL;
+ }
+
+ ice_init_controlq_parameter(hw);
+
+ ret = ice_init_hw(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to initialize HW");
+ return -EINVAL;
+ }
+
+ ret = ice_load_pkg(dev);
+ if (ret) {
+ if (ad->devargs.safe_mode_support == 0) {
+ PMD_INIT_LOG(ERR, "Failed to load the DDP package,"
+ "Use safe-mode-support=1 to enter Safe Mode");
+ return ret;
+ }
+
+ PMD_INIT_LOG(WARNING, "Failed to load the DDP package,"
+ "Entering Safe Mode");
+ ad->is_safe_mode = 1;
+ }
+
+ PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
+ hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
+ hw->api_maj_ver, hw->api_min_ver);
+
+ ice_pf_sw_init(dev);
+ ret = ice_init_mac_address(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to initialize mac address");
+ goto err_init_mac;
+ }
+
+ /* Pass the information to the rte_eth_dev_close() that it should also
+ * release the private port resources.
+ */
+ dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
+ ret = ice_res_pool_init(&pf->msix_pool, 1,
+ hw->func_caps.common_cap.num_msix_vectors - 1);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
+ goto err_msix_pool_init;
+ }
+
+ ret = ice_pf_setup(pf);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup PF");
+ goto err_pf_setup;
+ }
+
+ ret = ice_send_driver_ver(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to send driver version");
+ goto err_pf_setup;
+ }
+
+ vsi = pf->main_vsi;
+
+ /* Disable double vlan by default */
+ ice_vsi_config_double_vlan(vsi, false);
+
+ ret = ice_aq_stop_lldp(hw, true, false, NULL);
+ if (ret != ICE_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
+ ret = ice_init_dcb(hw, true);
+ if (ret != ICE_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
+ /* Forward LLDP packets to default VSI */
+ ret = ice_vsi_config_sw_lldp(vsi, true);
+ if (ret != ICE_SUCCESS)
+ PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
+ /* register callback func to eal lib */
+ rte_intr_callback_register(intr_handle,
+ ice_interrupt_handler, dev);
+
+ ice_pf_enable_irq0(hw);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(intr_handle);
+
+ /* get base queue pairs index in the device */
+ ice_base_queue_get(pf);
+
+ /* Initialize RSS context for gtpu_eh */
+ ice_rss_ctx_init(pf);
+
+ if (!ad->is_safe_mode) {
+ ret = ice_flow_init(ad);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
+ return ret;
+ }
+ }
+
+ ret = ice_reset_fxp_resource(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
+ return ret;
+ }
+
+ return 0;
+
+err_pf_setup:
+ ice_res_pool_destroy(&pf->msix_pool);
+err_msix_pool_init:
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+err_init_mac:
+ ice_sched_cleanup_all(hw);
+ rte_free(hw->port_info);
+ ice_shutdown_all_ctrlq(hw);
+ rte_free(pf->proto_xtr);
+
+ return ret;
+}
+
+int
+ice_release_vsi(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw;
+ struct ice_vsi_ctx vsi_ctx;
+ enum ice_status ret;
+ int error = 0;
+
+ if (!vsi)
+ return error;
+
+ hw = ICE_VSI_TO_HW(vsi);
+
+ ice_remove_all_mac_vlan_filters(vsi);
+
+ memset(&vsi_ctx, 0, sizeof(vsi_ctx));
+
+ vsi_ctx.vsi_num = vsi->vsi_id;
+ vsi_ctx.info = vsi->info;
+ ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
+ if (ret != ICE_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
+ error = -1;
+ }
+
+ rte_free(vsi->rss_lut);
+ rte_free(vsi->rss_key);
+ rte_free(vsi);
+ return error;
+}
+
+void
+ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint16_t msix_intr, i;
+
+ /* disable interrupts and also clear all the existing config */
+ for (i = 0; i < vsi->nb_qps; i++) {
+ ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
+ ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
+ rte_wmb();
+ }
+
+ if (rte_intr_allow_others(intr_handle))
+ /* vfio-pci */
+ for (i = 0; i < vsi->nb_msix; i++) {
+ msix_intr = vsi->msix_intr + i;
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
+ GLINT_DYN_CTL_WB_ON_ITR_M);
+ }
+ else
+ /* igb_uio */
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
+}
+
+static void
+ice_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *main_vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint16_t i;
+
+ /* avoid stopping again */
+ if (pf->adapter_stopped)
+ return;
+
+ /* stop and clear all Rx queues */
+ for (i = 0; i < data->nb_rx_queues; i++)
+ ice_rx_queue_stop(dev, i);
+
+ /* stop and clear all Tx queues */
+ for (i = 0; i < data->nb_tx_queues; i++)
+ ice_tx_queue_stop(dev, i);
+
+ /* disable all queue interrupts */
+ ice_vsi_disable_queues_intr(main_vsi);
+
+ if (pf->init_link_up)
+ ice_dev_set_link_up(dev);
+ else
+ ice_dev_set_link_down(dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ pf->adapter_stopped = true;
+}
+
+static void
+ice_dev_close(struct rte_eth_dev *dev)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Since stop will bring the link down, a link event will be
+ * triggered. Disable the irq first to avoid crashing the
+ * interrupt service thread when port_info and other resources
+ * are deallocated.
+ */
+ ice_pf_disable_irq0(hw);
+
+ ice_dev_stop(dev);
+
+ if (!ad->is_safe_mode)
+ ice_flow_uninit(ad);
+
+ /* release all queue resource */
+ ice_free_queues(dev);
+
+ ice_res_pool_destroy(&pf->msix_pool);
+ ice_release_vsi(pf->main_vsi);
+ ice_sched_cleanup_all(hw);
+ ice_free_hw_tbls(hw);
+ rte_free(hw->port_info);
+ hw->port_info = NULL;
+ ice_shutdown_all_ctrlq(hw);
+ rte_free(pf->proto_xtr);
+ pf->proto_xtr = NULL;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ ice_interrupt_handler, dev);
+}
+
+static int
+ice_dev_uninit(struct rte_eth_dev *dev)
+{
+ ice_dev_close(dev);
+
+ return 0;
+}
+
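+/*
+ * GTPU RSS bookkeeping: GTPU_EH hash configurations are cached in
+ * pf->gtpu_hash_ctx so they can be removed and later restored when
+ * overlapping GTPU_DWN/UP configurations are added or removed (see
+ * the _pre/_post helpers below).
+ */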
+static int
+ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_vsi *vsi = pf->main_vsi;
+
+ if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv4_udp.symm = symm;
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv6_udp.symm = symm;
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv4_tcp.symm = symm;
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv6_tcp.symm = symm;
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv4.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv4.symm = symm;
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv6.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv6.symm = symm;
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ }
+ }
+
+ if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
+ ICE_FLOW_SEG_HDR_GTPU_UP)) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv4.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv6.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv4.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv6.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_vsi *vsi = pf->main_vsi;
+
+ if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
+ ICE_FLOW_SEG_HDR_GTPU_UP)) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
+ pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr);
+ ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
+ pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr);
+ ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
+ pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr);
+ ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
+ pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr);
+ ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
+ }
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
+ pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
+ pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ }
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
+ pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ }
+
+ if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
+ ice_rem_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
+ pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
+{
+ if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
+ }
+ }
+
+ return 0;
+}
+
+int
+ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
+ uint64_t fld, uint32_t hdr)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret;
+
+ ret = ice_rem_rss_cfg(hw, vsi_id, fld, hdr);
+ if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
+ PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
+
+ ret = ice_rem_rss_cfg_post(pf, hdr);
+ if (ret)
+ PMD_DRV_LOG(ERR, "remove rss cfg post failed\n");
+
+ return 0;
+}
+
+int
+ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
+ uint64_t fld, uint32_t hdr, bool symm)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret;
+
+ ret = ice_add_rss_cfg_pre(pf, hdr);
+ if (ret)
+ PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
+
+ ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm);
+ if (ret)
+ PMD_DRV_LOG(ERR, "add rss cfg failed\n");
+
+ ret = ice_add_rss_cfg_post(pf, hdr, fld, symm);
+ if (ret)
+ PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
+
+ return 0;
+}
+
+static void
+ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
+{
+ struct ice_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ /* Configure RSS for IPv4 with src/dst addr as input set */
+ if (rss_hf & ETH_RSS_IPV4) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
+ __func__, ret);
+ }
+
+ /* Configure RSS for IPv6 with src/dst addr as input set */
+ if (rss_hf & ETH_RSS_IPV6) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
+ __func__, ret);
+ }
+
+ /* Configure RSS for udp4 with src/dst addr and port as input set */
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
+ __func__, ret);
+ }
+
+ /* Configure RSS for udp6 with src/dst addr and port as input set */
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
+ __func__, ret);
+ }
+
+ /* Configure RSS for tcp4 with src/dst addr and port as input set */
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
+ __func__, ret);
+ }
+
+ /* Configure RSS for tcp6 with src/dst addr and port as input set */
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
+ __func__, ret);
+ }
+
+ /* Configure RSS for sctp4 with src/dst addr and port as input set */
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
+ __func__, ret);
+ }
+
+ /* Configure RSS for sctp6 with src/dst addr and port as input set */
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_IPV4) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_IPV6) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV4_SCTP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_SCTP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV6_SCTP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_SCTP rss flow fail %d",
+ __func__, ret);
+ }
+}
+
+static int ice_init_rss(struct ice_pf *pf)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_vsi *vsi = pf->main_vsi;
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_rss_conf *rss_conf;
+ struct ice_aqc_get_set_rss_keys key;
+ uint16_t i, nb_q;
+ int ret = 0;
+ bool is_safe_mode = pf->adapter->is_safe_mode;
+ uint32_t reg;
+
+ rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = dev->data->nb_rx_queues;
+ vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
+ vsi->rss_lut_size = pf->hash_lut_size;
+
+ if (is_safe_mode) {
+ PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
+ return 0;
+ }
+
+ if (!vsi->rss_key) {
+ vsi->rss_key = rte_zmalloc(NULL,
+ vsi->rss_key_size, 0);
+ if (vsi->rss_key == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
+ return -ENOMEM;
+ }
+ }
+ if (!vsi->rss_lut) {
+ vsi->rss_lut = rte_zmalloc(NULL,
+ vsi->rss_lut_size, 0);
+ if (vsi->rss_lut == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
+ rte_free(vsi->rss_key);
+ vsi->rss_key = NULL;
+ return -ENOMEM;
+ }
+ }
+ /* configure RSS key */
+ if (!rss_conf->rss_key) {
+ /* Calculate the default hash key */
+ for (i = 0; i <= vsi->rss_key_size; i++)
+ vsi->rss_key[i] = (uint8_t)rte_rand();
+ } else {
+ rte_memcpy(vsi->rss_key, rss_conf->rss_key,
+ RTE_MIN(rss_conf->rss_key_len,
+ vsi->rss_key_size));
+ }
+ rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
+ ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
+ if (ret)
+ goto out;
+
+ /* init RSS LUT table */
+ for (i = 0; i < vsi->rss_lut_size; i++)
+ vsi->rss_lut[i] = i % nb_q;
+
+ ret = ice_aq_set_rss_lut(hw, vsi->idx,
+ ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
+ vsi->rss_lut, vsi->rss_lut_size);
+ if (ret)
+ goto out;
+
+ /* Select the symmetric Toeplitz hash scheme for this VSI. */
+ reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
+ reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
+ (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
+ ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
+
+ /* RSS hash configuration */
+ ice_rss_hash_set(pf, rss_conf->rss_hf);
+
+ return 0;
+out:
+ rte_free(vsi->rss_key);
+ vsi->rss_key = NULL;
+ rte_free(vsi->rss_lut);
+ vsi->rss_lut = NULL;
+ return -EINVAL;
+}
+
+static int
+ice_dev_configure(struct rte_eth_dev *dev)
+{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ /* Initialize to TRUE. If any of the Rx queues does not meet the
+ * bulk allocation or vector Rx preconditions, it will be reset.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->tx_simple_allowed = true;
+
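+ /* When RSS is enabled, also deliver the computed hash in the mbuf
+ * by turning on the RSS_HASH offload implicitly.
+ */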
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+ ret = ice_init_rss(pf);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
+ int base_queue, int nb_queue)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint32_t val, val_tx;
+ int i;
+
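+ /* Bind the Rx and Tx queue of each queue pair to the same MSI-X
+ * vector, using ITR index 0 for both interrupt causes.
+ */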
+ for (i = 0; i < nb_queue; i++) {
+ /* do the actual binding */
+ val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
+ (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
+ val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
+ (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
+
+ PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
+ base_queue + i, msix_vect);
+ /* set ITR0 interval to 0x10 (32 usec at the 2 usec ITR granularity) */
+ ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
+ ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
+ ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
+ }
+}
+
+void
+ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint16_t msix_vect = vsi->msix_intr;
+ uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+ uint16_t queue_idx = 0;
+ int record = 0;
+ int i;
+
+ /* clear Rx/Tx queue interrupt */
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
+ ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
+ }
+
+ /* PF bind interrupt */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ queue_idx = 0;
+ record = 1;
+ }
+
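+ /* With a single vector available, map all remaining queues to it;
+ * otherwise assign one vector per queue in a 1:1 mapping.
+ */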
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ if (nb_msix <= 1) {
+ if (!rte_intr_allow_others(intr_handle))
+ msix_vect = ICE_MISC_VEC_ID;
+
+ /* UIO: map all remaining queues to one msix_vect */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i,
+ vsi->nb_used_qps - i);
+
+ for (; !!record && i < vsi->nb_used_qps; i++)
+ intr_handle->intr_vec[queue_idx + i] =
+ msix_vect;
+ break;
+ }
+
+ /* VFIO: 1:1 queue/msix_vect mapping */
+ __vsi_queues_bind_intr(vsi, msix_vect,
+ vsi->base_queue + i, 1);
+
+ if (!!record)
+ intr_handle->intr_vec[queue_idx + i] = msix_vect;
+
+ msix_vect++;
+ nb_msix--;
+ }
+}
+
+void
+ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
+{
+ struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ uint16_t msix_intr, i;
+
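+ /* Enable each vector: set INTENA, clear the pending bit array and
+ * select ITR index 3 (no ITR update) with write-back on ITR.
+ */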
+ if (rte_intr_allow_others(intr_handle))
+ for (i = 0; i < vsi->nb_used_qps; i++) {
+ msix_intr = vsi->msix_intr + i;
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
+ GLINT_DYN_CTL_INTENA_M |
+ GLINT_DYN_CTL_CLEARPBA_M |
+ GLINT_DYN_CTL_ITR_INDX_M |
+ GLINT_DYN_CTL_WB_ON_ITR_M);
+ }
+ else
+ ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
+ GLINT_DYN_CTL_INTENA_M |
+ GLINT_DYN_CTL_CLEARPBA_M |
+ GLINT_DYN_CTL_ITR_INDX_M |
+ GLINT_DYN_CTL_WB_ON_ITR_M);
+}
+
+static int
+ice_rxq_intr_setup(struct rte_eth_dev *dev)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ice_vsi *vsi = pf->main_vsi;
+ uint32_t intr_vector = 0;
+
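+ /* Keep the interrupt source quiet while the vector mapping is rebuilt */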
+ rte_intr_disable(intr_handle);
+
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
+ PMD_DRV_LOG(ERR, "At most %d intr queues supported",
+ ICE_MAX_INTR_QUEUE_NUM);
+ return -ENOTSUP;
+ }
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
+ 0);
+ if (!intr_handle->intr_vec) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %d rx_queues intr_vec",
+ dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* Map queues with MSIX interrupt */
+ vsi->nb_used_qps = dev->data->nb_rx_queues;
+ ice_vsi_queues_bind_intr(vsi);
+
+ /* Enable interrupts for all the queues */
+ ice_vsi_enable_queues_intr(vsi);
+
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static void
+ice_get_init_link_status(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+ struct ice_link_status link_status;
+ int ret;
+
+ ret = ice_aq_get_link_info(hw->port_info, enable_lse,
+ &link_status, NULL);
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to get link info");
+ pf->init_link_up = false;
+ return;
+ }
+
+ if (link_status.link_info & ICE_AQ_LINK_UP)
+ pf->init_link_up = true;
+}
+
+static int
+ice_dev_start(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ uint16_t nb_rxq = 0;
+ uint16_t nb_txq, i;
+ uint16_t max_frame_size;
+ int mask, ret;
+
+ /* program Tx queues' context in hardware */
+ for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
+ ret = ice_tx_queue_start(dev, nb_txq);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
+ goto tx_err;
+ }
+ }
+
+ /* program Rx queues' context in hardware */
+ for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
+ ret = ice_rx_queue_start(dev, nb_rxq);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
+ goto rx_err;
+ }
+ }
+
+ ice_set_rx_function(dev);
+ ice_set_tx_function(dev);
+
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = ice_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+ goto rx_err;
+ }
+
+ /* enable Rx interrupts and map Rx queues to interrupt vectors */
+ if (ice_rxq_intr_setup(dev))
+ return -EIO;
+
+ /* Enable receiving broadcast packets and transmitting packets */
+ ret = ice_set_vsi_promisc(hw, vsi->idx,
+ ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
+ ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
+ 0);
+ if (ret != ICE_SUCCESS)
+ PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+
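+ /* Choose which link events the firmware reports through the AdminQ */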
+ ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
+ ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
+ ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
+ ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
+ ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
+ ICE_AQ_LINK_EVENT_AN_COMPLETED |
+ ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
+ NULL);
+ if (ret != ICE_SUCCESS)
+ PMD_DRV_LOG(WARNING, "Fail to set phy mask");
+
+ ice_get_init_link_status(dev);
+
+ ice_dev_set_link_up(dev);
+
+ /* Call get_link_info AQ command to enable/disable LSE */
+ ice_link_update(dev, 0);
+
+ pf->adapter_stopped = false;
+
+ /* Set the max frame size to the default value */
+ max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
+ pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
+ ICE_FRAME_SIZE_MAX;
+
+ /* Set the max frame size to HW */
+ ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
+
+ return 0;
+
+ /* stop the started queues if failed to start all queues */
+rx_err:
+ for (i = 0; i < nb_rxq; i++)
+ ice_rx_queue_stop(dev, i);
+tx_err:
+ for (i = 0; i < nb_txq; i++)
+ ice_tx_queue_stop(dev, i);
+
+ return -EIO;
+}
+
+static int
+ice_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = ice_dev_uninit(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
+ return -ENXIO;
+ }
+
+ ret = ice_dev_init(dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int
+ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ bool is_safe_mode = pf->adapter->is_safe_mode;
+ u64 phy_type_low;
+ u64 phy_type_high;
+
+ dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
+ dev_info->max_rx_queues = vsi->nb_qps;
+ dev_info->max_tx_queues = vsi->nb_qps;
+ dev_info->max_mac_addrs = vsi->max_macaddrs;
+ dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->flow_type_rss_offloads = 0;
+
+ if (!is_safe_mode) {
+ dev_info->rx_offload_capa |=
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_RSS_HASH;
+ dev_info->tx_offload_capa |=
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+ dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
+ }
+
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->tx_queue_offload_capa = 0;
+
+ dev_info->reta_size = pf->hash_lut_size;
+ dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = ICE_DEFAULT_RX_PTHRESH,
+ .hthresh = ICE_DEFAULT_RX_HTHRESH,
+ .wthresh = ICE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = ICE_DEFAULT_TX_PTHRESH,
+ .hthresh = ICE_DEFAULT_TX_HTHRESH,
+ .wthresh = ICE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ICE_MAX_RING_DESC,
+ .nb_min = ICE_MIN_RING_DESC,
+ .nb_align = ICE_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ICE_MAX_RING_DESC,
+ .nb_min = ICE_MIN_RING_DESC,
+ .nb_align = ICE_ALIGN_RING_DESC,
+ };
+
+ dev_info->speed_capa = ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_5G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_25G;
+
+ phy_type_low = hw->port_info->phy.phy_type_low;
+ phy_type_high = hw->port_info->phy.phy_type_high;
+
+ if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
+ dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+
+ if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
+ ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
+ dev_info->speed_capa |= ETH_LINK_SPEED_100G;
+
+ dev_info->nb_rx_queues = dev->data->nb_rx_queues;
+ dev_info->nb_tx_queues = dev->data->nb_tx_queues;
+
+ dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
+ dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
+ dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
+
+ return 0;
+}
+
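+/* struct rte_eth_link fits in one 64-bit word, so a compare-and-set
+ * copy reads or updates the link state atomically with respect to
+ * concurrent updaters.
+ */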
+static inline int
+ice_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &dev->data->dev_link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+ice_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_link_status link_status;
+ struct rte_eth_link link, old;
+ int status;
+ unsigned int rep_cnt = MAX_REPEAT_TIME;
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+
+ memset(&link, 0, sizeof(link));
+ memset(&old, 0, sizeof(old));
+ memset(&link_status, 0, sizeof(link_status));
+ ice_atomic_read_link_status(dev, &old);
+
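+ /* When wait_to_complete is set, poll for up to
+ * MAX_REPEAT_TIME * CHECK_INTERVAL (1 second) for the link to come up.
+ */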
+ do {
+ /* Get link status information from hardware */
+ status = ice_aq_get_link_info(hw->port_info, enable_lse,
+ &link_status, NULL);
+ if (status != ICE_SUCCESS) {
+ link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_DRV_LOG(ERR, "Failed to get link info");
+ goto out;
+ }
+
+ link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
+ if (!wait_to_complete || link.link_status)
+ break;
+
+ rte_delay_ms(CHECK_INTERVAL);
+ } while (--rep_cnt);
+
+ if (!link.link_status)
+ goto out;
+
+ /* Full-duplex operation at all supported speeds */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ /* Parse the link status */
+ switch (link_status.link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ link.link_speed = ETH_SPEED_NUM_10M;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ link.link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ link.link_speed = ETH_SPEED_NUM_5G;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ link.link_speed = ETH_SPEED_NUM_50G;
+ break;
+ case ICE_AQ_LINK_SPEED_100GB:
+ link.link_speed = ETH_SPEED_NUM_100G;
+ break;
+ case ICE_AQ_LINK_SPEED_UNKNOWN:
+ default:
+ PMD_DRV_LOG(ERR, "Unknown link speed");
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ break;
+ }
+
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+
+out:
+ ice_atomic_write_link_status(dev, &link);
+ if (link.link_status == old.link_status)
+ return -1;
+
+ return 0;
+}
+
+/* Force the physical link state by getting the current PHY capabilities from
+ * hardware and setting the PHY config based on the determined capabilities. If
+ * link changes, a link event will be triggered because both the Enable Automatic
+ * Link Update and LESM Enable bits are set when setting the PHY capabilities.
+ */
+static enum ice_status
+ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
+{
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_port_info *pi;
+ enum ice_status status;
+
+ if (!hw || !hw->port_info)
+ return ICE_ERR_PARAM;
+
+ pi = hw->port_info;
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status)
+ goto out;
+
+ /* No change in link */
+ if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
+ link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
+ goto out;
+
+ cfg.phy_type_low = pcaps->phy_type_low;
+ cfg.phy_type_high = pcaps->phy_type_high;
+ cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
+ cfg.eee_cap = pcaps->eee_cap;
+ cfg.eeer_value = pcaps->eeer_value;
+ cfg.link_fec_opt = pcaps->link_fec_options;
+ if (link_up)
+ cfg.caps |= ICE_AQ_PHY_ENA_LINK;
+ else
+ cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
+
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
+
+out:
+ ice_free(hw, pcaps);
+ return status;
+}
+
+static int
+ice_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return ice_force_phys_link_state(hw, true);
+}
+
+static int
+ice_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ return ice_force_phys_link_state(hw, false);
+}
+
+static int
+ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
+
+ /* check if mtu is within the allowed range */
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
+ return -EINVAL;
+
+ /* MTU setting is not allowed while the port is running */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR,
+ "port %d must be stopped before configuration",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
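+ /* Frames larger than the standard Ethernet maximum require the
+ * jumbo frame offload.
+ */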
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return 0;
+}
+
+static int ice_macaddr_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ struct ice_mac_filter *f;
+ uint8_t flags = 0;
+ int ret;
+
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
+ return -EINVAL;
+ }
+
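+ /* Find the filter entry of the current default MAC address */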
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+ break;
+ }
+
+ if (!f) {
+ PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+ return -EIO;
+ }
+
+ ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+ return -EIO;
+ }
+ ret = ice_add_mac_filter(vsi, mac_addr);
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add mac filter");
+ return -EIO;
+ }
+ rte_ether_addr_copy(mac_addr, &pf->dev_addr);
+
+ flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
+ ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
+ if (ret != ICE_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to set manage mac");
+
+ return 0;
+}
+
+/* Add a MAC address, and update filters */
+static int
+ice_macaddr_add(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mac_addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ ret = ice_add_mac_filter(vsi, mac_addr);
+ if (ret != ICE_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MAC filter");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Remove a MAC address, and update filters */
+static void
+ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_ether_addr *macaddr;
+ int ret;
+
+ macaddr = &data->mac_addrs[index];
+ ret = ice_remove_mac_filter(vsi, macaddr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
+ return;
+ }
+}
+
+static int
+ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (on) {
+ ret = ice_add_vlan_filter(vsi, vlan_id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add vlan filter");
+ return -EINVAL;
+ }
+ } else {
+ ret = ice_remove_vlan_filter(vsi, vlan_id);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Configure VLAN filtering on or off */
+static int
+ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_vsi_ctx ctxt;
+ uint8_t sec_flags, sw_flags2;
+ int ret = 0;
+
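+ /* Tx VLAN pruning is controlled by the security flags, Rx VLAN
+ * pruning by switch flags 2.
+ */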
+ sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ if (on) {
+ vsi->info.sec_flags |= sec_flags;
+ vsi->info.sw_flags2 |= sw_flags2;
+ } else {
+ vsi->info.sec_flags &= ~sec_flags;
+ vsi->info.sw_flags2 &= ~sw_flags2;
+ }
+ vsi->info.sw_id = hw->port_info->sw_id;
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info.valid_sections =
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
+ ICE_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt.vsi_num = vsi->vsi_id;
+
+ ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
+ on ? "enable" : "disable");
+ return -EINVAL;
+ } else {
+ vsi->info.valid_sections |=
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
+ ICE_AQ_VSI_PROP_SECURITY_VALID);
+ }
+
+ /* To be consistent with other drivers, allow untagged packets
+ * when the VLAN filter is enabled.
+ */
+ if (on)
+ ret = ice_add_vlan_filter(vsi, 0);
+ else
+ ret = ice_remove_vlan_filter(vsi, 0);
+
+ return 0;
+}
+
+static int
+ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
+{
+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+ struct ice_vsi_ctx ctxt;
+ uint8_t vlan_flags;
+ int ret = 0;
+
+ /* Check whether it is already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
+ if (on) {
+ if ((vsi->info.vlan_flags &
+ ICE_AQ_VSI_VLAN_EMOD_M) ==
+ ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.vlan_flags &
+ ICE_AQ_VSI_VLAN_EMOD_M) ==
+ ICE_AQ_VSI_VLAN_EMOD_NOTHING)
+ return 0; /* already off */
+ }
+ }
+
+ if (on)
+ vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
+ else
+ vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
+ vsi->info.vlan_flags |= vlan_flags;
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info.valid_sections =
+ rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.vsi_num = vsi->vsi_id;
+ ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);