X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Figc%2Figc_ethdev.c;h=224a0954836b6a8662220b19fd9e93f4b935945a;hb=d31a89719024934cb834ca4ed7f76e8d178cd366;hp=78364a58a03a14ace3185589a0ffb8bfefe3ee20;hpb=bd3fcf0d0fa1424053f4eacc962a1434029956df;p=dpdk.git

diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index 78364a58a0..224a095483 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -8,13 +8,15 @@
 #include
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 
 #include "igc_logs.h"
 #include "igc_txrx.h"
+#include "igc_filter.h"
+#include "igc_flow.h"
 
 #define IGC_INTEL_VENDOR_ID		0x8086
 
@@ -50,6 +52,17 @@
 /* External VLAN Enable bit mask */
 #define IGC_CTRL_EXT_EXT_VLAN		(1u << 26)
 
+/* Speed select */
+#define IGC_CTRL_SPEED_MASK		(7u << 8)
+#define IGC_CTRL_SPEED_2500		(6u << 8)
+
+/* External VLAN Ether Type bit mask and shift */
+#define IGC_VET_EXT			0xFFFF0000
+#define IGC_VET_EXT_SHIFT		16
+
+/* Force EEE Auto-negotiation */
+#define IGC_EEER_EEE_FRC_AN		(1u << 28)
+
 /* Per Queue Good Packets Received Count */
 #define IGC_PQGPRC(idx)		(0x10010 + 0x100 * (idx))
 /* Per Queue Good Octets Received Count */
@@ -166,11 +179,11 @@ static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
 
 static int eth_igc_configure(struct rte_eth_dev *dev);
 static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static void eth_igc_stop(struct rte_eth_dev *dev);
+static int eth_igc_stop(struct rte_eth_dev *dev);
 static int eth_igc_start(struct rte_eth_dev *dev);
 static int eth_igc_set_link_up(struct rte_eth_dev *dev);
 static int eth_igc_set_link_down(struct rte_eth_dev *dev);
-static void eth_igc_close(struct rte_eth_dev *dev);
+static int eth_igc_close(struct rte_eth_dev *dev);
 static int eth_igc_reset(struct rte_eth_dev *dev);
 static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
 static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
@@ -227,6 +240,11 @@ static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
 			struct rte_eth_rss_conf *rss_conf);
 static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
 			struct rte_eth_rss_conf *rss_conf);
+static int
+eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
+		enum rte_vlan_type vlan_type, uint16_t tpid);
 
 static const struct eth_dev_ops eth_igc_ops = {
 	.dev_configure = eth_igc_configure,
@@ -254,10 +272,6 @@ static const struct eth_dev_ops eth_igc_ops = {
 
 	.rx_queue_setup = eth_igc_rx_queue_setup,
 	.rx_queue_release = eth_igc_rx_queue_release,
-	.rx_queue_count = eth_igc_rx_queue_count,
-	.rx_descriptor_done = eth_igc_rx_descriptor_done,
-	.rx_descriptor_status = eth_igc_rx_descriptor_status,
-	.tx_descriptor_status = eth_igc_tx_descriptor_status,
 	.tx_queue_setup = eth_igc_tx_queue_setup,
 	.tx_queue_release = eth_igc_tx_queue_release,
 	.tx_done_cleanup = eth_igc_tx_done_cleanup,
@@ -279,6 +293,11 @@ static const struct eth_dev_ops eth_igc_ops = {
 	.reta_query = eth_igc_rss_reta_query,
 	.rss_hash_update = eth_igc_rss_hash_update,
 	.rss_hash_conf_get = eth_igc_rss_hash_conf_get,
+	.vlan_filter_set = eth_igc_vlan_filter_set,
+	.vlan_offload_set = eth_igc_vlan_offload_set,
+	.vlan_tpid_set = eth_igc_vlan_tpid_set,
+	.vlan_strip_queue_set = eth_igc_vlan_strip_queue_set,
+	.flow_ops_get = eth_igc_flow_ops_get,
 };
 
 /*
@@ -322,6 +341,9 @@ eth_igc_configure(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
 	ret = igc_check_mq_mode(dev);
 	if (ret != 0)
 		return ret;
@@ -521,8 +543,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
 			pci_dev->addr.bus,
 			pci_dev->addr.devid,
 			pci_dev->addr.function);
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-				NULL);
+		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	}
 }
 
@@ -589,7 +610,7 @@ eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
  * This routine disables all traffic on the adapter by issuing a
  * global reset on the MAC.
  */
-static void
+static int
 eth_igc_stop(struct rte_eth_dev *dev)
 {
 	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
@@ -598,6 +619,7 @@ eth_igc_stop(struct rte_eth_dev *dev)
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct rte_eth_link link;
 
+	dev->data->dev_started = 0;
 	adapter->stopped = 1;
 
 	/* disable receive and transmit */
@@ -622,6 +644,9 @@ eth_igc_stop(struct rte_eth_dev *dev)
 	/* disable all wake up */
 	IGC_WRITE_REG(hw, IGC_WUC, 0);
 
+	/* disable checking EEE operation in MAC loopback mode */
+	igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
+
 	/* Set bit for Go Link disconnect */
 	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
 			IGC_82580_PM_GO_LINKD);
@@ -647,6 +672,8 @@ eth_igc_stop(struct rte_eth_dev *dev)
 		rte_free(intr_handle->intr_vec);
 		intr_handle->intr_vec = NULL;
 	}
+
+	return 0;
 }
 
 /*
@@ -950,6 +977,11 @@ eth_igc_start(struct rte_eth_dev *dev)
 
 	igc_clear_hw_cntrs_base_generic(hw);
 
+	/* VLAN Offload Settings */
+	eth_igc_vlan_offload_set(dev,
+		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+		ETH_VLAN_EXTEND_MASK);
+
 	/* Setup link speed and duplex */
 	speeds = &dev->data->dev_conf.link_speeds;
 	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
@@ -957,15 +989,20 @@ eth_igc_start(struct rte_eth_dev *dev)
 		hw->mac.autoneg = 1;
 	} else {
 		int num_speeds = 0;
-		bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
 
-		/* Reset */
+		if (*speeds & ETH_LINK_SPEED_FIXED) {
+			PMD_DRV_LOG(ERR,
+				"Force speed mode currently not supported");
+			igc_dev_clear_queues(dev);
+			return -EINVAL;
+		}
+
 		hw->phy.autoneg_advertised = 0;
+		hw->mac.autoneg = 1;
 
 		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
 				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
-				ETH_LINK_SPEED_FIXED)) {
+				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
 			num_speeds = -1;
 			goto error_invalid_config;
 		}
@@ -993,19 +1030,8 @@ eth_igc_start(struct rte_eth_dev *dev)
 			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
 			num_speeds++;
 		}
-		if (num_speeds == 0 || (!autoneg && num_speeds > 1))
+		if (num_speeds == 0)
 			goto error_invalid_config;
-
-		/* Set/reset the mac.autoneg based on the link speed,
-		 * fixed or not
-		 */
-		if (!autoneg) {
-			hw->mac.autoneg = 0;
-			hw->mac.forced_speed_duplex =
-					hw->phy.autoneg_advertised;
-		} else {
-			hw->mac.autoneg = 1;
-		}
 	}
 
 	igc_setup_link(hw);
@@ -1042,6 +1068,19 @@ eth_igc_start(struct rte_eth_dev *dev)
 
 	eth_igc_rxtx_control(dev, true);
 	eth_igc_link_update(dev, 0);
 
+	/* configure MAC-loopback mode */
+	if (dev->data->dev_conf.lpbk_mode == 1) {
+		uint32_t reg_val;
+
+		reg_val = IGC_READ_REG(hw, IGC_CTRL);
+		reg_val &= ~IGC_CTRL_SPEED_MASK;
+		reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
+			IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
+		IGC_WRITE_REG(hw, IGC_CTRL, reg_val);
+
+		igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
+	}
+
 	return 0;
 
 error_invalid_config:
@@ -1126,7 +1165,7 @@ igc_dev_free_queues(struct rte_eth_dev *dev)
 	dev->data->nb_tx_queues = 0;
 }
 
-static void
+static int
 eth_igc_close(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -1134,11 +1173,17 @@ eth_igc_close(struct rte_eth_dev *dev)
 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
 	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
 	int retry = 0;
+	int ret = 0;
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	if (!adapter->stopped)
-		eth_igc_stop(dev);
+		ret = eth_igc_stop(dev);
+
+	igc_flow_flush(dev, NULL);
+	igc_clear_all_filter(dev);
 
 	igc_intr_other_disable(dev);
 	do {
@@ -1157,6 +1202,8 @@ eth_igc_close(struct rte_eth_dev *dev)
 
 	/* Reset any pending lock */
 	igc_reset_swfw_lock(hw);
+
+	return ret;
 }
 
 static void
@@ -1180,6 +1227,10 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 	dev->dev_ops = &eth_igc_ops;
+	dev->rx_descriptor_done = eth_igc_rx_descriptor_done;
+	dev->rx_queue_count = eth_igc_rx_queue_count;
+	dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
+	dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
 
 	/*
 	 * for secondary processes, we don't initialize any further as primary
@@ -1190,6 +1241,7 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
 		return 0;
 
 	rte_eth_copy_pci_info(dev, pci_dev);
+	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
 	hw->back = pci_dev;
 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -1275,11 +1327,6 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
 		goto err_late;
 	}
 
-	/* Pass the information to the rte_eth_dev_close() that it should also
-	 * release the private port resources.
-	 */
-	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
 	hw->mac.get_link_status = 1;
 	igc->stopped = 0;
 
@@ -1307,6 +1354,8 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
 		igc->rxq_stats_map[i] = -1;
 	}
 
+	igc_flow_init(dev);
+	igc_clear_all_filter(dev);
 	return 0;
 
 err_late:
@@ -1318,10 +1367,6 @@ static int
 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
 {
 	PMD_INIT_FUNC_TRACE();
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
 	eth_igc_close(eth_dev);
 	return 0;
 }
@@ -1425,9 +1470,11 @@ eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
 				fw.eep_build);
 		}
 	}
+	if (ret < 0)
+		return -EINVAL;
 
 	ret += 1; /* add the size of '\0' */
-	if (fw_size < (u32)ret)
+	if (fw_size < (size_t)ret)
 		return ret;
 	else
 		return 0;
@@ -1443,6 +1490,7 @@ eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
 	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
 	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
+	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
 	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
 	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
 
@@ -1541,12 +1589,14 @@ eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EINVAL;
 
 	/*
-	 * refuse mtu that requires the support of scattered packets when
-	 * this feature has not been enabled before.
+	 * If device is started, refuse mtu that requires the support of
+	 * scattered packets when this feature has not been enabled before.
 	 */
-	if (!dev->data->scattered_rx &&
-	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+	if (dev->data->dev_started && !dev->data->scattered_rx &&
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		PMD_INIT_LOG(ERR, "Stop port first.");
 		return -EINVAL;
+	}
 
 	rctl = IGC_READ_REG(hw, IGC_RCTL);
 
@@ -1850,8 +1900,7 @@ eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
 
 	/* Rx Errors */
 	rte_stats->imissed = stats->mpc;
-	rte_stats->ierrors = stats->crcerrs +
-			stats->rlec + stats->ruc + stats->roc +
+	rte_stats->ierrors = stats->crcerrs + stats->rlec +
 			stats->rxerrc + stats->algnerrc;
 
 	/* Tx Errors */
@@ -2216,6 +2265,8 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
 	/* set redirection table */
 	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta, reg;
@@ -2228,7 +2279,8 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to update the register */
-		if (!mask)
+		if (!mask ||
+		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* check mask whether need to read the register value first */
@@ -2239,6 +2291,7 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
 				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
 
 		/* update the register */
+		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
 		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
 			if (mask & (1u << j))
 				reta.bytes[j] =
@@ -2268,6 +2321,8 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
 	/* read redirection table */
 	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
 		union igc_rss_reta_reg reta;
@@ -2280,10 +2335,12 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
 				IGC_RSS_RDT_REG_SIZE_MASK);
 
 		/* if no need to read register */
-		if (!mask)
+		if (!mask ||
+		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
 			continue;
 
 		/* read register and get the queue index */
+		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
 		reta.dword = IGC_READ_REG_LE_VALUE(hw,
 				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
 		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
@@ -2358,6 +2415,192 @@ eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
+	uint32_t vfta;
+	uint32_t vid_idx;
+	uint32_t vid_bit;
+
+	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
+	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
+	vfta = shadow_vfta->vfta[vid_idx];
+	if (on)
+		vfta |= vid_bit;
+	else
+		vfta &= ~vid_bit;
+	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);
+
+	/* update local VFTA copy */
+	shadow_vfta->vfta[vid_idx] = vfta;
+
+	return 0;
+}
+
+static void
+igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
+			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
+}
+
+static void
+igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
+	uint32_t reg_val;
+	int i;
+
+	/* Filter Table Enable, CFI not used for packet acceptance */
+	reg_val = IGC_READ_REG(hw, IGC_RCTL);
+	reg_val &= ~IGC_RCTL_CFIEN;
+	reg_val |= IGC_RCTL_VFE;
+	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);
+
+	/* restore VFTA table */
+	for (i = 0; i < IGC_VFTA_SIZE; i++)
+		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
+}
+
+static void
+igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
+}
+
+static int
+igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	uint32_t ctrl_ext;
+
+	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
+
+	/* if extend vlan hasn't been enabled */
+	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
+		return 0;
+
+	if ((dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
+		goto write_ext_vlan;
+
+	/* Update maximum packet length */
+	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
+			RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
+		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
+			dev->data->dev_conf.rxmode.max_rx_pkt_len,
+			VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
+		return -EINVAL;
+	}
+	dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
+	IGC_WRITE_REG(hw, IGC_RLPML,
+		dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+write_ext_vlan:
+	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
+	return 0;
+}
+
+static int
+igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	uint32_t ctrl_ext;
+
+	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
+
+	/* if extend vlan has been enabled */
+	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
+		return 0;
+
+	if ((dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
+		goto write_ext_vlan;
+
+	/* Update maximum packet length */
+	if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+			MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
+		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
+			dev->data->dev_conf.rxmode.max_rx_pkt_len +
+			VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
+		return -EINVAL;
+	}
+	dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
+	IGC_WRITE_REG(hw, IGC_RLPML,
+		dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+write_ext_vlan:
+	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
+	return 0;
+}
+
+static int
+eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			igc_vlan_hw_strip_enable(dev);
+		else
+			igc_vlan_hw_strip_disable(dev);
+	}
+
+	if (mask & ETH_VLAN_FILTER_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+			igc_vlan_hw_filter_enable(dev);
+		else
+			igc_vlan_hw_filter_disable(dev);
+	}
+
+	if (mask & ETH_VLAN_EXTEND_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+			return igc_vlan_hw_extend_enable(dev);
+		else
+			return igc_vlan_hw_extend_disable(dev);
+	}
+
+	return 0;
+}
+
+static int
+eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
+		enum rte_vlan_type vlan_type,
+		uint16_t tpid)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	uint32_t reg_val;
+
+	/* only outer TPID of double VLAN can be configured*/
+	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
+		reg_val = IGC_READ_REG(hw, IGC_VET);
+		reg_val = (reg_val & (~IGC_VET_EXT)) |
+			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
+		IGC_WRITE_REG(hw, IGC_VET, reg_val);
+
+		return 0;
+	}
+
+	/* all other TPID values are read-only*/
+	PMD_DRV_LOG(ERR, "Not supported");
+	return -ENOTSUP;
+}
+
 static int
 eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	struct rte_pci_device *pci_dev)
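
For context, the VLAN handlers added by this patch are reached through the public ethdev API of this DPDK era. Below is a minimal application-side sketch, not part of the patch: the function name enable_igc_vlan, the port_id variable, and the example VLAN ID 100 are illustrative assumptions; the rte_eth_dev_* calls and ETH_VLAN_*_OFFLOAD masks are the standard pre-21.11 ethdev API.

	/* Illustrative only: exercises the driver paths added above. */
	#include <rte_ethdev.h>

	static int
	enable_igc_vlan(uint16_t port_id)
	{
		int ret;

		/*
		 * Request VLAN strip + filter as the desired offload state;
		 * ethdev computes the changed mask and dispatches to the
		 * PMD's vlan_offload_set (eth_igc_vlan_offload_set here).
		 */
		ret = rte_eth_dev_set_vlan_offload(port_id,
				ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
		if (ret < 0)
			return ret;

		/* admit VLAN 100; dispatches to eth_igc_vlan_filter_set() */
		ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
		if (ret < 0)
			return ret;

		/*
		 * Set the outer TPID for QinQ; dispatches to
		 * eth_igc_vlan_tpid_set(), which only accepts
		 * ETH_VLAN_TYPE_OUTER as implemented above.
		 */
		return rte_eth_dev_set_vlan_ether_type(port_id,
				ETH_VLAN_TYPE_OUTER, RTE_ETHER_TYPE_QINQ);
	}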