X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_e1000%2Figb_ethdev.c;h=49843c1b3b1c62e4897eeb93a7f8f3a010dba5c8;hb=ff708facfcbf42f3dcb3c62d82ecd93e7b8c2506;hp=c92b7372b56cce5efe0819d2cf6d1929e3860da9;hpb=2d95b84aaacb3d2d0bd70367c0530d15e0cbb14e;p=dpdk.git diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c index c92b7372b5..49843c1b3b 100644 --- a/lib/librte_pmd_e1000/igb_ethdev.c +++ b/lib/librte_pmd_e1000/igb_ethdev.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include @@ -57,6 +56,24 @@ #include "e1000/e1000_api.h" #include "e1000_ethdev.h" +/* + * Default values for port configuration + */ +#define IGB_DEFAULT_RX_FREE_THRESH 32 +#define IGB_DEFAULT_RX_PTHRESH 8 +#define IGB_DEFAULT_RX_HTHRESH 8 +#define IGB_DEFAULT_RX_WTHRESH 0 + +#define IGB_DEFAULT_TX_PTHRESH 32 +#define IGB_DEFAULT_TX_HTHRESH 0 +#define IGB_DEFAULT_TX_WTHRESH 0 + +/* Bit shift and mask */ +#define IGB_4_BIT_WIDTH (CHAR_BIT / 2) +#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t) +#define IGB_8_BIT_WIDTH CHAR_BIT +#define IGB_8_BIT_MASK UINT8_MAX + static int eth_igb_configure(struct rte_eth_dev *dev); static int eth_igb_start(struct rte_eth_dev *dev); static void eth_igb_stop(struct rte_eth_dev *dev); @@ -71,6 +88,8 @@ static void eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats); static void eth_igb_stats_reset(struct rte_eth_dev *dev); static void eth_igb_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static void eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); @@ -87,6 +106,8 @@ static void igb_hw_control_release(struct e1000_hw *hw); static void igb_init_manageability(struct e1000_hw *hw); static void igb_release_manageability(struct e1000_hw *hw); +static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id); @@ -122,47 +143,56 @@ static int igbvf_vlan_filter_set(struct rte_eth_dev *dev, static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on); static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on); static int eth_igb_rss_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf); + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); static int eth_igb_rss_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf); - -static int eth_igb_add_syn_filter(struct rte_eth_dev *dev, - struct rte_syn_filter *filter, uint16_t rx_queue); -static int eth_igb_remove_syn_filter(struct rte_eth_dev *dev); -static int eth_igb_get_syn_filter(struct rte_eth_dev *dev, - struct rte_syn_filter *filter, uint16_t *rx_queue); -static int eth_igb_add_ethertype_filter(struct rte_eth_dev *dev, - uint16_t index, - struct rte_ethertype_filter *filter, uint16_t rx_queue); -static int eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev, - uint16_t index); -static int eth_igb_get_ethertype_filter(struct rte_eth_dev *dev, - uint16_t index, - struct rte_ethertype_filter *filter, uint16_t *rx_queue); -static int eth_igb_add_2tuple_filter(struct rte_eth_dev *dev, - uint16_t index, - struct rte_2tuple_filter *filter, uint16_t rx_queue); -static int eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev, - uint16_t index); -static int 
eth_igb_get_2tuple_filter(struct rte_eth_dev *dev, - uint16_t index, - struct rte_2tuple_filter *filter, uint16_t *rx_queue); -static int eth_igb_add_flex_filter(struct rte_eth_dev *dev, - uint16_t index, - struct rte_flex_filter *filter, uint16_t rx_queue); -static int eth_igb_remove_flex_filter(struct rte_eth_dev *dev, - uint16_t index); + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); + +static int eth_igb_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add); +static int eth_igb_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter); +static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_add_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int igb_remove_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter, + bool add); static int eth_igb_get_flex_filter(struct rte_eth_dev *dev, - uint16_t index, - struct rte_flex_filter *filter, uint16_t *rx_queue); -static int eth_igb_add_5tuple_filter(struct rte_eth_dev *dev, - uint16_t index, - struct rte_5tuple_filter *filter, uint16_t rx_queue); -static int eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev, - uint16_t index); -static int eth_igb_get_5tuple_filter(struct rte_eth_dev *dev, - uint16_t index, - struct rte_5tuple_filter *filter, uint16_t *rx_queue); + struct rte_eth_flex_filter *filter); +static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter); +static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter, + bool add); +static int igb_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter); +static int igb_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add); +static int igb_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int igb_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter); +static int eth_igb_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); /* * Define VF Stats MACRO for Non "cleared on read" register @@ -218,6 +248,7 @@ static struct eth_dev_ops eth_igb_ops = { .stats_get = eth_igb_stats_get, .stats_reset = eth_igb_stats_reset, .dev_infos_get = eth_igb_infos_get, + .mtu_set = eth_igb_mtu_set, .vlan_filter_set = eth_igb_vlan_filter_set, .vlan_tpid_set = eth_igb_vlan_tpid_set, .vlan_offload_set = eth_igb_vlan_offload_set, @@ -237,21 +268,7 @@ static struct eth_dev_ops eth_igb_ops = { .reta_query = eth_igb_rss_reta_query, .rss_hash_update = eth_igb_rss_hash_update, .rss_hash_conf_get = eth_igb_rss_hash_conf_get, - .add_syn_filter = eth_igb_add_syn_filter, - .remove_syn_filter = eth_igb_remove_syn_filter, - .get_syn_filter = eth_igb_get_syn_filter, - .add_ethertype_filter = eth_igb_add_ethertype_filter, - .remove_ethertype_filter = 
eth_igb_remove_ethertype_filter, - .get_ethertype_filter = eth_igb_get_ethertype_filter, - .add_2tuple_filter = eth_igb_add_2tuple_filter, - .remove_2tuple_filter = eth_igb_remove_2tuple_filter, - .get_2tuple_filter = eth_igb_get_2tuple_filter, - .add_flex_filter = eth_igb_add_flex_filter, - .remove_flex_filter = eth_igb_remove_flex_filter, - .get_flex_filter = eth_igb_get_flex_filter, - .add_5tuple_filter = eth_igb_add_5tuple_filter, - .remove_5tuple_filter = eth_igb_remove_5tuple_filter, - .get_5tuple_filter = eth_igb_get_5tuple_filter, + .filter_ctrl = eth_igb_filter_ctrl, }; /* @@ -267,7 +284,7 @@ static struct eth_dev_ops igbvf_eth_dev_ops = { .stats_get = eth_igbvf_stats_get, .stats_reset = eth_igbvf_stats_reset, .vlan_filter_set = igbvf_vlan_filter_set, - .dev_infos_get = eth_igb_infos_get, + .dev_infos_get = eth_igbvf_infos_get, .rx_queue_setup = eth_igb_rx_queue_setup, .rx_queue_release = eth_igb_rx_queue_release, .tx_queue_setup = eth_igb_tx_queue_setup, @@ -397,7 +414,7 @@ igb_reset_swfw_lock(struct e1000_hw *hw) * So force the release of the faulty lock. */ if (e1000_get_hw_semaphore_generic(hw) < 0) { - DEBUGOUT("SMBI lock released"); + PMD_DRV_LOG(DEBUG, "SMBI lock released"); } e1000_put_hw_semaphore_generic(hw); @@ -413,7 +430,8 @@ igb_reset_swfw_lock(struct e1000_hw *hw) if (hw->bus.func > E1000_FUNC_1) mask <<= 2; if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { - DEBUGOUT1("SWFW phy%d lock released", hw->bus.func); + PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", + hw->bus.func); } hw->mac.ops.release_swfw_sync(hw, mask); @@ -425,7 +443,7 @@ igb_reset_swfw_lock(struct e1000_hw *hw) */ mask = E1000_SWFW_EEP_SM; if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { - DEBUGOUT("SWFW common locks released"); + PMD_DRV_LOG(DEBUG, "SWFW common locks released"); } hw->mac.ops.release_swfw_sync(hw, mask); } @@ -434,8 +452,7 @@ igb_reset_swfw_lock(struct e1000_hw *hw) } static int -eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, - struct rte_eth_dev *eth_dev) +eth_igb_dev_init(struct rte_eth_dev *eth_dev) { int error = 0; struct rte_pci_device *pci_dev; @@ -443,6 +460,8 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); struct e1000_vfta * shadow_vfta = E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); uint32_t ctrl_ext; pci_dev = eth_dev->pci_dev; @@ -561,7 +580,7 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); E1000_WRITE_FLUSH(hw); - PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n", + PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x", eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id); @@ -574,6 +593,13 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, /* enable support intr */ igb_intr_enable(eth_dev); + TAILQ_INIT(&filter_info->flex_list); + filter_info->flex_mask = 0; + TAILQ_INIT(&filter_info->twotuple_list); + filter_info->twotuple_mask = 0; + TAILQ_INIT(&filter_info->fivetuple_list); + filter_info->fivetuple_mask = 0; + return 0; err_late: @@ -586,15 +612,14 @@ err_late: * Virtual Function device init */ static int -eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, - struct rte_eth_dev *eth_dev) +eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; struct e1000_hw *hw = 
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); int diag; - PMD_INIT_LOG(DEBUG, "eth_igbvf_dev_init"); + PMD_INIT_FUNC_TRACE(); eth_dev->dev_ops = &igbvf_eth_dev_ops; eth_dev->rx_pkt_burst = ð_igb_recv_pkts; @@ -615,7 +640,7 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, hw->vendor_id = pci_dev->id.vendor_id; hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; - /* Initialize the shared code */ + /* Initialize the shared code (base driver) */ diag = e1000_setup_init_funcs(hw, TRUE); if (diag != 0) { PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d", @@ -646,11 +671,10 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, ð_dev->data->mac_addrs[0]); - PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x " - "mac.type=%s\n", - eth_dev->data->port_id, pci_dev->id.vendor_id, - pci_dev->id.device_id, - "igb_mac_82576_vf"); + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x " + "mac.type=%s", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id, "igb_mac_82576_vf"); return 0; } @@ -659,7 +683,7 @@ static struct eth_driver rte_igb_pmd = { { .name = "rte_igb_pmd", .id_table = pci_id_igb_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, }, .eth_dev_init = eth_igb_dev_init, .dev_private_size = sizeof(struct e1000_adapter), @@ -704,7 +728,7 @@ igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) static int rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused) { - DEBUGFUNC("rte_igbvf_pmd_init"); + PMD_INIT_FUNC_TRACE(); rte_eth_driver_register(&rte_igbvf_pmd); return (0); @@ -716,11 +740,9 @@ eth_igb_configure(struct rte_eth_dev *dev) struct e1000_interrupt *intr = E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - PMD_INIT_LOG(DEBUG, ">>"); - + PMD_INIT_FUNC_TRACE(); intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; - - PMD_INIT_LOG(DEBUG, "<<"); + PMD_INIT_FUNC_TRACE(); return (0); } @@ -733,7 +755,7 @@ eth_igb_start(struct rte_eth_dev *dev) int ret, i, mask; uint32_t ctrl_ext; - PMD_INIT_LOG(DEBUG, ">>"); + PMD_INIT_FUNC_TRACE(); /* Power up the phy. Needed to make the link go Up */ e1000_power_up_phy(hw); @@ -815,7 +837,8 @@ eth_igb_start(struct rte_eth_dev *dev) * value of Write-Back Threshold registers. 
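 * (See the IVAR programming just below: on the MAC types listed there,
 * every RX and TX queue is mapped to an interrupt vector.)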
*/ if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) || - (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210)) { + (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { uint32_t ivar; /* Enable all RX & TX queues in the IVAR registers */ @@ -884,9 +907,9 @@ eth_igb_start(struct rte_eth_dev *dev) return (0); error_invalid_config: - PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n", - dev->data->dev_conf.link_speed, - dev->data->dev_conf.link_duplex, dev->data->port_id); + PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u", + dev->data->dev_conf.link_speed, + dev->data->dev_conf.link_duplex, dev->data->port_id); igb_dev_clear_queues(dev); return (-EINVAL); } @@ -901,7 +924,12 @@ static void eth_igb_stop(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct rte_eth_link link; + struct e1000_flex_filter *p_flex; + struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next; + struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next; igb_intr_disable(hw); igb_pf_reset_hw(hw); @@ -924,6 +952,31 @@ eth_igb_stop(struct rte_eth_dev *dev) /* clear the recorded link status */ memset(&link, 0, sizeof(link)); rte_igb_dev_atomic_write_link_status(dev, &link); + + /* Remove all flex filters of the device */ + while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) { + TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries); + rte_free(p_flex); + } + filter_info->flex_mask = 0; + + /* Remove all ntuple filters of the device */ + for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list); + p_5tuple != NULL; p_5tuple = p_5tuple_next) { + p_5tuple_next = TAILQ_NEXT(p_5tuple, entries); + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, entries); + rte_free(p_5tuple); + } + filter_info->fivetuple_mask = 0; + for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list); + p_2tuple != NULL; p_2tuple = p_2tuple_next) { + p_2tuple_next = TAILQ_NEXT(p_2tuple, entries); + TAILQ_REMOVE(&filter_info->twotuple_list, + p_2tuple, entries); + rte_free(p_2tuple); + } + filter_info->twotuple_mask = 0; } static void @@ -963,7 +1016,7 @@ igb_get_rx_buffer_size(struct e1000_hw *hw) rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf); rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size); rx_buf_size = (rx_buf_size << 10); - } else if (hw->mac.type == e1000_i210) { + } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10; } else { rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10; @@ -1225,7 +1278,6 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) if (rte_stats == NULL) return; - memset(rte_stats, 0, sizeof(*rte_stats)); rte_stats->ipackets = hw_stats->gprc; rte_stats->ibytes = hw_stats->gorc; rte_stats->opackets = hw_stats->gptc; @@ -1254,8 +1306,7 @@ eth_igbvf_stats_reset(struct rte_eth_dev *dev) } static void -eth_igb_infos_get(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info) +eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1285,18 +1336,21 @@ eth_igb_infos_get(struct rte_eth_dev *dev, dev_info->max_rx_queues = 16; dev_info->max_tx_queues = 16; dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->vmdq_queue_num = 16; break; case 
e1000_82580: dev_info->max_rx_queues = 8; dev_info->max_tx_queues = 8; dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->vmdq_queue_num = 8; break; case e1000_i350: dev_info->max_rx_queues = 8; dev_info->max_tx_queues = 8; dev_info->max_vmdq_pools = ETH_8_POOLS; + dev_info->vmdq_queue_num = 8; break; case e1000_i354: @@ -1310,24 +1364,88 @@ eth_igb_infos_get(struct rte_eth_dev *dev, dev_info->max_vmdq_pools = 0; break; - case e1000_vfadapt: + case e1000_i211: dev_info->max_rx_queues = 2; dev_info->max_tx_queues = 2; dev_info->max_vmdq_pools = 0; break; + default: + /* Should not happen */ + break; + } + dev_info->reta_size = ETH_RSS_RETA_SIZE_128; + dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IGB_DEFAULT_RX_PTHRESH, + .hthresh = IGB_DEFAULT_RX_HTHRESH, + .wthresh = IGB_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IGB_DEFAULT_TX_PTHRESH, + .hthresh = IGB_DEFAULT_TX_HTHRESH, + .wthresh = IGB_DEFAULT_TX_WTHRESH, + }, + .txq_flags = 0, + }; +} + +static void +eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ + dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM; + switch (hw->mac.type) { + case e1000_vfadapt: + dev_info->max_rx_queues = 2; + dev_info->max_tx_queues = 2; + break; case e1000_vfadapt_i350: dev_info->max_rx_queues = 1; dev_info->max_tx_queues = 1; - dev_info->max_vmdq_pools = 0; break; - default: /* Should not happen */ - dev_info->max_rx_queues = 0; - dev_info->max_tx_queues = 0; - dev_info->max_vmdq_pools = 0; + break; } + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IGB_DEFAULT_RX_PTHRESH, + .hthresh = IGB_DEFAULT_RX_HTHRESH, + .wthresh = IGB_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IGB_DEFAULT_TX_PTHRESH, + .hthresh = IGB_DEFAULT_TX_HTHRESH, + .wthresh = IGB_DEFAULT_TX_WTHRESH, + }, + .txq_flags = 0, + }; } /* return 0 means link status changed, -1 means not changed */ @@ -1786,19 +1904,20 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev) rte_igb_dev_atomic_read_link_status(dev, &link); if (link.link_status) { PMD_INIT_LOG(INFO, - " Port %d: Link Up - speed %u Mbps - %s\n", - dev->data->port_id, (unsigned)link.link_speed, - link.link_duplex == ETH_LINK_FULL_DUPLEX ? - "full-duplex" : "half-duplex"); + " Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, + (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? 
+ "full-duplex" : "half-duplex"); } else { - PMD_INIT_LOG(INFO, " Port %d: Link Down\n", - dev->data->port_id); + PMD_INIT_LOG(INFO, " Port %d: Link Down", + dev->data->port_id); } PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d", - dev->pci_dev->addr.domain, - dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, - dev->pci_dev->addr.function); + dev->pci_dev->addr.domain, + dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, + dev->pci_dev->addr.function); tctl = E1000_READ_REG(hw, E1000_TCTL); rctl = E1000_READ_REG(hw, E1000_RCTL); if (link.link_status) { @@ -1919,14 +2038,14 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) if (fc_conf->autoneg != hw->mac.autoneg) return -ENOTSUP; rx_buf_size = igb_get_rx_buffer_size(hw); - PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); /* At least reserve one Ethernet frame for watermark */ max_high_water = rx_buf_size - ETHER_MAX_LEN; if ((fc_conf->high_water > max_high_water) || - (fc_conf->high_water < fc_conf->low_water)) { - PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n"); - PMD_INIT_LOG(ERR, "high water must <= 0x%x \n", max_high_water); + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); + PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); return (-EINVAL); } @@ -1956,7 +2075,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) return 0; } - PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err); + PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); return (-EIO); } @@ -1991,7 +2110,7 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) static void igbvf_intr_disable(struct e1000_hw *hw) { - PMD_INIT_LOG(DEBUG, "igbvf_intr_disable"); + PMD_INIT_FUNC_TRACE(); /* Clear interrupt mask to stop from interrupts being generated */ E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); @@ -2008,7 +2127,7 @@ igbvf_stop_adapter(struct rte_eth_dev *dev) struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); memset(&dev_info, 0, sizeof(dev_info)); - eth_igb_infos_get(dev, &dev_info); + eth_igbvf_infos_get(dev, &dev_info); /* Clear interrupt mask to stop from interrupts being generated */ igbvf_intr_disable(hw); @@ -2073,8 +2192,8 @@ igbvf_dev_configure(struct rte_eth_dev *dev) { struct rte_eth_conf* conf = &dev->data->dev_conf; - PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n", - dev->data->port_id); + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", + dev->data->port_id); /* * VF has no ability to enable/disable HW CRC @@ -2082,12 +2201,12 @@ igbvf_dev_configure(struct rte_eth_dev *dev) */ #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC if (!conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n"); + PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip"); conf->rxmode.hw_strip_crc = 1; } #else if (conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n"); + PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip"); conf->rxmode.hw_strip_crc = 0; } #endif @@ -2102,7 +2221,7 @@ igbvf_dev_start(struct rte_eth_dev *dev) E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret; - PMD_INIT_LOG(DEBUG, "igbvf_dev_start"); + PMD_INIT_FUNC_TRACE(); hw->mac.ops.reset_hw(hw); @@ -2125,7 +2244,7 @@ igbvf_dev_start(struct rte_eth_dev *dev) static void igbvf_dev_stop(struct rte_eth_dev *dev) { - PMD_INIT_LOG(DEBUG, "igbvf_dev_stop"); + 
PMD_INIT_FUNC_TRACE(); igbvf_stop_adapter(dev); @@ -2143,7 +2262,7 @@ igbvf_dev_close(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - PMD_INIT_LOG(DEBUG, "igbvf_dev_close"); + PMD_INIT_FUNC_TRACE(); e1000_reset_hw(hw); @@ -2199,7 +2318,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) uint32_t vid_bit = 0; int ret = 0; - PMD_INIT_LOG(DEBUG, "igbvf_vlan_filter_set"); + PMD_INIT_FUNC_TRACE(); /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/ ret = igbvf_set_vfta(hw, vlan_id, !!on); @@ -2221,38 +2340,40 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) static int eth_igb_rss_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf) + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { - uint8_t i,j,mask; - uint32_t reta; - struct e1000_hw *hw = - E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t i, j, mask; + uint32_t reta, r; + uint16_t idx, shift; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* - * Update Redirection Table RETA[n],n=0...31,The redirection table has - * 128-entries in 32 registers - */ - for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) { - if (i < ETH_RSS_RETA_NUM_ENTRIES/2) - mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF); + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IGB_4_BIT_MASK); + if (!mask) + continue; + if (mask == IGB_4_BIT_MASK) + r = 0; else - mask = (uint8_t)((reta_conf->mask_hi >> - (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF); - if (mask != 0) { - reta = 0; - /* If all 4 entries were set,don't need read RETA register */ - if (mask != 0xF) - reta = E1000_READ_REG(hw,E1000_RETA(i >> 2)); - - for (j = 0; j < 4; j++) { - if (mask & (0x1 << j)) { - if (mask != 0xF) - reta &= ~(0xFF << 8 * j); - reta |= reta_conf->reta[i + j] << 8 * j; - } - } - E1000_WRITE_REG(hw, E1000_RETA(i >> 2),reta); + r = E1000_READ_REG(hw, E1000_RETA(i >> 2)); + for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta |= reta_conf[idx].reta[shift + j] << + (CHAR_BIT * j); + else + reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j)); } + E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); } return 0; @@ -2260,31 +2381,34 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev, static int eth_igb_rss_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf) + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { - uint8_t i,j,mask; + uint8_t i, j, mask; uint32_t reta; - struct e1000_hw *hw = - E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t idx, shift; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* - * Read Redirection Table RETA[n],n=0...31,The redirection table has - * 128-entries in 32 registers - */ - for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) { - if (i < ETH_RSS_RETA_NUM_ENTRIES/2) - mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF); - else - mask = (uint8_t)((reta_conf->mask_hi >> - (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF); - - if (mask != 0) { - reta = E1000_READ_REG(hw,E1000_RETA(i >> 2)); - for (j = 0; j < 4; j++) { - if (mask & (0x1 << j)) - 
reta_conf->reta[i + j] = - (uint8_t)((reta >> 8 * j) & 0xFF); - } + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IGB_4_BIT_MASK); + if (!mask) + continue; + reta = E1000_READ_REG(hw, E1000_RETA(i >> 2)); + for (j = 0; j < IGB_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta_conf[idx].reta[shift + j] = + ((reta >> (CHAR_BIT * j)) & + IGB_8_BIT_MASK); } } @@ -2294,717 +2418,1227 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev, #define MAC_TYPE_FILTER_SUP(type) do {\ if ((type) != e1000_82580 && (type) != e1000_i350 &&\ (type) != e1000_82576)\ - return -ENOSYS;\ + return -ENOTSUP;\ } while (0) -/* - * add the syn filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * filter: ponter to the filter that will be added. - * rx_queue: the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ static int -eth_igb_add_syn_filter(struct rte_eth_dev *dev, - struct rte_syn_filter *filter, uint16_t rx_queue) +eth_igb_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t synqf, rfctl; - MAC_TYPE_FILTER_SUP(hw->mac.type); - - if (rx_queue >= IGB_MAX_RX_QUEUE_NUM) + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) return -EINVAL; synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); - if (synqf & E1000_SYN_FILTER_ENABLE) - return -EINVAL; - synqf = (uint32_t)(((rx_queue << E1000_SYN_FILTER_QUEUE_SHIFT) & - E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE); - - rfctl = E1000_READ_REG(hw, E1000_RFCTL); - if (filter->hig_pri) - rfctl |= E1000_RFCTL_SYNQFP; - else - rfctl &= ~E1000_RFCTL_SYNQFP; + if (add) { + if (synqf & E1000_SYN_FILTER_ENABLE) + return -EINVAL; - E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); - E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); - return 0; -} + synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) & + E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE); -/* - * remove the syn filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -static int -eth_igb_remove_syn_filter(struct rte_eth_dev *dev) -{ - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + if (filter->hig_pri) + rfctl |= E1000_RFCTL_SYNQFP; + else + rfctl &= ~E1000_RFCTL_SYNQFP; - MAC_TYPE_FILTER_SUP(hw->mac.type); + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + } else { + if (!(synqf & E1000_SYN_FILTER_ENABLE)) + return -ENOENT; + synqf = 0; + } - E1000_WRITE_REG(hw, E1000_SYNQF(0), 0); + E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); + E1000_WRITE_FLUSH(hw); return 0; } -/* - * get the syn filter's info - * - * @param - * dev: Pointer to struct rte_eth_dev. - * filter: ponter to the filter that returns. - * *rx_queue: pointer to the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. 
- */ static int -eth_igb_get_syn_filter(struct rte_eth_dev *dev, - struct rte_syn_filter *filter, uint16_t *rx_queue) +eth_igb_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t synqf, rfctl; - MAC_TYPE_FILTER_SUP(hw->mac.type); synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); if (synqf & E1000_SYN_FILTER_ENABLE) { rfctl = E1000_READ_REG(hw, E1000_RFCTL); filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0; - *rx_queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >> + filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >> E1000_SYN_FILTER_QUEUE_SHIFT); return 0; } + return -ENOENT; } -/* - * add an ethertype filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that will be added. - * rx_queue: the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ static int -eth_igb_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_ethertype_filter *filter, uint16_t rx_queue) +eth_igb_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t etqf; + int ret; MAC_TYPE_FILTER_SUP(hw->mac.type); - if (index >= E1000_MAX_ETQF_FILTERS || rx_queue >= IGB_MAX_RX_QUEUE_NUM) - return -EINVAL; - - etqf = E1000_READ_REG(hw, E1000_ETQF(index)); - if (etqf & E1000_ETQF_FILTER_ENABLE) - return -EINVAL; /* filter index is in use. */ - else - etqf = 0; - - etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; - etqf |= (uint32_t)(filter->ethertype & E1000_ETQF_ETHERTYPE); - etqf |= rx_queue << E1000_ETQF_QUEUE_SHIFT; + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; - if (filter->priority_en) { - PMD_INIT_LOG(ERR, "vlan and priority (%d) is not supported" - " in E1000.", filter->priority); + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); return -EINVAL; } - E1000_WRITE_REG(hw, E1000_ETQF(index), etqf); - return 0; + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = eth_igb_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = eth_igb_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = eth_igb_syn_filter_get(dev, + (struct rte_eth_syn_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + ret = -EINVAL; + break; + } + + return ret; } -/* - * remove an ethertype filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -static int -eth_igb_remove_ethertype_filter(struct rte_eth_dev *dev, uint16_t index) +#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ + if ((type) != e1000_82580 && (type) != e1000_i350)\ + return -ENOSYS; \ +} while (0) + +/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ +static inline int +ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, + struct e1000_2tuple_filter_info *filter_info) { - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) + return -EINVAL; + if (filter->priority > E1000_2TUPLE_MAX_PRI) + return -EINVAL; /* filter index is out of range. 
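+			 * (i.e. the requested queue or priority is outside
+			 *  the supported range)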
*/ + if (filter->tcp_flags > TCP_FLAG_ALL) + return -EINVAL; /* flags is invalid. */ - MAC_TYPE_FILTER_SUP(hw->mac.type); + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } - if (index >= E1000_MAX_ETQF_FILTERS) + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = filter->proto; + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) + filter_info->tcp_flags = filter->tcp_flags; + else + filter_info->tcp_flags = 0; - E1000_WRITE_REG(hw, E1000_ETQF(index), 0); return 0; } -/* - * get an ethertype filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that will be gotten. - * *rx_queue: the ponited of the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ -static int -eth_igb_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_ethertype_filter *filter, uint16_t *rx_queue) +static inline struct e1000_2tuple_filter * +igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list, + struct e1000_2tuple_filter_info *key) { - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t etqf; - - MAC_TYPE_FILTER_SUP(hw->mac.type); - - if (index >= E1000_MAX_ETQF_FILTERS) - return -EINVAL; + struct e1000_2tuple_filter *it; - etqf = E1000_READ_REG(hw, E1000_ETQF(index)); - if (etqf & E1000_ETQF_FILTER_ENABLE) { - filter->ethertype = etqf & E1000_ETQF_ETHERTYPE; - filter->priority_en = 0; - *rx_queue = (etqf & E1000_ETQF_QUEUE) >> E1000_ETQF_QUEUE_SHIFT; - return 0; + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct e1000_2tuple_filter_info)) == 0) { + return it; + } } - return -ENOENT; + return NULL; } -#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ - if ((type) != e1000_82580 && (type) != e1000_i350)\ - return -ENOSYS; \ -} while (0) - /* - * add a 2tuple filter + * igb_add_2tuple_filter - add a 2tuple filter * * @param * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that will be added. - * rx_queue: the queue id the filter assigned to. + * ntuple_filter: ponter to the filter that will be added. * * @return * - On success, zero. * - On failure, a negative value. 
*/ static int -eth_igb_add_2tuple_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_2tuple_filter *filter, uint16_t rx_queue) +igb_add_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t ttqf, imir = 0; - uint32_t imir_ext = 0; - - MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_2tuple_filter *filter; + uint32_t ttqf = E1000_TTQF_DISABLE_MASK; + uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; + int i, ret; + + filter = rte_zmalloc("e1000_2tuple_filter", + sizeof(struct e1000_2tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; - if (index >= E1000_MAX_TTQF_FILTERS || - rx_queue >= IGB_MAX_RX_QUEUE_NUM || - filter->priority > E1000_2TUPLE_MAX_PRI) - return -EINVAL; /* filter index is out of range. */ - if (filter->tcp_flags > TCP_FLAG_ALL) - return -EINVAL; /* flags is invalid. */ + ret = ntuple_filter_to_2tuple(ntuple_filter, + &filter->filter_info); + if (ret < 0) { + rte_free(filter); + return ret; + } + if (igb_2tuple_filter_lookup(&filter_info->twotuple_list, + &filter->filter_info) != NULL) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(filter); + return -EEXIST; + } + filter->queue = ntuple_filter->queue; - ttqf = E1000_READ_REG(hw, E1000_TTQF(index)); - if (ttqf & E1000_TTQF_QUEUE_ENABLE) - return -EINVAL; /* filter index is in use. */ + /* + * look for an unused 2tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) { + if (!(filter_info->twotuple_mask & (1 << i))) { + filter_info->twotuple_mask |= 1 << i; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->twotuple_list, + filter, + entries); + break; + } + } + if (i >= E1000_MAX_TTQF_FILTERS) { + PMD_DRV_LOG(ERR, "2tuple filters are full."); + rte_free(filter); + return -ENOSYS; + } - imir = (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT); - if (filter->dst_port_mask == 1) /* 1b means not compare. */ + imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); + if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ imir |= E1000_IMIR_PORT_BP; else imir &= ~E1000_IMIR_PORT_BP; - imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT; + imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; - ttqf = 0; ttqf |= E1000_TTQF_QUEUE_ENABLE; - ttqf |= (uint32_t)(rx_queue << E1000_TTQF_QUEUE_SHIFT); - ttqf |= (uint32_t)(filter->protocol & E1000_TTQF_PROTOCOL_MASK); - if (filter->protocol_mask == 1) - ttqf |= E1000_TTQF_MASK_ENABLE; - else + ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); + ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK); + if (filter->filter_info.proto_mask == 0) ttqf &= ~E1000_TTQF_MASK_ENABLE; - imir_ext |= E1000_IMIR_EXT_SIZE_BP; /* tcp flags bits setting. 
*/ - if (filter->tcp_flags & TCP_FLAG_ALL) { - if (filter->tcp_flags & TCP_UGR_FLAG) - imir_ext |= E1000_IMIR_EXT_CTRL_UGR; - if (filter->tcp_flags & TCP_ACK_FLAG) - imir_ext |= E1000_IMIR_EXT_CTRL_ACK; - if (filter->tcp_flags & TCP_PSH_FLAG) - imir_ext |= E1000_IMIR_EXT_CTRL_PSH; - if (filter->tcp_flags & TCP_RST_FLAG) - imir_ext |= E1000_IMIR_EXT_CTRL_RST; - if (filter->tcp_flags & TCP_SYN_FLAG) - imir_ext |= E1000_IMIR_EXT_CTRL_SYN; - if (filter->tcp_flags & TCP_FIN_FLAG) - imir_ext |= E1000_IMIR_EXT_CTRL_FIN; - imir_ext &= ~E1000_IMIR_EXT_CTRL_BP; + if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { + if (filter->filter_info.tcp_flags & TCP_URG_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_URG; + if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_ACK; + if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_PSH; + if (filter->filter_info.tcp_flags & TCP_RST_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_RST; + if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_SYN; + if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) + imir_ext |= E1000_IMIREXT_CTRL_FIN; } else - imir_ext |= E1000_IMIR_EXT_CTRL_BP; - E1000_WRITE_REG(hw, E1000_IMIR(index), imir); - E1000_WRITE_REG(hw, E1000_TTQF(index), ttqf); - E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext); + imir_ext |= E1000_IMIREXT_CTRL_BP; + E1000_WRITE_REG(hw, E1000_IMIR(i), imir); + E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); + E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); return 0; } /* - * remove a 2tuple filter + * igb_remove_2tuple_filter - remove a 2tuple filter * * @param * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. + * ntuple_filter: ponter to the filter that will be removed. * * @return * - On success, zero. * - On failure, a negative value. */ static int -eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev, - uint16_t index) +igb_remove_2tuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_2tuple_filter_info filter_2tuple; + struct e1000_2tuple_filter *filter; + int ret; + + memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info)); + ret = ntuple_filter_to_2tuple(ntuple_filter, + &filter_2tuple); + if (ret < 0) + return ret; - MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list, + &filter_2tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } - if (index >= E1000_MAX_TTQF_FILTERS) - return -EINVAL; /* filter index is out of range */ + filter_info->twotuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); + rte_free(filter); - E1000_WRITE_REG(hw, E1000_TTQF(index), 0); - E1000_WRITE_REG(hw, E1000_IMIR(index), 0); - E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0); + E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); return 0; } -/* - * get a 2tuple filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that returns. - * *rx_queue: pointer of the queue id the filter assigned to. - * - * @return - * - On success, zero. 
- * - On failure, a negative value. - */ -static int -eth_igb_get_2tuple_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_2tuple_filter *filter, uint16_t *rx_queue) +static inline struct e1000_flex_filter * +eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list, + struct e1000_flex_filter_info *key) { - struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t imir, ttqf, imir_ext; + struct e1000_flex_filter *it; - MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); - - if (index >= E1000_MAX_TTQF_FILTERS) - return -EINVAL; /* filter index is out of range. */ - - ttqf = E1000_READ_REG(hw, E1000_TTQF(index)); - if (ttqf & E1000_TTQF_QUEUE_ENABLE) { - imir = E1000_READ_REG(hw, E1000_IMIR(index)); - filter->protocol = ttqf & E1000_TTQF_PROTOCOL_MASK; - filter->protocol_mask = (ttqf & E1000_TTQF_MASK_ENABLE) ? 1 : 0; - *rx_queue = (ttqf & E1000_TTQF_RX_QUEUE_MASK) >> - E1000_TTQF_QUEUE_SHIFT; - filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT); - filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0; - filter->priority = (imir & E1000_IMIR_PRIORITY) >> - E1000_IMIR_PRIORITY_SHIFT; - - imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index)); - if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) { - if (imir_ext & E1000_IMIR_EXT_CTRL_UGR) - filter->tcp_flags |= TCP_UGR_FLAG; - if (imir_ext & E1000_IMIR_EXT_CTRL_ACK) - filter->tcp_flags |= TCP_ACK_FLAG; - if (imir_ext & E1000_IMIR_EXT_CTRL_PSH) - filter->tcp_flags |= TCP_PSH_FLAG; - if (imir_ext & E1000_IMIR_EXT_CTRL_RST) - filter->tcp_flags |= TCP_RST_FLAG; - if (imir_ext & E1000_IMIR_EXT_CTRL_SYN) - filter->tcp_flags |= TCP_SYN_FLAG; - if (imir_ext & E1000_IMIR_EXT_CTRL_FIN) - filter->tcp_flags |= TCP_FIN_FLAG; - } else - filter->tcp_flags = 0; - return 0; + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct e1000_flex_filter_info)) == 0) + return it; } - return -ENOENT; + + return NULL; } -/* - * add a flex filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that will be added. - * rx_queue: the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. 
- */ static int -eth_igb_add_flex_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_flex_filter *filter, uint16_t rx_queue) +eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter, + bool add) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t wufc, en_bits = 0; - uint32_t queueing = 0; - uint32_t reg_off = 0; - uint8_t i, j = 0; + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter *flex_filter, *it; + uint32_t wufc, queueing, mask; + uint32_t reg_off; + uint8_t shift, i, j = 0; + + flex_filter = rte_zmalloc("e1000_flex_filter", + sizeof(struct e1000_flex_filter), 0); + if (flex_filter == NULL) + return -ENOMEM; - MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + flex_filter->filter_info.len = filter->len; + flex_filter->filter_info.priority = filter->priority; + memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len); + for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) { + mask = 0; + /* reverse bits in flex filter's mask*/ + for (shift = 0; shift < CHAR_BIT; shift++) { + if (filter->mask[i] & (0x01 << shift)) + mask |= (0x80 >> shift); + } + flex_filter->filter_info.mask[i] = mask; + } - if (index >= E1000_MAX_FLEXIBLE_FILTERS) - return -EINVAL; /* filter index is out of range. */ + wufc = E1000_READ_REG(hw, E1000_WUFC); + if (flex_filter->index < E1000_MAX_FHFT) + reg_off = E1000_FHFT(flex_filter->index); + else + reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT); + + if (add) { + if (eth_igb_flex_filter_lookup(&filter_info->flex_list, + &flex_filter->filter_info) != NULL) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(flex_filter); + return -EEXIST; + } + flex_filter->queue = filter->queue; + /* + * look for an unused flex filter index + * and insert the filter into the list. + */ + for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) { + if (!(filter_info->flex_mask & (1 << i))) { + filter_info->flex_mask |= 1 << i; + flex_filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->flex_list, + flex_filter, + entries); + break; + } + } + if (i >= E1000_MAX_FLEX_FILTERS) { + PMD_DRV_LOG(ERR, "flex filters are full."); + rte_free(flex_filter); + return -ENOSYS; + } - if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN || - filter->len % 8 != 0 || - filter->priority > E1000_MAX_FLEX_FILTER_PRI) - return -EINVAL; + E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ | + (E1000_WUFC_FLX0 << flex_filter->index)); + queueing = filter->len | + (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) | + (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT); + E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, + queueing); + for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) { + E1000_WRITE_REG(hw, reg_off, + flex_filter->filter_info.dwords[j]); + reg_off += sizeof(uint32_t); + E1000_WRITE_REG(hw, reg_off, + flex_filter->filter_info.dwords[++j]); + reg_off += sizeof(uint32_t); + E1000_WRITE_REG(hw, reg_off, + (uint32_t)flex_filter->filter_info.mask[i]); + reg_off += sizeof(uint32_t) * 2; + ++j; + } + } else { + it = eth_igb_flex_filter_lookup(&filter_info->flex_list, + &flex_filter->filter_info); + if (it == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + rte_free(flex_filter); + return -ENOENT; + } - wufc = E1000_READ_REG(hw, E1000_WUFC); - en_bits = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index); - if ((wufc & en_bits) == en_bits) - return -EINVAL; /* the filter is in use. 
*/ + for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++) + E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0); + E1000_WRITE_REG(hw, E1000_WUFC, wufc & + (~(E1000_WUFC_FLX0 << it->index))); - E1000_WRITE_REG(hw, E1000_WUFC, - wufc | E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index)); + filter_info->flex_mask &= ~(1 << it->index); + TAILQ_REMOVE(&filter_info->flex_list, it, entries); + rte_free(it); + rte_free(flex_filter); + } - j = 0; - if (index < E1000_MAX_FHFT) - reg_off = E1000_FHFT(index); - else - reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT); - - for (i = 0; i < 16; i++) { - E1000_WRITE_REG(hw, reg_off + i*4*4, filter->dwords[j]); - E1000_WRITE_REG(hw, reg_off + (i*4+1)*4, filter->dwords[++j]); - E1000_WRITE_REG(hw, reg_off + (i*4+2)*4, - (uint32_t)filter->mask[i]); - ++j; - } - queueing |= filter->len | - (rx_queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) | - (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT); - E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, queueing); return 0; } -/* - * remove a flex filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ static int -eth_igb_remove_flex_filter(struct rte_eth_dev *dev, - uint16_t index) +eth_igb_get_flex_filter(struct rte_eth_dev *dev, + struct rte_eth_flex_filter *filter) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t wufc, reg_off = 0; - uint8_t i; + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter flex_filter, *it; + uint32_t wufc, queueing, wufc_en = 0; + + memset(&flex_filter, 0, sizeof(struct e1000_flex_filter)); + flex_filter.filter_info.len = filter->len; + flex_filter.filter_info.priority = filter->priority; + memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len); + memcpy(flex_filter.filter_info.mask, filter->mask, + RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char)); + + it = eth_igb_flex_filter_lookup(&filter_info->flex_list, + &flex_filter.filter_info); + if (it == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + wufc = E1000_READ_REG(hw, E1000_WUFC); + wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index); + + if ((wufc & wufc_en) == wufc_en) { + uint32_t reg_off = 0; + if (it->index < E1000_MAX_FHFT) + reg_off = E1000_FHFT(it->index); + else + reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT); - MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + queueing = E1000_READ_REG(hw, + reg_off + E1000_FHFT_QUEUEING_OFFSET); + filter->len = queueing & E1000_FHFT_QUEUEING_LEN; + filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >> + E1000_FHFT_QUEUEING_PRIO_SHIFT; + filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >> + E1000_FHFT_QUEUEING_QUEUE_SHIFT; + return 0; + } + return -ENOENT; +} + +static int +eth_igb_flex_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_flex_filter *filter; + int ret = 0; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return ret; - if (index >= E1000_MAX_FLEXIBLE_FILTERS) + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + filter = (struct rte_eth_flex_filter *)arg; + if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN + || filter->len % sizeof(uint64_t) 
!= 0) { + PMD_DRV_LOG(ERR, "filter's length is out of range"); + return -EINVAL; + } + if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) { + PMD_DRV_LOG(ERR, "filter's priority is out of range"); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = eth_igb_add_del_flex_filter(dev, filter, TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = eth_igb_add_del_flex_filter(dev, filter, FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = eth_igb_get_flex_filter(dev, filter); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); + ret = -EINVAL; + break; + } + + return ret; +} + +/* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/ +static inline int +ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter, + struct e1000_5tuple_filter_info *filter_info) +{ + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) + return -EINVAL; + if (filter->priority > E1000_2TUPLE_MAX_PRI) return -EINVAL; /* filter index is out of range. */ + if (filter->tcp_flags > TCP_FLAG_ALL) + return -EINVAL; /* flags is invalid. */ - wufc = E1000_READ_REG(hw, E1000_WUFC); - E1000_WRITE_REG(hw, E1000_WUFC, wufc & (~(E1000_WUFC_FLX0 << index))); + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = filter->proto; + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } - if (index < E1000_MAX_FHFT) - reg_off = E1000_FHFT(index); + filter_info->priority = (uint8_t)filter->priority; + if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) + filter_info->tcp_flags = filter->tcp_flags; else - reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT); + filter_info->tcp_flags = 0; - for (i = 0; i < 64; i++) - E1000_WRITE_REG(hw, reg_off + i*4, 0); return 0; } +static inline struct e1000_5tuple_filter * +igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list, + struct e1000_5tuple_filter_info *key) +{ + struct e1000_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct e1000_5tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + /* - * get a flex filter + * igb_add_5tuple_filter_82576 - add a 5tuple filter * * @param * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that returns. 
- * *rx_queue: the pointer of the queue id the filter assigned to. + * ntuple_filter: ponter to the filter that will be added. * * @return * - On success, zero. * - On failure, a negative value. */ static int -eth_igb_get_flex_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_flex_filter *filter, uint16_t *rx_queue) +igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t wufc, queueing, wufc_en = 0; - uint8_t i, j; + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter *filter; + uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; + uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; + uint8_t i; + int ret; - MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + filter = rte_zmalloc("e1000_5tuple_filter", + sizeof(struct e1000_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; - if (index >= E1000_MAX_FLEXIBLE_FILTERS) - return -EINVAL; /* filter index is out of range. */ + ret = ntuple_filter_to_5tuple_82576(ntuple_filter, + &filter->filter_info); + if (ret < 0) { + rte_free(filter); + return ret; + } - wufc = E1000_READ_REG(hw, E1000_WUFC); - wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << index); + if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, + &filter->filter_info) != NULL) { + PMD_DRV_LOG(ERR, "filter exists."); + rte_free(filter); + return -EEXIST; + } + filter->queue = ntuple_filter->queue; - if ((wufc & wufc_en) == wufc_en) { - uint32_t reg_off = 0; - j = 0; - if (index < E1000_MAX_FHFT) - reg_off = E1000_FHFT(index); - else - reg_off = E1000_FHFT_EXT(index - E1000_MAX_FHFT); - - for (i = 0; i < 16; i++, j = i * 2) { - filter->dwords[j] = - E1000_READ_REG(hw, reg_off + i*4*4); - filter->dwords[j+1] = - E1000_READ_REG(hw, reg_off + (i*4+1)*4); - filter->mask[i] = - E1000_READ_REG(hw, reg_off + (i*4+2)*4); + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) { + if (!(filter_info->fivetuple_mask & (1 << i))) { + filter_info->fivetuple_mask |= 1 << i; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; } - queueing = E1000_READ_REG(hw, - reg_off + E1000_FHFT_QUEUEING_OFFSET); - filter->len = queueing & E1000_FHFT_QUEUEING_LEN; - filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >> - E1000_FHFT_QUEUEING_PRIO_SHIFT; - *rx_queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >> - E1000_FHFT_QUEUEING_QUEUE_SHIFT; - return 0; } - return -ENOENT; + if (i >= E1000_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); + rte_free(filter); + return -ENOSYS; + } + + ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; + if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. 
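+	 * (a zero mask in e1000_5tuple_filter_info selects the field for
+	 *  matching, so the corresponding FTQF bypass (BP) bit is cleared
+	 *  below; see ntuple_filter_to_5tuple_82576() above.)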
+		ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
+	if (filter->filter_info.dst_ip_mask == 0)
+		ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
+	if (filter->filter_info.src_port_mask == 0)
+		ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+	if (filter->filter_info.proto_mask == 0)
+		ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
+	ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
+		E1000_FTQF_QUEUE_MASK;
+	ftqf |= E1000_FTQF_QUEUE_ENABLE;
+	E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
+	E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
+	E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+
+	spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
+	E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+
+	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+		imir |= E1000_IMIR_PORT_BP;
+	else
+		imir &= ~E1000_IMIR_PORT_BP;
+	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+	/* tcp flags bits setting. */
+	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_URG;
+		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_ACK;
+		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_PSH;
+		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_RST;
+		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_SYN;
+		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_FIN;
+	} else
+		imir_ext |= E1000_IMIREXT_CTRL_BP;
+	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+	return 0;
 }

 /*
- * remove a 5tuple filter
+ * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates.
- * filter: ponter to the filter that will be added.
- * rx_queue: the queue id the filter assigned to.
+ * ntuple_filter: pointer to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
 static int
-eth_igb_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
-			struct rte_5tuple_filter *filter, uint16_t rx_queue)
+igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
+			struct rte_eth_ntuple_filter *ntuple_filter)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t ftqf, spqf = 0;
-	uint32_t imir = 0;
-	uint32_t imir_ext = 0;
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct e1000_5tuple_filter_info filter_5tuple;
+	struct e1000_5tuple_filter *filter;
+	int ret;

-	if (hw->mac.type != e1000_82576)
-		return -ENOSYS;
+	memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
+	ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
+			&filter_5tuple);
+	if (ret < 0)
+		return ret;

-	if (index >= E1000_MAX_FTQF_FILTERS ||
-		rx_queue >= IGB_MAX_RX_QUEUE_NUM_82576)
-		return -EINVAL;	/* filter index is out of range. */
+	filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
+			&filter_5tuple);
+	if (filter == NULL) {
+		PMD_DRV_LOG(ERR, "filter doesn't exist.");
+		return -ENOENT;
+	}

-	ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
-	if (ftqf & E1000_FTQF_QUEUE_ENABLE)
-		return -EINVAL;	/* filter index is in use. */
-
-	ftqf = 0;
-	ftqf |= filter->protocol & E1000_FTQF_PROTOCOL_MASK;
-	if (filter->src_ip_mask == 1) /* 1b means not compare. */
-		ftqf |= E1000_FTQF_SOURCE_ADDR_MASK;
-	if (filter->dst_ip_mask == 1)
-		ftqf |= E1000_FTQF_DEST_ADDR_MASK;
-	if (filter->src_port_mask == 1)
-		ftqf |= E1000_FTQF_SOURCE_PORT_MASK;
-	if (filter->protocol_mask == 1)
-		ftqf |= E1000_FTQF_PROTOCOL_COMP_MASK;
-	ftqf |= (rx_queue << E1000_FTQF_QUEUE_SHIFT) & E1000_FTQF_QUEUE_MASK;
-	ftqf |= E1000_FTQF_VF_MASK_EN;
-	ftqf |= E1000_FTQF_QUEUE_ENABLE;
-	E1000_WRITE_REG(hw, E1000_FTQF(index), ftqf);
-	E1000_WRITE_REG(hw, E1000_DAQF(index), filter->dst_ip);
-	E1000_WRITE_REG(hw, E1000_SAQF(index), filter->src_ip);
+	filter_info->fivetuple_mask &= ~(1 << filter->index);
+	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+
+	E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
+			E1000_FTQF_VF_BP | E1000_FTQF_MASK);
+	E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
+	E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
+	E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
+	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
+	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
+	/* free the entry only after its index is no longer dereferenced */
+	rte_free(filter);
+	return 0;
+}

-	spqf |= filter->src_port & E1000_SPQF_SRCPORT;
-	E1000_WRITE_REG(hw, E1000_SPQF(index), spqf);
+static int
+eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	uint32_t rctl;
+	struct e1000_hw *hw;
+	struct rte_eth_dev_info dev_info;
+	uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
+				     VLAN_TAG_SIZE);

-	imir |= (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
-	if (filter->dst_port_mask == 1) /* 1b means not compare. */
-		imir |= E1000_IMIR_PORT_BP;
-	else
-		imir &= ~E1000_IMIR_PORT_BP;
-	imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;
+	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+#ifdef RTE_LIBRTE_82571_SUPPORT
+	/* XXX: not bigger than max_rx_pktlen */
+	if (hw->mac.type == e1000_82571)
+		return -ENOTSUP;
+#endif
+	eth_igb_infos_get(dev, &dev_info);
+
+	/* check that mtu is within the allowed range */
+	if ((mtu < ETHER_MIN_MTU) ||
+		(frame_size > dev_info.max_rx_pktlen))
+		return -EINVAL;
+
+	/* refuse mtu that requires the support of scattered packets when this
+	 * feature has not been enabled before. */
+	if (!dev->data->scattered_rx &&
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+		return -EINVAL;
+
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+	/* switch to jumbo mode if needed */
+	if (frame_size > ETHER_MAX_LEN) {
+		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		rctl |= E1000_RCTL_LPE;
+	} else {
+		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		rctl &= ~E1000_RCTL_LPE;
+	}
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+	E1000_WRITE_REG(hw, E1000_RLPML,
+			dev->data->dev_conf.rxmode.max_rx_pkt_len);

-	imir_ext |= E1000_IMIR_EXT_SIZE_BP;
-	/* tcp flags bits setting. */
-	if (filter->tcp_flags & TCP_FLAG_ALL) {
-		if (filter->tcp_flags & TCP_UGR_FLAG)
-			imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
-		if (filter->tcp_flags & TCP_ACK_FLAG)
-			imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
-		if (filter->tcp_flags & TCP_PSH_FLAG)
-			imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
-		if (filter->tcp_flags & TCP_RST_FLAG)
-			imir_ext |= E1000_IMIR_EXT_CTRL_RST;
-		if (filter->tcp_flags & TCP_SYN_FLAG)
-			imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
-		if (filter->tcp_flags & TCP_FIN_FLAG)
-			imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
-	} else
-		imir_ext |= E1000_IMIR_EXT_CTRL_BP;
-	E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
-	E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
 	return 0;
 }

 /*
- * remove a 5tuple filter
+ * igb_add_del_ntuple_filter - add or delete a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
 static int
-eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
-				uint16_t index)
+igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ntuple_filter *ntuple_filter,
+			bool add)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;

-	if (hw->mac.type != e1000_82576)
-		return -ENOSYS;
-
-	if (index >= E1000_MAX_FTQF_FILTERS)
-		return -EINVAL;	/* filter index is out of range. */
+	switch (ntuple_filter->flags) {
+	case RTE_5TUPLE_FLAGS:
+	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+		if (hw->mac.type != e1000_82576)
+			return -ENOTSUP;
+		if (add)
+			ret = igb_add_5tuple_filter_82576(dev,
+					ntuple_filter);
+		else
+			ret = igb_remove_5tuple_filter_82576(dev,
+					ntuple_filter);
+		break;
+	case RTE_2TUPLE_FLAGS:
+	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
+			return -ENOTSUP;
+		if (add)
+			ret = igb_add_2tuple_filter(dev, ntuple_filter);
+		else
+			ret = igb_remove_2tuple_filter(dev, ntuple_filter);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}

-	E1000_WRITE_REG(hw, E1000_FTQF(index), 0);
-	E1000_WRITE_REG(hw, E1000_DAQF(index), 0);
-	E1000_WRITE_REG(hw, E1000_SAQF(index), 0);
-	E1000_WRITE_REG(hw, E1000_SPQF(index), 0);
-	E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
-	E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
-	return 0;
+	return ret;
 }
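As the switch above shows, the filter's flags field selects the hardware flavour and the MAC type gates support: 5-tuple is 82576-only, 2-tuple is 82580/i350-only. A hedged in-file sketch (the queue number and SYN-steering intent are illustrative assumptions, not part of this patch):

    /* Hypothetical: steer TCP SYN segments to a dedicated queue on 82576. */
    static int
    example_add_syn_steering(struct rte_eth_dev *dev)
    {
            struct rte_eth_ntuple_filter f;

            memset(&f, 0, sizeof(f));
            f.flags = RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG;
            f.proto = IPPROTO_TCP;
            f.proto_mask = UINT8_MAX;
            f.tcp_flags = TCP_SYN_FLAG;   /* matched through IMIREXT above */
            f.queue = 2;

            /* Returns -ENOTSUP on anything but e1000_82576. */
            return igb_add_del_ntuple_filter(dev, &f, TRUE);
    }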

 /*
- * get a 5tuple filter
+ * igb_get_ntuple_filter - get a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
- * index: the index the filter allocates
- * filter: ponter to the filter that returns
- * *rx_queue: pointer of the queue id the filter assigned to
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
 static int
-eth_igb_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
-			struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+igb_get_ntuple_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ntuple_filter *ntuple_filter)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t spqf, ftqf, imir, imir_ext;
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct e1000_5tuple_filter_info filter_5tuple;
+	struct e1000_2tuple_filter_info filter_2tuple;
+	struct e1000_5tuple_filter *p_5tuple_filter;
+	struct e1000_2tuple_filter *p_2tuple_filter;
+	int ret;

-	if (hw->mac.type != e1000_82576)
-		return -ENOSYS;
+	switch (ntuple_filter->flags) {
+	case RTE_5TUPLE_FLAGS:
+	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+		if (hw->mac.type != e1000_82576)
+			return -ENOTSUP;
+		memset(&filter_5tuple,
+			0,
+			sizeof(struct e1000_5tuple_filter_info));
+		ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
+						&filter_5tuple);
+		if (ret < 0)
+			return ret;
+		p_5tuple_filter = igb_5tuple_filter_lookup_82576(
+					&filter_info->fivetuple_list,
+					&filter_5tuple);
+		if (p_5tuple_filter == NULL) {
+			PMD_DRV_LOG(ERR, "filter doesn't exist.");
+			return -ENOENT;
+		}
+		ntuple_filter->queue = p_5tuple_filter->queue;
+		break;
+	case RTE_2TUPLE_FLAGS:
+	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
+			return -ENOTSUP;
+		memset(&filter_2tuple,
+			0,
+			sizeof(struct e1000_2tuple_filter_info));
+		ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
+		if (ret < 0)
+			return ret;
+		p_2tuple_filter = igb_2tuple_filter_lookup(
+					&filter_info->twotuple_list,
+					&filter_2tuple);
+		if (p_2tuple_filter == NULL) {
+			PMD_DRV_LOG(ERR, "filter doesn't exist.");
+			return -ENOENT;
+		}
+		ntuple_filter->queue = p_2tuple_filter->queue;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}

-	if (index >= E1000_MAX_FTQF_FILTERS)
-		return -EINVAL;	/* filter index is out of range. */
+	return ret;
+}

-	ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
-	if (ftqf & E1000_FTQF_QUEUE_ENABLE) {
-		filter->src_ip_mask =
-			(ftqf & E1000_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
-		filter->dst_ip_mask =
-			(ftqf & E1000_FTQF_DEST_ADDR_MASK) ? 1 : 0;
-		filter->src_port_mask =
-			(ftqf & E1000_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
-		filter->protocol_mask =
-			(ftqf & E1000_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
-		filter->protocol =
-			(uint8_t)ftqf & E1000_FTQF_PROTOCOL_MASK;
-		*rx_queue = (uint16_t)((ftqf & E1000_FTQF_QUEUE_MASK) >>
-				E1000_FTQF_QUEUE_SHIFT);
-
-		spqf = E1000_READ_REG(hw, E1000_SPQF(index));
-		filter->src_port = spqf & E1000_SPQF_SRCPORT;
-
-		filter->dst_ip = E1000_READ_REG(hw, E1000_DAQF(index));
-		filter->src_ip = E1000_READ_REG(hw, E1000_SAQF(index));
-
-		imir = E1000_READ_REG(hw, E1000_IMIR(index));
-		filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
-		filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
-		filter->priority = (imir & E1000_IMIR_PRIORITY) >>
-			E1000_IMIR_PRIORITY_SHIFT;
-
-		imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
-		if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
-			if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
-				filter->tcp_flags |= TCP_UGR_FLAG;
-			if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
-				filter->tcp_flags |= TCP_ACK_FLAG;
-			if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
-				filter->tcp_flags |= TCP_PSH_FLAG;
-			if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
-				filter->tcp_flags |= TCP_RST_FLAG;
-			if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
-				filter->tcp_flags |= TCP_SYN_FLAG;
-			if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
-				filter->tcp_flags |= TCP_FIN_FLAG;
-		} else
-			filter->tcp_flags = 0;
+/*
+ * igb_ntuple_filter_handle - Handle operations for ntuple filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+igb_ntuple_filter_handle(struct rte_eth_dev *dev,
+			enum rte_filter_op filter_op,
+			void *arg)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
 		return 0;
+
+	if (arg == NULL) {
+		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+			filter_op);
+		return -EINVAL;
+	}
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+		ret = igb_add_del_ntuple_filter(dev,
+			(struct rte_eth_ntuple_filter *)arg,
+			TRUE);
+		break;
+	case RTE_ETH_FILTER_DELETE:
+		ret = igb_add_del_ntuple_filter(dev,
+			(struct rte_eth_ntuple_filter *)arg,
+			FALSE);
+		break;
+	case RTE_ETH_FILTER_GET:
+		ret = igb_get_ntuple_filter(dev,
+			(struct rte_eth_ntuple_filter *)arg);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static inline int
+igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
+			uint16_t ethertype)
+{
+	int i;
+
+	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+		if (filter_info->ethertype_filters[i] == ethertype &&
+		    (filter_info->ethertype_mask & (1 << i)))
+			return i;
 	}
+	return -1;
+}
+
+static inline int
+igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
+			uint16_t ethertype)
+{
+	int i;
+
+	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+		if (!(filter_info->ethertype_mask & (1 << i))) {
+			filter_info->ethertype_mask |= 1 << i;
+			filter_info->ethertype_filters[i] = ethertype;
+			return i;
+		}
+	}
+	return -1;
+}
+
+static inline int
+igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
+			uint8_t idx)
+{
+	if (idx >= E1000_MAX_ETQF_FILTERS)
+		return -1;
+	filter_info->ethertype_mask &= ~(1 << idx);
+	filter_info->ethertype_filters[idx] = 0;
+	return idx;
+}
+
+
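The three helpers above implement a tiny slot allocator: ethertype_mask tracks which of the E1000_MAX_ETQF_FILTERS ETQF slots are busy, and ethertype_filters[] remembers the EtherType stored in each. A standalone model of the same first-free-bit bookkeeping (hypothetical names, for illustration only):

    #include <stdint.h>

    #define NB_SLOTS 8                     /* stand-in for E1000_MAX_ETQF_FILTERS */

    struct slot_table {
            uint8_t  mask;                 /* bit i set => slot i in use */
            uint16_t value[NB_SLOTS];      /* EtherType stored per slot */
    };

    /* Same pattern as igb_ethertype_filter_insert(): first free bit wins,
     * and the returned slot index doubles as the ETQF register index. */
    static int
    slot_insert(struct slot_table *t, uint16_t v)
    {
            int i;

            for (i = 0; i < NB_SLOTS; i++) {
                    if (!(t->mask & (1 << i))) {
                            t->mask |= 1 << i;
                            t->value[i] = v;
                            return i;
                    }
            }
            return -1;                     /* table full */
    }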
+static int
+igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ethertype_filter *filter,
+			bool add)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	uint32_t etqf = 0;
+	int ret;
+
+	if (filter->ether_type == ETHER_TYPE_IPv4 ||
+	    filter->ether_type == ETHER_TYPE_IPv6) {
+		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+			" ethertype filter.", filter->ether_type);
+		return -EINVAL;
+	}
+
+	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+		return -EINVAL;
+	}
+	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+		PMD_DRV_LOG(ERR, "drop option is unsupported.");
+		return -EINVAL;
+	}
+
+	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
+	if (ret >= 0 && add) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+			filter->ether_type);
+		return -EEXIST;
+	}
+	if (ret < 0 && !add) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+			filter->ether_type);
+		return -ENOENT;
+	}
+
+	if (add) {
+		ret = igb_ethertype_filter_insert(filter_info,
+			filter->ether_type);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "ethertype filters are full.");
+			return -ENOSYS;
+		}
+
+		etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
+		etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
+		etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
+	} else {
+		ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
+		if (ret < 0)
+			return -ENOSYS;
+	}
+	E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
+	E1000_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static int
+igb_get_ethertype_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ethertype_filter *filter)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	uint32_t etqf;
+	int ret;
+
+	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+			filter->ether_type);
+		return -ENOENT;
+	}
+
+	etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
+	if (etqf & E1000_ETQF_FILTER_ENABLE) {
+		filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
+		filter->flags = 0;
+		filter->queue = (etqf & E1000_ETQF_QUEUE) >>
+				E1000_ETQF_QUEUE_SHIFT;
+		return 0;
+	}
+	return -ENOENT;
 }
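Usage-wise, an ethertype filter is just an EtherType plus a target queue; IPv4/IPv6 EtherTypes, MAC compare, and the drop flag are all rejected by this driver. A sketch, not part of this patch (the 0x88F7/PTP EtherType and queue number are assumptions):

    /* Hypothetical: steer PTP-over-Ethernet frames (0x88F7) to RX queue 1. */
    static int
    example_add_ptp_filter(struct rte_eth_dev *dev)
    {
            struct rte_eth_ethertype_filter ef;

            memset(&ef, 0, sizeof(ef));
            ef.ether_type = 0x88F7;
            ef.flags = 0;        /* RTE_ETHTYPE_FLAGS_MAC/_DROP would be refused */
            ef.queue = 1;

            /* -EEXIST if this EtherType already occupies an ETQF slot. */
            return igb_add_del_ethertype_filter(dev, &ef, TRUE);
    }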

+/*
+ * igb_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+igb_ethertype_filter_handle(struct rte_eth_dev *dev,
+			enum rte_filter_op filter_op,
+			void *arg)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
+
+	if (arg == NULL) {
+		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+			filter_op);
+		return -EINVAL;
+	}
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+		ret = igb_add_del_ethertype_filter(dev,
+			(struct rte_eth_ethertype_filter *)arg,
+			TRUE);
+		break;
+	case RTE_ETH_FILTER_DELETE:
+		ret = igb_add_del_ethertype_filter(dev,
+			(struct rte_eth_ethertype_filter *)arg,
+			FALSE);
+		break;
+	case RTE_ETH_FILTER_GET:
+		ret = igb_get_ethertype_filter(dev,
+			(struct rte_eth_ethertype_filter *)arg);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int
+eth_igb_filter_ctrl(struct rte_eth_dev *dev,
+		enum rte_filter_type filter_type,
+		enum rte_filter_op filter_op,
+		void *arg)
+{
+	int ret = -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_NTUPLE:
+		ret = igb_ntuple_filter_handle(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ret = igb_ethertype_filter_handle(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_SYN:
+		ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_FLEXIBLE:
+		ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+			filter_type);
+		break;
+	}
+
+	return ret;
+}
+
 static struct rte_driver pmd_igb_drv = {
 	.type = PMD_PDEV,
 	.init = rte_igb_pmd_init,
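For context, eth_igb_filter_ctrl above is the PMD hook behind the generic ethdev filter API; applications do not call it directly. A minimal sketch, assuming the rte_eth_dev_filter_ctrl() wrapper and rte_eth_syn_filter layout of this DPDK generation:

    #include <rte_ethdev.h>

    /* Hypothetical: enable SYN filtering on a port through the generic entry
     * point, which dispatches to eth_igb_syn_filter_handle() for igb ports. */
    static int
    example_enable_syn_filter(uint8_t port_id, uint16_t queue)
    {
            struct rte_eth_syn_filter sf = {
                    .hig_pri = 1,   /* prioritize SYN match over other filters */
                    .queue = queue,
            };

            return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
                                           RTE_ETH_FILTER_ADD, &sf);
    }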