X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_ixgbe%2Fixgbe_ethdev.c;h=06da23148c273f4ec1e6bb317a5f76c0ca57f6f4;hb=ff708facfcbf42f3dcb3c62d82ecd93e7b8c2506;hp=2eb609c65dfeebe37af3f47cf994f1a9ab6dd528;hpb=5974ee01f4e82f3d5290b5dcf1e69ea890fef24b;p=dpdk.git diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c index 2eb609c65d..06da23148c 100644 --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c @@ -52,7 +52,6 @@ #include #include #include -#include #include #include #include @@ -68,6 +67,7 @@ #include "ixgbe/ixgbe_common.h" #include "ixgbe_ethdev.h" #include "ixgbe_bypass.h" +#include "ixgbe_rxtx.h" /* * High threshold controlling when to start sending XOFF frames. Must be at @@ -106,12 +106,17 @@ #define IXGBE_DEFAULT_TX_WTHRESH 0 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 +/* Bit shift and mask */ +#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) +#define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) +#define IXGBE_8_BIT_WIDTH CHAR_BIT +#define IXGBE_8_BIT_MASK UINT8_MAX + #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) -static int eth_ixgbe_dev_init(struct eth_driver *eth_drv, - struct rte_eth_dev *eth_dev); +static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); static int ixgbe_dev_configure(struct rte_eth_dev *dev); static int ixgbe_dev_start(struct rte_eth_dev *dev); static void ixgbe_dev_stop(struct rte_eth_dev *dev); @@ -132,8 +137,9 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, uint8_t stat_idx, uint8_t is_rx); static void ixgbe_dev_info_get(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info); - + struct rte_eth_dev_info *dev_info); +static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, @@ -158,9 +164,11 @@ static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf); static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf); + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf); + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); @@ -174,8 +182,7 @@ static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config); /* For Virtual Function support */ -static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv, - struct rte_eth_dev *eth_dev); +static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); static int ixgbevf_dev_configure(struct rte_eth_dev *dev); static int ixgbevf_dev_start(struct rte_eth_dev *dev); static void ixgbevf_dev_stop(struct rte_eth_dev *dev); @@ -216,24 +223,38 @@ static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); -static int ixgbe_add_syn_filter(struct 
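/*
 * A minimal sketch, outside this patch, of what the new IXGBE_4_BIT_WIDTH /
 * IXGBE_4_BIT_MASK macros above evaluate to. It assumes RTE_LEN2MASK(ln, tp)
 * keeps its rte_common.h meaning of "the ln low-order bits, as type tp"; with
 * CHAR_BIT == 8 the nibble mask used by the RETA code further down is 0x0F
 * and the byte mask is UINT8_MAX:
 */
#include <assert.h>
#include <limits.h>
#include <stdint.h>

#define EX_LEN2MASK(ln, tp) \
	((tp)((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln))))

static void
example_bit_masks(void)
{
	assert((CHAR_BIT / 2) == 4);				/* IXGBE_4_BIT_WIDTH */
	assert(EX_LEN2MASK(CHAR_BIT / 2, uint8_t) == 0x0F);	/* IXGBE_4_BIT_MASK */
	assert(UINT8_MAX == 0xFF);				/* IXGBE_8_BIT_MASK */
}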
rte_eth_dev *dev, - struct rte_syn_filter *filter, uint16_t rx_queue); -static int ixgbe_remove_syn_filter(struct rte_eth_dev *dev); -static int ixgbe_get_syn_filter(struct rte_eth_dev *dev, - struct rte_syn_filter *filter, uint16_t *rx_queue); -static int ixgbe_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_ethertype_filter *filter, uint16_t rx_queue); -static int ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev, - uint16_t index); -static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_ethertype_filter *filter, uint16_t *rx_queue); -static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_5tuple_filter *filter, uint16_t rx_queue); -static int ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, - uint16_t index); -static int ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_5tuple_filter *filter, uint16_t *rx_queue); - +static int ixgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add); +static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter); +static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter); +static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter); +static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter, + bool add); +static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *filter); +static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add); +static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg); +static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter); +static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); /* @@ -344,14 +365,6 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = { .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter, .set_queue_rate_limit = ixgbe_set_queue_rate_limit, .set_vf_rate_limit = ixgbe_set_vf_rate_limit, - .fdir_add_signature_filter = ixgbe_fdir_add_signature_filter, - .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter, - .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter, - .fdir_infos_get = ixgbe_fdir_info_get, - .fdir_add_perfect_filter = ixgbe_fdir_add_perfect_filter, - .fdir_update_perfect_filter = ixgbe_fdir_update_perfect_filter, - .fdir_remove_perfect_filter = ixgbe_fdir_remove_perfect_filter, - .fdir_set_masks = ixgbe_fdir_set_masks, .reta_update = ixgbe_dev_rss_reta_update, .reta_query = ixgbe_dev_rss_reta_query, #ifdef RTE_NIC_BYPASS @@ -367,15 +380,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = { #endif /* RTE_NIC_BYPASS */ .rss_hash_update = ixgbe_dev_rss_hash_update, .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, - .add_syn_filter = ixgbe_add_syn_filter, - .remove_syn_filter = ixgbe_remove_syn_filter, - .get_syn_filter = ixgbe_get_syn_filter, - .add_ethertype_filter = ixgbe_add_ethertype_filter, - 
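/*
 * With this patch the per-filter eth_dev_ops callbacks (add_syn_filter,
 * add_5tuple_filter, ...) collapse into the single .filter_ctrl hook, so an
 * application reaches every filter type through rte_eth_dev_filter_ctrl().
 * A hedged caller-side sketch; the queue, priority and port-number values
 * are illustrative assumptions, not taken from this patch:
 */
#include <stdint.h>
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int
example_add_5tuple(uint8_t port_id)
{
	struct rte_eth_ntuple_filter f = {
		.flags = RTE_5TUPLE_FLAGS,	/* this PMD accepts only 5-tuple */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX,	/* UINT*_MAX = match this field */
		.dst_port = 80,			/* see rte_eth_ctrl.h for byte order */
		.dst_port_mask = UINT16_MAX,
		/* *_mask = 0 wildcards the remaining fields */
		.priority = 1,			/* must pass the MIN/MAX_PRI check */
		.queue = 4,
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &f);
}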
.remove_ethertype_filter = ixgbe_remove_ethertype_filter, - .get_ethertype_filter = ixgbe_get_ethertype_filter, - .add_5tuple_filter = ixgbe_add_5tuple_filter, - .remove_5tuple_filter = ixgbe_remove_5tuple_filter, - .get_5tuple_filter = ixgbe_get_5tuple_filter, + .filter_ctrl = ixgbe_dev_filter_ctrl, }; /* @@ -391,7 +396,7 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = { .stats_get = ixgbevf_dev_stats_get, .stats_reset = ixgbevf_dev_stats_reset, .dev_close = ixgbevf_dev_close, - .dev_infos_get = ixgbe_dev_info_get, + .dev_infos_get = ixgbevf_dev_info_get, .mtu_set = ixgbevf_dev_set_mtu, .vlan_filter_set = ixgbevf_vlan_filter_set, .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, @@ -710,8 +715,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) * It returns 0 on success. */ static int -eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, - struct rte_eth_dev *eth_dev) +eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; struct ixgbe_hw *hw = @@ -722,6 +726,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); struct ixgbe_dcb_config *dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); uint32_t ctrl_ext; uint16_t csum; int diag, i; @@ -732,10 +738,24 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; - /* for secondary processes, we don't initialise any further as primary + /* + * For secondary processes, we don't initialise any further as primary * has already done this work. Only check we don't need a different - * RX function */ + * RX and TX function. + */ if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + struct igb_tx_queue *txq; + /* TX queue function in primary, set by last queue initialized + * Tx queue may not initialized by primary process */ + if (eth_dev->data->tx_queues) { + txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; + set_tx_function(eth_dev, txq); + } else { + /* Use default TX function if we get here */ + PMD_INIT_LOG(INFO, "No TX queues configured yet. 
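/*
 * Background for the secondary-process branch being added here: rx_pkt_burst
 * and tx_pkt_burst are per-process function pointers, so a secondary process
 * must re-derive them from the shared queue state rather than reuse addresses
 * the primary wrote. A condensed sketch of the pattern this hunk implements,
 * assuming the PMD-internal burst functions declared by this driver
 * (set_tx_function() is the ixgbe_rxtx.c helper the patch relies on):
 */
#include <rte_eal.h>
#include <rte_ethdev.h>

static int
example_secondary_attach(struct rte_eth_dev *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return 1;	/* primary runs the full init path instead */

	/* pick the RX burst function that matches the shared config */
	dev->rx_pkt_burst = dev->data->scattered_rx ?
		ixgbe_recv_scattered_pkts : ixgbe_recv_pkts;
	/* TX: recomputed from the last initialised queue, when one exists */
	return 0;
}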
" + "Using default TX function."); + } + if (eth_dev->data->scattered_rx) eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts; return 0; @@ -889,6 +909,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, /* enable support intr */ ixgbe_enable_intr(eth_dev); + /* initialize 5tuple filter list */ + TAILQ_INIT(&filter_info->fivetuple_list); + memset(filter_info->fivetuple_mask, 0, + sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + return 0; } @@ -938,8 +963,7 @@ generate_random_mac_addr(struct ether_addr *mac_addr) * Virtual Function device init */ static int -eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, - struct rte_eth_dev *eth_dev) +eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) { int diag; uint32_t tc, tcs; @@ -1452,6 +1476,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) if (status != 0) return -1; hw->mac.ops.start_hw(hw); + hw->mac.get_link_status = true; /* configure PF module if SRIOV enabled */ ixgbe_pf_host_configure(dev); @@ -1466,7 +1491,16 @@ ixgbe_dev_start(struct rte_eth_dev *dev) goto error; } - ixgbe_dev_rxtx_start(dev); + err = ixgbe_dev_rxtx_start(dev); + if (err < 0) { + PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); + goto error; + } + + /* Skip link setup if loopback mode is enabled for 82599. */ + if (hw->mac.type == ixgbe_mac_82599EB && + dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) + goto skip_link_setup; if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { err = hw->mac.ops.setup_sfp(hw); @@ -1477,14 +1511,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev) /* Turn on the laser */ ixgbe_enable_tx_laser(hw); - /* Skip link setup if loopback mode is enabled for 82599. */ - if (hw->mac.type == ixgbe_mac_82599EB && - dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) - goto skip_link_setup; - err = ixgbe_check_link(hw, &speed, &link_up, 0); if (err) goto error; + dev->data->dev_link.link_status = link_up; + err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); if (err) goto error; @@ -1577,6 +1608,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next; int vf; PMD_INIT_FUNC_TRACE(); @@ -1606,6 +1640,18 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* Clear recorded link status */ memset(&link, 0, sizeof(link)); rte_ixgbe_dev_atomic_write_link_status(dev, &link); + + /* Remove all ntuple filters of the device */ + for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list); + p_5tuple != NULL; p_5tuple = p_5tuple_next) { + p_5tuple_next = TAILQ_NEXT(p_5tuple, entries); + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, entries); + rte_free(p_5tuple); + } + memset(filter_info->fivetuple_mask, 0, + sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + } /* @@ -1911,7 +1957,6 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) if (stats == NULL) return; - memset(stats, 0, sizeof(*stats)); stats->ipackets = hw_stats->vfgprc; stats->ibytes = hw_stats->vfgorc; stats->opackets = hw_stats->vfgptc; @@ -1964,28 +2009,81 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM; + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; dev_info->default_rxconf = (struct rte_eth_rxconf) 
{ - .rx_thresh = { - .pthresh = IXGBE_DEFAULT_RX_PTHRESH, - .hthresh = IXGBE_DEFAULT_RX_HTHRESH, - .wthresh = IXGBE_DEFAULT_RX_WTHRESH, - }, - .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, - .rx_drop_en = 0, + .rx_thresh = { + .pthresh = IXGBE_DEFAULT_RX_PTHRESH, + .hthresh = IXGBE_DEFAULT_RX_HTHRESH, + .wthresh = IXGBE_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, }; + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IXGBE_DEFAULT_TX_PTHRESH, + .hthresh = IXGBE_DEFAULT_TX_HTHRESH, + .wthresh = IXGBE_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; + dev_info->reta_size = ETH_RSS_RETA_SIZE_128; + dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; +} + +static void +ixgbevf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ + dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = dev->pci_dev->max_vfs; + if (hw->mac.type == ixgbe_mac_82598EB) + dev_info->max_vmdq_pools = ETH_16_POOLS; + else + dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM; - dev_info->default_txconf = (struct rte_eth_txconf) { - .tx_thresh = { - .pthresh = IXGBE_DEFAULT_TX_PTHRESH, - .hthresh = IXGBE_DEFAULT_TX_HTHRESH, - .wthresh = IXGBE_DEFAULT_TX_WTHRESH, - }, - .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, - .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS, + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IXGBE_DEFAULT_RX_PTHRESH, + .hthresh = IXGBE_DEFAULT_RX_HTHRESH, + .wthresh = IXGBE_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IXGBE_DEFAULT_TX_PTHRESH, + .hthresh = IXGBE_DEFAULT_TX_HTHRESH, + .wthresh = IXGBE_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, }; } @@ -1995,7 +2093,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_link link, old; - ixgbe_link_speed link_speed; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; int link_up; int diag; @@ -2019,6 +2117,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) return 0; } + if (link_speed == IXGBE_LINK_SPEED_UNKNOWN && + !hw->mac.get_link_status) { + memcpy(&link, &old, sizeof(link)); + return -1; + } + if (link_up == 0) { rte_ixgbe_dev_atomic_write_link_status(dev, 
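/*
 * The reta_size advertised just above is what lets callers size the new
 * rte_eth_rss_reta_entry64 array: one 64-entry group per RTE_RETA_GROUP_SIZE
 * slice. A caller-side sketch, assuming the ethdev wrappers of this DPDK
 * generation (rte_eth_dev_rss_reta_query() taking conf plus size):
 */
#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
example_read_reta(uint8_t port_id)
{
	struct rte_eth_dev_info info;
	struct rte_eth_rss_reta_entry64 conf[ETH_RSS_RETA_SIZE_128 /
					     RTE_RETA_GROUP_SIZE];
	uint16_t i;

	rte_eth_dev_info_get(port_id, &info);
	if (info.reta_size != ETH_RSS_RETA_SIZE_128)
		return -1;	/* this PMD reports exactly 128 entries */

	memset(conf, 0, sizeof(conf));
	for (i = 0; i < info.reta_size / RTE_RETA_GROUP_SIZE; i++)
		conf[i].mask = UINT64_MAX;	/* select every entry */

	return rte_eth_dev_rss_reta_query(port_id, conf, info.reta_size);
}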
&link); if (link.link_status == old.link_status) @@ -2663,38 +2767,42 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf) + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { - uint8_t i,j,mask; - uint32_t reta; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t i, j, mask; + uint32_t reta, r; + uint16_t idx, shift; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); - /* - * Update Redirection Table RETA[n],n=0...31,The redirection table has - * 128-entries in 32 registers - */ - for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) { - if (i < ETH_RSS_RETA_NUM_ENTRIES/2) - mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF); + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IXGBE_4_BIT_MASK); + if (!mask) + continue; + if (mask == IXGBE_4_BIT_MASK) + r = 0; else - mask = (uint8_t)((reta_conf->mask_hi >> - (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF); - if (mask != 0) { - reta = 0; - if (mask != 0xF) - reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2)); - - for (j = 0; j < 4; j++) { - if (mask & (0x1 << j)) { - if (mask != 0xF) - reta &= ~(0xFF << 8 * j); - reta |= reta_conf->reta[i + j] << 8*j; - } - } - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),reta); + r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2)); + for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta |= reta_conf[idx].reta[shift + j] << + (CHAR_BIT * j); + else + reta |= r & (IXGBE_8_BIT_MASK << + (CHAR_BIT * j)); } + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); } return 0; @@ -2702,32 +2810,36 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf) + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { - uint8_t i,j,mask; + uint8_t i, j, mask; uint32_t reta; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t idx, shift; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); - /* - * Read Redirection Table RETA[n],n=0...31,The redirection table has - * 128-entries in 32 registers - */ - for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) { - if (i < ETH_RSS_RETA_NUM_ENTRIES/2) - mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF); - else - mask = (uint8_t)((reta_conf->mask_hi >> - (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF); - - if (mask != 0) { - reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2)); - for (j = 0; j < 4; j++) { - if (mask & (0x1 << j)) - reta_conf->reta[i + j] = - (uint8_t)((reta >> 8 * j) & 0xFF); - } + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IXGBE_4_BIT_MASK); + if (!mask) + 
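/*
 * A worked example of the indexing in ixgbe_dev_rss_reta_update() above,
 * under the layout the code assumes: 128 8-bit entries packed four per
 * 32-bit RETA register, stepped in nibble-sized groups.
 *
 *   i = 70: idx   = 70 / RTE_RETA_GROUP_SIZE = 1
 *           shift = 70 % RTE_RETA_GROUP_SIZE = 6
 *           -> bits 6..9 of reta_conf[1].mask choose which of
 *              reta_conf[1].reta[6..9] are written,
 *           -> target register is IXGBE_RETA(70 >> 2) = IXGBE_RETA(17).
 *
 * When only some lanes are selected (mask != 0xF) the old register value is
 * read back and merged, which is what the "r & (IXGBE_8_BIT_MASK << ...)"
 * term implements. The merge in isolation:
 */
#include <stdint.h>

static uint32_t
example_pack_reta(const uint8_t q[4], uint32_t old_reg, uint8_t nibble_mask)
{
	uint32_t reta = 0;
	int j;

	for (j = 0; j < 4; j++) {
		if (nibble_mask & (1 << j))
			reta |= (uint32_t)q[j] << (8 * j);	/* new entry */
		else
			reta |= old_reg & (0xFFu << (8 * j));	/* keep lane */
	}
	return reta;
}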
continue; + + reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2)); + for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta_conf[idx].reta[shift + j] = + ((reta >> (CHAR_BIT * j)) & + IXGBE_8_BIT_MASK); } } @@ -2849,6 +2961,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); hw->mac.ops.reset_hw(hw); + hw->mac.get_link_status = true; /* negotiate mailbox API version to use with the PF. */ ixgbevf_negotiate_api(hw); @@ -3581,450 +3694,730 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) } } -/* - * add syn filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * filter: ponter to the filter that will be added. - * rx_queue: the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ +#define MAC_TYPE_FILTER_SUP(type) do {\ + if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ + (type) != ixgbe_mac_X550)\ + return -ENOTSUP;\ +} while (0) + static int -ixgbe_add_syn_filter(struct rte_eth_dev *dev, - struct rte_syn_filter *filter, uint16_t rx_queue) +ixgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t synqf; - if (hw->mac.type != ixgbe_mac_82599EB) - return -ENOSYS; - - if (rx_queue >= IXGBE_MAX_RX_QUEUE_NUM) + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) return -EINVAL; synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); - if (synqf & IXGBE_SYN_FILTER_ENABLE) + if (add) { + if (synqf & IXGBE_SYN_FILTER_ENABLE) + return -EINVAL; + synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & + IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); + + if (filter->hig_pri) + synqf |= IXGBE_SYN_FILTER_SYNQFP; + else + synqf &= ~IXGBE_SYN_FILTER_SYNQFP; + } else { + if (!(synqf & IXGBE_SYN_FILTER_ENABLE)) + return -ENOENT; + synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); + } + IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); + IXGBE_WRITE_FLUSH(hw); + return 0; +} + +static int +ixgbe_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + + if (synqf & IXGBE_SYN_FILTER_ENABLE) { + filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 
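/*
 * MAC_TYPE_FILTER_SUP() above wraps a conditional return in do { } while (0)
 * so the macro expands to exactly one statement. Without the wrapper, using
 * the macro under an if/else would silently rebind the else. A sketch of the
 * failure mode (the EX_* names are illustrative):
 */
#define EX_CHECK_BAD(x)		if (!(x)) return -1	/* breaks under else */
#define EX_CHECK_GOOD(x)	do { if (!(x)) return -1; } while (0)

static int
example_check(int supported, int skip)
{
	if (!skip)
		EX_CHECK_GOOD(supported);
	else		/* with EX_CHECK_BAD, this else would attach to the
			 * macro's hidden if and invert the control flow */
		return 0;
	return 1;
}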
1 : 0; + filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); + return 0; + } + return -ENOENT; +} + +static int +ixgbe_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); return -EINVAL; + } - synqf = (uint32_t)(((rx_queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & - IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_syn_filter_get(dev, + (struct rte_eth_syn_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + ret = -EINVAL; + break; + } - if (filter->hig_pri) - synqf |= IXGBE_SYN_FILTER_SYNQFP; - else - synqf &= ~IXGBE_SYN_FILTER_SYNQFP; + return ret; +} - IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); - return 0; + +static inline enum ixgbe_5tuple_protocol +convert_protocol_type(uint8_t protocol_value) +{ + if (protocol_value == IPPROTO_TCP) + return IXGBE_FILTER_PROTOCOL_TCP; + else if (protocol_value == IPPROTO_UDP) + return IXGBE_FILTER_PROTOCOL_UDP; + else if (protocol_value == IPPROTO_SCTP) + return IXGBE_FILTER_PROTOCOL_SCTP; + else + return IXGBE_FILTER_PROTOCOL_NONE; } /* - * remove syn filter + * add a 5tuple filter * * @param * dev: Pointer to struct rte_eth_dev. + * index: the index the filter allocates. + * filter: ponter to the filter that will be added. + * rx_queue: the queue id the filter assigned to. * * @return * - On success, zero. * - On failure, a negative value. */ static int -ixgbe_remove_syn_filter(struct rte_eth_dev *dev) +ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t synqf; + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i, idx, shift; + uint32_t ftqf, sdpqf; + uint32_t l34timir = 0; + uint8_t mask = 0xff; - if (hw->mac.type != ixgbe_mac_82599EB) + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { + idx = i / (sizeof(uint32_t) * NBBY); + shift = i % (sizeof(uint32_t) * NBBY); + if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { + filter_info->fivetuple_mask[idx] |= 1 << shift; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; + } + } + if (i >= IXGBE_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); return -ENOSYS; + } - synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + sdpqf = (uint32_t)(filter->filter_info.dst_port << + IXGBE_SDPQF_DSTPORT_SHIFT); + sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); + + ftqf = (uint32_t)(filter->filter_info.proto & + IXGBE_FTQF_PROTOCOL_MASK); + ftqf |= (uint32_t)((filter->filter_info.priority & + IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); + if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
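/*
 * The slot search at the top of ixgbe_add_5tuple_filter() above is a small
 * bitmap allocator: bit i of fivetuple_mask[] marks hardware filter slot i
 * as busy. The same find-first-zero pattern in isolation (NBBY is the BSD
 * bits-per-byte constant, normally 8):
 */
#include <stdint.h>

#define EX_SLOTS	128
#define EX_WORD_BITS	(sizeof(uint32_t) * 8)

static int
example_alloc_slot(uint32_t busy[EX_SLOTS / 32])
{
	unsigned int i, idx, shift;

	for (i = 0; i < EX_SLOTS; i++) {
		idx = i / EX_WORD_BITS;
		shift = i % EX_WORD_BITS;
		if (!(busy[idx] & (1u << shift))) {
			busy[idx] |= 1u << shift;	/* claim slot i */
			return (int)i;
		}
	}
	return -1;	/* every slot in use: caller reports "filters full" */
}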
*/ + mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; + if (filter->filter_info.dst_ip_mask == 0) + mask &= IXGBE_FTQF_DEST_ADDR_MASK; + if (filter->filter_info.src_port_mask == 0) + mask &= IXGBE_FTQF_SOURCE_PORT_MASK; + if (filter->filter_info.dst_port_mask == 0) + mask &= IXGBE_FTQF_DEST_PORT_MASK; + if (filter->filter_info.proto_mask == 0) + mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; + ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; + ftqf |= IXGBE_FTQF_POOL_MASK_EN; + ftqf |= IXGBE_FTQF_QUEUE_ENABLE; - synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); + IXGBE_WRITE_REG(hw, IXGBE_DAQF(idx), filter->filter_info.dst_ip); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(idx), filter->filter_info.src_ip); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(idx), sdpqf); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(idx), ftqf); - IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); + l34timir |= IXGBE_L34T_IMIR_RESERVE; + l34timir |= (uint32_t)(filter->queue << + IXGBE_L34T_IMIR_QUEUE_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); return 0; } /* - * get the syn filter's info + * remove a 5tuple filter * * @param * dev: Pointer to struct rte_eth_dev. - * filter: ponter to the filter that returns. - * *rx_queue: pointer to the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. + * filter: the pointer of the filter will be removed. */ -static int -ixgbe_get_syn_filter(struct rte_eth_dev *dev, - struct rte_syn_filter *filter, uint16_t *rx_queue) - +static void +ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t synqf; + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint16_t index = filter->index; - if (hw->mac.type != ixgbe_mac_82599EB) - return -ENOSYS; + filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= + ~(1 << (index % (sizeof(uint32_t) * NBBY))); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); - synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); - if (synqf & IXGBE_SYN_FILTER_ENABLE) { - filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0; - *rx_queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); - return 0; - } - return -ENOENT; + IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); } -/* - * add an ethertype filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that will be added. - * rx_queue: the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. 
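/*
 * Note the inverted sense in the FTQF mask logic just above: in the software
 * filter a *_mask of 0 means "compare this field", while a set bit in the
 * hardware 5-tuple mask means "ignore this field". The code therefore starts
 * from 0xff (ignore everything) and clears one bit per field that must
 * match. A sketch with hypothetical bit assignments (the real ones are the
 * IXGBE_FTQF_*_MASK constants, which arrive with the bit already cleared):
 */
#include <stdint.h>

#define EX_IGNORE_SRC_IP	(1u << 0)	/* illustrative bit layout */
#define EX_IGNORE_DST_IP	(1u << 1)

static uint8_t
example_hw_ignore_mask(int src_ip_mask, int dst_ip_mask)
{
	uint8_t m = 0xff;		/* default: ignore every field */

	if (src_ip_mask == 0)		/* sw: 0 means compare ... */
		m &= (uint8_t)~EX_IGNORE_SRC_IP;	/* ... hw: clear ignore bit */
	if (dst_ip_mask == 0)
		m &= (uint8_t)~EX_IGNORE_DST_IP;
	return m;
}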
- */ static int -ixgbe_add_ethertype_filter(struct rte_eth_dev *dev, - uint16_t index, struct rte_ethertype_filter *filter, - uint16_t rx_queue) +ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t etqf, etqs = 0; + struct ixgbe_hw *hw; + uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; - if (hw->mac.type != ixgbe_mac_82599EB) - return -ENOSYS; + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (index >= IXGBE_MAX_ETQF_FILTERS || - rx_queue >= IXGBE_MAX_RX_QUEUE_NUM) + if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) return -EINVAL; - etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index)); - if (etqf & IXGBE_ETQF_FILTER_EN) - return -EINVAL; /* filter index is in use. */ + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. */ + if (!dev->data->scattered_rx && + (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + return -EINVAL; - etqf = 0; - etqf |= IXGBE_ETQF_FILTER_EN; - etqf |= (uint32_t)filter->ethertype; + /* + * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU + * request of the version 2.0 of the mailbox API. + * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 + * of the mailbox API. + * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers + * prior to 3.11.33 which contains the following change: + * "ixgbe: Enable jumbo frames support w/ SR-IOV" + */ + ixgbevf_rlpml_set_vf(hw, max_frame); - if (filter->priority_en) { - if (filter->priority > IXGBE_ETQF_MAX_PRI) - return -EINVAL; - etqf |= (uint32_t)((filter->priority << IXGBE_ETQF_SHIFT) & IXGBE_ETQF_UP); - etqf |= IXGBE_ETQF_UP_EN; + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; + return 0; +} + +#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ + if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ + return -ENOTSUP;\ +} while (0) + +static inline struct ixgbe_5tuple_filter * +ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, + struct ixgbe_5tuple_filter_info *key) +{ + struct ixgbe_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct ixgbe_5tuple_filter_info)) == 0) { + return it; + } } - etqs |= (uint32_t)((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) & IXGBE_ETQS_RX_QUEUE); - etqs |= IXGBE_ETQS_QUEUE_EN; + return NULL; +} - IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), etqf); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), etqs); +/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ +static inline int +ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, + struct ixgbe_5tuple_filter_info *filter_info) +{ + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || + filter->priority > IXGBE_5TUPLE_MAX_PRI || + filter->priority < IXGBE_5TUPLE_MIN_PRI) + return -EINVAL; + + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch 
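/*
 * ixgbe_5tuple_filter_lookup() above keys the list by memcmp() over the whole
 * ixgbe_5tuple_filter_info struct. That is only safe because every caller
 * zeroes the key with memset() before filling it in, so compiler padding
 * bytes compare equal. The contract in miniature:
 */
#include <stdint.h>
#include <string.h>

struct ex_key {
	uint32_t ip;
	uint8_t proto;		/* padding typically follows here */
	uint16_t port;
};

static void
ex_key_init(struct ex_key *k, uint32_t ip, uint8_t proto, uint16_t port)
{
	memset(k, 0, sizeof(*k));	/* zero the padding first: mandatory */
	k->ip = ip;
	k->proto = proto;
	k->port = port;
}

static int
ex_key_equal(const struct ex_key *a, const struct ex_key *b)
{
	/* valid only for keys built via ex_key_init() */
	return memcmp(a, b, sizeof(*a)) == 0;
}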
(filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = + convert_protocol_type(filter->proto); + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; return 0; } /* - * remove an ethertype filter + * add or delete a ntuple filter * * @param * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * add: if true, add filter, if false, remove filter * * @return * - On success, zero. * - On failure, a negative value. */ static int -ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev, - uint16_t index) +ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } - if (hw->mac.type != ixgbe_mac_82599EB) - return -ENOSYS; + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; - if (index >= IXGBE_MAX_ETQF_FILTERS) - return -EINVAL; + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter != NULL && add) { + PMD_DRV_LOG(ERR, "filter exists."); + return -EEXIST; + } + if (filter == NULL && !add) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } - IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), 0); + if (add) { + filter = rte_zmalloc("ixgbe_5tuple_filter", + sizeof(struct ixgbe_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + (void)rte_memcpy(&filter->filter_info, + &filter_5tuple, + sizeof(struct ixgbe_5tuple_filter_info)); + filter->queue = ntuple_filter->queue; + ret = ixgbe_add_5tuple_filter(dev, filter); + if (ret < 0) { + rte_free(filter); + return ret; + } + } else + ixgbe_remove_5tuple_filter(dev, filter); return 0; } /* - * get an ethertype filter + * get a ntuple filter * * @param * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that will be gotten. - * *rx_queue: the ponited of the queue id the filter assigned to. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter * * @return * - On success, zero. * - On failure, a negative value. 
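/*
 * ntuple_filter_to_5tuple() above folds each public field mask into a single
 * hardware flag: all-ones means exact match (hw mask flag 0), zero means
 * wildcard (hw mask flag 1), and anything in between is rejected because the
 * FTQF hardware cannot match partial fields. The rule for one 32-bit field:
 */
#include <errno.h>
#include <stdint.h>

static int
example_fold_mask32(uint32_t public_mask, uint8_t *hw_ignore)
{
	if (public_mask == UINT32_MAX) {
		*hw_ignore = 0;		/* compare the field */
		return 0;
	}
	if (public_mask == 0) {
		*hw_ignore = 1;		/* ignore the field */
		return 0;
	}
	return -EINVAL;			/* partial masks unsupported */
}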
*/ static int -ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, - uint16_t index, struct rte_ethertype_filter *filter, - uint16_t *rx_queue) +ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t etqf, etqs; - - if (hw->mac.type != ixgbe_mac_82599EB) - return -ENOSYS; - - if (index >= IXGBE_MAX_ETQF_FILTERS) + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); return -EINVAL; - - etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index)); - etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(index)); - if (etqf & IXGBE_ETQF_FILTER_EN) { - filter->ethertype = etqf & IXGBE_ETQF_ETHERTYPE; - filter->priority_en = (etqf & IXGBE_ETQF_UP_EN) ? 1 : 0; - if (filter->priority_en) - filter->priority = (etqf & IXGBE_ETQF_UP) >> 16; - *rx_queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> IXGBE_ETQS_RX_QUEUE_SHIFT; - return 0; } - return -ENOENT; -} -static inline enum ixgbe_5tuple_protocol -convert_protocol_type(uint8_t protocol_value) -{ - if (protocol_value == IPPROTO_TCP) - return IXGBE_FILTER_PROTOCOL_TCP; - else if (protocol_value == IPPROTO_UDP) - return IXGBE_FILTER_PROTOCOL_UDP; - else if (protocol_value == IPPROTO_SCTP) - return IXGBE_FILTER_PROTOCOL_SCTP; - else - return IXGBE_FILTER_PROTOCOL_NONE; -} + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; -static inline uint8_t -revert_protocol_type(enum ixgbe_5tuple_protocol protocol) -{ - if (protocol == IXGBE_FILTER_PROTOCOL_TCP) - return IPPROTO_TCP; - else if (protocol == IXGBE_FILTER_PROTOCOL_UDP) - return IPPROTO_UDP; - else if (protocol == IXGBE_FILTER_PROTOCOL_SCTP) - return IPPROTO_SCTP; - else - return 0; + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + ntuple_filter->queue = filter->queue; + return 0; } /* - * add a 5tuple filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that will be added. - * rx_queue: the queue id the filter assigned to. + * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op * * @return * - On success, zero. * - On failure, a negative value. */ static int -ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_5tuple_filter *filter, uint16_t rx_queue) +ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t ftqf, sdpqf = 0; - uint32_t l34timir = 0; - uint8_t mask = 0xff; + int ret; - if (hw->mac.type != ixgbe_mac_82599EB) - return -ENOSYS; + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); - if (index >= IXGBE_MAX_FTQF_FILTERS || - rx_queue >= IXGBE_MAX_RX_QUEUE_NUM || - filter->priority > IXGBE_5TUPLE_MAX_PRI || - filter->priority < IXGBE_5TUPLE_MIN_PRI) - return -EINVAL; /* filter index is out of range. 
*/ + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; - if (filter->tcp_flags) { - PMD_INIT_LOG(INFO, "82599EB not tcp flags in 5tuple"); + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); return -EINVAL; } - ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index)); - if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) - return -EINVAL; /* filter index is in use. */ + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} - ftqf = 0; - sdpqf = (uint32_t)(filter->dst_port << IXGBE_SDPQF_DSTPORT_SHIFT); - sdpqf = sdpqf | (filter->src_port & IXGBE_SDPQF_SRCPORT); +static inline int +ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, + uint16_t ethertype) +{ + int i; - ftqf |= (uint32_t)(convert_protocol_type(filter->protocol) & - IXGBE_FTQF_PROTOCOL_MASK); - ftqf |= (uint32_t)((filter->priority & IXGBE_FTQF_PRIORITY_MASK) << - IXGBE_FTQF_PRIORITY_SHIFT); - if (filter->src_ip_mask == 0) /* 0 means compare. */ - mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; - if (filter->dst_ip_mask == 0) - mask &= IXGBE_FTQF_DEST_ADDR_MASK; - if (filter->src_port_mask == 0) - mask &= IXGBE_FTQF_SOURCE_PORT_MASK; - if (filter->dst_port_mask == 0) - mask &= IXGBE_FTQF_DEST_PORT_MASK; - if (filter->protocol_mask == 0) - mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; - ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; - ftqf |= IXGBE_FTQF_POOL_MASK_EN; - ftqf |= IXGBE_FTQF_QUEUE_ENABLE; + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_filters[i] == ethertype && + (filter_info->ethertype_mask & (1 << i))) + return i; + } + return -1; +} - IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), filter->dst_ip); - IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), filter->src_ip); - IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), sdpqf); - IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), ftqf); +static inline int +ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info, + uint16_t ethertype) +{ + int i; - l34timir |= IXGBE_L34T_IMIR_RESERVE; - l34timir |= (uint32_t)(rx_queue << IXGBE_L34T_IMIR_QUEUE_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), l34timir); - return 0; + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (!(filter_info->ethertype_mask & (1 << i))) { + filter_info->ethertype_mask |= 1 << i; + filter_info->ethertype_filters[i] = ethertype; + return i; + } + } + return -1; +} + +static inline int +ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info, + uint8_t idx) +{ + if (idx >= IXGBE_MAX_ETQF_FILTERS) + return -1; + filter_info->ethertype_mask &= ~(1 << idx); + filter_info->ethertype_filters[idx] = 0; + return idx; } -/* - * remove a 5tuple filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * - * @return - * - On success, zero. - * - On failure, a negative value. 
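/*
 * The three inline helpers above keep a software shadow of the ETQF table:
 * ethertype_mask is a busy bitmap and ethertype_filters[] records which
 * ethertype occupies which slot, so duplicate detection never touches
 * hardware. The same idea in a self-contained model:
 */
#include <stdint.h>

#define EX_N_ETQF 8

struct ex_shadow {
	uint8_t busy;			/* bit i set => slot i in use */
	uint16_t ethertype[EX_N_ETQF];
};

static int
ex_ethertype_lookup(const struct ex_shadow *s, uint16_t et)
{
	int i;

	for (i = 0; i < EX_N_ETQF; i++)
		if ((s->busy & (1 << i)) && s->ethertype[i] == et)
			return i;	/* slot index == register index */
	return -1;
}

static int
ex_ethertype_insert(struct ex_shadow *s, uint16_t et)
{
	int i;

	for (i = 0; i < EX_N_ETQF; i++)
		if (!(s->busy & (1 << i))) {
			s->busy |= 1 << i;
			s->ethertype[i] = et;
			return i;
		}
	return -1;			/* table full */
}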
- */ static int -ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, - uint16_t index) +ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf = 0; + uint32_t etqs = 0; + int ret; - if (hw->mac.type != ixgbe_mac_82599EB) - return -ENOSYS; + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; - if (index >= IXGBE_MAX_FTQF_FILTERS) - return -EINVAL; /* filter index is out of range. */ + if (filter->ether_type == ETHER_TYPE_IPv4 || + filter->ether_type == ETHER_TYPE_IPv6) { + PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" + " ethertype filter.", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "mac compare is unsupported."); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "drop option is unsupported."); + return -EINVAL; + } + + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret >= 0 && add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", + filter->ether_type); + return -EEXIST; + } + if (ret < 0 && !add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + if (add) { + ret = ixgbe_ethertype_filter_insert(filter_info, + filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype filters are full."); + return -ENOSYS; + } + etqf = IXGBE_ETQF_FILTER_EN; + etqf |= (uint32_t)filter->ether_type; + etqs |= (uint32_t)((filter->queue << + IXGBE_ETQS_RX_QUEUE_SHIFT) & + IXGBE_ETQS_RX_QUEUE); + etqs |= IXGBE_ETQS_QUEUE_EN; + } else { + ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); + if (ret < 0) + return -ENOSYS; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); + IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); return 0; } -/* - * get a 5tuple filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates - * filter: ponter to the filter that returns. - * *rx_queue: pointer of the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ static int -ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index, - struct rte_5tuple_filter *filter, uint16_t *rx_queue) +ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t sdpqf, ftqf, l34timir; - uint8_t mask; - enum ixgbe_5tuple_protocol proto; + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf, etqs; + int ret; - if (hw->mac.type != ixgbe_mac_82599EB) - return -ENOSYS; + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } - if (index >= IXGBE_MAX_FTQF_FILTERS) - return -EINVAL; /* filter index is out of range. 
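/*
 * Given the checks in ixgbe_add_del_ethertype_filter() above (IPv4/IPv6
 * ethertypes, MAC compare and the drop action are all refused), a caller-side
 * sketch that steers one ethertype to a queue; 0x88F7 (PTP over Ethernet)
 * and queue 2 are illustrative values, not taken from this patch:
 */
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int
example_add_ethertype(uint8_t port_id)
{
	struct rte_eth_ethertype_filter f = {
		.ether_type = 0x88F7,	/* must not be ETHER_TYPE_IPv4/v6 */
		.flags = 0,		/* FLAGS_MAC / FLAGS_DROP are rejected */
		.queue = 2,
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				       RTE_ETH_FILTER_ADD, &f);
}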
*/ - - ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index)); - if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) { - proto = (enum ixgbe_5tuple_protocol)(ftqf & IXGBE_FTQF_PROTOCOL_MASK); - filter->protocol = revert_protocol_type(proto); - filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) & - IXGBE_FTQF_PRIORITY_MASK; - mask = (uint8_t)((ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) & - IXGBE_FTQF_5TUPLE_MASK_MASK); - filter->src_ip_mask = - (mask & IXGBE_FTQF_SOURCE_ADDR_MASK) ? 1 : 0; - filter->dst_ip_mask = - (mask & IXGBE_FTQF_DEST_ADDR_MASK) ? 1 : 0; - filter->src_port_mask = - (mask & IXGBE_FTQF_SOURCE_PORT_MASK) ? 1 : 0; - filter->dst_port_mask = - (mask & IXGBE_FTQF_DEST_PORT_MASK) ? 1 : 0; - filter->protocol_mask = - (mask & IXGBE_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0; - - sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(index)); - filter->dst_port = (sdpqf & IXGBE_SDPQF_DSTPORT) >> - IXGBE_SDPQF_DSTPORT_SHIFT; - filter->src_port = sdpqf & IXGBE_SDPQF_SRCPORT; - filter->dst_ip = IXGBE_READ_REG(hw, IXGBE_DAQF(index)); - filter->src_ip = IXGBE_READ_REG(hw, IXGBE_SAQF(index)); - - l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(index)); - *rx_queue = (l34timir & IXGBE_L34T_IMIR_QUEUE) >> - IXGBE_L34T_IMIR_QUEUE_SHIFT; + etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); + if (etqf & IXGBE_ETQF_FILTER_EN) { + etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); + filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; + filter->flags = 0; + filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> + IXGBE_ETQS_RX_QUEUE_SHIFT; return 0; } return -ENOENT; } +/* + * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ static int -ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) { - struct ixgbe_hw *hw; - uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; - hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + MAC_TYPE_FILTER_SUP(hw->mac.type); - if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) - return -EINVAL; + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; - /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ - if (!dev->data->scattered_rx && - (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > - dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); return -EINVAL; + } - /* - * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU - * request of the version 2.0 of the mailbox API. - * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 - * of the mailbox API. 
- * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers - * prior to 3.11.33 which contains the following change: - * "ixgbe: Enable jumbo frames support w/ SR-IOV" - */ - ixgbevf_rlpml_set_vf(hw, max_frame); + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} - /* update max frame size */ - dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; - return 0; +static int +ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = -EINVAL; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_SYN: + ret = ixgbe_syn_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_FDIR: + ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + break; + } + + return ret; } static struct rte_driver rte_ixgbe_driver = {
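/*
 * Each handler above answers RTE_ETH_FILTER_NOP with 0 before looking at arg,
 * but only after the MAC_TYPE_FILTER_SUP*() gate, which turns NOP into a
 * cheap capability probe; the ethdev layer exposes the same check as
 * rte_eth_dev_filter_supported(). A hedged equivalent by hand:
 */
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int
example_filter_supported(uint8_t port_id, enum rte_filter_type type)
{
	/* 1 => supported on this port/MAC; 0 otherwise */
	return rte_eth_dev_filter_ctrl(port_id, type,
				       RTE_ETH_FILTER_NOP, NULL) == 0;
}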