X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_ixgbe%2Fixgbe_ethdev.c;h=937fc3c4e48f6c35d57e6b489d07bac216506717;hb=1224decaa44b3dba58e0a524fd0383969929c575;hp=a95bc28906d7c60e96843c6ee0eeaf2dee62dd51;hpb=db03592561700bbd776e062e61351fad2dc532f6;p=dpdk.git diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c index a95bc28906..937fc3c4e4 100644 --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c @@ -1,13 +1,13 @@ /*- * BSD LICENSE - * + * * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright @@ -17,7 +17,7 @@ * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -58,6 +59,8 @@ #include #include #include +#include +#include #include "ixgbe_logs.h" #include "ixgbe/ixgbe_api.h" @@ -86,6 +89,28 @@ #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ #define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */ +#define IXGBE_MMW_SIZE_DEFAULT 0x4 +#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 + +/* + * Default values for RX/TX configuration + */ +#define IXGBE_DEFAULT_RX_FREE_THRESH 32 +#define IXGBE_DEFAULT_RX_PTHRESH 8 +#define IXGBE_DEFAULT_RX_HTHRESH 8 +#define IXGBE_DEFAULT_RX_WTHRESH 0 + +#define IXGBE_DEFAULT_TX_FREE_THRESH 32 +#define IXGBE_DEFAULT_TX_PTHRESH 32 +#define IXGBE_DEFAULT_TX_HTHRESH 0 +#define IXGBE_DEFAULT_TX_WTHRESH 0 +#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 + +/* Bit shift and mask */ +#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) +#define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) +#define IXGBE_8_BIT_WIDTH CHAR_BIT +#define IXGBE_8_BIT_MASK UINT8_MAX #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ @@ -96,6 +121,8 @@ static int eth_ixgbe_dev_init(struct eth_driver *eth_drv, static int ixgbe_dev_configure(struct rte_eth_dev *dev); static int ixgbe_dev_start(struct rte_eth_dev *dev); static void ixgbe_dev_stop(struct rte_eth_dev *dev); +static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); +static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); static void ixgbe_dev_close(struct rte_eth_dev *dev); static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); @@ -111,11 +138,15 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, uint8_t stat_idx, uint8_t is_rx); static void ixgbe_dev_info_get(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info); + struct rte_eth_dev_info *dev_info); +static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t 
vlan_id, int on); static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id); -static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, +static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on); static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); @@ -127,14 +158,18 @@ static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); static int ixgbe_dev_led_on(struct rte_eth_dev *dev); static int ixgbe_dev_led_off(struct rte_eth_dev *dev); -static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, - struct rte_eth_fc_conf *fc_conf); +static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf); static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf); + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf); + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); @@ -158,7 +193,7 @@ static void ixgbevf_intr_disable(struct ixgbe_hw *hw); static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); -static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, +static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); @@ -169,18 +204,47 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr* mac_addr,uint8_t on); static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on); -static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, +static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, uint16_t rx_mask, uint8_t on); static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); -static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, +static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, uint64_t pool_mask,uint8_t vlan_on); -static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, - struct rte_eth_vmdq_mirror_conf *mirror_conf, +static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_vmdq_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on); static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id); +static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t tx_rate); +static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk); + +static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); +static int ixgbe_add_syn_filter(struct rte_eth_dev *dev, + struct 
rte_syn_filter *filter, uint16_t rx_queue); +static int ixgbe_remove_syn_filter(struct rte_eth_dev *dev); +static int ixgbe_get_syn_filter(struct rte_eth_dev *dev, + struct rte_syn_filter *filter, uint16_t *rx_queue); +static int ixgbe_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index, + struct rte_ethertype_filter *filter, uint16_t rx_queue); +static int ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev, + uint16_t index); +static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index, + struct rte_ethertype_filter *filter, uint16_t *rx_queue); +static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index, + struct rte_5tuple_filter *filter, uint16_t rx_queue); +static int ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + uint16_t index); +static int ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index, + struct rte_5tuple_filter *filter, uint16_t *rx_queue); + +static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); + /* * Define VF Stats MACRO for Non "cleared on read" register */ @@ -205,13 +269,13 @@ static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ (h)->bitmap[idx] |= 1 << bit;\ }while(0) - + #define IXGBE_CLEAR_HWSTRIP(h, q) do{\ uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ (h)->bitmap[idx] &= ~(1 << bit);\ }while(0) - + #define IXGBE_GET_HWSTRIP(h, q, r) do{\ uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ @@ -245,6 +309,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = { .dev_configure = ixgbe_dev_configure, .dev_start = ixgbe_dev_start, .dev_stop = ixgbe_dev_stop, + .dev_set_link_up = ixgbe_dev_set_link_up, + .dev_set_link_down = ixgbe_dev_set_link_down, .dev_close = ixgbe_dev_close, .promiscuous_enable = ixgbe_dev_promiscuous_enable, .promiscuous_disable = ixgbe_dev_promiscuous_disable, @@ -255,10 +321,15 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = { .stats_reset = ixgbe_dev_stats_reset, .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, .dev_infos_get = ixgbe_dev_info_get, + .mtu_set = ixgbe_dev_mtu_set, .vlan_filter_set = ixgbe_vlan_filter_set, .vlan_tpid_set = ixgbe_vlan_tpid_set, .vlan_offload_set = ixgbe_vlan_offload_set, .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set, + .rx_queue_start = ixgbe_dev_rx_queue_start, + .rx_queue_stop = ixgbe_dev_rx_queue_stop, + .tx_queue_start = ixgbe_dev_tx_queue_start, + .tx_queue_stop = ixgbe_dev_tx_queue_stop, .rx_queue_setup = ixgbe_dev_rx_queue_setup, .rx_queue_release = ixgbe_dev_rx_queue_release, .rx_queue_count = ixgbe_dev_rx_queue_count, @@ -267,18 +338,21 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = { .tx_queue_release = ixgbe_dev_tx_queue_release, .dev_led_on = ixgbe_dev_led_on, .dev_led_off = ixgbe_dev_led_off, + .flow_ctrl_get = ixgbe_flow_ctrl_get, .flow_ctrl_set = ixgbe_flow_ctrl_set, .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set, .mac_addr_add = ixgbe_add_rar, .mac_addr_remove = ixgbe_remove_rar, .uc_hash_table_set = ixgbe_uc_hash_table_set, .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, - .mirror_rule_set = ixgbe_mirror_rule_set, - .mirror_rule_reset = ixgbe_mirror_rule_reset, + .mirror_rule_set = ixgbe_mirror_rule_set, + .mirror_rule_reset = ixgbe_mirror_rule_reset, .set_vf_rx_mode = ixgbe_set_pool_rx_mode, .set_vf_rx = ixgbe_set_pool_rx, .set_vf_tx = ixgbe_set_pool_tx, .set_vf_vlan_filter = 
ixgbe_set_pool_vlan_filter, + .set_queue_rate_limit = ixgbe_set_queue_rate_limit, + .set_vf_rate_limit = ixgbe_set_vf_rate_limit, .fdir_add_signature_filter = ixgbe_fdir_add_signature_filter, .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter, .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter, @@ -300,6 +374,17 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = { .bypass_ver_show = ixgbe_bypass_ver_show, .bypass_wd_reset = ixgbe_bypass_wd_reset, #endif /* RTE_NIC_BYPASS */ + .rss_hash_update = ixgbe_dev_rss_hash_update, + .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, + .add_syn_filter = ixgbe_add_syn_filter, + .remove_syn_filter = ixgbe_remove_syn_filter, + .get_syn_filter = ixgbe_get_syn_filter, + .add_ethertype_filter = ixgbe_add_ethertype_filter, + .remove_ethertype_filter = ixgbe_remove_ethertype_filter, + .get_ethertype_filter = ixgbe_get_ethertype_filter, + .add_5tuple_filter = ixgbe_add_5tuple_filter, + .remove_5tuple_filter = ixgbe_remove_5tuple_filter, + .get_5tuple_filter = ixgbe_get_5tuple_filter, }; /* @@ -315,7 +400,8 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = { .stats_get = ixgbevf_dev_stats_get, .stats_reset = ixgbevf_dev_stats_reset, .dev_close = ixgbevf_dev_close, - .dev_infos_get = ixgbe_dev_info_get, + .dev_infos_get = ixgbevf_dev_info_get, + .mtu_set = ixgbevf_dev_set_mtu, .vlan_filter_set = ixgbevf_vlan_filter_set, .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, .vlan_offload_set = ixgbevf_vlan_offload_set, @@ -323,6 +409,8 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = { .rx_queue_release = ixgbe_dev_rx_queue_release, .tx_queue_setup = ixgbe_dev_tx_queue_setup, .tx_queue_release = ixgbe_dev_tx_queue_release, + .mac_addr_add = ixgbevf_add_mac_addr, + .mac_addr_remove = ixgbevf_remove_mac_addr, }; /** @@ -418,9 +506,9 @@ ixgbe_enable_intr(struct rte_eth_dev *dev) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - struct ixgbe_hw *hw = + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - + IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); IXGBE_WRITE_FLUSH(hw); } @@ -479,15 +567,19 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, uint32_t q_map; uint8_t n, offset; - if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540)) + if ((hw->mac.type != ixgbe_mac_82599EB) && + (hw->mac.type != ixgbe_mac_X540) && + (hw->mac.type != ixgbe_mac_X550) && + (hw->mac.type != ixgbe_mac_X550EM_x)) return -ENOSYS; - PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n", - (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx); + PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d", + (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", + queue_id, stat_idx); n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); if (n >= IXGBE_NB_STAT_MAPPING_REGS) { - PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n"); + PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); return -EIO; } offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); @@ -507,19 +599,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, else stat_mappings->rqsmr[n] |= qsmr_mask; - PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n" - "%s[%d] = 0x%08x\n", - (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx, - is_rx ? "RQSMR" : "TQSM",n, is_rx ? 
stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); + PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d", + (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", + queue_id, stat_idx); + PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, + is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); /* Now write the mapping in the appropriate register */ if (is_rx) { - PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n", + PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d", stat_mappings->rqsmr[n], n); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); } else { - PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n", + PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d", stat_mappings->tqsm[n], n); IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); } @@ -559,7 +652,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = (uint8_t)(100/dcb_max_tc + (i & 1)); tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i; - tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = + tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = (uint8_t)(100/dcb_max_tc + (i & 1)); tc->pfc = ixgbe_dcb_pfc_disabled; } @@ -579,12 +672,47 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) /* support all DCB capabilities in 82599 */ dcb_config->support.capabilities = 0xFF; - /*we only support 4 Tcs for X540*/ - if (hw->mac.type == ixgbe_mac_X540) { + /*we only support 4 Tcs for X540, X550 */ + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x) { dcb_config->num_tcs.pg_tcs = 4; dcb_config->num_tcs.pfc_tcs = 4; } -} +} + +/* + * Ensure that all locks are released before first NVM or PHY access + */ +static void +ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) +{ + uint16_t mask; + + /* + * Phy lock should not fail in this early stage. If this is the case, + * it is due to an improper exit of the application. + * So force the release of the faulty lock. Release of common lock + * is done automatically by swfw_sync function. + */ + mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; + if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); + } + ixgbe_release_swfw_semaphore(hw, mask); + + /* + * These ones are more tricky since they are common to all ports; but + * swfw_sync retries last long enough (1s) to be almost sure that if + * lock can not be taken it is due to an improper lock of the + * semaphore. + */ + mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; + if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { + PMD_DRV_LOG(DEBUG, "SWFW common locks released"); + } + ixgbe_release_swfw_semaphore(hw, mask); +} /* * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c. 
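A worked illustration of the queue-to-stat-register arithmetic in the hunks above: each 32-bit RQSMR/TQSM register holds four 8-bit queue-to-stat-index fields, which is what the n and offset computations imply. The sketch below is illustrative only; it hard-codes the assumed field width (8) and fields-per-register count (4) instead of the driver's NB_QMAP_FIELDS_PER_QSM_REG macro:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t queue_id = 13, stat_idx = 5;
	uint8_t n = queue_id / 4;	/* register index, as in the patch: 3 */
	uint8_t offset = queue_id % 4;	/* 8-bit field within it: 1 */
	uint32_t clearing_mask = 0xFFu << (8 * offset);
	uint32_t qsmr_mask = (uint32_t)stat_idx << (8 * offset);

	/* Mapping RX queue 13 to stat index 5 touches RQSMR[3], byte 1 */
	printf("RQSMR[%u]: clear 0x%08x, then set 0x%08x\n",
	       n, clearing_mask, qsmr_mask);
	return 0;
}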
@@ -599,7 +727,7 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct ixgbe_vfta * shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
-	struct ixgbe_hwstrip *hwstrip = 
+	struct ixgbe_hwstrip *hwstrip =
 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
 	struct ixgbe_dcb_config *dcb_config =
 		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
@@ -627,11 +755,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-#ifdef RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP
 	hw->allow_unsupported_sfp = 1;
-#endif
 
-	/* Initialize the shared code */
+	/* Initialize the shared code (base driver) */
 #ifdef RTE_NIC_BYPASS
 	diag = ixgbe_bypass_init_shared_code(hw);
 #else
@@ -643,6 +769,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		return -EIO;
 	}
 
+	/* pick up the PCI bus settings for reporting later */
+	ixgbe_get_bus_info(hw);
+
+	/* Unlock any pending hardware semaphore */
+	ixgbe_swfw_lock_reset(hw);
+
 	/* Initialize DCB configuration*/
 	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
 	ixgbe_dcb_init(hw,dcb_config);
@@ -687,11 +819,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	if (diag == IXGBE_ERR_EEPROM_VERSION) {
 		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
 		    "LOM. Please be aware there may be issues associated "
-		    "with your hardware.\n If you are experiencing problems "
+		    "with your hardware.");
+		PMD_INIT_LOG(ERR, "If you are experiencing problems "
 		    "please contact your Intel or hardware representative "
-		    "who provided you with this hardware.\n");
+		    "who provided you with this hardware.");
 	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
+		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
 	if (diag) {
 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
 		return -EIO;
@@ -700,9 +833,6 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	/* disable interrupt */
 	ixgbe_disable_intr(hw);
 
-	/* pick up the PCI bus settings for reporting later */
-	ixgbe_get_bus_info(hw);
-
 	/* reset mappings for queue statistics hw counters*/
 	ixgbe_reset_qstat_mappings(hw);
 
@@ -719,7 +849,7 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	/* Copy the permanent MAC address */
 	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
 			&eth_dev->data->mac_addrs[0]);
-	
+
 	/* Allocate memory for storing hash filter MAC addresses */
 	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe",
 			ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
@@ -748,12 +878,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	IXGBE_WRITE_FLUSH(hw);
 
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		PMD_INIT_LOG(DEBUG,
-			     "MAC: %d, PHY: %d, SFP+: %d\n",
+		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
 			     (int) hw->mac.type, (int) hw->phy.type,
 			     (int) hw->phy.sfp_type);
 	else
-		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
+		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
 			     (int) hw->mac.type, (int) hw->phy.type);
 
 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
@@ -772,6 +901,48 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	return 0;
 }
+
+/*
+ * Negotiate mailbox API version with the PF.
+ * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
+ * Then we try to negotiate starting with the most recent one.
+ * If all negotiation attempts fail, then we will proceed with + * the default one (ixgbe_mbox_api_10). + */ +static void +ixgbevf_negotiate_api(struct ixgbe_hw *hw) +{ + int32_t i; + + /* start with highest supported, proceed down */ + static const enum ixgbe_pfvf_api_rev sup_ver[] = { + ixgbe_mbox_api_11, + ixgbe_mbox_api_10, + }; + + for (i = 0; + i != RTE_DIM(sup_ver) && + ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; + i++) + ; +} + +static void +generate_random_mac_addr(struct ether_addr *mac_addr) +{ + uint64_t random; + + /* Set Organizationally Unique Identifier (OUI) prefix. */ + mac_addr->addr_bytes[0] = 0x00; + mac_addr->addr_bytes[1] = 0x09; + mac_addr->addr_bytes[2] = 0xC0; + /* Force indication of locally assigned MAC address. */ + mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR; + /* Generate the last 3 bytes of the MAC address with a random number. */ + random = rte_rand(); + memcpy(&mac_addr->addr_bytes[3], &random, 3); +} + /* * Virtual Function device init */ @@ -779,15 +950,18 @@ static int eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev; - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); int diag; + uint32_t tc, tcs; + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); struct ixgbe_vfta * shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); - struct ixgbe_hwstrip *hwstrip = + struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); + struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; - PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init"); + PMD_INIT_FUNC_TRACE(); eth_dev->dev_ops = &ixgbevf_eth_dev_ops; eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; @@ -814,7 +988,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, /* initialize the hw strip bitmap*/ memset(hwstrip, 0, sizeof(*hwstrip)); - /* Initialize the shared code */ + /* Initialize the shared code (base driver) */ diag = ixgbe_init_shared_code(hw); if (diag != IXGBE_SUCCESS) { PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); @@ -827,19 +1001,25 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, /* Disable the interrupts for VF */ ixgbevf_intr_disable(hw); - hw->mac.num_rar_entries = hw->mac.max_rx_queues; + hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ diag = hw->mac.ops.reset_hw(hw); - if (diag != IXGBE_SUCCESS) { + /* + * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when + * the underlying PF driver has not assigned a MAC address to the VF. + * In this case, assign a random MAC address. + */ + if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); - RTE_LOG(ERR, PMD, "\tThe MAC address is not valid.\n" - "\tThe most likely cause of this error is that the VM host\n" - "\thas not assigned a valid MAC address to this VF device.\n" - "\tPlease consult the DPDK Release Notes (FAQ section) for\n" - "\ta possible solution to this problem.\n"); return (diag); } + /* negotiate mailbox API version to use with the PF. 
*/
+	ixgbevf_negotiate_api(hw);
+
+	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
+	ixgbevf_get_queues(hw, &tcs, &tc);
+
 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
 			hw->mac.num_rar_entries, 0);
@@ -851,9 +1031,28 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		return -ENOMEM;
 	}
 
+	/* Generate a random MAC address, if none was assigned by PF. */
+	if (is_zero_ether_addr(perm_addr)) {
+		generate_random_mac_addr(perm_addr);
+		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
+		if (diag) {
+			rte_free(eth_dev->data->mac_addrs);
+			eth_dev->data->mac_addrs = NULL;
+			return diag;
+		}
+		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+			     "%02x:%02x:%02x:%02x:%02x:%02x",
+			     perm_addr->addr_bytes[0],
+			     perm_addr->addr_bytes[1],
+			     perm_addr->addr_bytes[2],
+			     perm_addr->addr_bytes[3],
+			     perm_addr->addr_bytes[4],
+			     perm_addr->addr_bytes[5]);
+	}
+
 	/* Copy the permanent MAC address */
-	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
-			&eth_dev->data->mac_addrs[0]);
+	ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
 
 	/* reset the hardware with the new settings */
 	diag = hw->mac.ops.start_hw(hw);
@@ -866,9 +1065,9 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		return (-EIO);
 	}
 
-	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
-		     eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
-		     "ixgbe_mac_82599_vf");
+	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
 
 	return 0;
 }
@@ -877,9 +1076,7 @@ static struct eth_driver rte_ixgbe_pmd = {
 	{
 		.name = "rte_ixgbe_pmd",
 		.id_table = pci_id_ixgbe_map,
-#ifdef RTE_EAL_UNBIND_PORTS
-		.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
-#endif
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
 	},
 	.eth_dev_init = eth_ixgbe_dev_init,
 	.dev_private_size = sizeof(struct ixgbe_adapter),
@@ -892,9 +1089,7 @@ static struct eth_driver rte_ixgbevf_pmd = {
 	{
 		.name = "rte_ixgbevf_pmd",
 		.id_table = pci_id_ixgbevf_map,
-#ifdef RTE_EAL_UNBIND_PORTS
-		.drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
-#endif
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 	},
 	.eth_dev_init = eth_ixgbevf_dev_init,
 	.dev_private_size = sizeof(struct ixgbe_adapter),
@@ -905,8 +1100,8 @@ static struct eth_driver rte_ixgbevf_pmd = {
  * Invoked once at EAL init time.
  * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
  */
-int
-rte_ixgbe_pmd_init(void)
+static int
+rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
 {
 	PMD_INIT_FUNC_TRACE();
 
@@ -919,10 +1114,10 @@ rte_ixgbe_pmd_init(void)
  * Invoked once at EAL init time.
  * Register itself as the [Virtual Poll Mode] Driver of PCI Niantic devices.
*/ -int -rte_ixgbevf_pmd_init(void) +static int +rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused) { - DEBUGFUNC("rte_ixgbevf_pmd_init"); + PMD_INIT_FUNC_TRACE(); rte_eth_driver_register(&rte_ixgbevf_pmd); return (0); @@ -1013,10 +1208,10 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); } -static void +static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) { - struct ixgbe_hwstrip *hwstrip = + struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); if(queue >= IXGBE_MAX_RX_QUEUE_NUM) @@ -1127,7 +1322,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); /* record those setting for HW strip per queue */ - ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1); + ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1); } } } @@ -1237,19 +1432,22 @@ ixgbe_dev_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); int err, link_up = 0, negotiate = 0; uint32_t speed = 0; int mask = 0; int status; - + uint16_t vf, idx; + PMD_INIT_FUNC_TRACE(); /* IXGBE devices don't support half duplex */ if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) && (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) { - PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n", - dev->data->dev_conf.link_duplex, - dev->data->port_id); + PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu", + dev->data->dev_conf.link_duplex, + dev->data->port_id); return -EINVAL; } @@ -1269,11 +1467,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev) /* initialize transmission unit */ ixgbe_dev_tx_init(dev); - + /* This can fail when allocating mbufs for descriptor rings */ err = ixgbe_dev_rx_init(dev); if (err) { - PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n"); + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); goto error; } @@ -1320,13 +1518,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev) speed = IXGBE_LINK_SPEED_10GB_FULL; break; default: - PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n", - dev->data->dev_conf.link_speed, - dev->data->port_id); + PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu", + dev->data->dev_conf.link_speed, + dev->data->port_id); goto error; } - err = ixgbe_setup_link(hw, speed, negotiate, link_up); + err = ixgbe_setup_link(hw, speed, link_up); if (err) goto error; @@ -1346,10 +1544,10 @@ skip_link_setup: if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { /* Enable vlan filtering for VMDq */ ixgbe_vmdq_vlan_hw_filter_enable(dev); - } + } /* Configure DCB hw */ - ixgbe_configure_dcb(dev); + ixgbe_configure_dcb(dev); if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { err = ixgbe_fdir_configure(dev); @@ -1357,6 +1555,16 @@ skip_link_setup: goto error; } + /* Restore vf rate limit */ + if (vfinfo != NULL) { + for (vf = 0; vf < dev->pci_dev->max_vfs; vf++) + for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) + if (vfinfo[vf].tx_rate[idx] != 0) + ixgbe_set_vf_rate_limit(dev, vf, + vfinfo[vf].tx_rate[idx], + 1 << idx); + } + ixgbe_restore_statistics_mapping(dev); return (0); @@ -1376,6 +1584,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) struct rte_eth_link link; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + 
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+	int vf;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1389,16 +1600,77 @@
 	/* stop adapter */
 	ixgbe_stop_adapter(hw);
 
+	for (vf = 0; vfinfo != NULL &&
+		     vf < dev->pci_dev->max_vfs; vf++)
+		vfinfo[vf].clear_to_send = false;
+
 	/* Turn off the laser */
 	ixgbe_disable_tx_laser(hw);
 
 	ixgbe_dev_clear_queues(dev);
 
+	/* Clear stored conf */
+	dev->data->scattered_rx = 0;
+
 	/* Clear recorded link status */
 	memset(&link, 0, sizeof(link));
 	rte_ixgbe_dev_atomic_write_link_status(dev, &link);
 }
 
+/*
+ * Set device link up: enable tx laser.
+ */
+static int
+ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+			/* Not supported in bypass mode */
+			PMD_INIT_LOG(ERR, "Set link up is not supported "
+				     "by device id 0x%x", hw->device_id);
+			return -ENOTSUP;
+		}
+#endif
+		/* Turn on the laser */
+		ixgbe_enable_tx_laser(hw);
+		return 0;
+	}
+
+	PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
+		     hw->device_id);
+	return -ENOTSUP;
+}
+
+/*
+ * Set device link down: disable tx laser.
+ */
+static int
+ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+			/* Not supported in bypass mode */
+			PMD_INIT_LOG(ERR, "Set link down is not supported "
+				     "by device id 0x%x", hw->device_id);
+			return -ENOTSUP;
+		}
+#endif
+		/* Turn off the laser */
+		ixgbe_disable_tx_laser(hw);
+		return 0;
+	}
+
+	PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
+		     hw->device_id);
+	return -ENOTSUP;
+}
+
 /*
  * Reset and stop device.
*/ @@ -1581,11 +1853,23 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) } /* Rx Errors */ - stats->ierrors = total_missed_rx + hw_stats->crcerrs + - hw_stats->rlec; - + stats->ibadcrc = hw_stats->crcerrs; + stats->ibadlen = hw_stats->rlec + hw_stats->ruc + hw_stats->roc; + stats->imissed = total_missed_rx; + stats->ierrors = stats->ibadcrc + + stats->ibadlen + + stats->imissed + + hw_stats->illerrc + hw_stats->errbc; + + /* Tx Errors */ stats->oerrors = 0; + /* XON/XOFF pause frames */ + stats->tx_pause_xon = hw_stats->lxontxc; + stats->rx_pause_xon = hw_stats->lxonrxc; + stats->tx_pause_xoff = hw_stats->lxofftxc; + stats->rx_pause_xoff = hw_stats->lxoffrxc; + /* Flow Director Stats registers */ hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); @@ -1678,6 +1962,92 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_vmdq_pools = ETH_16_POOLS; else dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->vmdq_queue_num = dev_info->max_rx_queues; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IXGBE_DEFAULT_RX_PTHRESH, + .hthresh = IXGBE_DEFAULT_RX_HTHRESH, + .wthresh = IXGBE_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = IXGBE_DEFAULT_TX_PTHRESH, + .hthresh = IXGBE_DEFAULT_TX_HTHRESH, + .wthresh = IXGBE_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; + dev_info->reta_size = ETH_RSS_RETA_SIZE_128; +} + +static void +ixgbevf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ + dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = dev->pci_dev->max_vfs; + if (hw->mac.type == ixgbe_mac_82598EB) + dev_info->max_vmdq_pools = ETH_16_POOLS; + else + dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = IXGBE_DEFAULT_RX_PTHRESH, + .hthresh = IXGBE_DEFAULT_RX_HTHRESH, + .wthresh = IXGBE_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = 
IXGBE_DEFAULT_TX_PTHRESH, + .hthresh = IXGBE_DEFAULT_TX_HTHRESH, + .wthresh = IXGBE_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; } /* return 0 means link status changed, -1 means not changed */ @@ -1844,7 +2214,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) /* read-on-clear nic registers here */ eicr = IXGBE_READ_REG(hw, IXGBE_EICR); PMD_DRV_LOG(INFO, "eicr %x", eicr); - + intr->flags = 0; if (eicr & IXGBE_EICR_LSC) { /* set flag for async link update */ @@ -1892,7 +2262,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) } /* - * It executes link_update after knowing an interrupt occured. + * It executes link_update after knowing an interrupt occurred. * * @param dev * Pointer to struct rte_eth_dev. @@ -1908,14 +2278,14 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int64_t timeout; struct rte_eth_link link; - int intr_enable_delay = false; + int intr_enable_delay = false; - PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags); + PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); if (intr->flags & IXGBE_FLAG_MAILBOX) { ixgbe_pf_mbx_process(dev); intr->flags &= ~IXGBE_FLAG_MAILBOX; - } + } if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { /* get the link status before link update, for predicting later */ @@ -1932,11 +2302,11 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) else /* handle it 4 sec later, wait it being stable */ timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; - + ixgbe_dev_link_status_print(dev); intr_enable_delay = true; - } + } if (intr_enable_delay) { if (rte_eal_alarm_set(timeout * 1000, @@ -1947,7 +2317,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) ixgbe_enable_intr(dev); rte_intr_enable(&(dev->pci_dev->intr_handle)); } - + return 0; } @@ -1987,7 +2357,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); } - PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr); + PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); ixgbe_enable_intr(dev); rte_intr_enable(&(dev->pci_dev->intr_handle)); } @@ -2031,6 +2401,55 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev) return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP); } +static int +ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct ixgbe_hw *hw; + uint32_t mflcn_reg; + uint32_t fccfg_reg; + int rx_pause; + int tx_pause; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + fc_conf->pause_time = hw->fc.pause_time; + fc_conf->high_water = hw->fc.high_water[0]; + fc_conf->low_water = hw->fc.low_water[0]; + fc_conf->send_xon = hw->fc.send_xon; + fc_conf->autoneg = !hw->fc.disable_fc_autoneg; + + /* + * Return rx_pause status according to actual setting of + * MFLCN register. + */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) + rx_pause = 1; + else + rx_pause = 0; + + /* + * Return tx_pause status according to actual setting of + * FCCFG register. 
+ */ + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) + tx_pause = 1; + else + tx_pause = 0; + + if (rx_pause && tx_pause) + fc_conf->mode = RTE_FC_FULL; + else if (rx_pause) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (tx_pause) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { @@ -2049,8 +2468,10 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg) + return -ENOTSUP; rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); - PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); /* * At least reserve one Ethernet frame for watermark @@ -2059,8 +2480,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; if ((fc_conf->high_water > max_high_water) || (fc_conf->high_water < fc_conf->low_water)) { - PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n"); - PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water); + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); + PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); return (-EINVAL); } @@ -2092,7 +2513,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) return 0; } - PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err); + PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err); return -EIO; } @@ -2102,7 +2523,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * @tc_num: traffic class number * Enable flow control according to the current settings. 
*/ -static int +static int ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) { int ret_val = 0; @@ -2111,7 +2532,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) uint32_t fcrtl, fcrth; uint8_t i; uint8_t nb_rx_en; - + /* Validate the water mark configuration */ if (!hw->fc.pause_time) { ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; @@ -2122,13 +2543,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) if (hw->fc.current_mode & ixgbe_fc_tx_pause) { /* High/Low water can not be 0 */ if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) { - PMD_INIT_LOG(ERR,"Invalid water mark configuration\n"); + PMD_INIT_LOG(ERR, "Invalid water mark configuration"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } - + if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { - PMD_INIT_LOG(ERR,"Invalid water mark configuration\n"); + PMD_INIT_LOG(ERR, "Invalid water mark configuration"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } @@ -2147,7 +2568,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) case ixgbe_fc_none: /* * If the count of enabled RX Priority Flow control >1, - * and the TX pause can not be disabled + * and the TX pause can not be disabled */ nb_rx_en = 0; for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { @@ -2194,7 +2615,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; default: - DEBUGOUT("Flow control param set incorrectly\n"); + PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); ret_val = IXGBE_ERR_CONFIG; goto out; break; @@ -2235,7 +2656,7 @@ out: return ret_val; } -static int +static int ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -2247,7 +2668,7 @@ ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num) return ret_val; } -static int +static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) { int err; @@ -2259,29 +2680,29 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_dcb_config *dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); - + enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { ixgbe_fc_none, ixgbe_fc_rx_pause, ixgbe_fc_tx_pause, ixgbe_fc_full }; - + PMD_INIT_FUNC_TRACE(); - + ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); tc_num = map[pfc_conf->priority]; rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); - PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); /* * At least reserve one Ethernet frame for watermark * high_water/low_water in kilo bytes for ixgbe */ max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; if ((pfc_conf->fc.high_water > max_high_water) || - (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { - PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n"); - PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water); + (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); + PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); return (-EINVAL); } @@ -2290,51 +2711,55 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p hw->fc.send_xon = pfc_conf->fc.send_xon; hw->fc.low_water[tc_num] = 
pfc_conf->fc.low_water; hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; - + err = ixgbe_dcb_pfc_enable(dev,tc_num); - + /* Not negotiated is not an error case */ - if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) + if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) return 0; - PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err); + PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); return -EIO; -} +} -static int +static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf) -{ - uint8_t i,j,mask; - uint32_t reta; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + uint8_t i, j, mask; + uint32_t reta, r; + uint16_t idx, shift; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); - /* - * Update Redirection Table RETA[n],n=0...31,The redirection table has - * 128-entries in 32 registers - */ - for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) { - if (i < ETH_RSS_RETA_NUM_ENTRIES/2) - mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF); + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IXGBE_4_BIT_MASK); + if (!mask) + continue; + if (mask == IXGBE_4_BIT_MASK) + r = 0; else - mask = (uint8_t)((reta_conf->mask_hi >> - (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF); - if (mask != 0) { - reta = 0; - if (mask != 0xF) - reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2)); - - for (j = 0; j < 4; j++) { - if (mask & (0x1 << j)) { - if (mask != 0xF) - reta &= ~(0xFF << 8 * j); - reta |= reta_conf->reta[i + j] << 8*j; - } - } - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),reta); + r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2)); + for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta |= reta_conf[idx].reta[shift + j] << + (CHAR_BIT * j); + else + reta |= r & (IXGBE_8_BIT_MASK << + (CHAR_BIT * j)); } + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); } return 0; @@ -2342,36 +2767,40 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, - struct rte_eth_rss_reta *reta_conf) + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { - uint8_t i,j,mask; + uint8_t i, j, mask; uint32_t reta; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - + uint16_t idx, shift; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); - /* - * Read Redirection Table RETA[n],n=0...31,The redirection table has - * 128-entries in 32 registers - */ - for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) { - if (i < ETH_RSS_RETA_NUM_ENTRIES/2) - mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF); - else - mask = (uint8_t)((reta_conf->mask_hi >> - (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF); - - if (mask != 0) { - reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2)); - for (j = 0; j < 4; j++) { - if (mask & (0x1 << j)) - reta_conf->reta[i + j] = - (uint8_t)((reta >> 8 * j) & 0xFF); - } + if (reta_size != ETH_RSS_RETA_SIZE_128) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number 
hardware can supported " + "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + return -EINVAL; + } + + for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + mask = (uint8_t)((reta_conf[idx].mask >> shift) & + IXGBE_4_BIT_MASK); + if (!mask) + continue; + + reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2)); + for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { + if (mask & (0x1 << j)) + reta_conf[idx].reta[shift + j] = + ((reta >> (CHAR_BIT * j)) & + IXGBE_8_BIT_MASK); } } - return 0; + return 0; } static void @@ -2392,13 +2821,59 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) ixgbe_clear_rar(hw, index); } +static int +ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + uint32_t hlreg0; + uint32_t maxfrs; + struct ixgbe_hw *hw; + struct rte_eth_dev_info dev_info; + uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + ixgbe_dev_info_get(dev, &dev_info); + + /* check that mtu is within the allowed range */ + if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) + return -EINVAL; + + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. */ + if (!dev->data->scattered_rx && + (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + + /* switch to jumbo mode if needed */ + if (frame_size > ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.jumbo_frame = 1; + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + } else { + dev->data->dev_conf.rxmode.jumbo_frame = 0; + hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + } + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + maxfrs &= 0x0000FFFF; + maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); + + return 0; +} + /* * Virtual Function operations */ static void ixgbevf_intr_disable(struct ixgbe_hw *hw) { - PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable"); + PMD_INIT_FUNC_TRACE(); /* Clear interrupt mask to stop from interrupts being generated */ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); @@ -2411,8 +2886,8 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) { struct rte_eth_conf* conf = &dev->data->dev_conf; - PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n", - dev->data->port_id); + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", + dev->data->port_id); /* * VF has no ability to enable/disable HW CRC @@ -2420,12 +2895,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) */ #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC if (!conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n"); + PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip"); conf->rxmode.hw_strip_crc = 1; } #else if (conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n"); + PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip"); conf->rxmode.hw_strip_crc = 0; } #endif @@ -2436,24 +2911,27 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) static int ixgbevf_dev_start(struct rte_eth_dev *dev) { - struct ixgbe_hw *hw = + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int err, mask = 0; - - PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start"); + + PMD_INIT_FUNC_TRACE(); hw->mac.ops.reset_hw(hw); + /* negotiate mailbox API 
version to use with the PF. */ + ixgbevf_negotiate_api(hw); + ixgbevf_dev_tx_init(dev); /* This can fail when allocating mbufs for descriptor rings */ err = ixgbevf_dev_rx_init(dev); if (err) { - PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err); + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); ixgbe_dev_clear_queues(dev); return err; } - + /* Set vfta */ ixgbevf_set_vfta_all(dev,1); @@ -2472,17 +2950,20 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop"); - + PMD_INIT_FUNC_TRACE(); + hw->adapter_stopped = TRUE; ixgbe_stop_adapter(hw); - /* - * Clear what we set, but we still keep shadow_vfta to + /* + * Clear what we set, but we still keep shadow_vfta to * restore after device starts */ ixgbevf_set_vfta_all(dev,0); + /* Clear stored conf */ + dev->data->scattered_rx = 0; + ixgbe_dev_clear_queues(dev); } @@ -2491,7 +2972,7 @@ ixgbevf_dev_close(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close"); + PMD_INIT_FUNC_TRACE(); ixgbe_reset_hw(hw); @@ -2532,7 +3013,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) uint32_t vid_idx = 0; uint32_t vid_bit = 0; int ret = 0; - + PMD_INIT_FUNC_TRACE(); /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ @@ -2561,14 +3042,14 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) uint32_t ctrl; PMD_INIT_FUNC_TRACE(); - + if(queue >= hw->mac.max_rx_queues) return; ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); if(on) ctrl |= IXGBE_RXDCTL_VME; - else + else ctrl &= ~IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); @@ -2596,36 +3077,36 @@ static int ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) { uint32_t reg_val; - + /* we only need to do this if VMDq is enabled */ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { - PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n"); + PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting"); return (-1); } - + return 0; } -static uint32_t +static uint32_t ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) { uint32_t vector = 0; switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ - vector = ((uc_addr->addr_bytes[4] >> 4) | + vector = ((uc_addr->addr_bytes[4] >> 4) | (((uint16_t)uc_addr->addr_bytes[5]) << 4)); break; case 1: /* use bits [46:35] of the address */ - vector = ((uc_addr->addr_bytes[4] >> 3) | + vector = ((uc_addr->addr_bytes[4] >> 3) | (((uint16_t)uc_addr->addr_bytes[5]) << 5)); break; case 2: /* use bits [45:34] of the address */ - vector = ((uc_addr->addr_bytes[4] >> 2) | + vector = ((uc_addr->addr_bytes[4] >> 2) | (((uint16_t)uc_addr->addr_bytes[5]) << 6)); break; case 3: /* use bits [43:32] of the address */ - vector = ((uc_addr->addr_bytes[4]) | + vector = ((uc_addr->addr_bytes[4]) | (((uint16_t)uc_addr->addr_bytes[5]) << 8)); break; default: /* Invalid mc_filter_type */ @@ -2637,7 +3118,7 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) return vector; } -static int +static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, uint8_t on) { @@ -2650,24 +3131,24 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, const uint32_t ixgbe_uta_bit_shift = 5; const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; const 
uint32_t bit1 = 0x1; - + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_uta_info *uta_info = IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); - + /* The UTA table only exists on 82599 hardware and newer */ if (hw->mac.type < ixgbe_mac_82599EB) return (-ENOTSUP); - + vector = ixgbe_uta_vector(hw,mac_addr); uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; uta_shift = vector & ixgbe_uta_bit_mask; - + rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); if(rc == on) return 0; - + reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); if (on) { uta_info->uta_in_use++; @@ -2678,15 +3159,15 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, reg_val &= ~(bit1 << uta_shift); uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); } - + IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); - + if (uta_info->uta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); else IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type); - + return 0; } @@ -2702,7 +3183,7 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) /* The UTA table only exists on 82599 hardware and newer */ if (hw->mac.type < ixgbe_mac_82599EB) return (-ENOTSUP); - + if(on) { for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { uta_info->uta_shadow[i] = ~0; @@ -2715,44 +3196,55 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) } } return 0; - + +} + +uint32_t +ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) +{ + uint32_t new_val = orig_val; + + if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) + new_val |= IXGBE_VMOLR_AUPE; + if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) + new_val |= IXGBE_VMOLR_ROMPE; + if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) + new_val |= IXGBE_VMOLR_ROPE; + if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) + new_val |= IXGBE_VMOLR_BAM; + if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) + new_val |= IXGBE_VMOLR_MPE; + + return new_val; } + static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, uint16_t rx_mask, uint8_t on) { int val = 0; - + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); - + if (hw->mac.type == ixgbe_mac_82598EB) { PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" - " on 82599 hardware and newer\n"); + " on 82599 hardware and newer"); return (-ENOTSUP); } if (ixgbe_vmdq_mode_check(hw) < 0) return (-ENOTSUP); - if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG ) - val |= IXGBE_VMOLR_AUPE; - if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC ) - val |= IXGBE_VMOLR_ROMPE; - if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) - val |= IXGBE_VMOLR_ROPE; - if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) - val |= IXGBE_VMOLR_BAM; - if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) - val |= IXGBE_VMOLR_MPE; + val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); if (on) vmolr |= val; - else + else vmolr &= ~val; IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); - + return 0; } @@ -2762,13 +3254,13 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) uint32_t reg,addr; uint32_t val; const uint8_t bit1 = 0x1; - + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (ixgbe_vmdq_mode_check(hw) < 0) return (-ENOTSUP); - + addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2); reg = IXGBE_READ_REG(hw, addr); val = bit1 << pool; @@ -2777,9 +3269,9 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) reg |= val; else reg &= ~val; - + IXGBE_WRITE_REG(hw, addr,reg); - + return 0; } 
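To make the unicast-hash plumbing above concrete: ixgbe_uta_vector() reduces a MAC address to a 12-bit vector, whose upper seven bits (ixgbe_uta_idx_mask is 0x7F) select one of the 128 32-bit UTA registers and whose lower five bits select the bit within it. Below is a minimal sketch of the mc_filter_type == 0 case; uta_vector_type0() is a hypothetical standalone helper for illustration, not a driver function:

#include <stdint.h>
#include <stdio.h>

/* Bits [47:36] of the address, mirroring the mc_filter_type == 0 case. */
static uint32_t uta_vector_type0(const uint8_t addr[6])
{
	return (((uint32_t)addr[4] >> 4) |
		((uint32_t)addr[5] << 4)) & 0xFFF;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x09, 0xC0, 0x12, 0x34, 0x56 };
	uint32_t vector = uta_vector_type0(mac);

	/* vector >> 5 picks the UTA register, vector & 0x1F the bit in it */
	printf("vector 0x%03x -> UTA[%u], bit %u\n",
	       vector, vector >> 5, vector & 0x1F);
	return 0;
}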
@@ -2789,13 +3281,13 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 	uint32_t reg,addr;
 	uint32_t val;
 	const uint8_t bit1 = 0x1;
-	
+
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

 	if (ixgbe_vmdq_mode_check(hw) < 0)
 		return (-ENOTSUP);
-	
+
 	addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
 	reg = IXGBE_READ_REG(hw, addr);
 	val = bit1 << pool;
@@ -2804,13 +3296,13 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 		reg |= val;
 	else
 		reg &= ~val;
-	
+
 	IXGBE_WRITE_REG(hw, addr,reg);
-	
+
 	return 0;
 }

-static int 
+static int
 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
 			uint64_t pool_mask, uint8_t vlan_on)
 {
@@ -2818,14 +3310,14 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
 	uint16_t pool_idx;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	
+
 	if (ixgbe_vmdq_mode_check(hw) < 0)
 		return (-ENOTSUP);
 	for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
-		if (pool_mask & ((uint64_t)(1ULL << pool_idx))) 
+		if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
 			ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
-			if (ret < 0) 
-				return ret;
+			if (ret < 0)
+				return ret;
 	}

 	return ret;
@@ -2833,7 +3325,7 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,

 static int
 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-			struct rte_eth_vmdq_mirror_conf *mirror_conf, 
+			struct rte_eth_vmdq_mirror_conf *mirror_conf,
 			uint8_t rule_id, uint8_t on)
 {
 	uint32_t mr_ctl,vlvf;
@@ -2844,7 +3336,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	uint8_t i = 0;
 	int reg_index = 0;
 	uint64_t vlan_mask = 0;
-	
+
 	const uint8_t pool_mask_offset = 32;
 	const uint8_t vlan_mask_offset = 32;
 	const uint8_t dst_pool_offset = 8;
@@ -2887,7 +3379,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	if (on) {
 		mv_lsb = vlan_mask & 0xFFFFFFFF;
 		mv_msb = vlan_mask >> vlan_mask_offset;
-		
+
 		mr_info->mr_conf[rule_id].vlan.vlan_mask =
 			mirror_conf->vlan.vlan_mask;
 		for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
@@ -2905,23 +3397,23 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	}

 	/*
-	 * if enable pool mirror, write related pool mask register,if disable 
+	 * if enabling pool mirror, write the related pool mask register; if disabling
 	 * pool mirror, clear PFMRVM register
 	 */
 	if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
-		if (on) { 
+		if (on) {
 			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
 			mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
-			mr_info->mr_conf[rule_id].pool_mask = 
+			mr_info->mr_conf[rule_id].pool_mask =
 					mirror_conf->pool_mask;
-			
+
 		} else {
 			mp_lsb = 0;
 			mp_msb = 0;
 			mr_info->mr_conf[rule_id].pool_mask = 0;
 		}
 	}
-	
+
 	/* read mirror control register and recalculate it */
 	mr_ctl = IXGBE_READ_REG(hw,IXGBE_MRCTL(rule_id));
@@ -2937,7 +3429,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,

 	/* write mirror control register */
 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-	
+
 	/* write pool mirror control register */
 	if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
@@ -2954,19 +3446,19 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	return 0;
 }
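
/*
 * Illustrative usage sketch (not part of this patch): filling a VMDq
 * mirror rule that copies the traffic of pools 0 and 1 into pool 7.
 * Assumes the 1.7-era rte_eth_mirror_rule_set() wrapper and the
 * rte_eth_vmdq_mirror_conf layout used above (rule_type_mask, pool_mask;
 * the dst_pool field name is an assumption); check rte_ethdev.h before
 * relying on it.
 */
#include <rte_ethdev.h>

static int
demo_mirror_pools(uint8_t port)
{
	struct rte_eth_vmdq_mirror_conf conf = {
		.rule_type_mask = ETH_VMDQ_POOL_MIRROR,
		.pool_mask = (1ULL << 0) | (1ULL << 1), /* mirror pools 0 and 1 */
		.dst_pool = 7,                          /* into pool 7 */
	};

	/* rule_id selects one of the per-rule MRCTL/PFMRVM register sets. */
	return rte_eth_mirror_rule_set(port, &conf, 0, 1);
}
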
-static int 
+static int
 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 {
 	int mr_ctl = 0;
 	uint32_t lsb_val = 0;
 	uint32_t msb_val = 0;
 	const uint8_t rule_mr_offset = 4;
-	
+
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_mirror_info *mr_info = 
+	struct ixgbe_mirror_info *mr_info =
 		(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-	
+
 	if (ixgbe_vmdq_mode_check(hw) < 0)
 		return (-ENOTSUP);

@@ -2986,3 +3478,633 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)

 	return 0;
 }
+
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+		uint16_t queue_idx, uint16_t tx_rate)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t rf_dec, rf_int;
+	uint32_t bcnrc_val;
+	uint16_t link_speed = dev->data->dev_link.link_speed;
+
+	if (queue_idx >= hw->mac.max_tx_queues)
+		return -EINVAL;
+
+	if (tx_rate != 0) {
+		/* Calculate the rate factor values to set */
+		rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+		rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+		rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+		bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+				IXGBE_RTTBCNRC_RF_INT_MASK_M);
+		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+	} else {
+		bcnrc_val = 0;
+	}
+
+	/*
+	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
+	 * set as 0x4.
+	 */
+	if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+		(dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+			IXGBE_MAX_JUMBO_FRAME_SIZE))
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+			IXGBE_MMW_SIZE_JUMBO_FRAME);
+	else
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+			IXGBE_MMW_SIZE_DEFAULT);
+
+	/* Set RTTBCNRC of queue X */
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+		uint16_t tx_rate, uint64_t q_msk)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	uint32_t queue_stride =
+		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+	uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+	uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+	uint16_t total_rate = 0;
+
+	if (queue_end >= hw->mac.max_tx_queues)
+		return -EINVAL;
+
+	if (vfinfo != NULL) {
+		for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+			if (vf_idx == vf)
+				continue;
+			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+				idx++)
+				total_rate += vfinfo[vf_idx].tx_rate[idx];
+		}
+	} else
+		return -EINVAL;
+
+	/* Store tx_rate for this vf. */
+	for (idx = 0; idx < nb_q_per_pool; idx++) {
+		if (((uint64_t)0x1 << idx) & q_msk) {
+			if (vfinfo[vf].tx_rate[idx] != tx_rate)
+				vfinfo[vf].tx_rate[idx] = tx_rate;
+			total_rate += tx_rate;
+		}
+	}
+
+	if (total_rate > dev->data->dev_link.link_speed) {
+		/*
+		 * Reset the stored TX rate of the VF if the requested rate
+		 * would exceed the link speed.
+		 */
+		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+		return -EINVAL;
+	}
+
+	/* Set RTTBCNRC of each queue/pool for vf X */
+	for (; queue_idx <= queue_end; queue_idx++) {
+		if (0x1 & q_msk)
+			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+		q_msk = q_msk >> 1;
+	}
+
+	return 0;
+}
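
/*
 * Worked example (not part of this patch): RTTBCNRC stores the ratio
 * link_speed/tx_rate as a fixed-point number with
 * IXGBE_RTTBCNRC_RF_INT_SHIFT fractional bits. A minimal stand-alone
 * sketch of the same arithmetic, assuming a shift of 14 (the value this
 * constant commonly takes for the 82599; treat it as an assumption).
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RF_INT_SHIFT 14 /* assumed IXGBE_RTTBCNRC_RF_INT_SHIFT */

int main(void)
{
	uint32_t link_speed = 10000; /* Mb/s */
	uint32_t tx_rate = 3000;     /* Mb/s cap for the queue */

	uint32_t rf_int = link_speed / tx_rate;                /* 3 */
	uint32_t rf_dec = ((link_speed % tx_rate) << DEMO_RF_INT_SHIFT)
				/ tx_rate;                     /* 5461 */

	/* 10000/3000 = 3.333...: the hardware stores 3 + 5461/16384. */
	printf("rate factor = %u + %u/%u\n",
	       rf_int, rf_dec, 1u << DEMO_RF_INT_SHIFT);
	return 0;
}
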
+static void
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+		     __attribute__((unused)) uint32_t index,
+		     __attribute__((unused)) uint32_t pool)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int diag;
+
+	/*
+	 * On a 82599 VF, adding again the same MAC addr is not an idempotent
+	 * operation. Trap this case to avoid exhausting the [very limited]
+	 * set of PF resources used to store VF MAC addresses.
+	 */
+	if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+		return;
+	diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+	if (diag == 0)
+		return;
+	PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+}
+
+static void
+ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+	struct ether_addr *mac_addr;
+	uint32_t i;
+	int diag;
+
+	/*
+	 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
+	 * not support the deletion of a given MAC address.
+	 * Instead, it requires deleting all MAC addresses and then adding
+	 * them all back, with the exception of the one to be deleted.
+	 */
+	(void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+	/*
+	 * Add back all MAC addresses, with the exception of the deleted one
+	 * and of the permanent MAC address.
+	 */
+	for (i = 0, mac_addr = dev->data->mac_addrs;
+	     i < hw->mac.num_rar_entries; i++, mac_addr++) {
+		/* Skip the deleted MAC address */
+		if (i == index)
+			continue;
+		/* Skip NULL MAC addresses */
+		if (is_zero_ether_addr(mac_addr))
+			continue;
+		/* Skip the permanent MAC address */
+		if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+			continue;
+		diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+		if (diag != 0)
+			PMD_DRV_LOG(ERR,
+				    "Re-adding MAC address "
+				    "%02x:%02x:%02x:%02x:%02x:%02x failed: "
+				    "diag=%d",
+				    mac_addr->addr_bytes[0],
+				    mac_addr->addr_bytes[1],
+				    mac_addr->addr_bytes[2],
+				    mac_addr->addr_bytes[3],
+				    mac_addr->addr_bytes[4],
+				    mac_addr->addr_bytes[5],
+				    diag);
+	}
+}
+
+/*
+ * add syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+			struct rte_syn_filter *filter, uint16_t rx_queue)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t synqf;
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return -ENOSYS;
+
+	if (rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+		return -EINVAL;
+
+	synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+	if (synqf & IXGBE_SYN_FILTER_ENABLE)
+		return -EINVAL;
+
+	synqf = (uint32_t)(((rx_queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+		IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+	if (filter->hig_pri)
+		synqf |= IXGBE_SYN_FILTER_SYNQFP;
+	else
+		synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+
+	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+	return 0;
+}
+
+/*
+ * remove syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_remove_syn_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t synqf;
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return -ENOSYS;
+
+	synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+	synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+
+	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+	return 0;
+}
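
/*
 * Illustrative usage sketch (not part of this patch): steering TCP SYN
 * packets to a dedicated queue through the 1.7-era filter API. Assumes
 * rte_eth_dev_add_syn_filter(port, &filter, rx_queue) as declared in
 * rte_ethdev.h of this release.
 */
#include <rte_ethdev.h>

static int
demo_syn_to_queue(uint8_t port, uint16_t rx_queue)
{
	struct rte_syn_filter filter = {
		.hig_pri = 1, /* SYN filter wins over other matching filters */
	};

	/* Programs SYNQF: queue number, SYNQFP priority bit, enable bit. */
	return rte_eth_dev_add_syn_filter(port, &filter, rx_queue);
}
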
+/*
+ * get the syn filter's info
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that is returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+			struct rte_syn_filter *filter, uint16_t *rx_queue)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t synqf;
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return -ENOSYS;
+
+	synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+		filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+		*rx_queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
+		return 0;
+	}
+	return -ENOENT;
+}
+
+/*
+ * add an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter is stored at.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_ethertype_filter(struct rte_eth_dev *dev,
+			uint16_t index, struct rte_ethertype_filter *filter,
+			uint16_t rx_queue)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t etqf, etqs = 0;
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return -ENOSYS;
+
+	if (index >= IXGBE_MAX_ETQF_FILTERS ||
+		rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+		return -EINVAL;
+
+	etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+	if (etqf & IXGBE_ETQF_FILTER_EN)
+		return -EINVAL;  /* filter index is in use. */
+
+	etqf = 0;
+	etqf |= IXGBE_ETQF_FILTER_EN;
+	etqf |= (uint32_t)filter->ethertype;
+
+	if (filter->priority_en) {
+		if (filter->priority > IXGBE_ETQF_MAX_PRI)
+			return -EINVAL;
+		etqf |= (uint32_t)((filter->priority << IXGBE_ETQF_SHIFT) & IXGBE_ETQF_UP);
+		etqf |= IXGBE_ETQF_UP_EN;
+	}
+	etqs |= (uint32_t)((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) & IXGBE_ETQS_RX_QUEUE);
+	etqs |= IXGBE_ETQS_QUEUE_EN;
+
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), etqf);
+	IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), etqs);
+	return 0;
+}
+
+/*
+ * remove an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter is stored at.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
+			uint16_t index)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return -ENOSYS;
+
+	if (index >= IXGBE_MAX_ETQF_FILTERS)
+		return -EINVAL;
+
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), 0);
+
+	return 0;
+}
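
/*
 * Illustrative usage sketch (not part of this patch): directing all frames
 * of one EtherType to a given queue. Assumes the 1.7-era
 * rte_eth_dev_add_ethertype_filter() wrapper and the rte_ethertype_filter
 * layout seen above; verify against the rte_ethdev.h in the tree.
 */
#include <rte_ethdev.h>

static int
demo_ptp_to_queue(uint8_t port, uint16_t rx_queue)
{
	struct rte_ethertype_filter filter = {
		.ethertype = 0x88F7, /* IEEE 1588/PTP over Ethernet */
		.priority_en = 0,    /* do not match on the 802.1p priority */
	};

	/* index picks one of the IXGBE_MAX_ETQF_FILTERS ETQF/ETQS pairs. */
	return rte_eth_dev_add_ethertype_filter(port, 0, &filter, rx_queue);
}
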
+/*
+ * get an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter is stored at.
+ * filter: pointer to the filter that is returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+			uint16_t index, struct rte_ethertype_filter *filter,
+			uint16_t *rx_queue)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t etqf, etqs;
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return -ENOSYS;
+
+	if (index >= IXGBE_MAX_ETQF_FILTERS)
+		return -EINVAL;
+
+	etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+	etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(index));
+	if (etqf & IXGBE_ETQF_FILTER_EN) {
+		filter->ethertype = etqf & IXGBE_ETQF_ETHERTYPE;
+		filter->priority_en = (etqf & IXGBE_ETQF_UP_EN) ? 1 : 0;
+		if (filter->priority_en)
+			filter->priority = (etqf & IXGBE_ETQF_UP) >> 16;
+		*rx_queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> IXGBE_ETQS_RX_QUEUE_SHIFT;
+		return 0;
+	}
+	return -ENOENT;
+}
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+	if (protocol_value == IPPROTO_TCP)
+		return IXGBE_FILTER_PROTOCOL_TCP;
+	else if (protocol_value == IPPROTO_UDP)
+		return IXGBE_FILTER_PROTOCOL_UDP;
+	else if (protocol_value == IPPROTO_SCTP)
+		return IXGBE_FILTER_PROTOCOL_SCTP;
+	else
+		return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+static inline uint8_t
+revert_protocol_type(enum ixgbe_5tuple_protocol protocol)
+{
+	if (protocol == IXGBE_FILTER_PROTOCOL_TCP)
+		return IPPROTO_TCP;
+	else if (protocol == IXGBE_FILTER_PROTOCOL_UDP)
+		return IPPROTO_UDP;
+	else if (protocol == IXGBE_FILTER_PROTOCOL_SCTP)
+		return IPPROTO_SCTP;
+	else
+		return 0;
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter is stored at.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+			struct rte_5tuple_filter *filter, uint16_t rx_queue)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ftqf, sdpqf = 0;
+	uint32_t l34timir = 0;
+	uint8_t mask = 0xff;
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return -ENOSYS;
+
+	if (index >= IXGBE_MAX_FTQF_FILTERS ||
+		rx_queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+		filter->priority < IXGBE_5TUPLE_MIN_PRI)
+		return -EINVAL;  /* filter index or priority is out of range. */
+
+	if (filter->tcp_flags) {
+		PMD_INIT_LOG(INFO, "82599EB does not support tcp flags in 5tuple");
+		return -EINVAL;
+	}
+
+	ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+	if (ftqf & IXGBE_FTQF_QUEUE_ENABLE)
+		return -EINVAL;  /* filter index is in use. */
+
+	ftqf = 0;
+	sdpqf = (uint32_t)(filter->dst_port << IXGBE_SDPQF_DSTPORT_SHIFT);
+	sdpqf = sdpqf | (filter->src_port & IXGBE_SDPQF_SRCPORT);
+
+	ftqf |= (uint32_t)(convert_protocol_type(filter->protocol) &
+		IXGBE_FTQF_PROTOCOL_MASK);
+	ftqf |= (uint32_t)((filter->priority & IXGBE_FTQF_PRIORITY_MASK) <<
+		IXGBE_FTQF_PRIORITY_SHIFT);
+	if (filter->src_ip_mask == 0) /* 0 means compare. */
+		mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+	if (filter->dst_ip_mask == 0)
+		mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+	if (filter->src_port_mask == 0)
+		mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+	if (filter->dst_port_mask == 0)
+		mask &= IXGBE_FTQF_DEST_PORT_MASK;
+	if (filter->protocol_mask == 0)
+		mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+	ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+	ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+	ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), filter->dst_ip);
+	IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), filter->src_ip);
+	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), sdpqf);
+	IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), ftqf);
+
+	l34timir |= IXGBE_L34T_IMIR_RESERVE;
+	l34timir |= (uint32_t)(rx_queue << IXGBE_L34T_IMIR_QUEUE_SHIFT);
+	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), l34timir);
+	return 0;
+}
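
/*
 * Illustrative usage sketch (not part of this patch): matching TCP traffic
 * to destination port 80 regardless of addresses. Assumes the 1.7-era
 * rte_eth_dev_add_5tuple_filter() wrapper; note the inverted mask
 * convention used above (a mask of 0 means "compare this field").
 */
#include <netinet/in.h>
#include <rte_ethdev.h>

static int
demo_http_to_queue(uint8_t port, uint16_t rx_queue)
{
	struct rte_5tuple_filter filter = {
		.dst_port = 80,
		.protocol = IPPROTO_TCP,
		.dst_port_mask = 0, /* 0: compare dst_port */
		.protocol_mask = 0, /* 0: compare protocol */
		.src_ip_mask = 1,   /* 1: ignore source address */
		.dst_ip_mask = 1,   /* 1: ignore destination address */
		.src_port_mask = 1, /* 1: ignore source port */
		.priority = 1,      /* within the MIN/MAX_PRI range checked above */
	};

	return rte_eth_dev_add_5tuple_filter(port, 0, &filter, rx_queue);
}
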
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter is stored at.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+			uint16_t index)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return -ENOSYS;
+
+	if (index >= IXGBE_MAX_FTQF_FILTERS)
+		return -EINVAL;  /* filter index is out of range. */
+
+	IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+	return 0;
+}
+
+/*
+ * get a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter is stored at.
+ * filter: pointer to the filter that is returned.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+			struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t sdpqf, ftqf, l34timir;
+	uint8_t mask;
+	enum ixgbe_5tuple_protocol proto;
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return -ENOSYS;
+
+	if (index >= IXGBE_MAX_FTQF_FILTERS)
+		return -EINVAL;  /* filter index is out of range. */
+
+	ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+	if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) {
+		proto = (enum ixgbe_5tuple_protocol)(ftqf & IXGBE_FTQF_PROTOCOL_MASK);
+		filter->protocol = revert_protocol_type(proto);
+		filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) &
+					IXGBE_FTQF_PRIORITY_MASK;
+		mask = (uint8_t)((ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) &
+					IXGBE_FTQF_5TUPLE_MASK_MASK);
+		filter->src_ip_mask =
+			(mask & IXGBE_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
+		filter->dst_ip_mask =
+			(mask & IXGBE_FTQF_DEST_ADDR_MASK) ? 1 : 0;
+		filter->src_port_mask =
+			(mask & IXGBE_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
+		filter->dst_port_mask =
+			(mask & IXGBE_FTQF_DEST_PORT_MASK) ? 1 : 0;
+		filter->protocol_mask =
+			(mask & IXGBE_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
+
+		sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(index));
+		filter->dst_port = (sdpqf & IXGBE_SDPQF_DSTPORT) >>
+					IXGBE_SDPQF_DSTPORT_SHIFT;
+		filter->src_port = sdpqf & IXGBE_SDPQF_SRCPORT;
+		filter->dst_ip = IXGBE_READ_REG(hw, IXGBE_DAQF(index));
+		filter->src_ip = IXGBE_READ_REG(hw, IXGBE_SAQF(index));
+
+		l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(index));
+		*rx_queue = (l34timir & IXGBE_L34T_IMIR_QUEUE) >>
+					IXGBE_L34T_IMIR_QUEUE_SHIFT;
+		return 0;
+	}
+	return -ENOENT;
+}
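
/*
 * Illustrative usage sketch (not part of this patch): reading a 5tuple
 * filter back, e.g. to verify what the hardware actually holds. Assumes
 * the matching 1.7-era rte_eth_dev_get_5tuple_filter() wrapper exists.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
demo_dump_5tuple(uint8_t port, uint16_t index)
{
	struct rte_5tuple_filter filter;
	uint16_t rx_queue;

	/* The driver op returns -ENOENT when FTQF(index) is not enabled. */
	if (rte_eth_dev_get_5tuple_filter(port, index, &filter, &rx_queue) == 0)
		printf("filter %u: proto %u dst_port %u -> queue %u\n",
		       index, filter.protocol, filter.dst_port, rx_queue);
}
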
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	struct ixgbe_hw *hw;
+	uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+		return -EINVAL;
+
+	/* Refuse an MTU that requires the support of scattered packets
+	 * when this feature has not been enabled before. */
+	if (!dev->data->scattered_rx &&
+	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+		return -EINVAL;
+
+	/*
+	 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+	 * request of the version 2.0 of the mailbox API.
+	 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
+	 * of the mailbox API.
+	 * This IXGBE_VF_SET_LPE request won't work with ixgbe PF drivers
+	 * prior to 3.11.33, which contains the following change:
+	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+	 */
+	ixgbevf_rlpml_set_vf(hw, max_frame);
+
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+	return 0;
+}
+
+static struct rte_driver rte_ixgbe_driver = {
+	.type = PMD_PDEV,
+	.init = rte_ixgbe_pmd_init,
+};
+
+static struct rte_driver rte_ixgbevf_driver = {
+	.type = PMD_PDEV,
+	.init = rte_ixgbevf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_ixgbe_driver);
+PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
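
/*
 * Illustrative usage sketch (not part of this patch): the VF MTU op above
 * is reached via rte_eth_dev_set_mtu() (the mtu_set dev op this series
 * introduces). The frame-size arithmetic mirrors the driver's check: an L3
 * MTU of 1500 becomes 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes
 * on the wire.
 */
#include <rte_ethdev.h>

static int
demo_set_vf_mtu(uint8_t port)
{
	uint16_t mtu = 1500;

	/* The driver op rejects an MTU whose resulting frame exceeds
	 * ETHER_MAX_JUMBO_FRAME_LEN or needs scattered RX when that
	 * feature is disabled. */
	return rte_eth_dev_set_mtu(port, mtu);
}
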