X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.c;h=4778aaf2995d5c0ec560f67f5cb22935984a0fea;hb=be797cbf4582f3c474f208aeb3d1baa4001a6156;hp=777e14926f0c23783e6614059cc3ea3a735e572c;hpb=2fc1d6da882563ab80786d69b6d7c9d0e4ce860a;p=dpdk.git diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 777e14926f..4778aaf299 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "i40e_logs.h" #include "base/i40e_prototype.h" @@ -47,6 +48,8 @@ #define ETH_I40E_VF_MSG_CFG "vf_msg_cfg" #define I40E_CLEAR_PXE_WAIT_MS 200 +#define I40E_VSI_TSR_QINQ_STRIP 0x4010 +#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4)) /* Maximun number of capability elements */ #define I40E_MAX_CAP_ELE_NUM 128 @@ -221,8 +224,8 @@ static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params); static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); static int i40e_dev_configure(struct rte_eth_dev *dev); static int i40e_dev_start(struct rte_eth_dev *dev); -static void i40e_dev_stop(struct rte_eth_dev *dev); -static void i40e_dev_close(struct rte_eth_dev *dev); +static int i40e_dev_stop(struct rte_eth_dev *dev); +static int i40e_dev_close(struct rte_eth_dev *dev); static int i40e_dev_reset(struct rte_eth_dev *dev); static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev); static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev); @@ -301,7 +304,6 @@ static int i40e_dev_init_vlan(struct rte_eth_dev *dev); static int i40e_veb_release(struct i40e_veb *veb); static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi); -static int i40e_pf_config_mq_rx(struct i40e_pf *pf); static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on); static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, struct i40e_macvlan_filter *mv_f, @@ -397,6 +399,7 @@ static void i40e_ethertype_filter_restore(struct i40e_pf *pf); static void i40e_tunnel_filter_restore(struct i40e_pf *pf); static void i40e_filter_restore(struct i40e_pf *pf); static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev); +static int i40e_pf_config_rss(struct i40e_pf *pf); static const char *const valid_keys[] = { ETH_I40E_FLOATING_VEB_ARG, @@ -471,10 +474,6 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable, .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable, .rx_queue_release = i40e_dev_rx_queue_release, - .rx_queue_count = i40e_dev_rx_queue_count, - .rx_descriptor_done = i40e_dev_rx_descriptor_done, - .rx_descriptor_status = i40e_dev_rx_descriptor_status, - .tx_descriptor_status = i40e_dev_tx_descriptor_status, .tx_queue_setup = i40e_dev_tx_queue_setup, .tx_queue_release = i40e_dev_tx_queue_release, .dev_led_on = i40e_dev_led_on, @@ -1045,8 +1044,15 @@ static int i40e_init_fdir_filter_list(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_fdir_info *fdir_info = &pf->fdir; char fdir_hash_name[RTE_HASH_NAMESIZE]; + uint32_t alloc = hw->func_caps.fd_filters_guaranteed; + uint32_t best = hw->func_caps.fd_filters_best_effort; + struct rte_bitmap *bmp = NULL; + uint32_t bmp_size; + void *mem = NULL; + uint32_t i = 0; int ret; struct rte_hash_parameters fdir_hash_params = { @@ -1067,6 +1073,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) PMD_INIT_LOG(ERR, "Failed to create fdir 
hash table!"); return -EINVAL; } + fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map", sizeof(struct i40e_fdir_filter *) * I40E_MAX_FDIR_FILTER_NUM, @@ -1077,8 +1084,74 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) ret = -ENOMEM; goto err_fdir_hash_map_alloc; } + + fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter", + sizeof(struct i40e_fdir_filter) * + I40E_MAX_FDIR_FILTER_NUM, + 0); + + if (!fdir_info->fdir_filter_array) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir filter array!"); + ret = -ENOMEM; + goto err_fdir_filter_array_alloc; + } + + fdir_info->fdir_space_size = alloc + best; + fdir_info->fdir_actual_cnt = 0; + fdir_info->fdir_guarantee_total_space = alloc; + fdir_info->fdir_guarantee_free_space = + fdir_info->fdir_guarantee_total_space; + + PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best); + + fdir_info->fdir_flow_pool.pool = + rte_zmalloc("i40e_fdir_entry", + sizeof(struct i40e_fdir_entry) * + fdir_info->fdir_space_size, + 0); + + if (!fdir_info->fdir_flow_pool.pool) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for bitmap flow!"); + ret = -ENOMEM; + goto err_fdir_bitmap_flow_alloc; + } + + for (i = 0; i < fdir_info->fdir_space_size; i++) + fdir_info->fdir_flow_pool.pool[i].idx = i; + + bmp_size = + rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size); + mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE); + if (mem == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir bitmap!"); + ret = -ENOMEM; + goto err_fdir_mem_alloc; + } + bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size); + if (bmp == NULL) { + PMD_INIT_LOG(ERR, + "Failed to initialization fdir bitmap!"); + ret = -ENOMEM; + goto err_fdir_bmp_alloc; + } + for (i = 0; i < fdir_info->fdir_space_size; i++) + rte_bitmap_set(bmp, i); + + fdir_info->fdir_flow_pool.bitmap = bmp; + return 0; +err_fdir_bmp_alloc: + rte_free(mem); +err_fdir_mem_alloc: + rte_free(fdir_info->fdir_flow_pool.pool); +err_fdir_bitmap_flow_alloc: + rte_free(fdir_info->fdir_filter_array); +err_fdir_filter_array_alloc: + rte_free(fdir_info->hash_map); err_fdir_hash_map_alloc: rte_hash_free(fdir_info->hash_table); @@ -1101,6 +1174,30 @@ i40e_init_customized_info(struct i40e_pf *pf) pf->esp_support = false; } +static void +i40e_init_filter_invalidation(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct i40e_fdir_info *fdir_info = &pf->fdir; + uint32_t glqf_ctl_reg = 0; + + glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); + if (!pf->support_multi_driver) { + fdir_info->fdir_invalprio = 1; + glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK; + PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first"); + i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg); + } else { + if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) { + fdir_info->fdir_invalprio = 1; + PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first"); + } else { + fdir_info->fdir_invalprio = 0; + PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first"); + } + } +} + void i40e_init_queue_region_conf(struct rte_eth_dev *dev) { @@ -1347,6 +1444,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) PMD_INIT_FUNC_TRACE(); dev->dev_ops = &i40e_eth_dev_ops; + dev->rx_queue_count = i40e_dev_rx_queue_count; + dev->rx_descriptor_done = i40e_dev_rx_descriptor_done; + dev->rx_descriptor_status = i40e_dev_rx_descriptor_status; + dev->tx_descriptor_status = i40e_dev_tx_descriptor_status; dev->rx_pkt_burst = i40e_recv_pkts; dev->tx_pkt_burst = 
i40e_xmit_pkts; dev->tx_pkt_prepare = i40e_prep_pkts; @@ -1364,6 +1465,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) intr_handle = &pci_dev->intr_handle; rte_eth_copy_pci_info(dev, pci_dev); + dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); pf->adapter->eth_dev = dev; @@ -1598,11 +1700,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, &dev->data->mac_addrs[0]); - /* Pass the information to the rte_eth_dev_close() that it should also - * release the private port resources. - */ - dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; - /* Init dcb to sw mode by default */ ret = i40e_dcb_init_configure(dev, TRUE); if (ret != I40E_SUCCESS) { @@ -1654,6 +1751,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) /* Initialize customized information */ i40e_init_customized_info(pf); + /* Initialize the filter invalidation configuration */ + i40e_init_filter_invalidation(pf); + ret = i40e_init_ethtype_filter_list(dev); if (ret < 0) goto err_init_ethtype_filter_list; @@ -1749,16 +1849,30 @@ i40e_rm_fdir_filter_list(struct i40e_pf *pf) struct i40e_fdir_info *fdir_info; fdir_info = &pf->fdir; - /* Remove all flow director rules and hash */ + + /* Remove all flow director rules */ + while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) + TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules); +} + +static void +i40e_fdir_memory_cleanup(struct i40e_pf *pf) +{ + struct i40e_fdir_info *fdir_info; + + fdir_info = &pf->fdir; + + /* flow director memory cleanup */ if (fdir_info->hash_map) rte_free(fdir_info->hash_map); if (fdir_info->hash_table) rte_hash_free(fdir_info->hash_table); - - while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) { - TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules); - rte_free(p_fdir); - } + if (fdir_info->fdir_flow_pool.bitmap) + rte_free(fdir_info->fdir_flow_pool.bitmap); + if (fdir_info->fdir_flow_pool.pool) + rte_free(fdir_info->fdir_flow_pool.pool); + if (fdir_info->fdir_filter_array) + rte_free(fdir_info->fdir_filter_array); } void i40e_flex_payload_reg_set_default(struct i40e_hw *hw) @@ -1838,8 +1952,6 @@ i40e_dev_configure(struct rte_eth_dev *dev) goto err; /* VMDQ setup. - * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and - * RSS setting have different requirements. * General PMD driver call sequence are NIC init, configure, * rx/tx_queue_setup and dev_start. 
In rx/tx_queue_setup() function, it * will try to lookup the VSI that specific queue belongs to if VMDQ @@ -2001,7 +2113,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, I40E_WRITE_FLUSH(hw); } -void +int i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; @@ -2021,10 +2133,14 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) /* VF bind interrupt */ if (vsi->type == I40E_VSI_SRIOV) { + if (vsi->nb_msix == 0) { + PMD_DRV_LOG(ERR, "No msix resource"); + return -EINVAL; + } __vsi_queues_bind_intr(vsi, msix_vect, vsi->base_queue, vsi->nb_qps, itr_idx); - return; + return 0; } /* PF & VMDq bind interrupt */ @@ -2041,7 +2157,10 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) } for (i = 0; i < vsi->nb_used_qps; i++) { - if (nb_msix <= 1) { + if (vsi->nb_msix == 0) { + PMD_DRV_LOG(ERR, "No msix resource"); + return -EINVAL; + } else if (nb_msix <= 1) { if (!rte_intr_allow_others(intr_handle)) /* allow to share MISC_VEC_ID */ msix_vect = I40E_MISC_VEC_ID; @@ -2066,9 +2185,11 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) msix_vect++; nb_msix--; } + + return 0; } -static void +void i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; @@ -2095,7 +2216,7 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) I40E_WRITE_FLUSH(hw); } -static void +void i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) { struct rte_eth_dev *dev = vsi->adapter->eth_dev; @@ -2306,24 +2427,21 @@ i40e_dev_start(struct rte_eth_dev *dev) /* Map queues with MSIX interrupt */ main_vsi->nb_used_qps = dev->data->nb_rx_queues - pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT); + ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT); + if (ret < 0) + return ret; i40e_vsi_enable_queues_intr(main_vsi); /* Map VMDQ VSI queues with MSIX interrupt */ for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi, - I40E_ITR_INDEX_DEFAULT); + ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi, + I40E_ITR_INDEX_DEFAULT); + if (ret < 0) + return ret; i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); } - /* enable FDIR MSIX interrupt */ - if (pf->fdir.fdir_vsi) { - i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi, - I40E_ITR_INDEX_NONE); - i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi); - } - /* Enable all queues which have been configured */ for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) { ret = i40e_dev_rx_queue_start(dev, nb_rxq); @@ -2425,7 +2543,7 @@ rx_err: return ret; } -static void +static int i40e_dev_stop(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -2436,7 +2554,7 @@ i40e_dev_stop(struct rte_eth_dev *dev) int i; if (hw->adapter_stopped == 1) - return; + return 0; if (dev->data->dev_conf.intr_conf.rxq == 0) { rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev); @@ -2459,10 +2577,6 @@ i40e_dev_stop(struct rte_eth_dev *dev) i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi); } - if (pf->fdir.fdir_vsi) { - i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi); - i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi); - } /* Clear all queues and release memory */ i40e_dev_clear_queues(dev); @@ -2486,11 +2600,14 @@ i40e_dev_stop(struct rte_eth_dev *dev) pf->tm_conf.committed = false; hw->adapter_stopped = 1; + dev->data->dev_started = 0; 
pf->adapter->rss_reta_updated = 0; + + return 0; } -static void +static int i40e_dev_close(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -2507,13 +2624,15 @@ i40e_dev_close(struct rte_eth_dev *dev) int retries = 0; PMD_INIT_FUNC_TRACE(); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; ret = rte_eth_switch_domain_free(pf->switch_domain_id); if (ret) PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); - i40e_dev_stop(dev); + ret = i40e_dev_stop(dev); /* Remove all mirror rules */ while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { @@ -2577,10 +2696,6 @@ i40e_dev_close(struct rte_eth_dev *dev) (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); I40E_WRITE_FLUSH(hw); - dev->dev_ops = NULL; - dev->rx_pkt_burst = NULL; - dev->tx_pkt_burst = NULL; - /* Clear PXE mode */ i40e_clear_pxe_mode(hw); @@ -2618,13 +2733,19 @@ i40e_dev_close(struct rte_eth_dev *dev) /* Remove all flows */ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { TAILQ_REMOVE(&pf->flow_list, p_flow, node); - rte_free(p_flow); + /* Do not free FDIR flows since they are static allocated */ + if (p_flow->filter_type != RTE_ETH_FILTER_FDIR) + rte_free(p_flow); } + /* release the fdir static allocated memory */ + i40e_fdir_memory_cleanup(pf); + /* Remove all Traffic Manager configuration */ i40e_tm_conf_uninit(dev); hw->adapter_closed = 1; + return ret; } /* @@ -2891,7 +3012,10 @@ update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, link->link_speed = ETH_SPEED_NUM_40G; break; default: - link->link_speed = ETH_SPEED_NUM_NONE; + if (link->link_status) + link->link_speed = ETH_SPEED_NUM_UNKNOWN; + else + link->link_speed = ETH_SPEED_NUM_NONE; break; } } @@ -2926,6 +3050,21 @@ i40e_dev_link_update(struct rte_eth_dev *dev, return ret; } +static void +i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg, + uint32_t loreg, bool offset_loaded, uint64_t *offset, + uint64_t *stat, uint64_t *prev_stat) +{ + i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat); + /* enlarge the limitation when statistics counters overflowed */ + if (offset_loaded) { + if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat) + *stat += (uint64_t)1 << I40E_48_BIT_WIDTH; + *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat); + } + *prev_stat = *stat; +} + /* Get all the statistics of a VSI */ void i40e_update_vsi_stats(struct i40e_vsi *vsi) @@ -2935,9 +3074,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi) struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx); - i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx), - vsi->offset_loaded, &oes->rx_bytes, - &nes->rx_bytes); + i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx), + vsi->offset_loaded, &oes->rx_bytes, + &nes->rx_bytes, &vsi->prev_rx_bytes); i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx), vsi->offset_loaded, &oes->rx_unicast, &nes->rx_unicast); @@ -2958,9 +3097,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi) i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded, &oes->rx_unknown_protocol, &nes->rx_unknown_protocol); - i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx), - vsi->offset_loaded, &oes->tx_bytes, - &nes->tx_bytes); + i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx), + vsi->offset_loaded, &oes->tx_bytes, + &nes->tx_bytes, &vsi->prev_tx_bytes); i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx), vsi->offset_loaded, &oes->tx_unicast, &nes->tx_unicast); @@ -3002,17 
+3141,18 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */ /* Get rx/tx bytes of internal transfer packets */ - i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port), - I40E_GLV_GORCL(hw->port), - pf->offset_loaded, - &pf->internal_stats_offset.rx_bytes, - &pf->internal_stats.rx_bytes); - - i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port), - I40E_GLV_GOTCL(hw->port), - pf->offset_loaded, - &pf->internal_stats_offset.tx_bytes, - &pf->internal_stats.tx_bytes); + i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port), + I40E_GLV_GORCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.rx_bytes, + &pf->internal_stats.rx_bytes, + &pf->internal_prev_rx_bytes); + i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port), + I40E_GLV_GOTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_bytes, + &pf->internal_stats.tx_bytes, + &pf->internal_prev_tx_bytes); /* Get total internal rx packet count */ i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port), I40E_GLV_UPRCL(hw->port), @@ -3052,10 +3192,10 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN; /* Get statistics of struct i40e_eth_stats */ - i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port), - I40E_GLPRT_GORCL(hw->port), - pf->offset_loaded, &os->eth.rx_bytes, - &ns->eth.rx_bytes); + i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port), + I40E_GLPRT_GORCL(hw->port), + pf->offset_loaded, &os->eth.rx_bytes, + &ns->eth.rx_bytes, &pf->prev_rx_bytes); i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->offset_loaded, &os->eth.rx_unicast, @@ -3110,10 +3250,10 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) pf->offset_loaded, &os->eth.rx_unknown_protocol, &ns->eth.rx_unknown_protocol); - i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port), - I40E_GLPRT_GOTCL(hw->port), - pf->offset_loaded, &os->eth.tx_bytes, - &ns->eth.tx_bytes); + i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port), + I40E_GLPRT_GOTCL(hw->port), + pf->offset_loaded, &os->eth.tx_bytes, + &ns->eth.tx_bytes, &pf->prev_tx_bytes); i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port), I40E_GLPRT_UPTCL(hw->port), pf->offset_loaded, &os->eth.tx_unicast, @@ -3858,6 +3998,39 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, return ret; } +/* Configure outer vlan stripping on or off in QinQ mode */ +static int +i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret = I40E_SUCCESS; + uint32_t reg; + + if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) { + PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum"); + return -EINVAL; + } + + /* Configure for outer VLAN RX stripping */ + reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id)); + + if (on) + reg |= I40E_VSI_TSR_QINQ_STRIP; + else + reg &= ~I40E_VSI_TSR_QINQ_STRIP; + + ret = i40e_aq_debug_write_register(hw, + I40E_VSI_TSR(vsi->vsi_id), + reg, NULL); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]", + vsi->vsi_id); + return I40E_ERR_CONFIG; + } + + return ret; +} + static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) { @@ -3865,11 +4038,6 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) struct i40e_vsi *vsi = pf->main_vsi; struct rte_eth_rxmode *rxmode; - if (mask & ETH_QINQ_STRIP_MASK) { - PMD_DRV_LOG(ERR, "Strip qinq is not supported."); - return -ENOTSUP; - } - rxmode = &dev->data->dev_conf.rxmode; if (mask & 
ETH_VLAN_FILTER_MASK) { if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) @@ -3899,6 +4067,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) i40e_vsi_config_double_vlan(vsi, FALSE); } + if (mask & ETH_QINQ_STRIP_MASK) { + /* Enable or disable outer VLAN stripping */ + if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) + i40e_vsi_config_outer_vlan_stripping(vsi, TRUE); + else + i40e_vsi_config_outer_vlan_stripping(vsi, FALSE); + } + return 0; } @@ -5721,10 +5897,14 @@ i40e_vsi_setup(struct i40e_pf *pf, ret = i40e_res_pool_alloc(&pf->msix_pool, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret); - goto fail_queue_alloc; + if (type != I40E_VSI_FDIR) + goto fail_queue_alloc; + vsi->msix_intr = 0; + vsi->nb_msix = 0; + } else { + vsi->msix_intr = ret; + vsi->nb_msix = 1; } - vsi->msix_intr = ret; - vsi->nb_msix = 1; } else { vsi->msix_intr = 0; vsi->nb_msix = 0; @@ -6073,6 +6253,7 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev) /* Apply vlan offload setting */ mask = ETH_VLAN_STRIP_MASK | + ETH_QINQ_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; ret = i40e_vlan_offload_set(dev, mask); @@ -6373,7 +6554,7 @@ i40e_dev_rx_init(struct i40e_pf *pf) uint16_t i; struct i40e_rx_queue *rxq; - i40e_pf_config_mq_rx(pf); + i40e_pf_config_rss(pf); for (i = 0; i < data->nb_rx_queues; i++) { rxq = data->rx_queues[i]; if (!rxq || !rxq->q_set) @@ -6679,7 +6860,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) case i40e_aqc_opc_get_link_status: ret = i40e_dev_link_update(dev, 0); if (!ret) - _rte_eth_dev_callback_process(dev, + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); break; default: @@ -7944,6 +8125,13 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, #define I40E_TR_GRE_KEY_MASK 0x400 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800 #define I40E_TR_GRE_NO_KEY_MASK 0x8000 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80 +#define I40E_DIRECTION_INGRESS_KEY 0x8000 +#define I40E_TR_L4_TYPE_TCP 0x2 +#define I40E_TR_L4_TYPE_UDP 0x4 +#define I40E_TR_L4_TYPE_SCTP 0x8 static enum i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) @@ -8242,6 +8430,132 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) return status; } +static enum i40e_status_code +i40e_replace_port_l1_filter(struct i40e_pf *pf, + enum i40e_l4_port_type l4_port_type) +{ + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; + } + + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + /* create L1 filter */ + if (l4_port_type == I40E_L4_PORT_TYPE_SRC) { + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11; + filter_replace_buf.data[8] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT; + } else { + filter_replace.old_filter_type = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN; + filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10; + 
filter_replace_buf.data[8] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT; + } + + filter_replace.tr_bit = 0; + /* Prepare the buffer, 3 entries */ + filter_replace_buf.data[0] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[2] = 0x00; + filter_replace_buf.data[3] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0; + filter_replace_buf.data[4] = + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[5] = 0x00; + filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP | + I40E_TR_L4_TYPE_TCP | + I40E_TR_L4_TYPE_SCTP; + filter_replace_buf.data[7] = 0x00; + filter_replace_buf.data[8] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[9] = 0x00; + filter_replace_buf.data[10] = 0xFF; + filter_replace_buf.data[11] = 0xFF; + + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + if (!status && filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type." + " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + return status; +} + +static enum i40e_status_code +i40e_replace_port_cloud_filter(struct i40e_pf *pf, + enum i40e_l4_port_type l4_port_type) +{ + struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; + struct i40e_aqc_replace_cloud_filters_cmd filter_replace; + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return I40E_NOT_SUPPORTED; + } + + memset(&filter_replace, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); + memset(&filter_replace_buf, 0, + sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf)); + + if (l4_port_type == I40E_L4_PORT_TYPE_SRC) { + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X11; + filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11; + } else { + filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP; + filter_replace.new_filter_type = + I40E_AQC_ADD_CLOUD_FILTER_0X10; + filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10; + } + + filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER; + filter_replace.tr_bit = 0; + /* Prepare the buffer, 2 entries */ + filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG; + filter_replace_buf.data[0] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + filter_replace_buf.data[4] |= + I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; + status = i40e_aq_replace_cloud_filters(hw, &filter_replace, + &filter_replace_buf); + + if (!status && filter_replace.old_filter_type != + filter_replace.new_filter_type) + PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type." 
+ " original: 0x%x, new: 0x%x", + dev->device->name, + filter_replace.old_filter_type, + filter_replace.new_filter_type); + + return status; +} + int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, struct i40e_tunnel_filter_conf *tunnel_filter, @@ -8389,6 +8703,62 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, pfilter->general_fields[0] = tunnel_filter->inner_vlan; pfilter->general_fields[1] = tunnel_filter->outer_vlan; big_buffer = 1; + break; + case I40E_CLOUD_TYPE_UDP: + case I40E_CLOUD_TYPE_TCP: + case I40E_CLOUD_TYPE_SCTP: + if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) { + if (!pf->sport_replace_flag) { + i40e_replace_port_l1_filter(pf, + tunnel_filter->l4_port_type); + i40e_replace_port_cloud_filter(pf, + tunnel_filter->l4_port_type); + pf->sport_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] = + I40E_DIRECTION_INGRESS_KEY; + + if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP) + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + I40E_TR_L4_TYPE_UDP; + else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP) + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + I40E_TR_L4_TYPE_TCP; + else + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] = + I40E_TR_L4_TYPE_SCTP; + + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] = + (teid_le >> 16) & 0xFFFF; + big_buffer = 1; + } else { + if (!pf->dport_replace_flag) { + i40e_replace_port_l1_filter(pf, + tunnel_filter->l4_port_type); + i40e_replace_port_cloud_filter(pf, + tunnel_filter->l4_port_type); + pf->dport_replace_flag = 1; + } + teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] = + I40E_DIRECTION_INGRESS_KEY; + + if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP) + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] = + I40E_TR_L4_TYPE_UDP; + else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP) + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] = + I40E_TR_L4_TYPE_TCP; + else + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] = + I40E_TR_L4_TYPE_SCTP; + + pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] = + (teid_le >> 16) & 0xFFFF; + big_buffer = 1; + } + break; default: /* Other tunnel types is not supported. 
*/ @@ -8412,7 +8782,16 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ) pfilter->element.flags |= I40E_AQC_ADD_CLOUD_FILTER_0X10; - else { + else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP || + tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP || + tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) { + if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) + pfilter->element.flags |= + I40E_AQC_ADD_CLOUD_FILTER_0X11; + else + pfilter->element.flags |= + I40E_AQC_ADD_CLOUD_FILTER_0X10; + } else { val = i40e_dev_get_filter_type(tunnel_filter->filter_type, &pfilter->element.flags); if (val < 0) { @@ -8681,6 +9060,7 @@ i40e_pf_calc_configured_queues_num(struct i40e_pf *pf) static int i40e_pf_config_rss(struct i40e_pf *pf) { + enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode; struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct rte_eth_rss_conf rss_conf; uint32_t i, lut = 0; @@ -8719,7 +9099,8 @@ i40e_pf_config_rss(struct i40e_pf *pf) } rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; - if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) { + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 || + !(mq_mode & ETH_MQ_RX_RSS_FLAG)) { i40e_pf_disable_rss(pf); return 0; } @@ -8775,7 +9156,7 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf, #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000 #define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) -static int +int i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) { struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; @@ -8892,21 +9273,6 @@ i40e_tunnel_filter_handle(struct rte_eth_dev *dev, return ret; } -static int -i40e_pf_config_mq_rx(struct i40e_pf *pf) -{ - int ret = 0; - enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode; - - /* RSS setup */ - if (mq_mode & ETH_MQ_RX_RSS_FLAG) - ret = i40e_pf_config_rss(pf); - else - i40e_pf_disable_rss(pf); - - return ret; -} - /* Get the symmetric hash enable configurations per port */ static void i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) @@ -10569,7 +10935,6 @@ i40e_configure_registers(struct i40e_hw *hw) } } -#define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4)) #define I40E_VSI_TSR_QINQ_CONFIG 0xc030 #define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4)) #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
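
For reference, the new i40e_stat_update_48_in_64() helper added above widens the 48-bit GORC/GOTC byte counters into 64-bit software totals by snapshotting the previous widened value and detecting a wrap of the low 48 bits. The sketch below is a hypothetical, self-contained rendering of that arithmetic only; the hw_cnt48 read path, the simplified signature and the demo main() are assumptions, not the driver code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CNT48_WIDTH 48
#define CNT48_MASK  ((UINT64_C(1) << CNT48_WIDTH) - 1)

/* Fold a free-running 48-bit hardware counter into a 64-bit total.
 * *prev_stat keeps the last widened value so a wrap of the low 48 bits
 * can be detected and the accumulated high bits carried forward. */
static void
stat_update_48_in_64(uint64_t hw_cnt48, uint64_t *stat, uint64_t *prev_stat)
{
	uint64_t cur = hw_cnt48 & CNT48_MASK;

	/* Low 48 bits went backwards => the hardware counter wrapped. */
	if ((*prev_stat & CNT48_MASK) > cur)
		cur += UINT64_C(1) << CNT48_WIDTH;

	/* Carry the high bits accumulated from earlier wraps. */
	cur += *prev_stat & ~CNT48_MASK;

	*stat = cur;
	*prev_stat = cur;
}

int
main(void)
{
	uint64_t stat = 0, prev = 0;

	stat_update_48_in_64(CNT48_MASK - 5, &stat, &prev); /* just before wrap */
	stat_update_48_in_64(10, &stat, &prev);             /* after wrap */
	printf("widened counter: %" PRIu64 "\n", stat);
	return 0;
}

The same previous-value comparison is what the patch applies to the rx/tx byte counters in i40e_update_vsi_stats() and i40e_read_stats_registers(), so the reported byte totals keep growing past the 48-bit hardware limit.

Likewise, the reworked flow director bookkeeping pre-allocates fdir_filter_array and an fdir_flow_pool whose free slots are tracked with an rte_bitmap, rather than freeing each entry individually on teardown. A minimal plain-C sketch of that "static array plus free-slot bitmap" pattern follows; the names are hypothetical and the DPDK rte_bitmap API is replaced by a hand-rolled bit array to keep it self-contained.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct flow_entry {
	uint32_t idx;		/* fixed slot index, assigned once at init */
	/* filter payload would follow */
};

struct flow_pool {
	struct flow_entry *entries;
	uint64_t *free_bits;	/* bit i set => entries[i] is free */
	uint32_t size;
};

static bool
flow_pool_init(struct flow_pool *p, uint32_t size)
{
	p->entries = calloc(size, sizeof(*p->entries));
	p->free_bits = calloc((size + 63) / 64, sizeof(uint64_t));
	if (p->entries == NULL || p->free_bits == NULL)
		return false;
	p->size = size;
	for (uint32_t i = 0; i < size; i++) {
		p->entries[i].idx = i;
		p->free_bits[i / 64] |= UINT64_C(1) << (i % 64);
	}
	return true;
}

static struct flow_entry *
flow_pool_alloc(struct flow_pool *p)
{
	for (uint32_t i = 0; i < p->size; i++)
		if (p->free_bits[i / 64] & (UINT64_C(1) << (i % 64))) {
			p->free_bits[i / 64] &= ~(UINT64_C(1) << (i % 64));
			return &p->entries[i];
		}
	return NULL;	/* guaranteed + best-effort space exhausted */
}

static void
flow_pool_free(struct flow_pool *p, struct flow_entry *e)
{
	p->free_bits[e->idx / 64] |= UINT64_C(1) << (e->idx % 64);
}

Because every FDIR flow lives in this pre-allocated pool, i40e_dev_close() now skips rte_free() for RTE_ETH_FILTER_FDIR flows and releases the whole pool in one step through i40e_fdir_memory_cleanup(), which is exactly what the hunks above implement.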