X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ftxgbe%2Ftxgbe_ethdev.c;h=0b0f9db7cb2a6ac4dc138a043440fa0970b666c4;hb=5b634932410ca41c2071a8d015180fd464df2fa5;hp=2c79b47ae7fa37e0e69da3e8fc97e8b8e24fce57;hpb=720e083d94c9690c185df54f97eb66361d1886cf;p=dpdk.git diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c index 2c79b47ae7..0b0f9db7cb 100644 --- a/drivers/net/txgbe/txgbe_ethdev.c +++ b/drivers/net/txgbe/txgbe_ethdev.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2020 + * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd. + * Copyright(c) 2010-2017 Intel Corporation */ #include @@ -7,7 +8,7 @@ #include #include #include -#include +#include #include #include @@ -16,6 +17,7 @@ #include #include #include +#include #include "txgbe_logs.h" #include "base/txgbe.h" @@ -88,6 +90,10 @@ static const struct reg_info *txgbe_regs_others[] = { txgbe_regs_diagnostic, NULL}; +static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); +static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); +static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); +static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); static int txgbe_dev_set_link_up(struct rte_eth_dev *dev); static int txgbe_dev_set_link_down(struct rte_eth_dev *dev); static int txgbe_dev_close(struct rte_eth_dev *dev); @@ -101,14 +107,19 @@ static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, static void txgbe_dev_link_status_print(struct rte_eth_dev *dev); static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); +static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev); static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); -static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); +static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, + struct rte_intr_handle *handle); static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *handle); static void txgbe_dev_interrupt_handler(void *param); static void txgbe_dev_interrupt_delayed_handler(void *param); static void txgbe_configure_msix(struct rte_eth_dev *dev); +static int txgbe_filter_restore(struct rte_eth_dev *dev); +static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev); + #define TXGBE_SET_HWSTRIP(h, q) do {\ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ @@ -131,8 +142,8 @@ static void txgbe_configure_msix(struct rte_eth_dev *dev); * The set of PCI devices this driver supports */ static const struct rte_pci_id pci_id_txgbe_map[] = { - { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) }, - { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -462,6 +473,71 @@ txgbe_swfw_lock_reset(struct txgbe_hw *hw) hw->mac.release_swfw_sync(hw, mask); } +static int +txgbe_handle_devarg(__rte_unused const char *key, const char *value, + void *extra_args) +{ + uint16_t *n = extra_args; + + if (value == NULL || extra_args == NULL) + return -EINVAL; + + *n = (uint16_t)strtoul(value, NULL, 10); + if (*n == USHRT_MAX && errno == ERANGE) + return -1; + + return 0; +} + +static void +txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs 
*devargs) +{ + struct rte_kvargs *kvlist; + u16 auto_neg = 1; + u16 poll = 0; + u16 present = 1; + u16 sgmii = 0; + u16 ffe_set = 0; + u16 ffe_main = 27; + u16 ffe_pre = 8; + u16 ffe_post = 44; + + if (devargs == NULL) + goto null; + + kvlist = rte_kvargs_parse(devargs->args, txgbe_valid_arguments); + if (kvlist == NULL) + goto null; + + rte_kvargs_process(kvlist, TXGBE_DEVARG_BP_AUTO, + &txgbe_handle_devarg, &auto_neg); + rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_POLL, + &txgbe_handle_devarg, &poll); + rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_PRESENT, + &txgbe_handle_devarg, &present); + rte_kvargs_process(kvlist, TXGBE_DEVARG_KX_SGMII, + &txgbe_handle_devarg, &sgmii); + rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_SET, + &txgbe_handle_devarg, &ffe_set); + rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_MAIN, + &txgbe_handle_devarg, &ffe_main); + rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_PRE, + &txgbe_handle_devarg, &ffe_pre); + rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST, + &txgbe_handle_devarg, &ffe_post); + rte_kvargs_free(kvlist); + +null: + hw->devarg.auto_neg = auto_neg; + hw->devarg.poll = poll; + hw->devarg.present = present; + hw->devarg.sgmii = sgmii; + hw->phy.ffe_set = ffe_set; + hw->phy.ffe_main = ffe_main; + hw->phy.ffe_pre = ffe_pre; + hw->phy.ffe_post = ffe_post; +} + static int eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) { @@ -470,12 +546,13 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev); struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev); struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev); struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; const struct rte_memzone *mz; uint32_t ctrl_ext; uint16_t csum; - int err, i; + int err, i, ret; PMD_INIT_FUNC_TRACE(); @@ -529,6 +606,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) hw->isb_dma = TMZ_PADDR(mz); hw->isb_mem = TMZ_VADDR(mz); + txgbe_parse_devargs(hw, pci_dev->device.devargs); /* Initialize the shared code (base driver) */ err = txgbe_init_shared_code(hw); if (err != 0) { @@ -539,6 +617,12 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* Unlock any pending hardware semaphore */ txgbe_swfw_lock_reset(hw); +#ifdef RTE_LIB_SECURITY + /* Initialize security_ctx only for primary process*/ + if (txgbe_ipsec_ctx_create(eth_dev)) + return -ENOMEM; +#endif + /* Initialize DCB configuration*/ memset(dcb_config, 0, sizeof(struct txgbe_dcb_config)); txgbe_dcb_init(hw, dcb_config); @@ -639,7 +723,14 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) memset(hwstrip, 0, sizeof(*hwstrip)); /* initialize PF if max_vfs not zero */ - txgbe_pf_host_init(eth_dev); + ret = txgbe_pf_host_init(eth_dev); + if (ret) { + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + rte_free(eth_dev->data->hash_mac_addrs); + eth_dev->data->hash_mac_addrs = NULL; + return ret; + } ctrl_ext = rd32(hw, TXGBE_PORTCTL); /* let hardware know driver is loaded */ @@ -670,9 +761,28 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) /* enable support intr */ txgbe_enable_intr(eth_dev); + /* initialize filter info */ + memset(filter_info, 0, + sizeof(struct txgbe_filter_info)); + + /* initialize 5tuple filter list */ + 
TAILQ_INIT(&filter_info->fivetuple_list); + + /* initialize flow director filter list & hash */ + txgbe_fdir_filter_init(eth_dev); + + /* initialize l2 tunnel filter list & hash */ + txgbe_l2_tn_filter_init(eth_dev); + + /* initialize flow filter lists */ + txgbe_filterlist_init(); + /* initialize bandwidth configuration info */ memset(bw_conf, 0, sizeof(struct txgbe_bw_conf)); + /* initialize Traffic Manager configuration */ + txgbe_tm_conf_init(eth_dev); + return 0; } @@ -689,45 +799,152 @@ eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev) return 0; } -static int -eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) +static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) { - struct rte_eth_dev *pf_ethdev; - struct rte_eth_devargs eth_da; - int retval; + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev); + struct txgbe_5tuple_filter *p_5tuple; + + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, + entries); + rte_free(p_5tuple); + } + memset(filter_info->fivetuple_mask, 0, + sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE); - if (pci_dev->device.devargs) { - retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, - ð_da); - if (retval) - return retval; - } else { - memset(ð_da, 0, sizeof(eth_da)); + return 0; +} + +static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev); + struct txgbe_fdir_filter *fdir_filter; + + if (fdir_info->hash_map) + rte_free(fdir_info->hash_map); + if (fdir_info->hash_handle) + rte_hash_free(fdir_info->hash_handle); + + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { + TAILQ_REMOVE(&fdir_info->fdir_list, + fdir_filter, + entries); + rte_free(fdir_filter); } - retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, - sizeof(struct txgbe_adapter), - eth_dev_pci_specific_init, pci_dev, - eth_txgbe_dev_init, NULL); + return 0; +} + +static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev); + struct txgbe_l2_tn_filter *l2_tn_filter; + + if (l2_tn_info->hash_map) + rte_free(l2_tn_info->hash_map); + if (l2_tn_info->hash_handle) + rte_hash_free(l2_tn_info->hash_handle); + + while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { + TAILQ_REMOVE(&l2_tn_info->l2_tn_list, + l2_tn_filter, + entries); + rte_free(l2_tn_filter); + } + + return 0; +} + +static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) +{ + struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev); + char fdir_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = TXGBE_MAX_FDIR_FILTER_NUM, + .key_len = sizeof(struct txgbe_atr_input), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + TAILQ_INIT(&fdir_info->fdir_list); + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, + "fdir_%s", TDEV_NAME(eth_dev)); + fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); + if (!fdir_info->hash_handle) { + PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("txgbe", + sizeof(struct txgbe_fdir_filter *) * + TXGBE_MAX_FDIR_FILTER_NUM, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); + return -ENOMEM; + } + fdir_info->mask_added = FALSE; - if (retval || 
eth_da.nb_representor_ports < 1) - return retval; + return 0; +} + +static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) +{ + struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev); + char l2_tn_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters l2_tn_hash_params = { + .name = l2_tn_hash_name, + .entries = TXGBE_MAX_L2_TN_FILTER_NUM, + .key_len = sizeof(struct txgbe_l2_tn_key), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; - pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); - if (pf_ethdev == NULL) - return -ENODEV; + TAILQ_INIT(&l2_tn_info->l2_tn_list); + snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, + "l2_tn_%s", TDEV_NAME(eth_dev)); + l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); + if (!l2_tn_info->hash_handle) { + PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); + return -EINVAL; + } + l2_tn_info->hash_map = rte_zmalloc("txgbe", + sizeof(struct txgbe_l2_tn_filter *) * + TXGBE_MAX_L2_TN_FILTER_NUM, + 0); + if (!l2_tn_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for L2 TN hash map!"); + return -ENOMEM; + } + l2_tn_info->e_tag_en = FALSE; + l2_tn_info->e_tag_fwd_en = FALSE; + l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; return 0; } +static int +eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, + sizeof(struct txgbe_adapter), + eth_dev_pci_specific_init, pci_dev, + eth_txgbe_dev_init, NULL); +} + static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev) { struct rte_eth_dev *ethdev; ethdev = rte_eth_dev_allocated(pci_dev->device.name); if (!ethdev) - return -ENODEV; + return 0; return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit); } @@ -781,7 +998,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx)); rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx)); rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx)); - if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { restart = (rxcfg & TXGBE_RXCFG_ENA) && !(rxcfg & TXGBE_RXCFG_VLAN); rxcfg |= TXGBE_RXCFG_VLAN; @@ -816,7 +1033,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev, vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT); qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ); switch (vlan_type) { - case ETH_VLAN_TYPE_INNER: + case RTE_ETH_VLAN_TYPE_INNER: if (vlan_ext) { wr32m(hw, TXGBE_VLANCTL, TXGBE_VLANCTL_TPID_MASK, @@ -836,7 +1053,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev, TXGBE_TAGTPID_LSB(tpid)); } break; - case ETH_VLAN_TYPE_OUTER: + case RTE_ETH_VLAN_TYPE_OUTER: if (vlan_ext) { /* Only the high 16-bits is valid */ wr32m(hw, TXGBE_EXTAG, @@ -921,10 +1138,10 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) if (on) { rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; - rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; } else { rxq->vlan_flags = PKT_RX_VLAN; - rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; } } @@ -970,7 +1187,6 @@ txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) ctrl = rd32(hw, TXGBE_PORTCTL); ctrl &= ~TXGBE_PORTCTL_VLANEXT; - ctrl &= ~TXGBE_PORTCTL_QINQ; wr32(hw, TXGBE_PORTCTL, ctrl); } @@ -978,17 +1194,38 @@ static void txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) { struct txgbe_hw *hw = TXGBE_DEV_HW(dev); - struct 
rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; - struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode; uint32_t ctrl; PMD_INIT_FUNC_TRACE(); ctrl = rd32(hw, TXGBE_PORTCTL); ctrl |= TXGBE_PORTCTL_VLANEXT; - if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP || - txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT) - ctrl |= TXGBE_PORTCTL_QINQ; + wr32(hw, TXGBE_PORTCTL, ctrl); +} + +static void +txgbe_qinq_hw_strip_disable(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + ctrl = rd32(hw, TXGBE_PORTCTL); + ctrl &= ~TXGBE_PORTCTL_QINQ; + wr32(hw, TXGBE_PORTCTL, ctrl); +} + +static void +txgbe_qinq_hw_strip_enable(struct rte_eth_dev *dev) +{ + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + uint32_t ctrl; + + PMD_INIT_FUNC_TRACE(); + + ctrl = rd32(hw, TXGBE_PORTCTL); + ctrl |= TXGBE_PORTCTL_QINQ | TXGBE_PORTCTL_VLANEXT; wr32(hw, TXGBE_PORTCTL, ctrl); } @@ -1003,7 +1240,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; - if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) txgbe_vlan_strip_queue_set(dev, i, 1); else txgbe_vlan_strip_queue_set(dev, i, 0); @@ -1017,17 +1254,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) struct rte_eth_rxmode *rxmode; struct txgbe_rx_queue *rxq; - if (mask & ETH_VLAN_STRIP_MASK) { + if (mask & RTE_ETH_VLAN_STRIP_MASK) { rxmode = &dev->data->dev_conf.rxmode; - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; - rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; } else for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; - rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; } } } @@ -1038,23 +1275,30 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) struct rte_eth_rxmode *rxmode; rxmode = &dev->data->dev_conf.rxmode; - if (mask & ETH_VLAN_STRIP_MASK) + if (mask & RTE_ETH_VLAN_STRIP_MASK) txgbe_vlan_hw_strip_config(dev); - if (mask & ETH_VLAN_FILTER_MASK) { - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + if (mask & RTE_ETH_VLAN_FILTER_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) txgbe_vlan_hw_filter_enable(dev); else txgbe_vlan_hw_filter_disable(dev); } - if (mask & ETH_VLAN_EXTEND_MASK) { - if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) + if (mask & RTE_ETH_VLAN_EXTEND_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) txgbe_vlan_hw_extend_enable(dev); else txgbe_vlan_hw_extend_disable(dev); } + if (mask & RTE_ETH_QINQ_STRIP_MASK) { + if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) + txgbe_qinq_hw_strip_enable(dev); + else + txgbe_qinq_hw_strip_disable(dev); + } + return 0; } @@ -1087,10 +1331,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) switch (nb_rx_q) { case 1: case 2: - RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; + RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS; break; case 4: - RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; + RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS; break; default: return -EINVAL; @@ -1113,18 +1357,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) if (RTE_ETH_DEV_SRIOV(dev).active != 0) { /* check multi-queue mode */ switch (dev_conf->rxmode.mq_mode) { - case 
ETH_MQ_RX_VMDQ_DCB: - PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); + case RTE_ETH_MQ_RX_VMDQ_DCB: + PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); break; - case ETH_MQ_RX_VMDQ_DCB_RSS: + case RTE_ETH_MQ_RX_VMDQ_DCB_RSS: /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ PMD_INIT_LOG(ERR, "SRIOV active," " unsupported mq_mode rx %d.", dev_conf->rxmode.mq_mode); return -EINVAL; - case ETH_MQ_RX_RSS: - case ETH_MQ_RX_VMDQ_RSS: - dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; + case RTE_ETH_MQ_RX_RSS: + case RTE_ETH_MQ_RX_VMDQ_RSS: + dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS; if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { PMD_INIT_LOG(ERR, "SRIOV is active," @@ -1134,13 +1378,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } break; - case ETH_MQ_RX_VMDQ_ONLY: - case ETH_MQ_RX_NONE: + case RTE_ETH_MQ_RX_VMDQ_ONLY: + case RTE_ETH_MQ_RX_NONE: /* if nothing mq mode configure, use default scheme */ dev->data->dev_conf.rxmode.mq_mode = - ETH_MQ_RX_VMDQ_ONLY; + RTE_ETH_MQ_RX_VMDQ_ONLY; break; - default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ + default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/ /* SRIOV only works in VMDq enable mode */ PMD_INIT_LOG(ERR, "SRIOV is active," " wrong mq_mode rx %d.", @@ -1149,13 +1393,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) } switch (dev_conf->txmode.mq_mode) { - case ETH_MQ_TX_VMDQ_DCB: - PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); - dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; + case RTE_ETH_MQ_TX_VMDQ_DCB: + PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); + dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB; break; - default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ + default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */ dev->data->dev_conf.txmode.mq_mode = - ETH_MQ_TX_VMDQ_ONLY; + RTE_ETH_MQ_TX_VMDQ_ONLY; break; } @@ -1170,13 +1414,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } } else { - if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { + if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) { PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" " not supported."); return -EINVAL; } /* check configuration for vmdb+dcb mode */ - if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { + if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { const struct rte_eth_vmdq_dcb_conf *conf; if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) { @@ -1185,15 +1429,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; - if (!(conf->nb_queue_pools == ETH_16_POOLS || - conf->nb_queue_pools == ETH_32_POOLS)) { + if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || + conf->nb_queue_pools == RTE_ETH_32_POOLS)) { PMD_INIT_LOG(ERR, "VMDQ+DCB selected," " nb_queue_pools must be %d or %d.", - ETH_16_POOLS, ETH_32_POOLS); + RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); return -EINVAL; } } - if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { + if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) { const struct rte_eth_vmdq_dcb_tx_conf *conf; if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) { @@ -1202,39 +1446,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; - if (!(conf->nb_queue_pools == ETH_16_POOLS || - conf->nb_queue_pools == ETH_32_POOLS)) { + if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS 
|| + conf->nb_queue_pools == RTE_ETH_32_POOLS)) { PMD_INIT_LOG(ERR, "VMDQ+DCB selected," " nb_queue_pools != %d and" " nb_queue_pools != %d.", - ETH_16_POOLS, ETH_32_POOLS); + RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); return -EINVAL; } } /* For DCB mode check our configuration before we go further */ - if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { + if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) { const struct rte_eth_dcb_rx_conf *conf; conf = &dev_conf->rx_adv_conf.dcb_rx_conf; - if (!(conf->nb_tcs == ETH_4_TCS || - conf->nb_tcs == ETH_8_TCS)) { + if (!(conf->nb_tcs == RTE_ETH_4_TCS || + conf->nb_tcs == RTE_ETH_8_TCS)) { PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" " and nb_tcs != %d.", - ETH_4_TCS, ETH_8_TCS); + RTE_ETH_4_TCS, RTE_ETH_8_TCS); return -EINVAL; } } - if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { + if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) { const struct rte_eth_dcb_tx_conf *conf; conf = &dev_conf->tx_adv_conf.dcb_tx_conf; - if (!(conf->nb_tcs == ETH_4_TCS || - conf->nb_tcs == ETH_8_TCS)) { + if (!(conf->nb_tcs == RTE_ETH_4_TCS || + conf->nb_tcs == RTE_ETH_8_TCS)) { PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" " and nb_tcs != %d.", - ETH_4_TCS, ETH_8_TCS); + RTE_ETH_4_TCS, RTE_ETH_8_TCS); return -EINVAL; } } @@ -1251,8 +1495,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); - if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) - dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; + if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) + dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; /* multiple queue mode checking */ ret = txgbe_check_mq_mode(dev); @@ -1285,6 +1529,7 @@ txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) gpie |= TXGBE_GPIOBIT_6; wr32(hw, TXGBE_GPIOINTEN, gpie); intr->mask_misc |= TXGBE_ICRMISC_GPIO; + intr->mask_misc |= TXGBE_ICRMISC_ANDONE; } int @@ -1385,20 +1630,10 @@ txgbe_dev_start(struct rte_eth_dev *dev) int status; uint16_t vf, idx; uint32_t *link_speeds; + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); PMD_INIT_FUNC_TRACE(); - /* TXGBE devices don't support: - * - half duplex (checked afterwards for valid speeds) - * - fixed speed: TODO implement - */ - if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { - PMD_INIT_LOG(ERR, - "Invalid link_speeds for port %u, fix speed not supported", - dev->data->port_id); - return -EINVAL; - } - /* Stop the link setup handler before resetting the HW. 
*/ rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev); @@ -1419,6 +1654,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) return -1; hw->mac.start_hw(hw); hw->mac.get_link_status = true; + hw->dev_start = true; /* configure PF module if SRIOV enabled */ txgbe_pf_host_configure(dev); @@ -1458,15 +1694,15 @@ txgbe_dev_start(struct rte_eth_dev *dev) goto error; } - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | - ETH_VLAN_EXTEND_MASK; + mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | + RTE_ETH_VLAN_EXTEND_MASK; err = txgbe_vlan_offload_config(dev, mask); if (err) { PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); goto error; } - if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) { /* Enable vlan filtering for VMDq */ txgbe_vmdq_vlan_hw_filter_enable(dev); } @@ -1476,6 +1712,12 @@ txgbe_dev_start(struct rte_eth_dev *dev) txgbe_configure_port(dev); txgbe_configure_dcb(dev); + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { + err = txgbe_fdir_configure(dev); + if (err) + goto error; + } + /* Restore vf rate limit */ if (vfinfo != NULL) { for (vf = 0; vf < pci_dev->max_vfs; vf++) @@ -1511,7 +1753,8 @@ txgbe_dev_start(struct rte_eth_dev *dev) hw->mac.enable_tx_laser(hw); } - err = hw->mac.check_link(hw, &speed, &link_up, 0); + if ((hw->subsystem_device_id & 0xFF) != TXGBE_DEV_ID_KR_KX_KX4) + err = hw->mac.check_link(hw, &speed, &link_up, 0); if (err) goto error; dev->data->dev_link.link_status = link_up; @@ -1520,30 +1763,30 @@ txgbe_dev_start(struct rte_eth_dev *dev) if (err) goto error; - allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_10G; + allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G | + RTE_ETH_LINK_SPEED_10G; link_speeds = &dev->data->dev_conf.link_speeds; - if (*link_speeds & ~allowed_speeds) { + if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { PMD_INIT_LOG(ERR, "Invalid link setting"); goto error; } speed = 0x0; - if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { + if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) { speed = (TXGBE_LINK_SPEED_100M_FULL | TXGBE_LINK_SPEED_1GB_FULL | TXGBE_LINK_SPEED_10GB_FULL); } else { - if (*link_speeds & ETH_LINK_SPEED_10G) + if (*link_speeds & RTE_ETH_LINK_SPEED_10G) speed |= TXGBE_LINK_SPEED_10GB_FULL; - if (*link_speeds & ETH_LINK_SPEED_5G) + if (*link_speeds & RTE_ETH_LINK_SPEED_5G) speed |= TXGBE_LINK_SPEED_5GB_FULL; - if (*link_speeds & ETH_LINK_SPEED_2_5G) + if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G) speed |= TXGBE_LINK_SPEED_2_5GB_FULL; - if (*link_speeds & ETH_LINK_SPEED_1G) + if (*link_speeds & RTE_ETH_LINK_SPEED_1G) speed |= TXGBE_LINK_SPEED_1GB_FULL; - if (*link_speeds & ETH_LINK_SPEED_100M) + if (*link_speeds & RTE_ETH_LINK_SPEED_100M) speed |= TXGBE_LINK_SPEED_100M_FULL; } @@ -1554,6 +1797,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) skip_link_setup: if (rte_intr_allow_others(intr_handle)) { + txgbe_dev_misc_interrupt_setup(dev); /* check if lsc interrupt is enabled */ if (dev->data->dev_conf.intr_conf.lsc != 0) txgbe_dev_lsc_interrupt_setup(dev, TRUE); @@ -1579,6 +1823,13 @@ skip_link_setup: /* resume enabled intr since hw reset */ txgbe_enable_intr(dev); + txgbe_l2_tunnel_conf(dev); + txgbe_filter_restore(dev); + + if (tm_conf->root && !tm_conf->committed) + PMD_DRV_LOG(WARNING, + "please call hierarchy_commit() " + "before starting the port"); /* * Update link status right before return, because it may @@ -1612,6 +1863,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev) 
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int vf; + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); if (hw->adapter_stopped) return 0; @@ -1664,11 +1916,15 @@ txgbe_dev_stop(struct rte_eth_dev *dev) intr_handle->intr_vec = NULL; } + /* reset hierarchy commit */ + tm_conf->committed = false; + adapter->rss_reta_updated = 0; wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK); hw->adapter_stopped = true; dev->data->dev_started = 0; + hw->dev_start = false; return 0; } @@ -1767,6 +2023,25 @@ txgbe_dev_close(struct rte_eth_dev *dev) rte_free(dev->data->hash_mac_addrs); dev->data->hash_mac_addrs = NULL; + /* remove all the fdir filters & hash */ + txgbe_fdir_filter_uninit(dev); + + /* remove all the L2 tunnel filters & hash */ + txgbe_l2_tn_filter_uninit(dev); + + /* Remove all ntuple filters of the device */ + txgbe_ntuple_filter_uninit(dev); + + /* clear all the filters list */ + txgbe_filterlist_flush(); + + /* Remove all Traffic Manager configuration */ + txgbe_tm_conf_uninit(dev); + +#ifdef RTE_LIB_SECURITY + rte_free(dev->security_ctx); +#endif + return ret; } @@ -1874,6 +2149,7 @@ txgbe_read_stats_registers(struct txgbe_hw *hw, hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL); hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL); + hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP); hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP); /* MAC Stats */ @@ -2022,7 +2298,8 @@ txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) } /* Rx Errors */ - stats->imissed = hw_stats->rx_total_missed_packets; + stats->imissed = hw_stats->rx_total_missed_packets + + hw_stats->rx_dma_drop; stats->ierrors = hw_stats->rx_crc_errors + hw_stats->rx_mac_short_packet_dropped + hw_stats->rx_length_errors + @@ -2133,9 +2410,8 @@ txgbe_get_offset_by_id(uint32_t id, uint32_t *offset) nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t)); return 0; } - id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP; - return -(int)(id + 1); + return -1; } static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev, @@ -2165,8 +2441,8 @@ static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev, } static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, - struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + struct rte_eth_xstat_name *xstats_names, unsigned int limit) { unsigned int i; @@ -2296,18 +2572,17 @@ static int txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) { struct txgbe_hw *hw = TXGBE_DEV_HW(dev); - u16 eeprom_verh, eeprom_verl; u32 etrack_id; int ret; - hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh); - hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl); + hw->phy.get_fw_version(hw, &etrack_id); - etrack_id = (eeprom_verh << 16) | eeprom_verl; ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); + if (ret < 0) + return -EINVAL; ret += 1; /* add the size of '\0' */ - if (fw_size < (u32)ret) + if (fw_size < (size_t)ret) return ret; else return 0; @@ -2326,7 +2601,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC; dev_info->max_vfs = pci_dev->max_vfs; - dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; dev_info->vmdq_queue_num = dev_info->max_rx_queues; dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev); dev_info->rx_offload_capa = 
(txgbe_get_rx_port_offloads(dev) | @@ -2359,11 +2634,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->tx_desc_lim = tx_desc_lim; dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); - dev_info->reta_size = ETH_RSS_RETA_SIZE_128; + dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128; dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL; - dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; - dev_info->speed_capa |= ETH_LINK_SPEED_100M; + dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G; + dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M; /* Driver-preferred Rx/Tx parameters */ dev_info->default_rxportconf.burst_size = 32; @@ -2420,10 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, int wait = 1; memset(&link, 0, sizeof(link)); - link.link_status = ETH_LINK_DOWN; - link.link_speed = ETH_SPEED_NUM_NONE; - link.link_duplex = ETH_LINK_HALF_DUPLEX; - link.link_autoneg = ETH_LINK_AUTONEG; + link.link_status = RTE_ETH_LINK_DOWN; + link.link_speed = RTE_ETH_SPEED_NUM_NONE; + link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + RTE_ETH_LINK_AUTONEG); hw->mac.get_link_status = true; @@ -2437,49 +2713,54 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, err = hw->mac.check_link(hw, &link_speed, &link_up, wait); if (err != 0) { - link.link_speed = ETH_SPEED_NUM_100M; - link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = RTE_ETH_SPEED_NUM_100M; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; return rte_eth_linkstatus_set(dev, &link); } if (link_up == 0) { - if (hw->phy.media_type == txgbe_media_type_fiber) { + if ((hw->subsystem_device_id & 0xFF) == + TXGBE_DEV_ID_KR_KX_KX4) { + hw->mac.bp_down_event(hw); + } else if (hw->phy.media_type == txgbe_media_type_fiber) { intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; rte_eal_alarm_set(10, txgbe_dev_setup_link_alarm_handler, dev); } return rte_eth_linkstatus_set(dev, &link); + } else if (!hw->dev_start) { + return rte_eth_linkstatus_set(dev, &link); } intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; - link.link_status = ETH_LINK_UP; - link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_status = RTE_ETH_LINK_UP; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; switch (link_speed) { default: case TXGBE_LINK_SPEED_UNKNOWN: - link.link_duplex = ETH_LINK_FULL_DUPLEX; - link.link_speed = ETH_SPEED_NUM_100M; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + link.link_speed = RTE_ETH_SPEED_NUM_100M; break; case TXGBE_LINK_SPEED_100M_FULL: - link.link_speed = ETH_SPEED_NUM_100M; + link.link_speed = RTE_ETH_SPEED_NUM_100M; break; case TXGBE_LINK_SPEED_1GB_FULL: - link.link_speed = ETH_SPEED_NUM_1G; + link.link_speed = RTE_ETH_SPEED_NUM_1G; break; case TXGBE_LINK_SPEED_2_5GB_FULL: - link.link_speed = ETH_SPEED_NUM_2_5G; + link.link_speed = RTE_ETH_SPEED_NUM_2_5G; break; case TXGBE_LINK_SPEED_5GB_FULL: - link.link_speed = ETH_SPEED_NUM_5G; + link.link_speed = RTE_ETH_SPEED_NUM_5G; break; case TXGBE_LINK_SPEED_10GB_FULL: - link.link_speed = ETH_SPEED_NUM_10G; + link.link_speed = RTE_ETH_SPEED_NUM_10G; break; } @@ -2578,6 +2859,20 @@ txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) return 0; } +static int +txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev) +{ + struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); + u64 mask; + + mask = TXGBE_ICR_MASK; + mask &= (1ULL << TXGBE_MISC_VEC_ID); + intr->mask |= mask; + intr->mask_misc |= TXGBE_ICRMISC_GPIO; + intr->mask_misc |= 
TXGBE_ICRMISC_ANDONE; + return 0; +} + /** * It clears the interrupt causes and enables the interrupt. * It will be called once only during nic initialized. @@ -2593,9 +2888,11 @@ static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) { struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); + u64 mask; - intr->mask[0] |= TXGBE_ICR_MASK; - intr->mask[1] |= TXGBE_ICR_MASK; + mask = TXGBE_ICR_MASK; + mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1); + intr->mask |= mask; return 0; } @@ -2632,12 +2929,17 @@ txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) * - On failure, a negative value. */ static int -txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) +txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) { uint32_t eicr; struct txgbe_hw *hw = TXGBE_DEV_HW(dev); struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); + if (intr_handle->type != RTE_INTR_HANDLE_UIO && + intr_handle->type != RTE_INTR_HANDLE_VFIO_MSIX) + wr32(hw, TXGBE_PX_INTA, 1); + /* clear all cause mask */ txgbe_disable_intr(hw); @@ -2651,6 +2953,9 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) if (eicr & TXGBE_ICRMISC_LSC) intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; + if (eicr & TXGBE_ICRMISC_ANDONE) + intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG; + if (eicr & TXGBE_ICRMISC_VFMBX) intr->flags |= TXGBE_FLAG_MAILBOX; @@ -2685,7 +2990,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev) PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", (int)(dev->data->port_id), (unsigned int)link.link_speed, - link.link_duplex == ETH_LINK_FULL_DUPLEX ? + link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? "full-duplex" : "half-duplex"); } else { PMD_INIT_LOG(INFO, " Port %d: Link Down", @@ -2728,6 +3033,13 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev, intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT; } + if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) { + if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) { + hw->mac.kr_handle(hw); + intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG; + } + } + if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) { struct rte_eth_link link; @@ -2741,6 +3053,11 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev, /* handle it 1 sec later, wait it being stable */ timeout = TXGBE_LINK_UP_CHECK_TIMEOUT; /* likely to down */ + else if ((hw->subsystem_device_id & 0xFF) == + TXGBE_DEV_ID_KR_KX_KX4 && + hw->devarg.auto_neg == 1) + /* handle it 2 sec later for backplane AN73 */ + timeout = 2000; else /* handle it 4 sec later, wait it being stable */ timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT; @@ -2751,10 +3068,12 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev, (void *)dev) < 0) { PMD_DRV_LOG(ERR, "Error setting alarm"); } else { - /* remember original mask */ - intr->mask_misc_orig = intr->mask_misc; /* only disable lsc interrupt */ intr->mask_misc &= ~TXGBE_ICRMISC_LSC; + + intr->mask_orig = intr->mask; + /* only disable all misc interrupts */ + intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID); } } @@ -2815,8 +3134,10 @@ txgbe_dev_interrupt_delayed_handler(void *param) } /* restore original mask */ - intr->mask_misc = intr->mask_misc_orig; - intr->mask_misc_orig = 0; + intr->mask_misc |= TXGBE_ICRMISC_LSC; + + intr->mask = intr->mask_orig; + intr->mask_orig = 0; PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); txgbe_enable_intr(dev); @@ -2840,7 +3161,7 @@ txgbe_dev_interrupt_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; - txgbe_dev_interrupt_get_status(dev); + txgbe_dev_interrupt_get_status(dev, 
dev->intr_handle); txgbe_dev_interrupt_action(dev, dev->intr_handle); } @@ -2900,13 +3221,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) tx_pause = 0; if (rx_pause && tx_pause) - fc_conf->mode = RTE_FC_FULL; + fc_conf->mode = RTE_ETH_FC_FULL; else if (rx_pause) - fc_conf->mode = RTE_FC_RX_PAUSE; + fc_conf->mode = RTE_ETH_FC_RX_PAUSE; else if (tx_pause) - fc_conf->mode = RTE_FC_TX_PAUSE; + fc_conf->mode = RTE_ETH_FC_TX_PAUSE; else - fc_conf->mode = RTE_FC_NONE; + fc_conf->mode = RTE_ETH_FC_NONE; return 0; } @@ -3038,21 +3359,21 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev, return -ENOTSUP; } - if (reta_size != ETH_RSS_RETA_SIZE_128) { + if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); + "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128); return -EINVAL; } for (i = 0; i < reta_size; i += 4) { - idx = i / RTE_RETA_GROUP_SIZE; - shift = i % RTE_RETA_GROUP_SIZE; + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF); if (!mask) continue; - reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2); + reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2); for (j = 0; j < 4; j++) { if (RS8(mask, j, 0x1)) { reta &= ~(MS32(8 * j, 0xFF)); @@ -3060,7 +3381,7 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 8 * j, 0xFF); } } - wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta); + wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta); } adapter->rss_reta_updated = 1; @@ -3079,21 +3400,21 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); - if (reta_size != ETH_RSS_RETA_SIZE_128) { + if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); + "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128); return -EINVAL; } for (i = 0; i < reta_size; i += 4) { - idx = i / RTE_RETA_GROUP_SIZE; - shift = i % RTE_RETA_GROUP_SIZE; + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF); if (!mask) continue; - reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2); + reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2); for (j = 0; j < 4; j++) { if (RS8(mask, j, 0x1)) reta_conf[idx].reta[shift + j] = @@ -3138,18 +3459,8 @@ static int txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { struct txgbe_hw *hw = TXGBE_DEV_HW(dev); - struct rte_eth_dev_info dev_info; uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; struct rte_eth_dev_data *dev_data = dev->data; - int ret; - - ret = txgbe_dev_info_get(dev, &dev_info); - if (ret != 0) - return ret; - - /* check that mtu is within the allowed range */ - if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) - return -EINVAL; /* If device is started, refuse mtu that requires the support of * scattered packets when this feature has not been enabled before. 
@@ -3161,9 +3472,6 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; } - /* update max frame size */ - dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; - if (hw->mode) wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, TXGBE_FRAME_SIZE_MAX); @@ -3268,12 +3576,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) return -ENOTSUP; if (on) { - for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { + for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { uta_info->uta_shadow[i] = ~0; wr32(hw, TXGBE_UCADDRTBL(i), ~0); } } else { - for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { + for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { uta_info->uta_shadow[i] = 0; wr32(hw, TXGBE_UCADDRTBL(i), 0); } @@ -3297,15 +3605,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) { uint32_t new_val = orig_val; - if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) + if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) new_val |= TXGBE_POOLETHCTL_UTA; - if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) + if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC) new_val |= TXGBE_POOLETHCTL_MCHA; - if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) + if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC) new_val |= TXGBE_POOLETHCTL_UCHA; - if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) + if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST) new_val |= TXGBE_POOLETHCTL_BCA; - if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) + if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST) new_val |= TXGBE_POOLETHCTL_MCP; return new_val; @@ -3474,102 +3782,506 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, return 0; } -static u8 * -txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw, - u8 **mc_addr_ptr, u32 *vmdq) +int +txgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) { - u8 *mc_addr; + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + uint32_t syn_info; + uint32_t synqf; - *vmdq = 0; - mc_addr = *mc_addr_ptr; - *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); - return mc_addr; + if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + syn_info = filter_info->syn_info; + + if (add) { + if (syn_info & TXGBE_SYNCLS_ENA) + return -EINVAL; + synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue); + synqf |= TXGBE_SYNCLS_ENA; + + if (filter->hig_pri) + synqf |= TXGBE_SYNCLS_HIPRIO; + else + synqf &= ~TXGBE_SYNCLS_HIPRIO; + } else { + synqf = rd32(hw, TXGBE_SYNCLS); + if (!(syn_info & TXGBE_SYNCLS_ENA)) + return -ENOENT; + synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA); + } + + filter_info->syn_info = synqf; + wr32(hw, TXGBE_SYNCLS, synqf); + txgbe_flush(hw); + return 0; } -int -txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, - struct rte_ether_addr *mc_addr_set, - uint32_t nb_mc_addr) +static inline enum txgbe_5tuple_protocol +convert_protocol_type(uint8_t protocol_value) { - struct txgbe_hw *hw; - u8 *mc_addr_list; - - hw = TXGBE_DEV_HW(dev); - mc_addr_list = (u8 *)mc_addr_set; - return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, - txgbe_dev_addr_list_itr, TRUE); + if (protocol_value == IPPROTO_TCP) + return TXGBE_5TF_PROT_TCP; + else if (protocol_value == IPPROTO_UDP) + return TXGBE_5TF_PROT_UDP; + else if (protocol_value == IPPROTO_SCTP) + return TXGBE_5TF_PROT_SCTP; + else + return TXGBE_5TF_PROT_NONE; } -static uint64_t -txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) +/* inject a 5-tuple filter to HW */ +static inline void +txgbe_inject_5tuple_filter(struct rte_eth_dev *dev, + struct txgbe_5tuple_filter 
*filter) { struct txgbe_hw *hw = TXGBE_DEV_HW(dev); - uint64_t systime_cycles; - - systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL); - systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32; - - return systime_cycles; + int i; + uint32_t ftqf, sdpqf; + uint32_t l34timir = 0; + uint32_t mask = TXGBE_5TFCTL0_MASK; + + i = filter->index; + sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port)); + sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port)); + + ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto); + ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority); + if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ + mask &= ~TXGBE_5TFCTL0_MSADDR; + if (filter->filter_info.dst_ip_mask == 0) + mask &= ~TXGBE_5TFCTL0_MDADDR; + if (filter->filter_info.src_port_mask == 0) + mask &= ~TXGBE_5TFCTL0_MSPORT; + if (filter->filter_info.dst_port_mask == 0) + mask &= ~TXGBE_5TFCTL0_MDPORT; + if (filter->filter_info.proto_mask == 0) + mask &= ~TXGBE_5TFCTL0_MPROTO; + ftqf |= mask; + ftqf |= TXGBE_5TFCTL0_MPOOL; + ftqf |= TXGBE_5TFCTL0_ENA; + + wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip)); + wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip)); + wr32(hw, TXGBE_5TFPORT(i), sdpqf); + wr32(hw, TXGBE_5TFCTL0(i), ftqf); + + l34timir |= TXGBE_5TFCTL1_QP(filter->queue); + wr32(hw, TXGBE_5TFCTL1(i), l34timir); } -static uint64_t -txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) +/* + * add a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * index: the index the filter allocates. + * filter: pointer to the filter that will be added. + * rx_queue: the queue id the filter assigned to. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +txgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct txgbe_5tuple_filter *filter) { - struct txgbe_hw *hw = TXGBE_DEV_HW(dev); - uint64_t rx_tstamp_cycles; + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + int i, idx, shift; - /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */ - rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL); - rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32; + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) { + idx = i / (sizeof(uint32_t) * NBBY); + shift = i % (sizeof(uint32_t) * NBBY); + if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { + filter_info->fivetuple_mask[idx] |= 1 << shift; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; + } + } + if (i >= TXGBE_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); + return -ENOSYS; + } - return rx_tstamp_cycles; + txgbe_inject_5tuple_filter(dev, filter); + + return 0; } -static uint64_t -txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +/* + * remove a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * filter: the pointer of the filter will be removed. 
+ */ +static void +txgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct txgbe_5tuple_filter *filter) { struct txgbe_hw *hw = TXGBE_DEV_HW(dev); - uint64_t tx_tstamp_cycles; + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + uint16_t index = filter->index; + + filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= + ~(1 << (index % (sizeof(uint32_t) * NBBY))); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); + + wr32(hw, TXGBE_5TFDADDR(index), 0); + wr32(hw, TXGBE_5TFSADDR(index), 0); + wr32(hw, TXGBE_5TFPORT(index), 0); + wr32(hw, TXGBE_5TFCTL0(index), 0); + wr32(hw, TXGBE_5TFCTL1(index), 0); +} - /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */ - tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL); - tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32; +static inline struct txgbe_5tuple_filter * +txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list, + struct txgbe_5tuple_filter_info *key) +{ + struct txgbe_5tuple_filter *it; - return tx_tstamp_cycles; + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct txgbe_5tuple_filter_info)) == 0) { + return it; + } + } + return NULL; } -static void -txgbe_start_timecounters(struct rte_eth_dev *dev) +/* translate elements in struct rte_eth_ntuple_filter + * to struct txgbe_5tuple_filter_info + */ +static inline int +ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, + struct txgbe_5tuple_filter_info *filter_info) { - struct txgbe_hw *hw = TXGBE_DEV_HW(dev); - struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); - struct rte_eth_link link; - uint32_t incval = 0; - uint32_t shift = 0; - - /* Get current link speed. */ - txgbe_dev_link_update(dev, 1); - rte_eth_linkstatus_get(dev, &link); + if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM || + filter->priority > TXGBE_5TUPLE_MAX_PRI || + filter->priority < TXGBE_5TUPLE_MIN_PRI) + return -EINVAL; - switch (link.link_speed) { - case ETH_SPEED_NUM_100M: - incval = TXGBE_INCVAL_100; - shift = TXGBE_INCVAL_SHIFT_100; + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; break; - case ETH_SPEED_NUM_1G: - incval = TXGBE_INCVAL_1GB; - shift = TXGBE_INCVAL_SHIFT_1GB; + case 0: + filter_info->dst_ip_mask = 1; break; - case ETH_SPEED_NUM_10G: default: - incval = TXGBE_INCVAL_10GB; - shift = TXGBE_INCVAL_SHIFT_10GB; - break; + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; } - wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2)); - - memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 
0;
+		filter_info->proto =
+			convert_protocol_type(filter->proto);
+		break;
+	case 0:
+		filter_info->proto_mask = 1;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid protocol mask.");
+		return -EINVAL;
+	}
+
+	filter_info->priority = (uint8_t)filter->priority;
+	return 0;
+}
+
+/*
+ * add or delete a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ntuple_filter *ntuple_filter,
+			bool add)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	struct txgbe_5tuple_filter_info filter_5tuple;
+	struct txgbe_5tuple_filter *filter;
+	int ret;
+
+	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+		return -EINVAL;
+	}
+
+	memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
+	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+	if (ret < 0)
+		return ret;
+
+	filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+			&filter_5tuple);
+	if (filter != NULL && add) {
+		PMD_DRV_LOG(ERR, "filter exists.");
+		return -EEXIST;
+	}
+	if (filter == NULL && !add) {
+		PMD_DRV_LOG(ERR, "filter doesn't exist.");
+		return -ENOENT;
+	}
+
+	if (add) {
+		filter = rte_zmalloc("txgbe_5tuple_filter",
+				sizeof(struct txgbe_5tuple_filter), 0);
+		if (filter == NULL)
+			return -ENOMEM;
+		rte_memcpy(&filter->filter_info,
+			&filter_5tuple,
+			sizeof(struct txgbe_5tuple_filter_info));
+		filter->queue = ntuple_filter->queue;
+		ret = txgbe_add_5tuple_filter(dev, filter);
+		if (ret < 0) {
+			rte_free(filter);
+			return ret;
+		}
+	} else {
+		txgbe_remove_5tuple_filter(dev, filter);
+	}
+
+	return 0;
+}
+
+int
+txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+			struct rte_eth_ethertype_filter *filter,
+			bool add)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	uint32_t etqf = 0;
+	uint32_t etqs = 0;
+	int ret;
+	struct txgbe_ethertype_filter ethertype_filter;
+
+	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+		return -EINVAL;
+
+	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+			" ethertype filter.", filter->ether_type);
+		return -EINVAL;
+	}
+
+	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+		return -EINVAL;
+	}
+	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+		PMD_DRV_LOG(ERR, "drop option is unsupported.");
+		return -EINVAL;
+	}
+
+	ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+	if (ret >= 0 && add) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+			filter->ether_type);
+		return -EEXIST;
+	}
+	if (ret < 0 && !add) {
+		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+			filter->ether_type);
+		return -ENOENT;
+	}
+
+	if (add) {
+		etqf = TXGBE_ETFLT_ENA;
+		etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
+		etqs |= TXGBE_ETCLS_QPID(filter->queue);
+		etqs |= TXGBE_ETCLS_QENA;
+
+		ethertype_filter.ethertype = filter->ether_type;
+		ethertype_filter.etqf = etqf;
+		ethertype_filter.etqs = etqs;
+		ethertype_filter.conf = FALSE;
+		ret = txgbe_ethertype_filter_insert(filter_info,
+				&ethertype_filter);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "ethertype filters are full.");
+			return -ENOSPC;
+		}
+	} else {
+		ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+		if (ret < 0)
+			return -ENOSYS;
+	}
+	wr32(hw, TXGBE_ETFLT(ret), etqf);
+	wr32(hw, TXGBE_ETCLS(ret), etqs);
+	txgbe_flush(hw);
+
+	return 0;
+}
+
+static int
+txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
+		       const struct rte_flow_ops **ops)
+{
+	*ops = &txgbe_flow_ops;
+	return 0;
+}
+
+static u8 *
+txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
+			u8 **mc_addr_ptr, u32 *vmdq)
+{
+	u8 *mc_addr;
+
+	*vmdq = 0;
+	mc_addr = *mc_addr_ptr;
+	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
+	return mc_addr;
+}
+
+int
+txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mc_addr_set,
+			  uint32_t nb_mc_addr)
+{
+	struct txgbe_hw *hw;
+	u8 *mc_addr_list;
+
+	hw = TXGBE_DEV_HW(dev);
+	mc_addr_list = (u8 *)mc_addr_set;
+	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+					txgbe_dev_addr_list_itr, TRUE);
+}
+
+static uint64_t
+txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint64_t systime_cycles;
+
+	systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
+	systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
+
+	return systime_cycles;
+}
+
+static uint64_t
+txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint64_t rx_tstamp_cycles;
+
+	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
+	rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
+	rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
+
+	return rx_tstamp_cycles;
+}
+
+static uint64_t
+txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint64_t tx_tstamp_cycles;
+
+	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
+	tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
+	tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
+
+	return tx_tstamp_cycles;
+}
+
+static void
+txgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	struct rte_eth_link link;
+	uint32_t incval = 0;
+	uint32_t shift = 0;
+
+	/* Get current link speed. */
+	txgbe_dev_link_update(dev, 1);
+	rte_eth_linkstatus_get(dev, &link);
+
+	switch (link.link_speed) {
+	case RTE_ETH_SPEED_NUM_100M:
+		incval = TXGBE_INCVAL_100;
+		shift = TXGBE_INCVAL_SHIFT_100;
+		break;
+	case RTE_ETH_SPEED_NUM_1G:
+		incval = TXGBE_INCVAL_1GB;
+		shift = TXGBE_INCVAL_SHIFT_1GB;
+		break;
+	case RTE_ETH_SPEED_NUM_10G:
+	default:
+		incval = TXGBE_INCVAL_10GB;
+		shift = TXGBE_INCVAL_SHIFT_10GB;
+		break;
+	}
+
+	wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
+
+	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
@@ -3899,6 +4611,7 @@ txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
 {
 	switch (mac_type) {
 	case txgbe_mac_raptor:
+	case txgbe_mac_raptor_vf:
 		return 1;
 	default:
 		return 0;
@@ -3915,7 +4628,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	uint8_t nb_tcs;
 	uint8_t i, j;
 
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
 	else
 		dcb_info->nb_tcs = 1;
@@ -3926,7 +4639,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	if (dcb_config->vt_mode) { /* vt is enabled */
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
 			for (j = 0; j < nb_tcs; j++) {
@@ -3950,9 +4663,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	} else { /* vt is disabled */
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-		if (dcb_info->nb_tcs == ETH_4_TCS) {
+		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -3965,7 +4678,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
+		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
 			for (i = 0; i < dcb_info->nb_tcs; i++) {
 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -3995,6 +4708,553 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/* Update e-tag ether type */
+static int
+txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
+			uint16_t ether_type)
+{
+	uint32_t etag_etype;
+
+	etag_etype = rd32(hw, TXGBE_EXTAG);
+	etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
+	etag_etype |= ether_type;
+	wr32(hw, TXGBE_EXTAG, etag_etype);
+	txgbe_flush(hw);
+
+	return 0;
+}
+
+/* Enable e-tag tunnel */
+static int
+txgbe_e_tag_enable(struct txgbe_hw *hw)
+{
+	uint32_t etag_etype;
+
+	etag_etype = rd32(hw, TXGBE_PORTCTL);
+	etag_etype |= TXGBE_PORTCTL_ETAG;
+	wr32(hw, TXGBE_PORTCTL, etag_etype);
+	txgbe_flush(hw);
+
+	return 0;
+}
+
+static int
+txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+		       struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+	int ret = 0;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t i, rar_entries;
+	uint32_t rar_low, rar_high;
+
+	rar_entries = hw->mac.num_rar_entries;
+
+	for (i = 1; i < rar_entries; i++) {
+		wr32(hw, TXGBE_ETHADDRIDX, i);
+		rar_high = rd32(hw, TXGBE_ETHADDRH);
+		rar_low = rd32(hw, TXGBE_ETHADDRL);
+		if ((rar_high & TXGBE_ETHADDRH_VLD) &&
+		    (rar_high & TXGBE_ETHADDRH_ETAG) &&
+		    (TXGBE_ETHADDRL_ETAG(rar_low) ==
+		     l2_tunnel->tunnel_id)) {
+			wr32(hw, TXGBE_ETHADDRL, 0);
+			wr32(hw, TXGBE_ETHADDRH, 0);
+
+			txgbe_clear_vmdq(hw, i, BIT_MASK32);
+
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int
+txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+		       struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+	int ret = 0;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t i, rar_entries;
+	uint32_t rar_low, rar_high;
+
+	/* One entry for one tunnel. Try to remove potential existing entry. */
+	txgbe_e_tag_filter_del(dev, l2_tunnel);
+
+	rar_entries = hw->mac.num_rar_entries;
+
+	for (i = 1; i < rar_entries; i++) {
+		wr32(hw, TXGBE_ETHADDRIDX, i);
+		rar_high = rd32(hw, TXGBE_ETHADDRH);
+		if (rar_high & TXGBE_ETHADDRH_VLD) {
+			continue;
+		} else {
+			txgbe_set_vmdq(hw, i, l2_tunnel->pool);
+			rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
+			rar_low = l2_tunnel->tunnel_id;
+
+			wr32(hw, TXGBE_ETHADDRL, rar_low);
+			wr32(hw, TXGBE_ETHADDRH, rar_high);
+
+			return ret;
+		}
+	}
+
+	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
+		     " Please remove a rule before adding a new one.");
+	return -EINVAL;
+}
+
+static inline struct txgbe_l2_tn_filter *
+txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
+			  struct txgbe_l2_tn_key *key)
+{
+	int ret;
+
+	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+	if (ret < 0)
+		return NULL;
+
+	return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+			  struct txgbe_l2_tn_filter *l2_tn_filter)
+{
+	int ret;
+
+	ret = rte_hash_add_key(l2_tn_info->hash_handle,
+			       &l2_tn_filter->key);
+
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to insert L2 tunnel filter"
+			    " to hash table %d!",
+			    ret);
+		return ret;
+	}
+
+	l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+	return 0;
+}
+
+static inline int
+txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+			  struct txgbe_l2_tn_key *key)
+{
+	int ret;
+	struct txgbe_l2_tn_filter *l2_tn_filter;
+
+	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			    "No such L2 tunnel filter to delete %d!",
+			    ret);
+		return ret;
+	}
+
+	l2_tn_filter = l2_tn_info->hash_map[ret];
+	l2_tn_info->hash_map[ret] = NULL;
+
+	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+	rte_free(l2_tn_filter);
+
+	return 0;
+}
+
+/* Add l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+			       struct txgbe_l2_tunnel_conf *l2_tunnel,
+			       bool restore)
+{
+	int ret;
+	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+	struct txgbe_l2_tn_key key;
+	struct txgbe_l2_tn_filter *node;
+
+	if (!restore) {
+		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+		key.tn_id = l2_tunnel->tunnel_id;
+
+		node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+		if (node) {
+			PMD_DRV_LOG(ERR,
+				    "The L2 tunnel filter already exists!");
+			return -EINVAL;
+		}
+
+		node = rte_zmalloc("txgbe_l2_tn",
+				   sizeof(struct txgbe_l2_tn_filter),
+				   0);
+		if (!node)
+			return -ENOMEM;
+
+		rte_memcpy(&node->key,
+			   &key,
+			   sizeof(struct txgbe_l2_tn_key));
+		node->pool = l2_tunnel->pool;
+		ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
+		if (ret < 0) {
+			rte_free(node);
+			return ret;
+		}
+	}
+
+	switch (l2_tunnel->l2_tunnel_type) {
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
+		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	if (!restore && ret < 0)
+		(void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
+	return ret;
+}
+
+/* Delete l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+			       struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+	int ret;
+	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+	struct txgbe_l2_tn_key key;
+
+	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+	key.tn_id = l2_tunnel->tunnel_id;
+	ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+	if (ret < 0)
+		return ret;
+
+	switch (l2_tunnel->l2_tunnel_type) {
+	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
+		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+	int ret = 0;
+	uint32_t ctrl;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	ctrl = rd32(hw, TXGBE_POOLCTL);
+	ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
+	if (en)
+		ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
+	wr32(hw, TXGBE_POOLCTL, ctrl);
+
+	return ret;
+}
+
+/* Add UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			      struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	int ret = 0;
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+		if (udp_tunnel->udp_port == 0) {
+			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
+		break;
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+		if (udp_tunnel->udp_port == 0) {
+			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
+		break;
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
+		if (udp_tunnel->udp_port == 0) {
+			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
+		break;
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
+		if (udp_tunnel->udp_port == 0) {
+			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	txgbe_flush(hw);
+
+	return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			      struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	int ret = 0;
+	uint16_t cur_port;
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_ETH_TUNNEL_TYPE_VXLAN:
+		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
+		if (cur_port != udp_tunnel->udp_port) {
+			PMD_DRV_LOG(ERR, "Port %u does not exist.",
+				udp_tunnel->udp_port);
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_VXLANPORT, 0);
+		break;
+	case RTE_ETH_TUNNEL_TYPE_GENEVE:
+		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
+		if (cur_port != udp_tunnel->udp_port) {
+			PMD_DRV_LOG(ERR, "Port %u does not exist.",
+				udp_tunnel->udp_port);
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_GENEVEPORT, 0);
+		break;
+	case RTE_ETH_TUNNEL_TYPE_TEREDO:
+		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
+		if (cur_port != udp_tunnel->udp_port) {
+			PMD_DRV_LOG(ERR, "Port %u does not exist.",
+				udp_tunnel->udp_port);
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_TEREDOPORT, 0);
+		break;
+	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
+		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
+		if (cur_port != udp_tunnel->udp_port) {
+			PMD_DRV_LOG(ERR, "Port %u does not exist.",
+				udp_tunnel->udp_port);
+			ret = -EINVAL;
+			break;
+		}
+		wr32(hw, TXGBE_VXLANPORTGPE, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	txgbe_flush(hw);
+
+	return ret;
+}
+
+/* restore n-tuple filter */
+static inline void
+txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	struct txgbe_5tuple_filter *node;
+
+	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+		txgbe_inject_5tuple_filter(dev, node);
+	}
+}
+
+/* restore ethernet type filter */
+static inline void
+txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	int i;
+
+	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+		if (filter_info->ethertype_mask & (1 << i)) {
+			wr32(hw, TXGBE_ETFLT(i),
+			     filter_info->ethertype_filters[i].etqf);
+			wr32(hw, TXGBE_ETCLS(i),
+			     filter_info->ethertype_filters[i].etqs);
+			txgbe_flush(hw);
+		}
+	}
+}
+
+/* restore SYN filter */
+static inline void
+txgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	uint32_t synqf;
+
+	synqf = filter_info->syn_info;
+
+	if (synqf & TXGBE_SYNCLS_ENA) {
+		wr32(hw, TXGBE_SYNCLS, synqf);
+		txgbe_flush(hw);
+	}
+}
+
+/* restore L2 tunnel filter */
+static inline void
+txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+	struct txgbe_l2_tn_filter *node;
+	struct txgbe_l2_tunnel_conf l2_tn_conf;
+
+	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+		l2_tn_conf.tunnel_id = node->key.tn_id;
+		l2_tn_conf.pool = node->pool;
+		(void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+	}
+}
+
+/* restore rss filter */
+static inline void
+txgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+	if (filter_info->rss_info.conf.queue_num)
+		txgbe_config_rss_filter(dev,
+			&filter_info->rss_info, TRUE);
+}
+
+static int
+txgbe_filter_restore(struct rte_eth_dev *dev)
+{
+	txgbe_ntuple_filter_restore(dev);
+	txgbe_ethertype_filter_restore(dev);
+	txgbe_syn_filter_restore(dev);
+	txgbe_fdir_filter_restore(dev);
+	txgbe_l2_tn_filter_restore(dev);
+	txgbe_rss_filter_restore(dev);
+
+	return 0;
+}
+
+static void
+txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	if (l2_tn_info->e_tag_en)
+		(void)txgbe_e_tag_enable(hw);
+
+	if (l2_tn_info->e_tag_fwd_en)
+		(void)txgbe_e_tag_forwarding_en_dis(dev, 1);
+
+	(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	struct txgbe_5tuple_filter *p_5tuple;
+
+	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+		txgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+	int i;
+
+	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+		if (filter_info->ethertype_mask & (1 << i) &&
+		    !filter_info->ethertype_filters[i].conf) {
+			(void)txgbe_ethertype_filter_remove(filter_info,
+							    (uint8_t)i);
+			wr32(hw, TXGBE_ETFLT(i), 0);
+			wr32(hw, TXGBE_ETCLS(i), 0);
+			txgbe_flush(hw);
+		}
+	}
+}
+
+/* remove the SYN filter */
+void
+txgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+	if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
+		filter_info->syn_info = 0;
+
+		wr32(hw, TXGBE_SYNCLS, 0);
+		txgbe_flush(hw);
+	}
+}
+
+/* remove all the L2 tunnel filters */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+	struct txgbe_l2_tn_filter *l2_tn_filter;
+	struct txgbe_l2_tunnel_conf l2_tn_conf;
+	int ret = 0;
+
+	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+		l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+		l2_tn_conf.pool = l2_tn_filter->pool;
+		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.dev_configure              = txgbe_dev_configure,
 	.dev_infos_get              = txgbe_dev_info_get,
@@ -4049,6 +5309,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.reta_query                 = txgbe_dev_rss_reta_query,
 	.rss_hash_update            = txgbe_dev_rss_hash_update,
 	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
+	.flow_ops_get               = txgbe_dev_flow_ops_get,
 	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
 	.rxq_info_get               = txgbe_rxq_info_get,
 	.txq_info_get               = txgbe_txq_info_get,
@@ -4066,23 +5327,36 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.timesync_adjust_time       = txgbe_timesync_adjust_time,
 	.timesync_read_time         = txgbe_timesync_read_time,
 	.timesync_write_time        = txgbe_timesync_write_time,
+	.udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
+	.tm_ops_get                 = txgbe_tm_ops_get,
 	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
-
-RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
-RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
+RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
+			      TXGBE_DEVARG_BP_AUTO "=<0|1>"
			      TXGBE_DEVARG_KR_POLL "=<0|1>"
			      TXGBE_DEVARG_KR_PRESENT "=<0|1>"
			      TXGBE_DEVARG_KX_SGMII "=<0|1>"
			      TXGBE_DEVARG_FFE_SET "=<0-4>"
			      TXGBE_DEVARG_FFE_MAIN "="
			      TXGBE_DEVARG_FFE_PRE "="
			      TXGBE_DEVARG_FFE_POST "=");
+
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_driver, driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_bp, bp, NOTICE);
 
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
-	RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
+	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_rx, rx, DEBUG);
 #endif
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
-	RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
+	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx, tx, DEBUG);
 #endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
-	RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
+	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx_free, tx_free, DEBUG);
 #endif