diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index be00686ad7..af69c9cfc4 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -63,6 +63,7 @@ #include "i40e_rxtx.h" #include "i40e_pf.h" #include "i40e_regs.h" +#include "rte_pmd_i40e.h" #define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb" #define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list" @@ -139,60 +140,6 @@ #define I40E_DEFAULT_DCB_APP_NUM 1 #define I40E_DEFAULT_DCB_APP_PRIO 3 -#define I40E_INSET_NONE 0x00000000000000000ULL - -/* bit0 ~ bit 7 */ -#define I40E_INSET_DMAC 0x0000000000000001ULL -#define I40E_INSET_SMAC 0x0000000000000002ULL -#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL -#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL -#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL - -/* bit 8 ~ bit 15 */ -#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL -#define I40E_INSET_IPV4_DST 0x0000000000000200ULL -#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL -#define I40E_INSET_IPV6_DST 0x0000000000000800ULL -#define I40E_INSET_SRC_PORT 0x0000000000001000ULL -#define I40E_INSET_DST_PORT 0x0000000000002000ULL -#define I40E_INSET_SCTP_VT 0x0000000000004000ULL - -/* bit 16 ~ bit 31 */ -#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL -#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL -#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL -#define I40E_INSET_IPV6_TC 0x0000000000080000ULL -#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL -#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL -#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL -#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL - -/* bit 32 ~ bit 47, tunnel fields */ -#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL -#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL -#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL -#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL -#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL -#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL - -/* bit 48 ~ bit 55 */ -#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL - -/* bit 56 ~ bit 63, Flex Payload */ -#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL -#define I40E_INSET_FLEX_PAYLOAD \ - (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \ - I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \ - I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \ - I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8) - /** * Below are values for writing un-exposed registers suggested * by silicon experts @@ -296,6 +243,15
@@ /* Bit mask of Extended Tag enable/disable */ #define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT) +/* The max bandwidth of i40e is 40Gbps. */ +#define I40E_QOS_BW_MAX 40000 +/* The bandwidth should be the multiple of 50Mbps. */ +#define I40E_QOS_BW_GRANULARITY 50 +/* The min bandwidth weight is 1. */ +#define I40E_QOS_BW_WEIGHT_MIN 1 +/* The max bandwidth weight is 127. */ +#define I40E_QOS_BW_WEIGHT_MAX 127 + static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev); static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); static int i40e_dev_configure(struct rte_eth_dev *dev); @@ -404,9 +360,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel); static void i40e_filter_input_set_init(struct i40e_pf *pf); -static int i40e_ethertype_filter_set(struct i40e_pf *pf, - struct rte_eth_ethertype_filter *filter, - bool add); static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); @@ -535,6 +488,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .rx_queue_release = i40e_dev_rx_queue_release, .rx_queue_count = i40e_dev_rx_queue_count, .rx_descriptor_done = i40e_dev_rx_descriptor_done, + .rx_descriptor_status = i40e_dev_rx_descriptor_status, + .tx_descriptor_status = i40e_dev_tx_descriptor_status, .tx_queue_setup = i40e_dev_tx_queue_setup, .tx_queue_release = i40e_dev_tx_queue_release, .dev_led_on = i40e_dev_led_on, @@ -778,8 +733,8 @@ i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf) pf->main_vsi_seid, 0, TRUE, NULL, NULL); if (ret) - PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control " - " frames from VSIs."); + PMD_INIT_LOG(ERR, + "Failed to add filter to drop flow control frames from VSIs."); } static int @@ -955,6 +910,8 @@ i40e_init_ethtype_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_ETHERTYPE_FILTER_NUM, .key_len = sizeof(struct i40e_ethertype_filter_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize ethertype filter rule list and hash */ @@ -998,6 +955,8 @@ i40e_init_tunnel_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_TUNNEL_FILTER_NUM, .key_len = sizeof(struct i40e_tunnel_filter_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize tunnel filter rule list and hash */ @@ -1041,6 +1000,8 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_FDIR_FILTER_NUM, .key_len = sizeof(struct rte_eth_fdir_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize flow director filter rule list and hash */ @@ -1101,7 +1062,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) intr_handle = &pci_dev->intr_handle; rte_eth_copy_pci_info(dev, pci_dev); - dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE; + dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); pf->adapter->eth_dev = dev; @@ -1110,8 +1071,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) hw->back = I40E_PF_TO_ADAPTER(pf); hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr); if (!hw->hw_addr) { - PMD_INIT_LOG(ERR, "Hardware is not available, " - "as address is NULL"); + PMD_INIT_LOG(ERR, + "Hardware is not available, as address is NULL"); return -ENODEV; } @@ -1247,8 +1208,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* Set the global registers with default ether 
type value */ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN); if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(ERR, "Failed to set the default outer " - "VLAN ether type"); + PMD_INIT_LOG(ERR, + "Failed to set the default outer VLAN ether type"); goto err_setup_pf_switch; } @@ -1284,13 +1245,22 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* Should be after VSI initialized */ dev->data->mac_addrs = rte_zmalloc("i40e", len, 0); if (!dev->data->mac_addrs) { - PMD_INIT_LOG(ERR, "Failed to allocated memory " - "for storing mac address"); + PMD_INIT_LOG(ERR, + "Failed to allocated memory for storing mac address"); goto err_mac_alloc; } ether_addr_copy((struct ether_addr *)hw->mac.perm_addr, &dev->data->mac_addrs[0]); + /* Init dcb to sw mode by default */ + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(INFO, "Failed to init dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + /* Update HW struct after DCB configuration */ + i40e_get_cap(hw); + /* initialize pf host driver to setup SRIOV resource if applicable */ i40e_pf_host_init(dev); @@ -1319,13 +1289,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialize mirror rule list */ TAILQ_INIT(&pf->mirror_list); - /* Init dcb to sw mode by default */ - ret = i40e_dcb_init_configure(dev, TRUE); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(INFO, "Failed to init dcb."); - pf->flags &= ~I40E_FLAG_DCB; - } - ret = i40e_init_ethtype_filter_list(dev); if (ret < 0) goto err_init_ethtype_filter_list; @@ -1431,6 +1394,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) struct rte_intr_handle *intr_handle; struct i40e_hw *hw; struct i40e_filter_control_settings settings; + struct rte_flow *p_flow; int ret; uint8_t aq_fail = 0; @@ -1482,6 +1446,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) i40e_rm_tunnel_filter_list(pf); i40e_rm_fdir_filter_list(pf); + /* Remove all flows */ + while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { + TAILQ_REMOVE(&pf->flow_list, p_flow, node); + rte_free(p_flow); + } + return 0; } @@ -1546,6 +1516,8 @@ i40e_dev_configure(struct rte_eth_dev *dev) } } + TAILQ_INIT(&pf->flow_list); + return 0; err_dcb: @@ -1914,6 +1886,7 @@ i40e_dev_start(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; + struct i40e_vsi *vsi; hw->adapter_stopped = 0; @@ -1929,8 +1902,9 @@ i40e_dev_start(struct rte_eth_dev *dev) !RTE_ETH_DEV_SRIOV(dev).active) && dev->data->dev_conf.intr_conf.rxq != 0) { intr_vector = dev->data->nb_rx_queues; - if (rte_intr_efd_enable(intr_handle, intr_vector)) - return -1; + ret = rte_intr_efd_enable(intr_handle, intr_vector); + if (ret) + return ret; } if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { @@ -1939,8 +1913,9 @@ i40e_dev_start(struct rte_eth_dev *dev) dev->data->nb_rx_queues * sizeof(int), 0); if (!intr_handle->intr_vec) { - PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" - " intr_vec\n", dev->data->nb_rx_queues); + PMD_INIT_LOG(ERR, + "Failed to allocate %d rx_queues intr_vec", + dev->data->nb_rx_queues); return -ENOMEM; } } @@ -1990,6 +1965,15 @@ i40e_dev_start(struct rte_eth_dev *dev) PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); } + /* Enable the VLAN promiscuous mode. 
*/ + if (pf->vfs) { + for (i = 0; i < pf->vf_num; i++) { + vsi = pf->vfs[i].vsi; + i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid, + true, NULL); + } + } + /* Apply link configure */ if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | @@ -2013,8 +1997,8 @@ i40e_dev_start(struct rte_eth_dev *dev) i40e_pf_enable_irq0(hw); if (dev->data->dev_conf.intr_conf.lsc != 0) - PMD_INIT_LOG(INFO, "lsc won't enable because of" - " no intr multiplex\n"); + PMD_INIT_LOG(INFO, + "lsc won't enable because of no intr multiplex"); } else if (dev->data->dev_conf.intr_conf.lsc != 0) { ret = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | @@ -2117,18 +2101,17 @@ i40e_dev_close(struct rte_eth_dev *dev) /* shutdown and destroy the HMC */ i40e_shutdown_lan_hmc(hw); - /* release all the existing VSIs and VEBs */ - i40e_fdir_teardown(pf); - i40e_vsi_release(pf->main_vsi); - for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { i40e_vsi_release(pf->vmdq[i].vsi); pf->vmdq[i].vsi = NULL; } - rte_free(pf->vmdq); pf->vmdq = NULL; + /* release all the existing VSIs and VEBs */ + i40e_fdir_teardown(pf); + i40e_vsi_release(pf->main_vsi); + /* shutdown the adminq */ i40e_aq_queue_shutdown(hw, true); i40e_shutdown_adminq(hw); @@ -2266,11 +2249,11 @@ i40e_dev_link_update(struct rte_eth_dev *dev, } link.link_status = link_status.link_info & I40E_AQ_LINK_UP; - if (!wait_to_complete) + if (!wait_to_complete || link.link_status) break; rte_delay_ms(CHECK_INTERVAL); - } while (!link.link_status && rep_cnt--); + } while (--rep_cnt); if (!link.link_status) goto out; @@ -2982,7 +2965,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, else { ret = -EINVAL; PMD_DRV_LOG(ERR, - "Unsupported vlan type in single vlan.\n"); + "Unsupported vlan type in single vlan."); return ret; } break; @@ -2994,13 +2977,15 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), ®_r, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Fail to debug read from " - "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); + PMD_DRV_LOG(ERR, + "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]", + reg_id); ret = -EIO; return ret; } - PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: " - "0x%08"PRIx64"", reg_id, reg_r); + PMD_DRV_LOG(DEBUG, + "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64, + reg_id, reg_r); reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK)); reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT); @@ -3014,12 +2999,14 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, reg_w, NULL); if (ret != I40E_SUCCESS) { ret = -EIO; - PMD_DRV_LOG(ERR, "Fail to debug write to " - "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id); + PMD_DRV_LOG(ERR, + "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]", + reg_id); return ret; } - PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to " - "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id); + PMD_DRV_LOG(DEBUG, + "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]", + reg_w, reg_id); return ret; } @@ -3163,8 +3150,9 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT; if ((fc_conf->high_water > max_high_water) || (fc_conf->high_water < fc_conf->low_water)) { - PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, " - "High_water must <= %d.", max_high_water); + PMD_INIT_LOG(ERR, + "Invalid high/low water setup value in KB, High_water must be <= %d.", + max_high_water); return -EINVAL; } @@ -3336,8 +3324,8 @@ 
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) /* No VMDQ pool enabled or configured */ if (!(pf->flags & I40E_FLAG_VMDQ) || (i > pf->nb_cfg_vmdq_vsi)) { - PMD_DRV_LOG(ERR, "No VMDQ pool enabled" - "/configured"); + PMD_DRV_LOG(ERR, + "No VMDQ pool enabled/configured"); return; } vsi = pf->vmdq[i - 1].vsi; @@ -3538,9 +3526,9 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev, if (reta_size != lut_size || reta_size > ETH_RSS_RETA_SIZE_512) { - PMD_DRV_LOG(ERR, "The size of hash lookup table configured " - "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, lut_size); + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", + reta_size, lut_size); return -EINVAL; } @@ -3579,9 +3567,9 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev, if (reta_size != lut_size || reta_size > ETH_RSS_RETA_SIZE_512) { - PMD_DRV_LOG(ERR, "The size of hash lookup table configured " - "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, lut_size); + PMD_DRV_LOG(ERR, + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", + reta_size, lut_size); return -EINVAL; } @@ -3636,8 +3624,9 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, mem->va = mz->addr; mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); mem->zone = (const void *)mz; - PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: " - "%"PRIu64, mz->name, mem->pa); + PMD_DRV_LOG(DEBUG, + "memzone %s allocated with physical address: %"PRIu64, + mz->name, mem->pa); return I40E_SUCCESS; } @@ -3654,9 +3643,9 @@ i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, if (!mem) return I40E_ERR_PARAM; - PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: " - "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name, - mem->pa); + PMD_DRV_LOG(DEBUG, + "memzone %s to be freed with physical address: %"PRIu64, + ((const struct rte_memzone *)mem->zone)->name, mem->pa); rte_memzone_free((const struct rte_memzone *)mem->zone); mem->zone = NULL; mem->va = NULL; @@ -3815,9 +3804,9 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->flags |= I40E_FLAG_SRIOV; pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; pf->vf_num = pci_dev->max_vfs; - PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, " - "in total %u queues", pf->vf_num, pf->vf_nb_qps, - pf->vf_nb_qps * pf->vf_num); + PMD_DRV_LOG(DEBUG, + "%u VF VSIs, %u queues per VF VSI, in total %u queues", + pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num); } else { pf->vf_nb_qps = 0; pf->vf_num = 0; @@ -3845,14 +3834,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) if (pf->max_nb_vmdq_vsi) { pf->flags |= I40E_FLAG_VMDQ; pf->vmdq_nb_qps = pf->vmdq_nb_qp_max; - PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues " - "per VMDQ VSI, in total %u queues", - pf->max_nb_vmdq_vsi, - pf->vmdq_nb_qps, pf->vmdq_nb_qps * - pf->max_nb_vmdq_vsi); + PMD_DRV_LOG(DEBUG, + "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues", + pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps, + pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi); } else { - PMD_DRV_LOG(INFO, "No enough queues left for " - "VMDq"); + PMD_DRV_LOG(INFO, + "No enough queues left for VMDq"); } } else { PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq"); @@ -3865,15 +3853,15 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->flags |= I40E_FLAG_DCB; if (qp_count > hw->func_caps.num_tx_qp) { - PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds " - 
"the hardware maximum %u", qp_count, - hw->func_caps.num_tx_qp); + PMD_DRV_LOG(ERR, + "Failed to allocate %u queues, which exceeds the hardware maximum %u", + qp_count, hw->func_caps.num_tx_qp); return -EINVAL; } if (vsi_count > hw->func_caps.num_vsis) { - PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds " - "the hardware maximum %u", vsi_count, - hw->func_caps.num_vsis); + PMD_DRV_LOG(ERR, + "Failed to allocate %u VSIs, which exceeds the hardware maximum %u", + vsi_count, hw->func_caps.num_vsis); return -EINVAL; } @@ -4119,8 +4107,8 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool, */ entry = rte_zmalloc("res_pool", sizeof(*entry), 0); if (entry == NULL) { - PMD_DRV_LOG(ERR, "Failed to allocate memory for " - "resource pool"); + PMD_DRV_LOG(ERR, + "Failed to allocate memory for resource pool"); return -ENOMEM; } entry->base = valid_entry->base; @@ -4160,9 +4148,9 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap) } if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) { - PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to " - "HW support 0x%x", hw->func_caps.enabled_tcmap, - enabled_tcmap); + PMD_DRV_LOG(ERR, + "Enabled TC map 0x%x not applicable to HW support 0x%x", + hw->func_caps.enabled_tcmap, enabled_tcmap); return I40E_NOT_SUPPORTED; } return I40E_SUCCESS; @@ -4380,6 +4368,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) hw->aq.asq_last_status); goto fail; } + veb->enabled_tc = I40E_DEFAULT_TCMAP; /* get statistics index */ ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL, @@ -4414,6 +4403,9 @@ i40e_vsi_release(struct i40e_vsi *vsi) if (!vsi) return I40E_SUCCESS; + if (!vsi->adapter) + return -EFAULT; + user_param = vsi->user_param; pf = I40E_VSI_TO_PF(vsi); @@ -4504,8 +4496,8 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) struct i40e_mac_filter *f; struct ether_addr *mac; - PMD_DRV_LOG(WARNING, "Cannot remove the default " - "macvlan filter"); + PMD_DRV_LOG(WARNING, + "Cannot remove the default macvlan filter"); /* It needs to add the permanent mac into mac list */ f = rte_zmalloc("macv_filter", sizeof(*f), 0); if (f == NULL) { @@ -4555,8 +4547,9 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi) ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &ets_sla_config, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith " - "configuration %u", hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "VSI failed to get TC bandwdith configuration %u", + hw->aq.asq_last_status); return ret; } @@ -4623,7 +4616,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) - PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n", + PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d", hw->aq.asq_last_status); } @@ -4644,14 +4637,14 @@ i40e_vsi_setup(struct i40e_pf *pf, if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV && uplink_vsi == NULL) { - PMD_DRV_LOG(ERR, "VSI setup failed, " - "VSI link shouldn't be NULL"); + PMD_DRV_LOG(ERR, + "VSI setup failed, VSI link shouldn't be NULL"); return NULL; } if (type == I40E_VSI_MAIN && uplink_vsi != NULL) { - PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI " - "uplink VSI should be NULL"); + PMD_DRV_LOG(ERR, + "VSI setup failed, MAIN VSI uplink VSI should be NULL"); return NULL; } @@ -4695,6 +4688,8 @@ i40e_vsi_setup(struct i40e_pf *pf, vsi->max_macaddrs = I40E_NUM_MACADDR_MAX; vsi->parent_vsi = uplink_vsi ? 
uplink_vsi : pf->main_vsi; vsi->user_param = user_param; + vsi->vlan_anti_spoof_on = 0; + vsi->vlan_filter_on = 0; /* Allocate queues */ switch (vsi->type) { case I40E_VSI_MAIN : @@ -4802,8 +4797,8 @@ i40e_vsi_setup(struct i40e_pf *pf, ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping"); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); goto fail_msix_alloc; } ctxt.seid = vsi->seid; @@ -4871,13 +4866,14 @@ i40e_vsi_setup(struct i40e_pf *pf, rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, - I40E_DEFAULT_TCMAP); + hw->func_caps.enabled_tcmap); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping"); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); goto fail_msix_alloc; } - ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + + ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap; ctxt.info.valid_sections |= rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); /** @@ -4916,8 +4912,8 @@ i40e_vsi_setup(struct i40e_pf *pf, ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping"); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping"); goto fail_msix_alloc; } ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; @@ -4934,8 +4930,8 @@ i40e_vsi_setup(struct i40e_pf *pf, ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, I40E_DEFAULT_TCMAP); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to configure " - "TC queue mapping."); + PMD_DRV_LOG(ERR, + "Failed to configure TC queue mapping."); goto fail_msix_alloc; } ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; @@ -5198,8 +5194,9 @@ i40e_pf_setup(struct i40e_pf *pf) /* make queue allocated first, let FDIR use queue pair 0*/ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR); if (ret != I40E_FDIR_QUEUE_ID) { - PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :" - " ret =%d", ret); + PMD_DRV_LOG(ERR, + "queue allocation fails for FDIR: ret =%d", + ret); pf->flags &= ~I40E_FLAG_FDIR; } } @@ -5218,12 +5215,12 @@ i40e_pf_setup(struct i40e_pf *pf) else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512) settings.hash_lut_size = I40E_HASH_LUT_SIZE_512; else { - PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n", - hw->func_caps.rss_table_size); + PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported", + hw->func_caps.rss_table_size); return I40E_ERR_PARAM; } - PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table " - "size: %u\n", hw->func_caps.rss_table_size); + PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u", + hw->func_caps.rss_table_size); pf->hash_lut_size = hw->func_caps.rss_table_size; /* Enable ethtype and macvlan filters */ @@ -5473,8 +5470,8 @@ i40e_dev_rx_init(struct i40e_pf *pf) ret = i40e_rx_queue_init(rxq); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to do RX queue " - "initialization"); + PMD_DRV_LOG(ERR, + "Failed to do RX queue initialization"); break; } } @@ -5758,8 +5755,9 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) ret = i40e_clean_arq_element(hw, &info, &pending); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, " - "aq_err: %u", hw->aq.asq_last_status); + PMD_DRV_LOG(INFO, + "Failed to read msg from AdminQ, aq_err: %u", + 
hw->aq.asq_last_status); break; } opcode = rte_le_to_cpu_16(info.desc.opcode); @@ -5904,7 +5902,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH; break; default: - PMD_DRV_LOG(ERR, "Invalid MAC match type\n"); + PMD_DRV_LOG(ERR, "Invalid MAC match type"); ret = I40E_ERR_PARAM; goto DONE; } @@ -5979,7 +5977,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH; break; default: - PMD_DRV_LOG(ERR, "Invalid MAC filter type\n"); + PMD_DRV_LOG(ERR, "Invalid MAC filter type"); ret = I40E_ERR_PARAM; goto DONE; } @@ -6034,14 +6032,11 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi, } static void -i40e_set_vlan_filter(struct i40e_vsi *vsi, - uint16_t vlan_id, bool on) +i40e_store_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id, bool on) { uint32_t vid_idx, vid_bit; - if (vlan_id > ETH_VLAN_ID_MAX) - return; - vid_idx = I40E_VFTA_IDX(vlan_id); vid_bit = I40E_VFTA_BIT(vlan_id); @@ -6051,6 +6046,38 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, vsi->vfta[vid_idx] &= ~vid_bit; } +static void +i40e_set_vlan_filter(struct i40e_vsi *vsi, + uint16_t vlan_id, bool on) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0}; + int ret; + + if (vlan_id > ETH_VLAN_ID_MAX) + return; + + i40e_store_vlan_filter(vsi, vlan_id, on); + + if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id) + return; + + vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id); + + if (on) { + ret = i40e_aq_add_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to add vlan filter"); + } else { + ret = i40e_aq_remove_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, + "Failed to remove vlan filter"); + } +} + /** * Find all vlan options for specific mac addr, * return with actual vlan found. 
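A note on the software bookkeeping introduced above: i40e_store_vlan_filter() keeps a per-VSI bitmap (vsi->vfta) with one bit per VLAN ID, and the reworked i40e_set_vlan_filter() only programs the admin queue (i40e_aq_add_vlan()/i40e_aq_remove_vlan()) when VLAN anti-spoofing or VLAN filtering is enabled and the VLAN ID is non-zero. A minimal sketch of the bitmap arithmetic, assuming the usual 32-bit-word layout behind I40E_VFTA_IDX()/I40E_VFTA_BIT(); vfta_test() is an illustrative helper, not a driver function:

#include <stdint.h>

/* Return 1 if vlan_id is recorded in the software VFTA, else 0. */
static inline int
vfta_test(const uint32_t *vfta, uint16_t vlan_id)
{
	/* word index: vlan_id / 32; bit within the word: vlan_id % 32 */
	return (vfta[vlan_id >> 5] >> (vlan_id & 0x1F)) & 1;
}

This mirrors the vid_idx/vid_bit pair computed in i40e_store_vlan_filter(), and i40e_add_rm_all_vlan_filter() later in the patch walks the same bitmap to replay all recorded VLANs into hardware.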
@@ -6076,8 +6103,8 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) { if (vsi->vfta[j] & (1 << k)) { if (i > num - 1) { - PMD_DRV_LOG(ERR, "vlan number " - "not match"); + PMD_DRV_LOG(ERR, + "vlan number doesn't match"); return I40E_ERR_PARAM; } (void)rte_memcpy(&mv_f[i].macaddr, @@ -6122,7 +6149,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) { - int i, num; + int i, j, num; struct i40e_mac_filter *f; struct i40e_macvlan_filter *mv_f; int ret = I40E_SUCCESS; @@ -6147,6 +6174,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) TAILQ_FOREACH(f, &vsi->mac_list, next) { (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); + mv_f[i].filter_type = f->mac_info.filter_type; mv_f[i].vlan_id = 0; i++; } @@ -6156,6 +6184,8 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) vsi->vlan_num, &f->mac_info.mac_addr); if (ret != I40E_SUCCESS) goto DONE; + for (j = i; j < i + vsi->vlan_num; j++) + mv_f[j].filter_type = f->mac_info.filter_type; i += vsi->vlan_num; } } @@ -6367,7 +6397,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) if (filter_type == RTE_MACVLAN_PERFECT_MATCH || filter_type == RTE_MACVLAN_HASH_MATCH) { if (vlan_num == 0) { - PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n"); + PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); return I40E_ERR_PARAM; } } else if (filter_type == RTE_MAC_PERFECT_MATCH || @@ -6550,8 +6580,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw); if (ret) - PMD_INIT_LOG(ERR, "Failed to configure RSS key " - "via AQ"); + PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ"); } else { uint32_t *hash_key = (uint32_t *)key; uint16_t i; @@ -6708,6 +6737,12 @@ i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac, (struct ether_addr *)&tunnel_filter->input.inner_mac); tunnel_filter->input.inner_vlan = cld_filter->inner_vlan; + if ((rte_le_to_cpu_16(cld_filter->flags) & + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) == + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6; + else + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4; tunnel_filter->input.flags = cld_filter->flags; tunnel_filter->input.tenant_id = cld_filter->tenant_id; tunnel_filter->queue = cld_filter->queue_number; @@ -6776,7 +6811,7 @@ i40e_sw_tunnel_filter_del(struct i40e_pf *pf, return 0; } -static int +int i40e_dev_tunnel_filter_set(struct i40e_pf *pf, struct rte_eth_tunnel_filter_conf *tunnel_filter, uint8_t add) @@ -6924,8 +6959,9 @@ i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port) /* Now check if there is space to add the new port */ idx = i40e_get_vxlan_port_idx(pf, 0); if (idx < 0) { - PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached," - "not adding port %d", port); + PMD_DRV_LOG(ERR, + "Maximum number of UDP ports reached, not adding port %d", + port); return -ENOSPC; } @@ -7164,7 +7200,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) int ret = -EINVAL; val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)); - PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val); + PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val); if (len == 3) { reg = val | I40E_GL_PRS_FVBM_MSK_ENA; @@ -7183,7 +7219,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) } else { ret = 0; } - PMD_DRV_LOG(DEBUG, "Read 
modified GL_PRS_FVBM with 0x%08x\n", + PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x", I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2))); return ret; @@ -7296,15 +7332,15 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) if (enable > 0) { if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { - PMD_DRV_LOG(INFO, "Symmetric hash has already " - "been enabled"); + PMD_DRV_LOG(INFO, + "Symmetric hash has already been enabled"); return; } reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK; } else { if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) { - PMD_DRV_LOG(INFO, "Symmetric hash has already " - "been disabled"); + PMD_DRV_LOG(INFO, + "Symmetric hash has already been disabled"); return; } reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; @@ -7428,16 +7464,16 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) { /* Toeplitz */ if (reg & I40E_GLQF_CTL_HTOEP_MASK) { - PMD_DRV_LOG(DEBUG, "Hash function already set to " - "Toeplitz"); + PMD_DRV_LOG(DEBUG, + "Hash function already set to Toeplitz"); goto out; } reg |= I40E_GLQF_CTL_HTOEP_MASK; } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) { /* Simple XOR */ if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) { - PMD_DRV_LOG(DEBUG, "Hash function already set to " - "Simple XOR"); + PMD_DRV_LOG(DEBUG, + "Hash function already set to Simple XOR"); goto out; } reg &= ~I40E_GLQF_CTL_HTOEP_MASK; @@ -7728,7 +7764,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype, } /* default input set fields combination per pctype */ -static uint64_t +uint64_t i40e_get_default_input_set(uint16_t pctype) { static const uint64_t default_inset_table[] = { @@ -8027,10 +8063,10 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { uint32_t reg = i40e_read_rx_ctl(hw, addr); - PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg); + PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg); if (reg != val) i40e_write_rx_ctl(hw, addr, val); - PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr, + PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr, (uint32_t)i40e_read_rx_ctl(hw, addr)); } @@ -8421,7 +8457,7 @@ i40e_sw_ethertype_filter_del(struct i40e_pf *pf, * Configure ethertype filter, which can director packet by filtering * with mac address and ether_type or only ether_type */ -static int +int i40e_ethertype_filter_set(struct i40e_pf *pf, struct rte_eth_ethertype_filter *filter, bool add) @@ -8440,13 +8476,14 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, } if (filter->ether_type == ETHER_TYPE_IPv4 || filter->ether_type == ETHER_TYPE_IPv6) { - PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" - " control packet filter.", filter->ether_type); + PMD_DRV_LOG(ERR, + "unsupported ether_type(0x%04x) in control packet filter.", + filter->ether_type); return -EINVAL; } if (filter->ether_type == ETHER_TYPE_VLAN) - PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is" - " not supported."); + PMD_DRV_LOG(WARNING, + "filter vlan ether_type in first tag is not supported."); /* Check if there is the filter in SW list */ memset(&check_filter, 0, sizeof(check_filter)); @@ -8476,11 +8513,10 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, pf->main_vsi->seid, filter->queue, add, &stats, NULL); - PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d," - " mac_etype_used = %u, etype_used = %u," - " mac_etype_free = %u, etype_free = %u\n", - ret, stats.mac_etype_used, stats.etype_used, - stats.mac_etype_free, stats.etype_free); + PMD_DRV_LOG(INFO, + "add/rem control packet 
filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u", + ret, stats.mac_etype_used, stats.etype_used, + stats.mac_etype_free, stats.etype_free); if (ret < 0) return -ENOSYS; @@ -8530,7 +8566,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev, FALSE); break; default: - PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); ret = -ENOSYS; break; } @@ -8734,6 +8770,10 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08 +/* For X722 */ +#define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200 +#define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200 + /* For X710 */ #define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303 /* For XL710 */ @@ -8756,7 +8796,6 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw) return 0; } - static void i40e_configure_registers(struct i40e_hw *hw) { @@ -8764,8 +8803,8 @@ i40e_configure_registers(struct i40e_hw *hw) uint32_t addr; uint64_t val; } reg_table[] = { - {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE}, - {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE}, + {I40E_GL_SWR_PRI_JOIN_MAP_0, 0}, + {I40E_GL_SWR_PRI_JOIN_MAP_2, 0}, {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */ }; uint64_t reg; @@ -8773,6 +8812,24 @@ i40e_configure_registers(struct i40e_hw *hw) int ret; for (i = 0; i < RTE_DIM(reg_table); i++) { + if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) { + if (hw->mac.type == I40E_MAC_X722) /* For X722 */ + reg_table[i].val = + I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE; + else /* For X710/XL710/XXV710 */ + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE; + } + + if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) { + if (hw->mac.type == I40E_MAC_X722) /* For X722 */ + reg_table[i].val = + I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE; + else /* For X710/XL710/XXV710 */ + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE; + } + if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) { if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */ I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */ @@ -8798,9 +8855,9 @@ i40e_configure_registers(struct i40e_hw *hw) ret = i40e_aq_debug_write_register(hw, reg_table[i].addr, reg_table[i].val, NULL); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the " - "address of 0x%"PRIx32, reg_table[i].val, - reg_table[i].addr); + PMD_DRV_LOG(ERR, + "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32, + reg_table[i].val, reg_table[i].addr); break; } PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of " @@ -8845,8 +8902,9 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi) I40E_VSI_L2TAGSTXVALID( vsi->vsi_id), reg, NULL); if (ret < 0) { - PMD_DRV_LOG(ERR, "Failed to update " - "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id); + PMD_DRV_LOG(ERR, + "Failed to update VSI_L2TAGSTXVALID[%d]", + vsi->vsi_id); return I40E_ERR_CONFIG; } } @@ -8897,11 +8955,10 @@ i40e_aq_add_mirror_rule(struct i40e_hw *hw, rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd)); status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL); - PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d," - "rule_id = %u" - " mirror_rules_used = %u, mirror_rules_free = %u,", - hw->aq.asq_last_status, resp->rule_id, - resp->mirror_rules_used, resp->mirror_rules_free); + PMD_DRV_LOG(INFO, + "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free 
= %u,", + hw->aq.asq_last_status, resp->rule_id, + resp->mirror_rules_used, resp->mirror_rules_free); *rule_id = rte_le_to_cpu_16(resp->rule_id); return status; @@ -8979,8 +9036,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev, PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id); if (pf->main_vsi->veb == NULL || pf->vfs == NULL) { - PMD_DRV_LOG(ERR, "mirror rule can not be configured" - " without veb or vfs."); + PMD_DRV_LOG(ERR, + "mirror rule can not be configured without veb or vfs."); return -ENOSYS; } if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) { @@ -9012,9 +9069,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev, mirr_rule->entries, mirr_rule->num_entries, mirr_rule->id); if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to remove mirror rule:" - " ret = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "failed to remove mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOSYS; } TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); @@ -9103,9 +9160,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev, mirr_rule->rule_type, mirr_rule->entries, j, &rule_id); if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to add mirror rule:" - " ret = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "failed to add mirror rule: ret = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); rte_free(mirr_rule); return -ENOSYS; } @@ -9157,9 +9214,9 @@ i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id) mirr_rule->entries, mirr_rule->num_entries, mirr_rule->id); if (ret < 0) { - PMD_DRV_LOG(ERR, "failed to remove mirror rule:" - " status = %d, aq_err = %d.", - ret, hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "failed to remove mirror rule: status = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOSYS; } TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); @@ -9591,9 +9648,9 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid, &veb_bw, NULL); if (ret) { - PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation" - " per TC failed = %d", - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "AQ command Config switch_comp BW allocation per TC failed = %d", + hw->aq.asq_last_status); return ret; } @@ -9601,16 +9658,18 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, &ets_query, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS" - " configuration %u", hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "Failed to get switch_comp ETS configuration %u", + hw->aq.asq_last_status); return ret; } memset(&bw_query, 0, sizeof(bw_query)); ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, &bw_query, NULL); if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth" - " configuration %u", hw->aq.asq_last_status); + PMD_DRV_LOG(ERR, + "Failed to get switch_comp bandwidth configuration %u", + hw->aq.asq_last_status); return ret; } @@ -9675,8 +9734,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) } ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL); if (ret) { - PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation" - " per TC failed = %d", + PMD_INIT_LOG(ERR, + "AQ command Config VSI BW allocation per TC failed = %d", hw->aq.asq_last_status); goto out; } @@ -9697,9 +9756,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) /* Update the VSI after updating the VSI queue-mapping information */ ret = 
i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { - PMD_INIT_LOG(ERR, "Failed to configure " - "TC queue mapping = %d", - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d", + hw->aq.asq_last_status); goto out; } /* update the local VSI info with updated queue map */ @@ -9751,8 +9809,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, /* Use the FW API if FW > v4.4*/ if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) || (hw->aq.fw_maj_ver >= 5))) { - PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API" - " to configure DCB"); + PMD_INIT_LOG(ERR, + "FW < v4.4, can not use FW LLDP API to configure DCB"); return I40E_ERR_FIRMWARE_API_VERSION; } @@ -9767,8 +9825,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, old_cfg->etsrec = old_cfg->etscfg; ret = i40e_set_dcb_config(hw); if (ret) { - PMD_INIT_LOG(ERR, - "Set DCB Config failed, err %s aq_err %s\n", + PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; @@ -9800,7 +9857,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map); if (ret) PMD_INIT_LOG(WARNING, - "Failed configuring TC for VEB seid=%d\n", + "Failed configuring TC for VEB seid=%d", main_vsi->veb->seid); } /* Update each VSI */ @@ -9818,8 +9875,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, I40E_DEFAULT_TCMAP); if (ret) PMD_INIT_LOG(WARNING, - "Failed configuring TC for VSI seid=%d\n", - vsi_list->vsi->seid); + "Failed configuring TC for VSI seid=%d", + vsi_list->vsi->seid); /* continue */ } } @@ -9838,7 +9895,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret = 0; + int i, ret = 0; if ((pf->flags & I40E_FLAG_DCB) == 0) { PMD_INIT_LOG(ERR, "HW doesn't support DCB"); @@ -9865,11 +9922,16 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) hw->local_dcbx_config.etscfg.tcbwtable[0] = 100; hw->local_dcbx_config.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; + /* all UPs mapping to TC0 */ + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + hw->local_dcbx_config.etscfg.prioritytable[i] = 0; hw->local_dcbx_config.etsrec = hw->local_dcbx_config.etscfg; hw->local_dcbx_config.pfc.willing = 0; hw->local_dcbx_config.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + hw->local_dcbx_config.pfc.pfcenable = + I40E_DEFAULT_TCMAP; /* FW needs one App to configure HW */ hw->local_dcbx_config.numapps = 1; hw->local_dcbx_config.app[0].selector = @@ -9879,15 +9941,15 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) I40E_APP_PROTOID_FCOE; ret = i40e_set_dcb_config(hw); if (ret) { - PMD_INIT_LOG(ERR, "default dcb config fails." - " err = %d, aq_err = %d.", ret, - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "default dcb config fails. 
err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOSYS; } } else { - PMD_INIT_LOG(ERR, "DCB initialization in FW fails," - " err = %d, aq_err = %d.", ret, - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "DCB initialization in FW fails, err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOTSUP; } } else { @@ -9898,14 +9960,14 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) ret = i40e_init_dcb(hw); if (!ret) { if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { - PMD_INIT_LOG(ERR, "HW doesn't support" - " DCBX offload."); + PMD_INIT_LOG(ERR, + "HW doesn't support DCBX offload."); return -ENOTSUP; } } else { - PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d," - " aq_err = %d.", ret, - hw->aq.asq_last_status); + PMD_INIT_LOG(ERR, + "DCBX configuration failed, err = %d, aq_err = %d.", + ret, hw->aq.asq_last_status); return -ENOTSUP; } } @@ -10181,8 +10243,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* mtu setting is forbidden if port is start */ if (dev_data->dev_started) { - PMD_DRV_LOG(ERR, - "port %d must be stopped before configuration\n", + PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", dev_data->port_id); return -EBUSY; } @@ -10225,7 +10286,7 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf) } PMD_DRV_LOG(INFO, "Ethertype filter:" " mac_etype_used = %u, etype_used = %u," - " mac_etype_free = %u, etype_free = %u\n", + " mac_etype_free = %u, etype_free = %u", stats.mac_etype_used, stats.etype_used, stats.mac_etype_free, stats.etype_free); } @@ -10256,3 +10317,1396 @@ i40e_filter_restore(struct i40e_pf *pf) i40e_tunnel_filter_restore(pf); i40e_fdir_filter_restore(pf); } + +static bool +is_device_supported(struct rte_eth_dev *dev, struct eth_driver *drv) +{ + if (strcmp(dev->driver->pci_drv.driver.name, + drv->pci_drv.driver.name)) + return false; + + return true; +} + +int +rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_i40e_pmd)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + i40e_notify_vf_link_status(dev, &pf->vfs[vf]); + + return 0; +} + +int +rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + struct i40e_vsi_context ctxt; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_i40e_pmd)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + /* Check if it has been already on or off */ + if (vsi->info.valid_sections & + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) { + if (on) { + if ((vsi->info.sec_flags & + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) + return 0; /* already on */ + } else { + if ((vsi->info.sec_flags & + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0) + return 0; /* already off */ + } + } + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); + if (on) + vsi->info.sec_flags |= 
I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + else + vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + + memset(&ctxt, 0, sizeof(ctxt)); + (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + hw = I40E_VSI_TO_HW(vsi); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + } + + return ret; +} + +static int +i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add) +{ + uint32_t j, k; + uint16_t vlan_id; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0}; + int ret; + + for (j = 0; j < I40E_VFTA_SIZE; j++) { + if (!vsi->vfta[j]) + continue; + + for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) { + if (!(vsi->vfta[j] & (1 << k))) + continue; + + vlan_id = j * I40E_UINT32_BIT_SIZE + k; + if (!vlan_id) + continue; + + vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id); + if (add) + ret = i40e_aq_add_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + else + ret = i40e_aq_remove_vlan(hw, vsi->seid, + &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to add/rm vlan filter"); + return ret; + } + } + } + + return I40E_SUCCESS; +} + +int +rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + struct i40e_vsi_context ctxt; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_i40e_pmd)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + /* Check if it has been already on or off */ + if (vsi->vlan_anti_spoof_on == on) + return 0; /* already on or off */ + + vsi->vlan_anti_spoof_on = on; + if (!vsi->vlan_filter_on) { + ret = i40e_add_rm_all_vlan_filter(vsi, on); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters."); + return -ENOTSUP; + } + } + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); + if (on) + vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK; + else + vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK; + + memset(&ctxt, 0, sizeof(ctxt)); + (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + hw = I40E_VSI_TO_HW(vsi); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + } + + return ret; +} + +static int +i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int i, vlan_num; + enum rte_mac_filter_type filter_type; + int ret = I40E_SUCCESS; + void *temp; + + /* remove all the MACs */ + TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) { + vlan_num = vsi->vlan_num; + filter_type = f->mac_info.filter_type; + if (filter_type == RTE_MACVLAN_PERFECT_MATCH || + filter_type == RTE_MACVLAN_HASH_MATCH) { + if (vlan_num == 0) { + PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); + return I40E_ERR_PARAM; + } + } else if (filter_type == RTE_MAC_PERFECT_MATCH || + filter_type == RTE_MAC_HASH_MATCH) + vlan_num = 1; + + mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); + if (!mv_f) { + 
PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + for (i = 0; i < vlan_num; i++) { + mv_f[i].filter_type = filter_type; + (void)rte_memcpy(&mv_f[i].macaddr, + &f->mac_info.mac_addr, + ETH_ADDR_LEN); + } + if (filter_type == RTE_MACVLAN_PERFECT_MATCH || + filter_type == RTE_MACVLAN_HASH_MATCH) { + ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, + &f->mac_info.mac_addr); + if (ret != I40E_SUCCESS) { + rte_free(mv_f); + return ret; + } + } + + ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num); + if (ret != I40E_SUCCESS) { + rte_free(mv_f); + return ret; + } + + rte_free(mv_f); + ret = I40E_SUCCESS; + } + + return ret; +} + +static int +i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + struct i40e_macvlan_filter *mv_f; + int i, vlan_num = 0; + int ret = I40E_SUCCESS; + void *temp; + + /* restore all the MACs */ + TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) { + if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) || + (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) { + /** + * If vlan_num is 0, that's the first time to add mac, + * set mask for vlan_id 0. + */ + if (vsi->vlan_num == 0) { + i40e_set_vlan_filter(vsi, 0, 1); + vsi->vlan_num = 1; + } + vlan_num = vsi->vlan_num; + } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) || + (f->mac_info.filter_type == RTE_MAC_HASH_MATCH)) + vlan_num = 1; + + mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0); + if (!mv_f) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return I40E_ERR_NO_MEMORY; + } + + for (i = 0; i < vlan_num; i++) { + mv_f[i].filter_type = f->mac_info.filter_type; + (void)rte_memcpy(&mv_f[i].macaddr, + &f->mac_info.mac_addr, + ETH_ADDR_LEN); + } + + if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH || + f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) { + ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, + &f->mac_info.mac_addr); + if (ret != I40E_SUCCESS) { + rte_free(mv_f); + return ret; + } + } + + ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num); + if (ret != I40E_SUCCESS) { + rte_free(mv_f); + return ret; + } + + rte_free(mv_f); + ret = I40E_SUCCESS; + } + + return ret; +} + +static int +i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on) +{ + struct i40e_vsi_context ctxt; + struct i40e_hw *hw; + int ret; + + if (!vsi) + return -EINVAL; + + hw = I40E_VSI_TO_HW(vsi); + + /* Use the FW API if FW >= v5.0 */ + if (hw->aq.fw_maj_ver < 5) { + PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback"); + return -ENOTSUP; + } + + /* Check if it has been already on or off */ + if (vsi->info.valid_sections & + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) { + if (on) { + if ((vsi->info.switch_id & + I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == + I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) + return 0; /* already on */ + } else { + if ((vsi->info.switch_id & + I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0) + return 0; /* already off */ + } + } + + /* remove all the MAC and VLAN first */ + ret = i40e_vsi_rm_mac_filter(vsi); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to remove MAC filters."); + return ret; + } + if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) { + ret = i40e_add_rm_all_vlan_filter(vsi, 0); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to remove VLAN filters."); + return ret; + } + } + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + if (on) + vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB; + else + vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB; + + memset(&ctxt, 0, 
sizeof(ctxt)); + (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.seid = vsi->seid; + + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to update VSI params"); + return ret; + } + + /* add all the MAC and VLAN back */ + ret = i40e_vsi_restore_mac_filter(vsi); + if (ret) + return ret; + if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) { + ret = i40e_add_rm_all_vlan_filter(vsi, 1); + if (ret) + return ret; + } + + return ret; +} + +int +rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + uint16_t vf_id; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_i40e_pmd)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + /* setup PF TX loopback */ + vsi = pf->main_vsi; + ret = i40e_vsi_set_tx_loopback(vsi, on); + if (ret) + return -ENOTSUP; + + /* setup TX loopback for all the VFs */ + if (!pf->vfs) { + /* if no VF, do nothing. */ + return 0; + } + + for (vf_id = 0; vf_id < pf->vf_num; vf_id++) { + vf = &pf->vfs[vf_id]; + vsi = vf->vsi; + + ret = i40e_vsi_set_tx_loopback(vsi, on); + if (ret) + return -ENOTSUP; + } + + return ret; +} + +int +rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_i40e_pmd)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + hw = I40E_VSI_TO_HW(vsi); + + ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + on, NULL, true); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode"); + } + + return ret; +} + +int +rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_i40e_pmd)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid argument."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + hw = I40E_VSI_TO_HW(vsi); + + ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + on, NULL); + if (ret != I40E_SUCCESS) { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode"); + } + + return ret; +} + +int +rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, + struct ether_addr *mac_addr) +{ + struct i40e_mac_filter *f; + struct rte_eth_dev *dev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; + void *temp; + + if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS) + return -EINVAL; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_i40e_pmd)) + return -ENOTSUP; + + pf = 
+/* Set vlan strip on/off for specific VF from host */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid argument.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+
+	if (!vsi)
+		return -EINVAL;
+
+	ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
+	}
+
+	return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+				    uint16_t vlan_id)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	struct i40e_vsi *vsi;
+	struct i40e_vsi_context ctxt;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (vlan_id > ETHER_MAX_VLAN_ID) {
+		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_PF_TO_HW(pf);
+
+	/**
+	 * return -ENODEV if SRIOV not enabled, VF number not configured
+	 * or no queue assigned.
+	 */
+	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+	    pf->vf_nb_qps == 0)
+		return -ENODEV;
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	vsi->info.pvid = vlan_id;
+	if (vlan_id > 0)
+		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+	else
+		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ctxt.seid = vsi->seid;
+
+	hw = I40E_VSI_TO_HW(vsi);
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to update VSI params");
+	}
+
+	return ret;
+}
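The two calls above are typically paired to give a VF a transparent port
VLAN. A sketch under that assumption (port, VF id and helper name are
illustrative, not part of the patch):

	#include <rte_pmd_i40e.h>

	/* Hypothetical PVID setup: VF 1 sends and receives on VLAN 100
	 * without ever seeing the tag itself. */
	static int
	setup_vf1_pvid(uint8_t port)
	{
		/* strip the VLAN tag on VF 1's receive queues */
		int ret = rte_pmd_i40e_set_vf_vlan_stripq(port, 1, 1);

		if (ret)
			return ret;
		/* insert PVID 100 on transmit; vlan_id 0 disables insertion */
		return rte_pmd_i40e_set_vf_vlan_insert(port, 1, 100);
	}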
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+				  uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	struct i40e_mac_filter_info filter;
+	struct ether_addr broadcast = {
+		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (on > 1) {
+		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_PF_TO_HW(pf);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	/**
+	 * return -ENODEV if SRIOV not enabled, VF number not configured
+	 * or no queue assigned.
+	 */
+	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+	    pf->vf_nb_qps == 0) {
+		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+		return -ENODEV;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	if (on) {
+		(void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+		filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+		ret = i40e_vsi_add_mac(vsi, &filter);
+	} else {
+		ret = i40e_vsi_delete_mac(vsi, &broadcast);
+	}
+
+	if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	struct i40e_vsi *vsi;
+	struct i40e_vsi_context ctxt;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	if (on > 1) {
+		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_PF_TO_HW(pf);
+
+	/**
+	 * return -ENODEV if SRIOV not enabled, VF number not configured
+	 * or no queue assigned.
+	 */
+	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+	    pf->vf_nb_qps == 0) {
+		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+		return -ENODEV;
+	}
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	if (on) {
+		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+	} else {
+		vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+		vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+	}
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+	ctxt.seid = vsi->seid;
+
+	hw = I40E_VSI_TO_HW(vsi);
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to update VSI params");
+	}
+
+	return ret;
+}
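Both entry points above accept only 0 or 1 for "on". A combined sketch
(port, VF id and helper name are illustrative assumptions, not part of the
patch):

	#include <rte_pmd_i40e.h>

	/* Hypothetical L2 policy for VF 2: receive broadcast frames and
	 * keep VLAN-tagged frames tagged on the wire. */
	static int
	setup_vf2_l2(uint8_t port)
	{
		int ret = rte_pmd_i40e_set_vf_broadcast(port, 2, 1);

		if (ret)
			return ret;
		return rte_pmd_i40e_set_vf_vlan_tag(port, 2, 1);
	}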
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+				    uint64_t vf_mask, uint8_t on)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	struct i40e_vsi *vsi;
+	uint16_t vf_idx;
+	int ret = I40E_SUCCESS;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	if (vlan_id > ETHER_MAX_VLAN_ID) {
+		PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+		return -EINVAL;
+	}
+
+	if (vf_mask == 0) {
+		PMD_DRV_LOG(ERR, "No VF.");
+		return -EINVAL;
+	}
+
+	if (on > 1) {
+		PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+		return -EINVAL;
+	}
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_PF_TO_HW(pf);
+
+	/**
+	 * return -ENODEV if SRIOV not enabled, VF number not configured
+	 * or no queue assigned.
+	 */
+	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+	    pf->vf_nb_qps == 0) {
+		PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+		return -ENODEV;
+	}
+
+	for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
+		if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+			vsi = pf->vfs[vf_idx].vsi;
+			if (on) {
+				if (!vsi->vlan_filter_on) {
+					vsi->vlan_filter_on = true;
+					if (!vsi->vlan_anti_spoof_on)
+						i40e_add_rm_all_vlan_filter(
+							vsi, true);
+				}
+				i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
+							     false, NULL);
+				ret = i40e_vsi_add_vlan(vsi, vlan_id);
+			} else {
+				ret = i40e_vsi_delete_vlan(vsi, vlan_id);
+			}
+		}
+	}
+
+	if (ret != I40E_SUCCESS) {
+		ret = -ENOTSUP;
+		PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
+	}
+
+	return ret;
+}
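Unlike the other VF APIs, this one takes a bitmap with one bit per VF index,
so a single call can program several VFs. A sketch (port, VLAN id and helper
name are illustrative assumptions, not part of the patch):

	#include <stdint.h>
	#include <rte_pmd_i40e.h>

	/* Hypothetical example: admit VLAN 100 on VFs 0, 1 and 3. */
	static int
	allow_vlan100(uint8_t port)
	{
		uint64_t vf_mask = (1ULL << 0) | (1ULL << 1) | (1ULL << 3);

		return rte_pmd_i40e_set_vf_vlan_filter(port, 100, vf_mask, 1);
	}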
+int
+rte_pmd_i40e_get_vf_stats(uint8_t port,
+			  uint16_t vf_id,
+			  struct rte_eth_stats *stats)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	i40e_update_vsi_stats(vsi);
+
+	stats->ipackets = vsi->eth_stats.rx_unicast +
+			vsi->eth_stats.rx_multicast +
+			vsi->eth_stats.rx_broadcast;
+	stats->opackets = vsi->eth_stats.tx_unicast +
+			vsi->eth_stats.tx_multicast +
+			vsi->eth_stats.tx_broadcast;
+	stats->ibytes = vsi->eth_stats.rx_bytes;
+	stats->obytes = vsi->eth_stats.tx_bytes;
+	stats->ierrors = vsi->eth_stats.rx_discards;
+	stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
+
+	return 0;
+}
+
+int
+rte_pmd_i40e_reset_vf_stats(uint8_t port,
+			    uint16_t vf_id)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	vsi->offset_loaded = false;
+	i40e_update_vsi_stats(vsi);
+
+	return 0;
+}
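A polling sketch for the per-VF counters (port, VF id and helper name are
illustrative assumptions, not part of the patch):

	#include <inttypes.h>
	#include <stdio.h>
	#include <rte_ethdev.h>
	#include <rte_pmd_i40e.h>

	/* Hypothetical helper: print VF 0's counters, then clear them so
	 * the next read starts from zero. */
	static void
	dump_and_reset_vf0_stats(uint8_t port)
	{
		struct rte_eth_stats stats;

		if (rte_pmd_i40e_get_vf_stats(port, 0, &stats) == 0) {
			printf("vf0: rx %" PRIu64 " pkts / %" PRIu64 " bytes, "
			       "tx %" PRIu64 " pkts / %" PRIu64 " bytes\n",
			       stats.ipackets, stats.ibytes,
			       stats.opackets, stats.obytes);
			rte_pmd_i40e_reset_vf_stats(port, 0);
		}
	}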
+int
+rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	int ret = 0;
+	int i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	if (bw > I40E_QOS_BW_MAX) {
+		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+			    I40E_QOS_BW_MAX);
+		return -EINVAL;
+	}
+
+	if (bw % I40E_QOS_BW_GRANULARITY) {
+		PMD_DRV_LOG(ERR, "Bandwidth should be a multiple of %dMbps.",
+			    I40E_QOS_BW_GRANULARITY);
+		return -EINVAL;
+	}
+
+	bw /= I40E_QOS_BW_GRANULARITY;
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	/* No change. */
+	if (bw == vsi->bw_info.bw_limit) {
+		PMD_DRV_LOG(INFO,
+			    "No change for VF max bandwidth. Nothing to do.");
+		return 0;
+	}
+
+	/**
+	 * VF bandwidth limitation and TC bandwidth limitation cannot be
+	 * enabled in parallel; quit if TC bandwidth limitation is enabled.
+	 *
+	 * If bw is 0, the bandwidth limitation is being disabled, so there
+	 * is no need to check the TC bandwidth limitation.
+	 */
+	if (bw) {
+		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+			if ((vsi->enabled_tc & BIT_ULL(i)) &&
+			    vsi->bw_info.bw_ets_credits[i])
+				break;
+		}
+		if (i != I40E_MAX_TRAFFIC_CLASS) {
+			PMD_DRV_LOG(ERR,
+				    "TC max bandwidth has been set on this VF,"
+				    " please disable it first.");
+			return -EINVAL;
+		}
+	}
+
+	ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to set VF %d bandwidth, err(%d).",
+			    vf_id, ret);
+		return -EINVAL;
+	}
+
+	/* Store the configuration. */
+	vsi->bw_info.bw_limit = (uint16_t)bw;
+	vsi->bw_info.bw_max = 0;
+
+	return 0;
+}
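A rate-cap sketch (port, VF id, helper name and the 500 Mbps figure are
illustrative assumptions, not part of the patch; the value must pass the
I40E_QOS_BW_MAX and I40E_QOS_BW_GRANULARITY checks above or the call fails
with -EINVAL):

	#include <rte_pmd_i40e.h>

	/* Hypothetical helper: limit VF 0's aggregate TX rate to 500 Mbps;
	 * passing bw = 0 would remove the limit instead. */
	static int
	cap_vf0_bw(uint8_t port)
	{
		return rte_pmd_i40e_set_vf_max_bw(port, 0, 500);
	}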
+int
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
+				uint8_t tc_num, uint8_t *bw_weight)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
+	int ret = 0;
+	int i, j;
+	uint16_t sum;
+	bool b_change = false;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
+		PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+			    I40E_MAX_TRAFFIC_CLASS);
+		return -EINVAL;
+	}
+
+	sum = 0;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->enabled_tc & BIT_ULL(i))
+			sum++;
+	}
+	if (sum != tc_num) {
+		PMD_DRV_LOG(ERR,
+			    "Weight should be set for all %d enabled TCs.",
+			    sum);
+		return -EINVAL;
+	}
+
+	sum = 0;
+	for (i = 0; i < tc_num; i++) {
+		if (!bw_weight[i]) {
+			PMD_DRV_LOG(ERR,
+				    "The weight should be at least 1.");
+			return -EINVAL;
+		}
+		sum += bw_weight[i];
+	}
+	if (sum != 100) {
+		PMD_DRV_LOG(ERR,
+			    "The sum of the TC weights should be 100.");
+		return -EINVAL;
+	}
+
+	/**
+	 * Create the configuration for all the TCs.
+	 */
+	memset(&tc_bw, 0, sizeof(tc_bw));
+	tc_bw.tc_valid_bits = vsi->enabled_tc;
+	j = 0;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->enabled_tc & BIT_ULL(i)) {
+			if (bw_weight[j] !=
+			    vsi->bw_info.bw_ets_share_credits[i])
+				b_change = true;
+
+			tc_bw.tc_bw_credits[i] = bw_weight[j];
+			j++;
+		}
+	}
+
+	/* No change. */
+	if (!b_change) {
+		PMD_DRV_LOG(INFO,
+			    "No change for TC allocated bandwidth."
+			    " Nothing to do.");
+		return 0;
+	}
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to set VF %d TC bandwidth weight, err(%d).",
+			    vf_id, ret);
+		return -EINVAL;
+	}
+
+	/* Store the configuration. */
+	j = 0;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->enabled_tc & BIT_ULL(i)) {
+			vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
+			j++;
+		}
+	}
+
+	return 0;
+}
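The weights must cover exactly the enabled TCs, each be at least 1, and sum
to 100, as the checks above enforce. A sketch under the assumption that two
TCs are enabled on the VF (port, VF id and helper name are illustrative):

	#include <stdint.h>
	#include <rte_pmd_i40e.h>

	/* Hypothetical split: give TC0 80% and TC1 20% of VF 0's
	 * transmit bandwidth. */
	static int
	share_vf0_bw(uint8_t port)
	{
		uint8_t weights[2] = {80, 20};

		return rte_pmd_i40e_set_vf_tc_bw_alloc(port, 0, 2, weights);
	}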
+int
+rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
+			      uint8_t tc_no, uint32_t bw)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw;
+	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+	int ret = 0;
+	int i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	if (vf_id >= pf->vf_num || !pf->vfs) {
+		PMD_DRV_LOG(ERR, "Invalid VF ID.");
+		return -EINVAL;
+	}
+
+	vsi = pf->vfs[vf_id].vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	if (bw > I40E_QOS_BW_MAX) {
+		PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+			    I40E_QOS_BW_MAX);
+		return -EINVAL;
+	}
+
+	if (bw % I40E_QOS_BW_GRANULARITY) {
+		PMD_DRV_LOG(ERR, "Bandwidth should be a multiple of %dMbps.",
+			    I40E_QOS_BW_GRANULARITY);
+		return -EINVAL;
+	}
+
+	bw /= I40E_QOS_BW_GRANULARITY;
+
+	if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
+		PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
+			    I40E_MAX_TRAFFIC_CLASS);
+		return -EINVAL;
+	}
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
+		PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
+			    vf_id, tc_no);
+		return -EINVAL;
+	}
+
+	/* No change. */
+	if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
+		PMD_DRV_LOG(INFO,
+			    "No change for TC max bandwidth. Nothing to do.");
+		return 0;
+	}
+
+	/**
+	 * VF bandwidth limitation and TC bandwidth limitation cannot be
+	 * enabled in parallel; disable VF bandwidth limitation if it's
+	 * enabled.
+	 * If bw is 0, the bandwidth limitation is being disabled, so there
+	 * is no need to care about the VF bandwidth limitation
+	 * configuration.
+	 */
+	if (bw && vsi->bw_info.bw_limit) {
+		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to disable VF(%d)"
+				    " bandwidth limitation, err(%d).",
+				    vf_id, ret);
+			return -EINVAL;
+		}
+
+		PMD_DRV_LOG(INFO,
+			    "VF max bandwidth is disabled according"
+			    " to TC max bandwidth setting.");
+	}
+
+	/**
+	 * Get all the TCs' info to create a whole picture, because
+	 * incremental change isn't permitted.
+	 */
+	memset(&tc_bw, 0, sizeof(tc_bw));
+	tc_bw.tc_valid_bits = vsi->enabled_tc;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->enabled_tc & BIT_ULL(i)) {
+			tc_bw.tc_bw_credits[i] =
+				rte_cpu_to_le_16(
+					vsi->bw_info.bw_ets_credits[i]);
+		}
+	}
+	tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
+
+	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to set VF %d TC %d max bandwidth, err(%d).",
+			    vf_id, tc_no, ret);
+		return -EINVAL;
+	}
+
+	/* Store the configuration. */
+	vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
+
+	return 0;
+}
+
+int
+rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_vsi *vsi;
+	struct i40e_veb *veb;
+	struct i40e_hw *hw;
+	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+	int i;
+	int ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_i40e_pmd))
+		return -ENOTSUP;
+
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+	vsi = pf->main_vsi;
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Invalid VSI.");
+		return -EINVAL;
+	}
+
+	veb = vsi->veb;
+	if (!veb) {
+		PMD_DRV_LOG(ERR, "Invalid VEB.");
+		return -EINVAL;
+	}
+
+	if ((tc_map & veb->enabled_tc) != tc_map) {
+		PMD_DRV_LOG(ERR,
+			    "TC bitmap isn't a subset of the enabled TCs 0x%x.",
+			    veb->enabled_tc);
+		return -EINVAL;
+	}
+
+	if (tc_map == veb->strict_prio_tc) {
+		PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
+		return 0;
+	}
+
+	hw = I40E_VSI_TO_HW(vsi);
+
+	/* Disable DCBx if it's the first time to set strict priority. */
+	if (!veb->strict_prio_tc) {
+		ret = i40e_aq_stop_lldp(hw, true, NULL);
+		if (ret)
+			PMD_DRV_LOG(INFO,
+				    "Failed to disable DCBx as it's already"
+				    " disabled.");
+		else
+			PMD_DRV_LOG(INFO,
+				    "DCBx is disabled according to strict"
+				    " priority setting.");
+	}
+
+	memset(&ets_data, 0, sizeof(ets_data));
+	ets_data.tc_valid_bits = veb->enabled_tc;
+	ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
+	ets_data.tc_strict_priority_flags = tc_map;
+	/* Get all TCs' bandwidth. */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (veb->enabled_tc & BIT_ULL(i)) {
+			/* For robustness, if bandwidth is 0, use 1 instead. */
+			if (veb->bw_info.bw_ets_share_credits[i])
+				ets_data.tc_bw_share_credits[i] =
+					veb->bw_info.bw_ets_share_credits[i];
+			else
+				ets_data.tc_bw_share_credits[i] =
+					I40E_QOS_BW_WEIGHT_MIN;
+		}
+	}
+
+	if (!veb->strict_prio_tc)
+		ret = i40e_aq_config_switch_comp_ets(
+			hw, veb->uplink_seid,
+			&ets_data, i40e_aqc_opc_enable_switching_comp_ets,
+			NULL);
+	else if (tc_map)
+		ret = i40e_aq_config_switch_comp_ets(
+			hw, veb->uplink_seid,
+			&ets_data, i40e_aqc_opc_modify_switching_comp_ets,
+			NULL);
+	else
+		ret = i40e_aq_config_switch_comp_ets(
+			hw, veb->uplink_seid,
+			&ets_data, i40e_aqc_opc_disable_switching_comp_ets,
+			NULL);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to set TCs' strict priority mode."
+			    " err (%d)", ret);
+		return -EINVAL;
+	}
+
+	veb->strict_prio_tc = tc_map;
+
+	/* Enable DCBx again if strict priority is disabled for all TCs. */
+	if (!tc_map) {
+		ret = i40e_aq_start_lldp(hw, NULL);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to enable DCBx, err(%d).", ret);
+			return -EINVAL;
+		}
+
+		PMD_DRV_LOG(INFO,
+			    "DCBx is enabled again according to strict"
+			    " priority setting.");
+	}
+
+	return ret;
+}
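A closing sketch combining the two QoS calls above (port, VF id, TC number,
the 200 Mbps figure and the helper name are illustrative assumptions, not
part of the patch). tc_map is a bitmap of TCs; passing 0 clears strict
priority and re-enables DCBx, and setting a TC max bandwidth automatically
disables any VF max bandwidth limit, as the code above shows:

	#include <rte_pmd_i40e.h>

	/* Hypothetical QoS policy: cap TC1 of VF 0 at 200 Mbps, then put
	 * TC1 into strict-priority mode on the PF's VEB. */
	static int
	prioritize_tc1(uint8_t port)
	{
		int ret = rte_pmd_i40e_set_vf_tc_max_bw(port, 0, 1, 200);

		if (ret)
			return ret;
		return rte_pmd_i40e_set_tc_strict_prio(port, 1 << 1);
	}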