X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_ethdev.c;h=03fc1f71799c3225e221698b577e51628f071a94;hb=c3318f529c28e8320ac43f86f214f93e2190445d;hp=2470c89cf07391bb4a604fd5cf5f3b64f0227ad7;hpb=03bb633aa86232df29a0692b2727cff3e4e1b685;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 2470c89cf0..03fc1f7179 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -127,6 +128,13 @@
 #define IXGBE_EXVET_VET_EXT_SHIFT              16
 #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
 
+#define IXGBEVF_DEVARG_PFLINK_FULLCHK	"pflink_fullchk"
+
+static const char * const ixgbevf_valid_arguments[] = {
+	IXGBEVF_DEVARG_PFLINK_FULLCHK,
+	NULL
+};
+
 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
@@ -223,11 +231,12 @@ static void ixgbe_dev_interrupt_handler(void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
 static void ixgbe_dev_setup_link_alarm_handler(void *param);
 
-static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-		uint32_t index, uint32_t pool);
+static int ixgbe_add_rar(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr,
+			uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
-					   struct ether_addr *mac_addr);
+					   struct rte_ether_addr *mac_addr);
 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
 static bool is_device_supported(struct rte_eth_dev *dev,
 				struct rte_pci_driver *drv);
@@ -268,7 +277,7 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 
 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
-		ether_addr * mac_addr, uint8_t on);
+		rte_ether_addr * mac_addr, uint8_t on);
 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 		struct rte_eth_mirror_conf *mirror_conf,
@@ -284,11 +293,11 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
-				struct ether_addr *mac_addr,
+				struct rte_ether_addr *mac_addr,
 				uint32_t index, uint32_t pool);
 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
 static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
-					 struct ether_addr *mac_addr);
+					 struct rte_ether_addr *mac_addr);
 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
 			struct rte_eth_syn_filter *filter);
 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
@@ -315,7 +324,7 @@ static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
 
 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
-				      struct ether_addr *mc_addr_set,
+				      struct rte_ether_addr *mc_addr_set,
 				      uint32_t nb_mc_addr);
 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 				  struct rte_eth_dcb_info *dcb_info);
@@ -461,6 +470,7 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = {
 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
 #ifdef RTE_LIBRTE_IXGBE_BYPASS
 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
 #endif
@@ -1212,26 +1222,26 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	ixgbe_reset_qstat_mappings(hw);
 
 	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
 					       hw->mac.num_rar_entries, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR,
 			     "Failed to allocate %u bytes needed to store "
 			     "MAC addresses",
-			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
 		return -ENOMEM;
 	}
 
 	/* Copy the permanent MAC address */
-	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
 			&eth_dev->data->mac_addrs[0]);
 
 	/* Allocate memory for storing hash filter MAC addresses */
-	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-						    IXGBE_VMDQ_NUM_UC_MAC, 0);
+	eth_dev->data->hash_mac_addrs = rte_zmalloc(
+		"ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
 	if (eth_dev->data->hash_mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR,
 			     "Failed to allocate %d bytes needed to store MAC addresses",
-			     ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+			     RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
 		return -ENOMEM;
 	}
@@ -1501,7 +1511,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 	}
 	l2_tn_info->e_tag_en = FALSE;
 	l2_tn_info->e_tag_fwd_en = FALSE;
-	l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
+	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
 
 	return 0;
 }
@@ -1533,7 +1543,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw)
 }
 
 static void
-generate_random_mac_addr(struct ether_addr *mac_addr)
+generate_random_mac_addr(struct rte_ether_addr *mac_addr)
 {
 	uint64_t random;
 
@@ -1542,12 +1552,51 @@ generate_random_mac_addr(struct ether_addr *mac_addr)
 	mac_addr->addr_bytes[1] = 0x09;
 	mac_addr->addr_bytes[2] = 0xC0;
 	/* Force indication of locally assigned MAC address. */
-	mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
+	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
 	/* Generate the last 3 bytes of the MAC address with a random number. */
 	random = rte_rand();
 	memcpy(&mac_addr->addr_bytes[3], &random, 3);
 }
 
+static int
+devarg_handle_int(__rte_unused const char *key, const char *value,
+		  void *extra_args)
+{
+	uint16_t *n = extra_args;
+
+	if (value == NULL || extra_args == NULL)
+		return -EINVAL;
+
+	*n = (uint16_t)strtoul(value, NULL, 0);
+	if (*n == USHRT_MAX && errno == ERANGE)
+		return -1;
+
+	return 0;
+}
+
+static void
+ixgbevf_parse_devargs(struct ixgbe_adapter *adapter,
+		      struct rte_devargs *devargs)
+{
+	struct rte_kvargs *kvlist;
+	uint16_t pflink_fullchk;
+
+	if (devargs == NULL)
+		return;
+
+	kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments);
+	if (kvlist == NULL)
+		return;
+
+	if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 &&
+	    rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK,
+			       devarg_handle_int, &pflink_fullchk) == 0 &&
+	    pflink_fullchk == 1)
+		adapter->pflink_fullchk = 1;
+
+	rte_kvargs_free(kvlist);
+}
+
 /*
  * Virtual Function device init
  */
@@ -1564,7 +1613,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
 	struct ixgbe_hwstrip *hwstrip =
 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
-	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+	struct rte_ether_addr *perm_addr =
+		(struct rte_ether_addr *)hw->mac.perm_addr;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1595,6 +1645,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}
 
+	ixgbevf_parse_devargs(eth_dev->data->dev_private,
+			      pci_dev->device.devargs);
+
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
 	hw->device_id = pci_dev->id.device_id;
@@ -1648,18 +1701,18 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	ixgbevf_get_queues(hw, &tcs, &tc);
 
 	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
+	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN *
 					       hw->mac.num_rar_entries, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR,
 			     "Failed to allocate %u bytes needed to store "
 			     "MAC addresses",
-			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
 		return -ENOMEM;
 	}
 
 	/* Generate a random MAC address, if none was assigned by PF. */
-	if (is_zero_ether_addr(perm_addr)) {
+	if (rte_is_zero_ether_addr(perm_addr)) {
 		generate_random_mac_addr(perm_addr);
 		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
 		if (diag) {
@@ -1679,7 +1732,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Copy the permanent MAC address */
-	ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
+	rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
 
 	/* reset the hardware with the new settings */
 	diag = hw->mac.ops.start_hw(hw);
@@ -1816,8 +1869,7 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ixgbe_pmd = {
 	.id_table = pci_id_ixgbe_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-		     RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
 	.probe = eth_ixgbe_pci_probe,
 	.remove = eth_ixgbe_pci_remove,
 };
@@ -1839,7 +1891,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
  */
 static struct rte_pci_driver rte_ixgbevf_pmd = {
 	.id_table = pci_id_ixgbevf_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
 	.probe = eth_ixgbevf_pci_probe,
 	.remove = eth_ixgbevf_pci_remove,
 };
@@ -2405,8 +2457,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ixgbe_interrupt *intr =
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2814,8 +2865,7 @@ static void
 ixgbe_dev_stop(struct rte_eth_dev *dev)
 {
 	struct rte_eth_link link;
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_vf_info *vfinfo =
@@ -3053,7 +3103,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 		hw_stats->qbrc[i] +=
 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
 		if (crc_strip == 0)
-			hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
+			hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN;
 
 		hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
 		hw_stats->qbtc[i] +=
@@ -3098,12 +3148,12 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 	hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
 
 	if (crc_strip == 0)
-		hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
+		hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN;
 
 	uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
 	hw_stats->gptc += delta_gptc;
-	hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
-	hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
+	hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN;
+	hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
 
 	/*
 	 * Workaround: mprc hardware is incorrectly counting
@@ -3133,7 +3183,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 	hw_stats->gptc -= total;
 	hw_stats->mptc -= total;
 	hw_stats->ptc64 -= total;
-	hw_stats->gotc -= total * ETHER_MIN_LEN;
+	hw_stats->gotc -= total * RTE_ETHER_MIN_LEN;
 
 	hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
 	hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
@@ -3755,7 +3805,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	else
 		dev_info->max_vmdq_pools = ETH_64_POOLS;
 	dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
-	dev_info->min_mtu = ETHER_MIN_MTU;
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
@@ -3876,6 +3926,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 				     dev_info->rx_queue_offload_capa);
 	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
 	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
+	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3907,6 +3959,8 @@ static int
 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 		   int *link_up, int wait_to_complete)
 {
+	struct ixgbe_adapter *adapter = container_of(hw,
+						     struct ixgbe_adapter, hw);
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 	struct ixgbe_mac_info *mac = &hw->mac;
 	uint32_t links_reg, in_msg;
@@ -3967,6 +4021,15 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
 	}
 
+	if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) {
+		if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
+			mac->get_link_status = true;
+		else
+			mac->get_link_status = false;
+
+		goto out;
+	}
+
 	/* if the read failed it could just be a mailbox collision, best wait
 	 * until we are called again and don't report an error
 	 */
@@ -4440,7 +4503,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 
 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 }
 
 /**
@@ -4556,7 +4619,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	 * At least reserve one Ethernet frame for watermark
 	 * high_water/low_water in kilo bytes for ixgbe
 	 */
-	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+	max_high_water = (rx_buf_size -
+			RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
 	if ((fc_conf->high_water > max_high_water) ||
 	    (fc_conf->high_water < fc_conf->low_water)) {
 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
@@ -4777,7 +4841,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
 	 * At least reserve one Ethernet frame for watermark
 	 * high_water/low_water in kilo bytes for ixgbe
 	 */
-	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+	max_high_water = (rx_buf_size -
+			RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
 	if ((pfc_conf->fc.high_water > max_high_water) ||
 	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
@@ -4810,8 +4875,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 	uint8_t j, mask;
 	uint32_t reta, r;
 	uint16_t idx, shift;
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reta_reg;
 
@@ -4901,7 +4965,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 }
 
 static int
-ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
 		uint32_t index, uint32_t pool)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4920,7 +4984,7 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
 }
 
 static int
-ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -4958,7 +5022,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	ixgbe_dev_info_get(dev, &dev_info);
 
 	/* check that mtu is within the allowed range */
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
 		return -EINVAL;
 
 	/* If device is started, refuse mtu that requires the support of
@@ -4975,7 +5039,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 
 	/* switch to jumbo mode if needed */
-	if (frame_size > ETHER_MAX_LEN) {
+	if (frame_size > RTE_ETHER_MAX_LEN) {
 		dev->data->dev_conf.rxmode.offloads |=
 			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -5044,8 +5108,7 @@ static int
 ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 
 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
 		     dev->data->port_id);
@@ -5177,8 +5240,7 @@ static void
 ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
@@ -5370,7 +5432,7 @@ ixgbe_vt_check(struct ixgbe_hw *hw)
 }
 
 static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr)
 {
 	uint32_t vector = 0;
 
@@ -5401,8 +5463,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
 }
 
 static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-			uint8_t on)
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
+			struct rte_ether_addr *mac_addr, uint8_t on)
 {
 	uint32_t vector;
 	uint32_t uta_idx;
@@ -5702,7 +5764,7 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	RTE_SET_USED(queue_id);
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
 
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 
 	return 0;
 }
@@ -5751,7 +5813,7 @@ ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 		mask &= (1 << (queue_id - 32));
 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
 	}
-	rte_intr_enable(intr_handle);
+	rte_intr_ack(intr_handle);
 
 	return 0;
 }
@@ -6033,7 +6095,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 }
 
 static int
-ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
 		     __attribute__((unused)) uint32_t index,
 		     __attribute__((unused)) uint32_t pool)
 {
@@ -6045,7 +6107,8 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 	 * operation. Trap this case to avoid exhausting the [very limited]
 	 * set of PF resources used to store VF MAC addresses.
 	 */
-	if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+	if (memcmp(hw->mac.perm_addr, mac_addr,
+			sizeof(struct rte_ether_addr)) == 0)
 		return -1;
 	diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
 	if (diag != 0)
@@ -6065,8 +6128,9 @@ static void
 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
-	struct ether_addr *mac_addr;
+	struct rte_ether_addr *perm_addr =
+		(struct rte_ether_addr *)hw->mac.perm_addr;
+	struct rte_ether_addr *mac_addr;
 	uint32_t i;
 	int diag;
 
@@ -6088,10 +6152,11 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 		if (i == index)
 			continue;
 		/* Skip NULL MAC addresses */
-		if (is_zero_ether_addr(mac_addr))
+		if (rte_is_zero_ether_addr(mac_addr))
 			continue;
 		/* Skip the permanent MAC address */
-		if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+		if (memcmp(perm_addr, mac_addr,
+				sizeof(struct rte_ether_addr)) == 0)
 			continue;
 		diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
 		if (diag != 0)
@@ -6110,7 +6175,8 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 }
 
 static int
-ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+			     struct rte_ether_addr *addr)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -6360,7 +6426,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+	if (mtu < RTE_ETHER_MIN_MTU ||
+			max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
 		return -EINVAL;
 
 	/* If device is started, refuse mtu that requires the support of
@@ -6657,8 +6724,8 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
 	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
 		return -EINVAL;
 
-	if (filter->ether_type == ETHER_TYPE_IPv4 ||
-	    filter->ether_type == ETHER_TYPE_IPv6) {
+	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
 			" ethertype filter.", filter->ether_type);
 		return -EINVAL;
@@ -6839,13 +6906,13 @@ ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
 
 	*vmdq = 0;
 	mc_addr = *mc_addr_ptr;
-	*mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
+	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
 	return mc_addr;
 }
 
 static int
 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
-			  struct ether_addr *mc_addr_set,
+			  struct rte_ether_addr *mc_addr_set,
 			  uint32_t nb_mc_addr)
 {
 	struct ixgbe_hw *hw;
@@ -6935,8 +7002,7 @@ static void
 ixgbe_start_timecounters(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 	struct rte_eth_link link;
 	uint32_t incval = 0;
 	uint32_t shift = 0;
@@ -7004,8 +7070,7 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
 static int
 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
 {
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 
 	adapter->systime_tc.nsec += delta;
 	adapter->rx_tstamp_tc.nsec += delta;
@@ -7018,8 +7083,7 @@ static int
 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
 {
 	uint64_t ns;
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 
 	ns = rte_timespec_to_ns(ts);
 	/* Set the timecounters to a new value. */
@@ -7034,8 +7098,7 @@ static int
 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
 {
 	uint64_t ns, systime_cycles;
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 
 	systime_cycles = ixgbe_read_systime_cyclecounter(dev);
 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
@@ -7066,7 +7129,7 @@ ixgbe_timesync_enable(struct rte_eth_dev *dev)
 
 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
-			(ETHER_TYPE_1588 |
+			(RTE_ETHER_TYPE_1588 |
 			 IXGBE_ETQF_FILTER_EN |
 			 IXGBE_ETQF_1588));
 
@@ -7116,8 +7179,7 @@ ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
 				 uint32_t flags __rte_unused)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 	uint32_t tsync_rxctl;
 	uint64_t rx_tstamp_cycles;
 	uint64_t ns;
@@ -7138,8 +7200,7 @@ ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 				 struct timespec *timestamp)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_adapter *adapter =
-		(struct ixgbe_adapter *)dev->data->dev_private;
+	struct ixgbe_adapter *adapter = dev->data->dev_private;
 	uint32_t tsync_txctl;
 	uint64_t tx_tstamp_cycles;
 	uint64_t ns;
@@ -7378,6 +7439,9 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
 	case ixgbe_mac_X550EM_x_vf:
 	case ixgbe_mac_X550EM_a_vf:
 		return ETH_RSS_RETA_SIZE_64;
+	case ixgbe_mac_X540_vf:
+	case ixgbe_mac_82599_vf:
+		return 0;
 	default:
 		return ETH_RSS_RETA_SIZE_128;
 	}
@@ -8661,6 +8725,8 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
+			      IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
 
 RTE_INIT(ixgbe_init_log)
 {
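
The `pflink_fullchk` devarg registered above is parsed per VF port by ixgbevf_parse_devargs() at probe time and then consulted in ixgbevf_check_link(): when it is left at 0, the non-blocking (wait_to_complete == 0) link query trusts the locally read link state and skips the PF mailbox round-trip that follows. A minimal usage sketch, assuming a VF bound to a hypothetical PCI address, passes the option in the device's devargs string on the application command line, e.g. with testpmd:

    testpmd -w 0000:01:10.0,pflink_fullchk=1 -- -i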