X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fiavf%2Fiavf_ethdev.c;h=5a5a7f59e152d88d254dc8d82be32b8342ec5528;hb=39e4a2577fd05199f53182b7c8509aeed40dc07f;hp=53094270825ea8197624aae7fcdd0f5b2c590c24;hpb=f3bbf08fafa51ce70451b8f4b6bbe8d1673df366;p=dpdk.git

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 5309427082..5a5a7f59e1 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -16,6 +16,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -117,16 +118,19 @@ static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
 					uint16_t queue_id);
 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
 					uint16_t queue_id);
-static int iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
-		enum rte_filter_type filter_type,
-		enum rte_filter_op filter_op,
-		void *arg);
+static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
+		const struct rte_flow_ops **ops);
 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
 			uint32_t mc_addrs_num);
+static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
 
 static const struct rte_pci_id pci_id_iavf_map[] = {
 	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
@@ -195,10 +199,24 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.mtu_set = iavf_dev_mtu_set,
 	.rx_queue_intr_enable = iavf_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable = iavf_dev_rx_queue_intr_disable,
-	.filter_ctrl = iavf_dev_filter_ctrl,
+	.flow_ops_get = iavf_dev_flow_ops_get,
 	.tx_done_cleanup = iavf_dev_tx_done_cleanup,
+	.get_monitor_addr = iavf_get_monitor_addr,
+	.tm_ops_get = iavf_tm_ops_get,
 };
 
+static int
+iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+		void *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	*(const void **)arg = &iavf_tm_ops;
+
+	return 0;
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -242,6 +260,121 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 	return err;
 }
 
+static void
+iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
+{
+	static const uint64_t map_hena_rss[] = {
+		/* IPv4 */
+		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+				ETH_RSS_NONFRAG_IPV4_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+				ETH_RSS_NONFRAG_IPV4_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
+				ETH_RSS_NONFRAG_IPV4_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+				ETH_RSS_NONFRAG_IPV4_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
+				ETH_RSS_NONFRAG_IPV4_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+				ETH_RSS_NONFRAG_IPV4_SCTP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+				ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+
+		/* IPv6 */
+		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+				ETH_RSS_NONFRAG_IPV6_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+				ETH_RSS_NONFRAG_IPV6_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
+				ETH_RSS_NONFRAG_IPV6_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+				ETH_RSS_NONFRAG_IPV6_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
+				ETH_RSS_NONFRAG_IPV6_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+				ETH_RSS_NONFRAG_IPV6_SCTP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+				ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+
+		/* L2 Payload */
+		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+	};
+
+	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
+				  ETH_RSS_NONFRAG_IPV4_TCP |
+				  ETH_RSS_NONFRAG_IPV4_SCTP |
+				  ETH_RSS_NONFRAG_IPV4_OTHER |
+				  ETH_RSS_FRAG_IPV4;
+
+	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
+				  ETH_RSS_NONFRAG_IPV6_TCP |
+				  ETH_RSS_NONFRAG_IPV6_SCTP |
+				  ETH_RSS_NONFRAG_IPV6_OTHER |
+				  ETH_RSS_FRAG_IPV6;
+
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
+	uint32_t i;
+	int ret;
+
+	ret = iavf_get_hena_caps(adapter, &caps);
+	if (ret) {
+		/**
+		 * RSS offload type configuration is not a necessary feature
+		 * for VF, so here just print a warning and return.
+		 */
+		PMD_DRV_LOG(WARNING,
+			    "fail to get RSS offload type caps, ret: %d", ret);
+		return;
+	}
+
+	/**
+	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+	 * generalizations of all other IPv4 and IPv6 RSS types.
+	 */
+	if (rss_hf & ETH_RSS_IPV4)
+		rss_hf |= ipv4_rss;
+
+	if (rss_hf & ETH_RSS_IPV6)
+		rss_hf |= ipv6_rss;
+
+	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
+
+	for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
+		uint64_t bit = BIT_ULL(i);
+
+		if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
+			valid_rss_hf |= map_hena_rss[i];
+			hena |= bit;
+		}
+	}
+
+	ret = iavf_set_hena(adapter, hena);
+	if (ret) {
+		/**
+		 * RSS offload type configuration is not a necessary feature
+		 * for VF, so here just print a warning and return.
+		 */
+		PMD_DRV_LOG(WARNING,
+			    "fail to set RSS offload types, ret: %d", ret);
+		return;
+	}
+
+	if (valid_rss_hf & ipv4_rss)
+		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+
+	if (valid_rss_hf & ipv6_rss)
+		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+
+	if (rss_hf & ~valid_rss_hf)
+		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
+			    rss_hf & ~valid_rss_hf);
+
+	vf->rss_hf = valid_rss_hf;
+}
+
 static int
 iavf_init_rss(struct iavf_adapter *adapter)
 {
@@ -258,19 +391,11 @@ iavf_init_rss(struct iavf_adapter *adapter)
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
-		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
-		/* set all lut items to default queue */
-		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
-			vf->rss_lut[i] = 0;
-		ret = iavf_configure_rss_lut(adapter);
-		return ret;
-	}
 
 	/* configure RSS key */
 	if (!rss_conf->rss_key) {
 		/* Calculate the default hash key */
-		for (i = 0; i <= vf->vf_res->rss_key_size; i++)
+		for (i = 0; i < vf->vf_res->rss_key_size; i++)
 			vf->rss_key[i] = (uint8_t)rte_rand();
 	} else
 		rte_memcpy(vf->rss_key, rss_conf->rss_key,
@@ -291,11 +416,15 @@ iavf_init_rss(struct iavf_adapter *adapter)
 	if (ret)
 		return ret;
 
-	/* Set RSS hash configuration based on rss_conf->rss_hf. */
-	ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "fail to set default RSS");
-		return ret;
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
+		/* Set RSS hash configuration based on rss_conf->rss_hf. */
+		ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "fail to set default RSS");
+			return ret;
+		}
+	} else {
+		iavf_config_rss_hf(adapter, rss_conf->rss_hf);
 	}
 
 	return 0;
@@ -446,13 +575,14 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_eth_dev_data *dev_data = dev->data;
-	uint16_t buf_size, max_pkt_len, len;
+	uint16_t buf_size, max_pkt_len;
 
 	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 
 	/* Calculate the maximum packet length allowed */
-	len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
-	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+	max_pkt_len = RTE_MIN((uint32_t)
+			rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
+			dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
 	/* Check if the jumbo frame and maximum packet length are set
 	 * correctly.
@@ -546,7 +676,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 	if (!qv_map) {
 		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
 				dev->data->nb_rx_queues);
-		return -1;
+		goto qv_map_alloc_err;
 	}
 
 	if (!dev->data->dev_conf.intr_conf.rxq ||
@@ -576,9 +706,9 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		 */
 		vf->msix_base = IAVF_MISC_VEC_ID;
 
-		/* set ITR to max */
+		/* set ITR to default */
 		interval = iavf_calc_itr_interval(
-				IAVF_QUEUE_ITR_INTERVAL_MAX);
+				IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
 		IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
 			       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
 			       (IAVF_ITR_INDEX_DEFAULT <<
@@ -610,15 +740,15 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 			/* If Rx interrupt is reuquired, and we can use
 			 * multi interrupts, then the vec is from 1
 			 */
-			vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
-					      intr_handle->nb_efd);
+			vf->nb_msix = RTE_MIN(intr_handle->nb_efd,
+				 (uint16_t)(vf->vf_res->max_vectors - 1));
 			vf->msix_base = IAVF_RX_VEC_START;
 			vec = IAVF_RX_VEC_START;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				qv_map[i].queue_id = i;
 				qv_map[i].vector_id = vec;
 				intr_handle->intr_vec[i] = vec++;
-				if (vec >= vf->nb_msix)
+				if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
 					vec = IAVF_RX_VEC_START;
 			}
 			vf->qv_map = qv_map;
@@ -631,7 +761,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 	if (!vf->lv_enabled) {
 		if (iavf_config_irq_map(adapter)) {
 			PMD_DRV_LOG(ERR, "config interrupt mapping failed");
-			return -1;
+			goto config_irq_map_err;
 		}
 	} else {
 		uint16_t num_qv_maps = dev->data->nb_rx_queues;
@@ -641,7 +771,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 			if (iavf_config_irq_map_lv(adapter,
 					IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
 				PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
-				return -1;
+				goto config_irq_map_err;
 			}
 			num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
 			index += IAVF_IRQ_MAP_NUM_PER_BUF;
@@ -649,10 +779,20 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 
 		if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
 			PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
-			return -1;
+			goto config_irq_map_err;
 		}
 	}
 	return 0;
+
+config_irq_map_err:
+	rte_free(vf->qv_map);
+	vf->qv_map = NULL;
+
+qv_map_alloc_err:
+	rte_free(intr_handle->intr_vec);
+	intr_handle->intr_vec = NULL;
+
+	return -1;
 }
 
 static int
@@ -704,6 +844,12 @@ iavf_dev_start(struct rte_eth_dev *dev)
 					dev->data->nb_tx_queues);
 	num_queue_pairs = vf->num_queue_pairs;
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		if (iavf_get_qos_cap(adapter)) {
+			PMD_INIT_LOG(ERR, "Failed to get qos capability");
+			return -1;
+		}
+
 	if (iavf_init_queues(dev) != 0) {
 		PMD_DRV_LOG(ERR, "failed to do Queue init");
 		return -1;
@@ -733,7 +879,8 @@ iavf_dev_start(struct rte_eth_dev *dev)
 	}
 	/* re-enable intr again, because efd assign may change */
 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
-		rte_intr_disable(intr_handle);
+		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+			rte_intr_disable(intr_handle);
 		rte_intr_enable(intr_handle);
 	}
 
@@ -767,6 +914,10 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
+	    dev->data->dev_conf.intr_conf.rxq != 0)
+		rte_intr_disable(intr_handle);
+
 	if (adapter->stopped == 1)
 		return 0;
 
@@ -996,7 +1147,7 @@ iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
 		return -EINVAL;
 	}
 
-	err = iavf_add_del_eth_addr(adapter, addr, true);
+	err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to add MAC address");
 		return -EIO;
@@ -1018,7 +1169,7 @@ iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 
 	addr = &dev->data->mac_addrs[index];
 
-	err = iavf_add_del_eth_addr(adapter, addr, false);
+	err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to delete MAC address");
 
@@ -1248,24 +1399,37 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
-	if (rss_conf->rss_hf == 0)
+	if (rss_conf->rss_hf == 0) {
+		vf->rss_hf = 0;
+		ret = iavf_set_hena(adapter, 0);
+
+		/* It is a workaround, temporarily allow error to be returned
+		 * due to possible lack of PF handling for hena = 0.
+		 */
+		if (ret)
+			PMD_DRV_LOG(WARNING, "fail to clean existing RSS, lack PF support");
 		return 0;
+	}
 
-	/* Clear existing RSS. */
-	ret = iavf_set_hena(adapter, 0);
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
+		/* Clear existing RSS. */
+		ret = iavf_set_hena(adapter, 0);
 
-	/* It is a workaround, temporarily allow error to be returned
-	 * due to possible lack of PF handling for hena = 0.
-	 */
-	if (ret)
-		PMD_DRV_LOG(WARNING, "fail to clean existing RSS,"
-			    "lack PF support");
+		/* It is a workaround, temporarily allow error to be returned
+		 * due to possible lack of PF handling for hena = 0.
+		 */
+		if (ret)
+			PMD_DRV_LOG(WARNING, "fail to clean existing RSS,"
+				    "lack PF support");
 
-	/* Set new RSS configuration. */
-	ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "fail to set new RSS");
-		return ret;
+		/* Set new RSS configuration. */
+		ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "fail to set new RSS");
+			return ret;
+		}
+	} else {
+		iavf_config_rss_hf(adapter, rss_conf->rss_hf);
 	}
 
 	return 0;
@@ -1327,37 +1491,25 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
-	struct rte_ether_addr *perm_addr, *old_addr;
+	struct rte_ether_addr *old_addr;
 	int ret;
 
 	old_addr = (struct rte_ether_addr *)hw->mac.addr;
-	perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;
 
-	/* If the MAC address is configured by host, skip the setting */
-	if (rte_is_valid_assigned_ether_addr(perm_addr))
-		return -EPERM;
+	if (rte_is_same_ether_addr(old_addr, mac_addr))
+		return 0;
 
-	ret = iavf_add_del_eth_addr(adapter, old_addr, false);
+	ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
-			    " %02X:%02X:%02X:%02X:%02X:%02X",
-			    old_addr->addr_bytes[0],
-			    old_addr->addr_bytes[1],
-			    old_addr->addr_bytes[2],
-			    old_addr->addr_bytes[3],
-			    old_addr->addr_bytes[4],
-			    old_addr->addr_bytes[5]);
-
-	ret = iavf_add_del_eth_addr(adapter, mac_addr, true);
+			    RTE_ETHER_ADDR_PRT_FMT,
+			    RTE_ETHER_ADDR_BYTES(old_addr));
+
+	ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Fail to add new MAC:"
-			    " %02X:%02X:%02X:%02X:%02X:%02X",
-			    mac_addr->addr_bytes[0],
-			    mac_addr->addr_bytes[1],
-			    mac_addr->addr_bytes[2],
-			    mac_addr->addr_bytes[3],
-			    mac_addr->addr_bytes[4],
-			    mac_addr->addr_bytes[5]);
+			    RTE_ETHER_ADDR_PRT_FMT,
+			    RTE_ETHER_ADDR_BYTES(mac_addr));
 
 	if (ret)
 		return -EIO;
@@ -1514,6 +1666,7 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t msix_intr;
 
 	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
@@ -1534,7 +1687,8 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 
 	IAVF_WRITE_FLUSH(hw);
 
-	rte_intr_ack(&pci_dev->intr_handle);
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+		rte_intr_ack(&pci_dev->intr_handle);
 
 	return 0;
 }
@@ -1975,6 +2129,7 @@ iavf_init_vf(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
 		goto err_api;
 	}
+
 	if (iavf_get_vf_resource(adapter) != 0) {
 		PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
 		goto err_alloc;
@@ -2009,6 +2164,18 @@ iavf_init_vf(struct rte_eth_dev *dev)
 		}
 	}
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
+		bufsz = sizeof(struct virtchnl_qos_cap_list) +
+			IAVF_MAX_TRAFFIC_CLASS *
+			sizeof(struct virtchnl_qos_cap_elem);
+		vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
+		if (!vf->qos_cap) {
+			PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
+			goto err_rss;
+		}
+		iavf_tm_conf_init(dev);
+	}
+
 	iavf_init_proto_xtr(dev);
 
 	return 0;
@@ -2016,6 +2183,7 @@ err_rss:
 	rte_free(vf->rss_key);
 	rte_free(vf->rss_lut);
 err_alloc:
+	rte_free(vf->qos_cap);
 	rte_free(vf->vf_res);
 	vf->vsi_res = NULL;
 err_api:
@@ -2026,6 +2194,30 @@ err:
 	return -1;
 }
 
+static void
+iavf_uninit_vf(struct rte_eth_dev *dev)
+{
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_shutdown_adminq(hw);
+
+	rte_free(vf->vf_res);
+	vf->vsi_res = NULL;
+	vf->vf_res = NULL;
+
+	rte_free(vf->aq_resp);
+	vf->aq_resp = NULL;
+
+	rte_free(vf->qos_cap);
+	vf->qos_cap = NULL;
+
+	rte_free(vf->rss_lut);
+	vf->rss_lut = NULL;
+	rte_free(vf->rss_key);
+	vf->rss_key = NULL;
+}
+
 /* Enable default admin queue interrupt setting */
 static inline void
 iavf_enable_irq0(struct iavf_hw *hw)
@@ -2065,33 +2257,58 @@ iavf_dev_interrupt_handler(void *param)
 	iavf_enable_irq0(hw);
 }
 
-static int
-iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
-		     enum rte_filter_type filter_type,
-		     enum rte_filter_op filter_op,
-		     void *arg)
+void
+iavf_dev_alarm_handler(void *param)
 {
-	int ret = 0;
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t icr0;
 
-	if (!dev)
-		return -EINVAL;
+	iavf_disable_irq0(hw);
 
-	switch (filter_type) {
-	case RTE_ETH_FILTER_GENERIC:
-		if (filter_op != RTE_ETH_FILTER_GET)
-			return -EINVAL;
-		*(const void **)arg = &iavf_flow_ops;
-		break;
-	default:
-		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
-			    filter_type);
-		ret = -EINVAL;
-		break;
+	/* read out interrupt causes */
+	icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);
+
+	if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
+		PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
+		iavf_handle_virtchnl_msg(dev);
 	}
 
-	return ret;
+	iavf_enable_irq0(hw);
+
+	rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
+			  iavf_dev_alarm_handler, dev);
+}
+
+static int
+iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+	*ops = &iavf_flow_ops;
+	return 0;
 }
 
+static void
+iavf_default_rss_disable(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	int ret = 0;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		/* Set hena = 0 to ask PF to cleanup all existing RSS. */
+		ret = iavf_set_hena(adapter, 0);
+		if (ret)
+			/* It is a workaround, temporarily allow error to be
+			 * returned due to possible lack of PF handling for
+			 * hena = 0.
+			 */
+			PMD_INIT_LOG(WARNING, "fail to disable default RSS,"
+				     "lack PF support");
+	}
+}
 
 static int
 iavf_dev_init(struct rte_eth_dev *eth_dev)
@@ -2099,6 +2316,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	int ret = 0;
 
@@ -2152,7 +2370,8 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
 			     " store MAC addresses",
 			     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto init_vf_err;
 	}
 	/* If the MAC address is not configured by host,
 	 * generate a random one.
@@ -2163,13 +2382,18 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
 			&eth_dev->data->mac_addrs[0]);
 
-	/* register callback func to eal lib */
-	rte_intr_callback_register(&pci_dev->intr_handle,
-				   iavf_dev_interrupt_handler,
-				   (void *)eth_dev);
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+		/* register callback func to eal lib */
+		rte_intr_callback_register(&pci_dev->intr_handle,
+					   iavf_dev_interrupt_handler,
+					   (void *)eth_dev);
 
-	/* enable uio intr after callback register */
-	rte_intr_enable(&pci_dev->intr_handle);
+		/* enable uio intr after callback register */
+		rte_intr_enable(&pci_dev->intr_handle);
+	} else {
+		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
+				  iavf_dev_alarm_handler, eth_dev);
+	}
 
 	/* configure and enable device interrupt */
 	iavf_enable_irq0(hw);
@@ -2177,19 +2401,21 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	ret = iavf_flow_init(adapter);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to initialize flow");
-		return ret;
+		goto flow_init_err;
 	}
 
-	/* Set hena = 0 to ask PF to cleanup all existing RSS. */
-	ret = iavf_set_hena(adapter, 0);
-	if (ret)
-		/* It is a workaround, temporarily allow error to be returned
-		 * due to possible lack of PF handling for hena = 0.
-		 */
-		PMD_DRV_LOG(WARNING, "fail to disable default RSS,"
-			    "lack PF support");
+	iavf_default_rss_disable(adapter);
 
 	return 0;
+
+flow_init_err:
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+
+init_vf_err:
+	iavf_uninit_vf(eth_dev);
+
+	return ret;
 }
 
 static int
@@ -2220,14 +2446,21 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	iavf_config_promisc(adapter, false, false);
 	iavf_shutdown_adminq(hw);
 
-	/* disable uio intr before callback unregister */
-	rte_intr_disable(intr_handle);
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+		/* disable uio intr before callback unregister */
+		rte_intr_disable(intr_handle);
 
-	/* unregister callback func from eal lib */
-	rte_intr_callback_unregister(intr_handle,
-				     iavf_dev_interrupt_handler, dev);
+		/* unregister callback func from eal lib */
+		rte_intr_callback_unregister(intr_handle,
+					     iavf_dev_interrupt_handler, dev);
+	} else {
+		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
+	}
 
 	iavf_disable_irq0(hw);
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		iavf_tm_conf_uninit(dev);
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
 		if (vf->rss_lut) {
 			rte_free(vf->rss_lut);
@@ -2246,7 +2479,15 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	rte_free(vf->aq_resp);
 	vf->aq_resp = NULL;
 
-	vf->vf_reset = false;
+	/*
+	 * If the VF is reset via VFLR, the device will be knocked out of bus
+	 * master mode, and the driver will fail to recover from the reset. Fix
+	 * this by enabling bus mastering after every reset. In a non-VFLR case,
+	 * the bus master bit will not be disabled, and this call will have no
+	 * effect.
+	 */
+	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
+		vf->vf_reset = false;
 
 	return ret;
 }
@@ -2343,14 +2584,11 @@ RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
-RTE_LOG_REGISTER(iavf_logtype_init, pmd.net.iavf.init, NOTICE);
-RTE_LOG_REGISTER(iavf_logtype_driver, pmd.net.iavf.driver, NOTICE);
-#ifdef RTE_LIBRTE_IAVF_DEBUG_RX
-RTE_LOG_REGISTER(iavf_logtype_rx, pmd.net.iavf.rx, DEBUG);
-#endif
-#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
-RTE_LOG_REGISTER(iavf_logtype_tx, pmd.net.iavf.tx, DEBUG);
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
+#ifdef RTE_ETHDEV_DEBUG_RX
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
 #endif
-#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
-RTE_LOG_REGISTER(iavf_logtype_tx_free, pmd.net.iavf.tx_free, DEBUG);
+#ifdef RTE_ETHDEV_DEBUG_TX
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);
 #endif