diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 7bf31d4f4e..5a5a7f59e1 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -16,11 +16,12 @@
 #include <rte_interrupts.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
+#include <rte_alarm.h>
 #include <rte_atomic.h>
 #include <rte_eal.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
@@ -117,16 +118,19 @@
 static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					  uint16_t queue_id);
 static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					   uint16_t queue_id);
-static int iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
-		     enum rte_filter_type filter_type,
-		     enum rte_filter_op filter_op,
-		     void *arg);
+static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
+				 const struct rte_flow_ops **ops);
 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addrs,
			uint32_t mc_addrs_num);
+static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
 
 static const struct rte_pci_id pci_id_iavf_map[] = {
 	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
+	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
@@ -195,10 +199,24 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.mtu_set                    = iavf_dev_mtu_set,
 	.rx_queue_intr_enable       = iavf_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable      = iavf_dev_rx_queue_intr_disable,
-	.filter_ctrl                = iavf_dev_filter_ctrl,
+	.flow_ops_get               = iavf_dev_flow_ops_get,
 	.tx_done_cleanup            = iavf_dev_tx_done_cleanup,
+	.get_monitor_addr           = iavf_get_monitor_addr,
+	.tm_ops_get                 = iavf_tm_ops_get,
 };
 
+static int
+iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+		void *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	*(const void **)arg = &iavf_tm_ops;
+
+	return 0;
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addrs,
@@ -242,6 +260,121 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 	return err;
 }
 
+static void
+iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
+{
+	static const uint64_t map_hena_rss[] = {
+		/* IPv4 */
+		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+				ETH_RSS_NONFRAG_IPV4_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+				ETH_RSS_NONFRAG_IPV4_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
+				ETH_RSS_NONFRAG_IPV4_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+				ETH_RSS_NONFRAG_IPV4_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
+				ETH_RSS_NONFRAG_IPV4_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+				ETH_RSS_NONFRAG_IPV4_SCTP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+				ETH_RSS_NONFRAG_IPV4_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+
+		/* IPv6 */
+		[IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+				ETH_RSS_NONFRAG_IPV6_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+				ETH_RSS_NONFRAG_IPV6_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
+				ETH_RSS_NONFRAG_IPV6_UDP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+				ETH_RSS_NONFRAG_IPV6_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
+				ETH_RSS_NONFRAG_IPV6_TCP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+				ETH_RSS_NONFRAG_IPV6_SCTP,
+		[IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+				ETH_RSS_NONFRAG_IPV6_OTHER,
+		[IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+
+		/* L2 Payload */
+		[IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+	};
+
+	const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
+				  ETH_RSS_NONFRAG_IPV4_TCP |
+				  ETH_RSS_NONFRAG_IPV4_SCTP |
+				  ETH_RSS_NONFRAG_IPV4_OTHER |
+				  ETH_RSS_FRAG_IPV4;
+
+	const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
+				  ETH_RSS_NONFRAG_IPV6_TCP |
+				  ETH_RSS_NONFRAG_IPV6_SCTP |
+				  ETH_RSS_NONFRAG_IPV6_OTHER |
+				  ETH_RSS_FRAG_IPV6;
+
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
+	uint32_t i;
+	int ret;
+
+	ret = iavf_get_hena_caps(adapter, &caps);
+	if (ret) {
+		/**
+		 * RSS offload type configuration is not a necessary feature
+		 * for VF, so here just print a warning and return.
+		 */
+		PMD_DRV_LOG(WARNING,
+			    "fail to get RSS offload type caps, ret: %d", ret);
+		return;
+	}
+
+	/**
+	 * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+	 * generalizations of all other IPv4 and IPv6 RSS types.
+	 */
+	if (rss_hf & ETH_RSS_IPV4)
+		rss_hf |= ipv4_rss;
+
+	if (rss_hf & ETH_RSS_IPV6)
+		rss_hf |= ipv6_rss;
+
+	RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
+
+	for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
+		uint64_t bit = BIT_ULL(i);
+
+		if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
+			valid_rss_hf |= map_hena_rss[i];
+			hena |= bit;
+		}
+	}
+
+	ret = iavf_set_hena(adapter, hena);
+	if (ret) {
+		/**
+		 * RSS offload type configuration is not a necessary feature
+		 * for VF, so here just print a warning and return.
+		 */
+		PMD_DRV_LOG(WARNING,
+			    "fail to set RSS offload types, ret: %d", ret);
+		return;
+	}
+
+	if (valid_rss_hf & ipv4_rss)
+		valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+
+	if (valid_rss_hf & ipv6_rss)
+		valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+
+	if (rss_hf & ~valid_rss_hf)
+		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
+			    rss_hf & ~valid_rss_hf);
+
+	vf->rss_hf = valid_rss_hf;
+}
+
 static int
 iavf_init_rss(struct iavf_adapter *adapter)
 {
@@ -258,19 +391,11 @@ iavf_init_rss(struct iavf_adapter *adapter)
 		PMD_DRV_LOG(DEBUG, "RSS is not supported");
 		return -ENOTSUP;
 	}
-	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
-		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
-		/* set all lut items to default queue */
-		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
-			vf->rss_lut[i] = 0;
-		ret = iavf_configure_rss_lut(adapter);
-		return ret;
-	}
 
 	/* configure RSS key */
 	if (!rss_conf->rss_key) {
 		/* Calculate the default hash key */
-		for (i = 0; i <= vf->vf_res->rss_key_size; i++)
+		for (i = 0; i < vf->vf_res->rss_key_size; i++)
 			vf->rss_key[i] = (uint8_t)rte_rand();
 	} else
 		rte_memcpy(vf->rss_key, rss_conf->rss_key,
@@ -291,11 +416,15 @@ iavf_init_rss(struct iavf_adapter *adapter)
 	if (ret)
 		return ret;
 
-	/* Set RSS hash configuration based on rss_conf->rss_hf. */
-	ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "fail to set default RSS");
-		return ret;
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
+		/* Set RSS hash configuration based on rss_conf->rss_hf. */
+		ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "fail to set default RSS");
+			return ret;
+		}
+	} else {
+		iavf_config_rss_hf(adapter, rss_conf->rss_hf);
 	}
 
 	return 0;
@@ -326,13 +455,52 @@ iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
 	return 0;
 }
 
+static int
+iavf_dev_vlan_insert_set(struct rte_eth_dev *dev)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	bool enable;
+
+	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2))
+		return 0;
+
+	enable = !!(dev->data->dev_conf.txmode.offloads &
+		    DEV_TX_OFFLOAD_VLAN_INSERT);
+	iavf_config_vlan_insert_v2(adapter, enable);
+
+	return 0;
+}
+
+static int
+iavf_dev_init_vlan(struct rte_eth_dev *dev)
+{
+	int err;
+
+	err = iavf_dev_vlan_offload_set(dev,
+					ETH_VLAN_STRIP_MASK |
+					ETH_QINQ_STRIP_MASK |
+					ETH_VLAN_FILTER_MASK |
+					ETH_VLAN_EXTEND_MASK);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to update vlan offload");
+		return err;
+	}
+
+	err = iavf_dev_vlan_insert_set(dev);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to update vlan insertion");
+
+	return err;
+}
+
 static int
 iavf_dev_configure(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(ad);
-	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 	uint16_t num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
 		dev->data->nb_tx_queues);
 	int ret;
@@ -389,13 +557,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 		vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
 	}
 
-	/* Vlan stripping setting */
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			iavf_enable_vlan_strip(ad);
-		else
-			iavf_disable_vlan_strip(ad);
-	}
+	ret = iavf_dev_init_vlan(dev);
+	if (ret)
+		PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret);
 
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
 		if (iavf_init_rss(ad) != 0) {
@@ -411,35 +575,36 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_eth_dev_data *dev_data = dev->data;
-	uint16_t buf_size, max_pkt_len, len;
+	uint16_t buf_size, max_pkt_len;
 
 	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 
 	/* Calculate the maximum packet length allowed */
-	len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
-	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+	max_pkt_len = RTE_MIN((uint32_t)
+			      rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
+			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
 	/* Check if the jumbo frame and maximum packet length are set
 	 * correctly.
 	 */
 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
+		if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
 		    max_pkt_len > IAVF_FRAME_SIZE_MAX) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
 				    "larger than %u and smaller than %u, "
 				    "as jumbo frame is enabled",
-				    (uint32_t)RTE_ETHER_MAX_LEN,
+				    (uint32_t)IAVF_ETH_MAX_LEN,
 				    (uint32_t)IAVF_FRAME_SIZE_MAX);
 			return -EINVAL;
 		}
 	} else {
 		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
-		    max_pkt_len > RTE_ETHER_MAX_LEN) {
+		    max_pkt_len > IAVF_ETH_MAX_LEN) {
 			PMD_DRV_LOG(ERR, "maximum packet length must be "
 				    "larger than %u and smaller than %u, "
 				    "as jumbo frame is disabled",
 				    (uint32_t)RTE_ETHER_MIN_LEN,
-				    (uint32_t)RTE_ETHER_MAX_LEN);
+				    (uint32_t)IAVF_ETH_MAX_LEN);
 			return -EINVAL;
 		}
 	}
@@ -511,7 +676,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 	if (!qv_map) {
 		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
 				dev->data->nb_rx_queues);
-		return -1;
+		goto qv_map_alloc_err;
 	}
 
 	if (!dev->data->dev_conf.intr_conf.rxq ||
@@ -541,9 +706,9 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 	 */
 		vf->msix_base = IAVF_MISC_VEC_ID;
 
-		/* set ITR to max */
+		/* set ITR to default */
 		interval = iavf_calc_itr_interval(
-				IAVF_QUEUE_ITR_INTERVAL_MAX);
+				IAVF_QUEUE_ITR_INTERVAL_DEFAULT);
 		IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTL01,
 			       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
 			       (IAVF_ITR_INDEX_DEFAULT <<
@@ -575,15 +740,15 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		/* If Rx interrupt is reuquired, and we can use
 		 * multi interrupts, then the vec is from 1
 		 */
-		vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
-				      intr_handle->nb_efd);
+		vf->nb_msix = RTE_MIN(intr_handle->nb_efd,
+				      (uint16_t)(vf->vf_res->max_vectors - 1));
 		vf->msix_base = IAVF_RX_VEC_START;
 		vec = IAVF_RX_VEC_START;
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			qv_map[i].queue_id = i;
 			qv_map[i].vector_id = vec;
 			intr_handle->intr_vec[i] = vec++;
-			if (vec >= vf->nb_msix)
+			if (vec >= vf->nb_msix + IAVF_RX_VEC_START)
 				vec = IAVF_RX_VEC_START;
 		}
 		vf->qv_map = qv_map;
@@ -596,7 +761,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 	if (!vf->lv_enabled) {
 		if (iavf_config_irq_map(adapter)) {
 			PMD_DRV_LOG(ERR, "config interrupt mapping failed");
-			return -1;
+			goto config_irq_map_err;
 		}
 	} else {
 		uint16_t num_qv_maps = dev->data->nb_rx_queues;
@@ -606,7 +771,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 			if (iavf_config_irq_map_lv(adapter,
 					IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
 				PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
-				return -1;
+				goto config_irq_map_err;
 			}
 			num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
 			index += IAVF_IRQ_MAP_NUM_PER_BUF;
@@ -614,10 +779,20 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
 			PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
-			return -1;
+			goto config_irq_map_err;
 		}
 	}
 	return 0;
+
+config_irq_map_err:
+	rte_free(vf->qv_map);
+	vf->qv_map = NULL;
+
+qv_map_alloc_err:
+	rte_free(intr_handle->intr_vec);
+	intr_handle->intr_vec = NULL;
+
+	return -1;
 }
 
 static int
@@ -669,6 +844,12 @@ iavf_dev_start(struct rte_eth_dev *dev)
 				      dev->data->nb_tx_queues);
 	num_queue_pairs = vf->num_queue_pairs;
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		if (iavf_get_qos_cap(adapter)) {
+			PMD_INIT_LOG(ERR, "Failed to get qos capability");
+			return -1;
+		}
+
 	if (iavf_init_queues(dev) != 0) {
 		PMD_DRV_LOG(ERR, "failed to do Queue init");
 		return -1;
@@ -698,7 +879,8 @@ iavf_dev_start(struct rte_eth_dev *dev)
 	}
 	/* re-enable intr again, because efd assign may change */
 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
-		rte_intr_disable(intr_handle);
+		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+			rte_intr_disable(intr_handle);
 		rte_intr_enable(intr_handle);
 	}
 
@@ -732,6 +914,10 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
+	    dev->data->dev_conf.intr_conf.rxq != 0)
+		rte_intr_disable(intr_handle);
+
 	if (adapter->stopped == 1)
 		return 0;
 
@@ -784,6 +970,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_RX_OFFLOAD_JUMBO_FRAME |
 		DEV_RX_OFFLOAD_VLAN_FILTER |
 		DEV_RX_OFFLOAD_RSS_HASH;
+
 	dev_info->tx_offload_capa =
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -800,6 +987,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_MULTI_SEGS |
 		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -957,7 +1147,7 @@ iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
 		return -EINVAL;
 	}
 
-	err = iavf_add_del_eth_addr(adapter, addr, true);
+	err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to add MAC address");
 		return -EIO;
@@ -979,7 +1169,7 @@ iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 
 	addr = &dev->data->mac_addrs[index];
 
-	err = iavf_add_del_eth_addr(adapter, addr, false);
+	err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to delete MAC address");
 
@@ -994,6 +1184,13 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	int err;
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+		err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
+		if (err)
+			return -EIO;
+		return 0;
+	}
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
 		return -ENOTSUP;
 
@@ -1003,6 +1200,57 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	return 0;
 }
 
+static void
+iavf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable)
+{
+	struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint32_t i, j;
+	uint64_t ids;
+
+	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
+		if (vfc->ids[i] == 0)
+			continue;
+
+		ids = vfc->ids[i];
+		for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) {
+			if (ids & 1)
+				iavf_add_del_vlan_v2(adapter,
+						     64 * i + j, enable);
+		}
+	}
+}
+
+static int
+iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
+{
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	bool enable;
+	int err;
+
+	if (mask & ETH_VLAN_FILTER_MASK) {
+		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+
+		iavf_iterate_vlan_filters_v2(dev, enable);
+	}
+
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		enable = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+
+		err = iavf_config_vlan_strip_v2(adapter, enable);
+		/* If not support, the stripping is already disabled by PF */
+		if (err == -ENOTSUP && !enable)
+			err = 0;
+		if (err)
+			return -EIO;
+	}
+
+	return 0;
+}
+
 static int
 iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
@@ -1012,6 +1260,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 	int err;
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
+		return iavf_dev_vlan_offload_set_v2(dev, mask);
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
 		return -ENOTSUP;
 
@@ -1148,20 +1399,37 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
-	if (rss_conf->rss_hf == 0)
+	if (rss_conf->rss_hf == 0) {
+		vf->rss_hf = 0;
+		ret = iavf_set_hena(adapter, 0);
+
+		/* It is a workaround, temporarily allow error to be returned
+		 * due to possible lack of PF handling for hena = 0.
+		 */
+		if (ret)
+			PMD_DRV_LOG(WARNING, "fail to clean existing RSS, lack PF support");
 		return 0;
+	}
 
-	/* Overwritten default RSS. */
-	ret = iavf_set_hena(adapter, 0);
-	if (ret)
-		PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
-			    __func__, ret);
-
-	/* Set new RSS configuration. */
-	ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "fail to set new RSS");
-		return ret;
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
+		/* Clear existing RSS. */
+		ret = iavf_set_hena(adapter, 0);
+
+		/* It is a workaround, temporarily allow error to be returned
+		 * due to possible lack of PF handling for hena = 0.
+		 */
+		if (ret)
+			PMD_DRV_LOG(WARNING, "fail to clean existing RSS,"
+				    "lack PF support");
+
+		/* Set new RSS configuration. */
+		ret = iavf_rss_hash_set(adapter, rss_conf->rss_hf, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "fail to set new RSS");
+			return ret;
+		}
+	} else {
+		iavf_config_rss_hf(adapter, rss_conf->rss_hf);
 	}
 
 	return 0;
@@ -1204,7 +1472,7 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 		return -EBUSY;
 	}
 
-	if (frame_size > RTE_ETHER_MAX_LEN)
+	if (frame_size > IAVF_ETH_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads |=
 				DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
@@ -1223,37 +1491,25 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
-	struct rte_ether_addr *perm_addr, *old_addr;
+	struct rte_ether_addr *old_addr;
 	int ret;
 
 	old_addr = (struct rte_ether_addr *)hw->mac.addr;
-	perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;
 
-	/* If the MAC address is configured by host, skip the setting */
-	if (rte_is_valid_assigned_ether_addr(perm_addr))
-		return -EPERM;
+	if (rte_is_same_ether_addr(old_addr, mac_addr))
+		return 0;
 
-	ret = iavf_add_del_eth_addr(adapter, old_addr, false);
+	ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
-			    " %02X:%02X:%02X:%02X:%02X:%02X",
-			    old_addr->addr_bytes[0],
-			    old_addr->addr_bytes[1],
-			    old_addr->addr_bytes[2],
-			    old_addr->addr_bytes[3],
-			    old_addr->addr_bytes[4],
-			    old_addr->addr_bytes[5]);
-
-	ret = iavf_add_del_eth_addr(adapter, mac_addr, true);
+			    RTE_ETHER_ADDR_PRT_FMT,
+			    RTE_ETHER_ADDR_BYTES(old_addr));
+
+	ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Fail to add new MAC:"
-			    " %02X:%02X:%02X:%02X:%02X:%02X",
-			    mac_addr->addr_bytes[0],
-			    mac_addr->addr_bytes[1],
-			    mac_addr->addr_bytes[2],
-			    mac_addr->addr_bytes[3],
-			    mac_addr->addr_bytes[4],
-			    mac_addr->addr_bytes[5]);
+			    RTE_ETHER_ADDR_PRT_FMT,
+			    RTE_ETHER_ADDR_BYTES(mac_addr));
 
 	if (ret)
 		return -EIO;
 
@@ -1314,6 +1570,9 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	ret = iavf_query_stats(adapter, &pstats);
 	if (ret == 0) {
+		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
+					 DEV_RX_OFFLOAD_KEEP_CRC) ? 0 :
+					 RTE_ETHER_CRC_LEN;
 		iavf_update_stats(vsi, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
 				pstats->rx_broadcast - pstats->rx_discards;
@@ -1322,7 +1581,7 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		stats->imissed = pstats->rx_discards;
 		stats->oerrors = pstats->tx_errors + pstats->tx_discards;
 		stats->ibytes = pstats->rx_bytes;
-		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
+		stats->ibytes -= stats->ipackets * crc_stats_len;
 		stats->obytes = pstats->tx_bytes;
 	} else {
 		PMD_DRV_LOG(ERR, "Get statistics failed");
@@ -1407,6 +1666,7 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t msix_intr;
 
 	msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
@@ -1427,7 +1687,8 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 
 	IAVF_WRITE_FLUSH(hw);
 
-	rte_intr_ack(&pci_dev->intr_handle);
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+		rte_intr_ack(&pci_dev->intr_handle);
 
 	return 0;
 }
@@ -1868,6 +2129,7 @@ iavf_init_vf(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
 		goto err_api;
 	}
+
 	if (iavf_get_vf_resource(adapter) != 0) {
 		PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
 		goto err_alloc;
@@ -1895,6 +2157,25 @@ iavf_init_vf(struct rte_eth_dev *dev)
 		}
 	}
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+		if (iavf_get_vlan_offload_caps_v2(adapter) != 0) {
+			PMD_INIT_LOG(ERR, "failed to do get VLAN offload v2 capabilities");
+			goto err_rss;
+		}
+	}
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
+		bufsz = sizeof(struct virtchnl_qos_cap_list) +
+			IAVF_MAX_TRAFFIC_CLASS *
+			sizeof(struct virtchnl_qos_cap_elem);
+		vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
+		if (!vf->qos_cap) {
+			PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
+			goto err_rss;
+		}
+		iavf_tm_conf_init(dev);
+	}
+
 	iavf_init_proto_xtr(dev);
 
 	return 0;
@@ -1902,6 +2183,7 @@ err_rss:
 	rte_free(vf->rss_key);
 	rte_free(vf->rss_lut);
 err_alloc:
+	rte_free(vf->qos_cap);
 	rte_free(vf->vf_res);
 	vf->vsi_res = NULL;
 err_api:
@@ -1912,6 +2194,30 @@ err:
 	return -1;
 }
 
+static void
+iavf_uninit_vf(struct rte_eth_dev *dev)
+{
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_shutdown_adminq(hw);
+
+	rte_free(vf->vf_res);
+	vf->vsi_res = NULL;
+	vf->vf_res = NULL;
+
+	rte_free(vf->aq_resp);
+	vf->aq_resp = NULL;
+
+	rte_free(vf->qos_cap);
+	vf->qos_cap = NULL;
+
+	rte_free(vf->rss_lut);
+	vf->rss_lut = NULL;
+	rte_free(vf->rss_key);
+	vf->rss_key = NULL;
+}
+
 /* Enable default admin queue interrupt setting */
 static inline void
 iavf_enable_irq0(struct iavf_hw *hw)
@@ -1951,33 +2257,58 @@ iavf_dev_interrupt_handler(void *param)
 	iavf_enable_irq0(hw);
 }
 
-static int
-iavf_dev_filter_ctrl(struct rte_eth_dev *dev,
-		     enum rte_filter_type filter_type,
-		     enum rte_filter_op filter_op,
-		     void *arg)
+void
+iavf_dev_alarm_handler(void *param)
 {
-	int ret = 0;
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t icr0;
 
-	if (!dev)
-		return -EINVAL;
+	iavf_disable_irq0(hw);
 
-	switch (filter_type) {
-	case RTE_ETH_FILTER_GENERIC:
-		if (filter_op != RTE_ETH_FILTER_GET)
-			return -EINVAL;
-		*(const void **)arg = &iavf_flow_ops;
-		break;
-	default:
-		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
-			    filter_type);
-		ret = -EINVAL;
-		break;
+	/* read out interrupt causes */
+	icr0 = IAVF_READ_REG(hw, IAVF_VFINT_ICR01);
+
+	if (icr0 & IAVF_VFINT_ICR01_ADMINQ_MASK) {
+		PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
+		iavf_handle_virtchnl_msg(dev);
 	}
 
-	return ret;
+	iavf_enable_irq0(hw);
+
+	rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
+			  iavf_dev_alarm_handler, dev);
 }
 
+static int
+iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+	*ops = &iavf_flow_ops;
+	return 0;
+}
+
+static void
+iavf_default_rss_disable(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	int ret = 0;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		/* Set hena = 0 to ask PF to cleanup all existing RSS. */
+		ret = iavf_set_hena(adapter, 0);
+		if (ret)
+			/* It is a workaround, temporarily allow error to be
+			 * returned due to possible lack of PF handling for
+			 * hena = 0.
+			 */
+			PMD_INIT_LOG(WARNING, "fail to disable default RSS,"
+				     "lack PF support");
+	}
+}
 
 static int
 iavf_dev_init(struct rte_eth_dev *eth_dev)
@@ -1985,6 +2316,7 @@
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	int ret = 0;
 
@@ -2038,7 +2370,8 @@
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
 			     " store MAC addresses",
 			     RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto init_vf_err;
 	}
 	/* If the MAC address is not configured by host,
 	 * generate a random one.
@@ -2049,13 +2382,18 @@
 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
 			    &eth_dev->data->mac_addrs[0]);
 
-	/* register callback func to eal lib */
-	rte_intr_callback_register(&pci_dev->intr_handle,
-				   iavf_dev_interrupt_handler,
-				   (void *)eth_dev);
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+		/* register callback func to eal lib */
+		rte_intr_callback_register(&pci_dev->intr_handle,
+					   iavf_dev_interrupt_handler,
+					   (void *)eth_dev);
 
-	/* enable uio intr after callback register */
-	rte_intr_enable(&pci_dev->intr_handle);
+		/* enable uio intr after callback register */
+		rte_intr_enable(&pci_dev->intr_handle);
+	} else {
+		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
+				  iavf_dev_alarm_handler, eth_dev);
+	}
 
 	/* configure and enable device interrupt */
 	iavf_enable_irq0(hw);
@@ -2063,17 +2401,21 @@
 	ret = iavf_flow_init(adapter);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to initialize flow");
-		return ret;
+		goto flow_init_err;
 	}
 
-	/* Set hena = 0 to ask PF to cleanup all existing RSS. */
-	ret = iavf_set_hena(adapter, 0);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "fail to disable default PF RSS");
-		return ret;
-	}
+	iavf_default_rss_disable(adapter);
 
 	return 0;
+
+flow_init_err:
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+
+init_vf_err:
+	iavf_uninit_vf(eth_dev);
+
+	return ret;
 }
 
 static int
@@ -2104,14 +2446,21 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	iavf_config_promisc(adapter, false, false);
 	iavf_shutdown_adminq(hw);
 
-	/* disable uio intr before callback unregister */
-	rte_intr_disable(intr_handle);
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+		/* disable uio intr before callback unregister */
+		rte_intr_disable(intr_handle);
 
-	/* unregister callback func from eal lib */
-	rte_intr_callback_unregister(intr_handle,
-				     iavf_dev_interrupt_handler, dev);
+		/* unregister callback func from eal lib */
+		rte_intr_callback_unregister(intr_handle,
+					     iavf_dev_interrupt_handler, dev);
+	} else {
+		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
+	}
 
 	iavf_disable_irq0(hw);
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		iavf_tm_conf_uninit(dev);
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
 		if (vf->rss_lut) {
 			rte_free(vf->rss_lut);
@@ -2130,7 +2479,15 @@
 	rte_free(vf->aq_resp);
 	vf->aq_resp = NULL;
 
-	vf->vf_reset = false;
+	/*
+	 * If the VF is reset via VFLR, the device will be knocked out of bus
+	 * master mode, and the driver will fail to recover from the reset. Fix
+	 * this by enabling bus mastering after every reset. In a non-VFLR case,
+	 * the bus master bit will not be disabled, and this call will have no
+	 * effect.
+	 */
+	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
+		vf->vf_reset = false;
 
 	return ret;
 }
@@ -2227,14 +2584,11 @@
 RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
-RTE_LOG_REGISTER(iavf_logtype_init, pmd.net.iavf.init, NOTICE);
-RTE_LOG_REGISTER(iavf_logtype_driver, pmd.net.iavf.driver, NOTICE);
-#ifdef RTE_LIBRTE_IAVF_DEBUG_RX
-RTE_LOG_REGISTER(iavf_logtype_rx, pmd.net.iavf.rx, DEBUG);
-#endif
-#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
-RTE_LOG_REGISTER(iavf_logtype_tx, pmd.net.iavf.tx, DEBUG);
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
+#ifdef RTE_ETHDEV_DEBUG_RX
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
 #endif
-#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_FREE
-RTE_LOG_REGISTER(iavf_logtype_tx_free, pmd.net.iavf.tx_free, DEBUG);
+#ifdef RTE_ETHDEV_DEBUG_TX
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);
 #endif
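
A note on the hena mechanism that the new iavf_config_rss_hf() above relies on: each hardware packet-classifier type (PCTYPE) owns one bit of a 64-bit enable register ("hena"), and the driver translates between those hardware bits and the generic ethdev rss_hf flags through a lookup table indexed by PCTYPE. The standalone sketch below reproduces only that translation loop so the two warning paths are easier to follow; the PCTYPE indices and flag values here are illustrative placeholders, not the real IAVF_FILTER_PCTYPE_* / ETH_RSS_* definitions.

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

/* hypothetical PCTYPE indices (hardware bit positions) */
#define PCTYPE_IPV4_UDP	31
#define PCTYPE_IPV4_TCP	33
#define PCTYPE_IPV6_UDP	41

/* hypothetical rss_hf flags (software capability bits) */
#define RSS_IPV4_UDP	BIT_ULL(0)
#define RSS_IPV4_TCP	BIT_ULL(1)
#define RSS_IPV6_UDP	BIT_ULL(2)

/* one rss_hf flag per PCTYPE bit, as in the driver's map_hena_rss[] */
static const uint64_t map_hena_rss[64] = {
	[PCTYPE_IPV4_UDP] = RSS_IPV4_UDP,
	[PCTYPE_IPV4_TCP] = RSS_IPV4_TCP,
	[PCTYPE_IPV6_UDP] = RSS_IPV6_UDP,
};

int main(void)
{
	/* caps: hena bits the PF reports as supported */
	uint64_t caps = BIT_ULL(PCTYPE_IPV4_UDP) | BIT_ULL(PCTYPE_IPV6_UDP);
	/* rss_hf: hash types requested by the application */
	uint64_t rss_hf = RSS_IPV4_UDP | RSS_IPV4_TCP;
	uint64_t hena = 0, valid_rss_hf = 0;
	unsigned int i;

	for (i = 0; i < 64; i++) {
		uint64_t bit = BIT_ULL(i);

		/* enable a PCTYPE only if the hardware supports it and
		 * the application asked for the flag it maps to
		 */
		if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
			valid_rss_hf |= map_hena_rss[i];
			hena |= bit;
		}
	}

	/* RSS_IPV4_TCP was requested, but its PCTYPE is not in caps, so it
	 * never enters valid_rss_hf: prints hena=0x80000000 valid_rss_hf=0x1
	 */
	printf("hena=0x%llx valid_rss_hf=0x%llx\n",
	       (unsigned long long)hena, (unsigned long long)valid_rss_hf);
	return 0;
}

A requested rss_hf bit that no supported PCTYPE maps to simply never enters valid_rss_hf, which is exactly the case the driver reports with its "Unsupported rss_hf" warning before storing valid_rss_hf in vf->rss_hf.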