net/iavf: fix overflow in maximum packet length config
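
The previous code stored rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS in a
uint16_t before taking the minimum with max_rx_pkt_len, so a large Rx buffer
made the product wrap; the resulting maximum packet length could then fall
below the jumbo-frame threshold and fail the check that follows in
iavf_init_rxq(). Below is a minimal, standalone sketch of that truncation and
of the uint32_t promotion used by the fix; the buffer length, the assumed
macro value of 5, and the simplified MIN() are illustrative only, not driver
code.

#include <stdint.h>
#include <stdio.h>

/* Assumed value for illustration; the real macro lives in iavf_rxtx.h. */
#define IAVF_MAX_CHAINED_RX_BUFFERS 5
/* Simplified stand-in for DPDK's RTE_MIN(). */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	uint16_t rx_buf_len = 13312;     /* illustrative large Rx buffer */
	uint32_t max_rx_pkt_len = 9728;  /* illustrative jumbo-frame request */

	/* Old code: the product is truncated by the uint16_t intermediate:
	 * 13312 * 5 = 66560, which wraps to 1024.
	 */
	uint16_t len = rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
	uint16_t old_max = MIN(len, max_rx_pkt_len);

	/* Patched code: promote to uint32_t before taking the minimum. */
	uint16_t new_max = MIN((uint32_t)rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
			       max_rx_pkt_len);

	printf("old=%u fixed=%u\n", old_max, new_max);  /* old=1024 fixed=9728 */
	return 0;
}
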
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 51cad48..dc5cbc2 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -122,9 +122,14 @@ static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
 static int iavf_set_mc_addr_list(struct rte_eth_dev *dev,
                        struct rte_ether_addr *mc_addrs,
                        uint32_t mc_addrs_num);
+static int iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg);
 
 static const struct rte_pci_id pci_id_iavf_map[] = {
        { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
+       { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF) },
+       { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_VF_HV) },
+       { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_VF) },
+       { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_X722_A0_VF) },
        { .vendor_id = 0, /* sentinel */ },
 };
 
@@ -196,8 +201,21 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
        .flow_ops_get               = iavf_dev_flow_ops_get,
        .tx_done_cleanup            = iavf_dev_tx_done_cleanup,
        .get_monitor_addr           = iavf_get_monitor_addr,
+       .tm_ops_get                 = iavf_tm_ops_get,
 };
 
+static int
+iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+                       void *arg)
+{
+       if (!arg)
+               return -EINVAL;
+
+       *(const void **)arg = &iavf_tm_ops;
+
+       return 0;
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
                        struct rte_ether_addr *mc_addrs,
@@ -241,6 +259,121 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
        return err;
 }
 
+static void
+iavf_config_rss_hf(struct iavf_adapter *adapter, uint64_t rss_hf)
+{
+       static const uint64_t map_hena_rss[] = {
+               /* IPv4 */
+               [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+                               ETH_RSS_NONFRAG_IPV4_UDP,
+               [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+                               ETH_RSS_NONFRAG_IPV4_UDP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV4_UDP] =
+                               ETH_RSS_NONFRAG_IPV4_UDP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+                               ETH_RSS_NONFRAG_IPV4_TCP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV4_TCP] =
+                               ETH_RSS_NONFRAG_IPV4_TCP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP] =
+                               ETH_RSS_NONFRAG_IPV4_SCTP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER] =
+                               ETH_RSS_NONFRAG_IPV4_OTHER,
+               [IAVF_FILTER_PCTYPE_FRAG_IPV4] = ETH_RSS_FRAG_IPV4,
+
+               /* IPv6 */
+               [IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+                               ETH_RSS_NONFRAG_IPV6_UDP,
+               [IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+                               ETH_RSS_NONFRAG_IPV6_UDP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV6_UDP] =
+                               ETH_RSS_NONFRAG_IPV6_UDP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+                               ETH_RSS_NONFRAG_IPV6_TCP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV6_TCP] =
+                               ETH_RSS_NONFRAG_IPV6_TCP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP] =
+                               ETH_RSS_NONFRAG_IPV6_SCTP,
+               [IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER] =
+                               ETH_RSS_NONFRAG_IPV6_OTHER,
+               [IAVF_FILTER_PCTYPE_FRAG_IPV6] = ETH_RSS_FRAG_IPV6,
+
+               /* L2 Payload */
+               [IAVF_FILTER_PCTYPE_L2_PAYLOAD] = ETH_RSS_L2_PAYLOAD
+       };
+
+       const uint64_t ipv4_rss = ETH_RSS_NONFRAG_IPV4_UDP |
+                                 ETH_RSS_NONFRAG_IPV4_TCP |
+                                 ETH_RSS_NONFRAG_IPV4_SCTP |
+                                 ETH_RSS_NONFRAG_IPV4_OTHER |
+                                 ETH_RSS_FRAG_IPV4;
+
+       const uint64_t ipv6_rss = ETH_RSS_NONFRAG_IPV6_UDP |
+                                 ETH_RSS_NONFRAG_IPV6_TCP |
+                                 ETH_RSS_NONFRAG_IPV6_SCTP |
+                                 ETH_RSS_NONFRAG_IPV6_OTHER |
+                                 ETH_RSS_FRAG_IPV6;
+
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       uint64_t caps = 0, hena = 0, valid_rss_hf = 0;
+       uint32_t i;
+       int ret;
+
+       ret = iavf_get_hena_caps(adapter, &caps);
+       if (ret) {
+               /**
+                * RSS offload type configuration is not a necessary feature
+                * for VF, so here just print a warning and return.
+                */
+               PMD_DRV_LOG(WARNING,
+                           "fail to get RSS offload type caps, ret: %d", ret);
+               return;
+       }
+
+       /**
+        * ETH_RSS_IPV4 and ETH_RSS_IPV6 can be considered as 2
+        * generalizations of all other IPv4 and IPv6 RSS types.
+        */
+       if (rss_hf & ETH_RSS_IPV4)
+               rss_hf |= ipv4_rss;
+
+       if (rss_hf & ETH_RSS_IPV6)
+               rss_hf |= ipv6_rss;
+
+       RTE_BUILD_BUG_ON(RTE_DIM(map_hena_rss) > sizeof(uint64_t) * CHAR_BIT);
+
+       for (i = 0; i < RTE_DIM(map_hena_rss); i++) {
+               uint64_t bit = BIT_ULL(i);
+
+               if ((caps & bit) && (map_hena_rss[i] & rss_hf)) {
+                       valid_rss_hf |= map_hena_rss[i];
+                       hena |= bit;
+               }
+       }
+
+       ret = iavf_set_hena(adapter, hena);
+       if (ret) {
+               /**
+                * RSS offload type configuration is not a necessary feature
+                * for VF, so here just print a warning and return.
+                */
+               PMD_DRV_LOG(WARNING,
+                           "fail to set RSS offload types, ret: %d", ret);
+               return;
+       }
+
+       if (valid_rss_hf & ipv4_rss)
+               valid_rss_hf |= rss_hf & ETH_RSS_IPV4;
+
+       if (valid_rss_hf & ipv6_rss)
+               valid_rss_hf |= rss_hf & ETH_RSS_IPV6;
+
+       if (rss_hf & ~valid_rss_hf)
+               PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
+                           rss_hf & ~valid_rss_hf);
+
+       vf->rss_hf = valid_rss_hf;
+}
+
 static int
 iavf_init_rss(struct iavf_adapter *adapter)
 {
@@ -257,19 +390,11 @@ iavf_init_rss(struct iavf_adapter *adapter)
                PMD_DRV_LOG(DEBUG, "RSS is not supported");
                return -ENOTSUP;
        }
-       if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
-               PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
-               /* set all lut items to default queue */
-               for (i = 0; i < vf->vf_res->rss_lut_size; i++)
-                       vf->rss_lut[i] = 0;
-               ret = iavf_configure_rss_lut(adapter);
-               return ret;
-       }
 
        /* configure RSS key */
        if (!rss_conf->rss_key) {
                /* Calculate the default hash key */
-               for (i = 0; i <= vf->vf_res->rss_key_size; i++)
+               for (i = 0; i < vf->vf_res->rss_key_size; i++)
                        vf->rss_key[i] = (uint8_t)rte_rand();
        } else
                rte_memcpy(vf->rss_key, rss_conf->rss_key,
@@ -297,6 +422,8 @@ iavf_init_rss(struct iavf_adapter *adapter)
                        PMD_DRV_LOG(ERR, "fail to set default RSS");
                        return ret;
                }
+       } else {
+               iavf_config_rss_hf(adapter, rss_conf->rss_hf);
        }
 
        return 0;
@@ -447,13 +574,14 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 {
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_dev_data *dev_data = dev->data;
-       uint16_t buf_size, max_pkt_len, len;
+       uint16_t buf_size, max_pkt_len;
 
        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 
        /* Calculate the maximum packet length allowed */
-       len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
-       max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       max_pkt_len = RTE_MIN((uint32_t)
+                       rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
+                       dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
        /* Check if the jumbo frame and maximum packet length are set
         * correctly.
@@ -705,6 +833,12 @@ iavf_dev_start(struct rte_eth_dev *dev)
                                      dev->data->nb_tx_queues);
        num_queue_pairs = vf->num_queue_pairs;
 
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+               if (iavf_get_qos_cap(adapter)) {
+                       PMD_INIT_LOG(ERR, "Failed to get qos capability");
+                       return -1;
+               }
+
        if (iavf_init_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "failed to do Queue init");
                return -1;
@@ -997,7 +1131,7 @@ iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
                return -EINVAL;
        }
 
-       err = iavf_add_del_eth_addr(adapter, addr, true);
+       err = iavf_add_del_eth_addr(adapter, addr, true, VIRTCHNL_ETHER_ADDR_EXTRA);
        if (err) {
                PMD_DRV_LOG(ERR, "fail to add MAC address");
                return -EIO;
@@ -1019,7 +1153,7 @@ iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 
        addr = &dev->data->mac_addrs[index];
 
-       err = iavf_add_del_eth_addr(adapter, addr, false);
+       err = iavf_add_del_eth_addr(adapter, addr, false, VIRTCHNL_ETHER_ADDR_EXTRA);
        if (err)
                PMD_DRV_LOG(ERR, "fail to delete MAC address");
 
@@ -1278,6 +1412,8 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
                        PMD_DRV_LOG(ERR, "fail to set new RSS");
                        return ret;
                }
+       } else {
+               iavf_config_rss_hf(adapter, rss_conf->rss_hf);
        }
 
        return 0;
@@ -1339,17 +1475,15 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
        struct iavf_adapter *adapter =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
-       struct rte_ether_addr *perm_addr, *old_addr;
+       struct rte_ether_addr *old_addr;
        int ret;
 
        old_addr = (struct rte_ether_addr *)hw->mac.addr;
-       perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;
 
-       /* If the MAC address is configured by host, skip the setting */
-       if (rte_is_valid_assigned_ether_addr(perm_addr))
-               return -EPERM;
+       if (rte_is_same_ether_addr(old_addr, mac_addr))
+               return 0;
 
-       ret = iavf_add_del_eth_addr(adapter, old_addr, false);
+       ret = iavf_add_del_eth_addr(adapter, old_addr, false, VIRTCHNL_ETHER_ADDR_PRIMARY);
        if (ret)
                PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
                            " %02X:%02X:%02X:%02X:%02X:%02X",
@@ -1360,7 +1494,7 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
                            old_addr->addr_bytes[4],
                            old_addr->addr_bytes[5]);
 
-       ret = iavf_add_del_eth_addr(adapter, mac_addr, true);
+       ret = iavf_add_del_eth_addr(adapter, mac_addr, true, VIRTCHNL_ETHER_ADDR_PRIMARY);
        if (ret)
                PMD_DRV_LOG(ERR, "Fail to add new MAC:"
                            " %02X:%02X:%02X:%02X:%02X:%02X",
@@ -1987,6 +2121,7 @@ iavf_init_vf(struct rte_eth_dev *dev)
                PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
                goto err_api;
        }
+
        if (iavf_get_vf_resource(adapter) != 0) {
                PMD_INIT_LOG(ERR, "iavf_get_vf_config failed");
                goto err_alloc;
@@ -2021,6 +2156,18 @@ iavf_init_vf(struct rte_eth_dev *dev)
                }
        }
 
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
+               bufsz = sizeof(struct virtchnl_qos_cap_list) +
+                       IAVF_MAX_TRAFFIC_CLASS *
+                       sizeof(struct virtchnl_qos_cap_elem);
+               vf->qos_cap = rte_zmalloc("qos_cap", bufsz, 0);
+               if (!vf->qos_cap) {
+                       PMD_INIT_LOG(ERR, "unable to allocate qos_cap memory");
+                       goto err_rss;
+               }
+               iavf_tm_conf_init(dev);
+       }
+
        iavf_init_proto_xtr(dev);
 
        return 0;
@@ -2028,6 +2175,7 @@ err_rss:
        rte_free(vf->rss_key);
        rte_free(vf->rss_lut);
 err_alloc:
+       rte_free(vf->qos_cap);
        rte_free(vf->vf_res);
        vf->vsi_res = NULL;
 err_api:
@@ -2094,7 +2242,7 @@ iavf_default_rss_disable(struct iavf_adapter *adapter)
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
        int ret = 0;
 
-       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) {
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                /* Set hena = 0 to ask PF to cleanup all existing RSS. */
                ret = iavf_set_hena(adapter, 0);
                if (ret)
@@ -2235,6 +2383,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
                                     iavf_dev_interrupt_handler, dev);
        iavf_disable_irq0(hw);
 
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+               iavf_tm_conf_uninit(dev);
+
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                if (vf->rss_lut) {
                        rte_free(vf->rss_lut);
@@ -2253,7 +2404,15 @@ iavf_dev_close(struct rte_eth_dev *dev)
        rte_free(vf->aq_resp);
        vf->aq_resp = NULL;
 
-       vf->vf_reset = false;
+       /*
+        * If the VF is reset via VFLR, the device will be knocked out of bus
+        * master mode, and the driver will fail to recover from the reset. Fix
+        * this by enabling bus mastering after every reset. In a non-VFLR case,
+        * the bus master bit will not be disabled, and this call will have no
+        * effect.
+        */
+       if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
+               vf->vf_reset = false;
 
        return ret;
 }
@@ -2323,10 +2482,58 @@ exit:
        return ret;
 }
 
+static int
+iavf_drv_i40evf_check_handler(__rte_unused const char *key,
+                             const char *value, __rte_unused void *opaque)
+{
+       if (strcmp(value, "i40evf"))
+               return -1;
+
+       return 0;
+}
+
+static int
+iavf_drv_i40evf_selected(struct rte_devargs *devargs, uint16_t device_id)
+{
+       struct rte_kvargs *kvlist;
+       int ret = 0;
+
+       if (device_id != IAVF_DEV_ID_VF &&
+           device_id != IAVF_DEV_ID_VF_HV &&
+           device_id != IAVF_DEV_ID_X722_VF &&
+           device_id != IAVF_DEV_ID_X722_A0_VF)
+               return 0;
+
+       if (devargs == NULL)
+               return 0;
+
+       kvlist = rte_kvargs_parse(devargs->args, NULL);
+       if (kvlist == NULL)
+               return 0;
+
+       if (!rte_kvargs_count(kvlist, RTE_DEVARGS_KEY_DRIVER))
+               goto exit;
+
+       /* i40evf driver selected when there's a key-value pair:
+        * driver=i40evf
+        */
+       if (rte_kvargs_process(kvlist, RTE_DEVARGS_KEY_DRIVER,
+                              iavf_drv_i40evf_check_handler, NULL) < 0)
+               goto exit;
+
+       ret = 1;
+
+exit:
+       rte_kvargs_free(kvlist);
+       return ret;
+}
+
 static int eth_iavf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                             struct rte_pci_device *pci_dev)
 {
-       if (iavf_dcf_cap_selected(pci_dev->device.devargs))
+       if (iavf_dcf_cap_selected(pci_dev->device.devargs) ||
+           iavf_drv_i40evf_selected(pci_dev->device.devargs,
+                                    pci_dev->id.device_id))
                return 1;
 
        return rte_eth_dev_pci_generic_probe(pci_dev,
@@ -2349,12 +2556,12 @@ static struct rte_pci_driver rte_iavf_pmd = {
 RTE_PMD_REGISTER_PCI(net_iavf, rte_iavf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_iavf, pci_id_iavf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_iavf, "* igb_uio | vfio-pci");
-RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf");
-RTE_LOG_REGISTER(iavf_logtype_init, pmd.net.iavf.init, NOTICE);
-RTE_LOG_REGISTER(iavf_logtype_driver, pmd.net.iavf.driver, NOTICE);
+RTE_PMD_REGISTER_PARAM_STRING(net_iavf, "cap=dcf driver=i40evf");
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_driver, driver, NOTICE);
 #ifdef RTE_ETHDEV_DEBUG_RX
-RTE_LOG_REGISTER(iavf_logtype_rx, pmd.net.iavf.rx, DEBUG);
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_rx, rx, DEBUG);
 #endif
 #ifdef RTE_ETHDEV_DEBUG_TX
-RTE_LOG_REGISTER(iavf_logtype_tx, pmd.net.iavf.tx, DEBUG);
+RTE_LOG_REGISTER_SUFFIX(iavf_logtype_tx, tx, DEBUG);
 #endif
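
For reference, a minimal application-side sketch (not part of the patch) of
how an rss_hf request reaches the new iavf_config_rss_hf() path through the
generic ethdev API. It assumes the port is an iavf VF without the advanced RSS
capability, so the else branch added to iavf_dev_rss_hash_update() above is
taken, and it uses the pre-21.11 ETH_RSS_* names that this patch itself uses.

#include <rte_ethdev.h>

static int
request_ipv4_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the current hash key */
		/* Per this patch, ETH_RSS_IPV4 is widened by
		 * iavf_config_rss_hf() to the fragment and non-fragment
		 * IPv4 types it can map to HENA bits.
		 */
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
	};

	/* Ends up in iavf_dev_rss_hash_update(); without
	 * VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF the new else branch calls
	 * iavf_config_rss_hf(adapter, rss_conf.rss_hf).
	 */
	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
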