diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c
index 5ae4dc9..f5f15c0 100644
--- a/drivers/net/netvsc/hn_vf.c
+++ b/drivers/net/netvsc/hn_vf.c
@@ -82,8 +82,6 @@ static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
 
        PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
        hv->vf_port = port_id;
-       rte_smp_wmb();
-
        return 0;
 }
 
@@ -98,19 +96,9 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
                return port;
        }
 
-       rte_spinlock_lock(&hv->vf_lock);
        err = hn_vf_attach(hv, port);
-
-       if (err == 0) {
-               dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
-               hv->vf_intr = (struct rte_intr_handle) {
-                       .fd = -1,
-                       .type = RTE_INTR_HANDLE_EXT,
-               };
-               dev->intr_handle = &hv->vf_intr;
+       if (err == 0)
                hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
-       }
-       rte_spinlock_unlock(&hv->vf_lock);
 
        return err;
 }
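Taken together with the hunks below, the locking model becomes reader/writer:
query and data paths acquire hv->vf_lock as readers, while VF attach/detach
(driven by the host's association message) acquires it as a writer. The write
lock also provides the ordering that the removed rte_smp_wmb() barriers used
to supply. A minimal standalone sketch of the pattern, using the DPDK rwlock
API; the file-scope vf_port guard and do_query() helper are hypothetical
stand-ins, not driver code:

        #include <stdint.h>

        #include <rte_rwlock.h>

        #define INVALID_PORT UINT16_MAX /* stand-in for HN_INVALID_PORT */

        extern int do_query(uint16_t port); /* hypothetical per-port work */

        static rte_rwlock_t vf_lock = RTE_RWLOCK_INITIALIZER;
        static uint16_t vf_port = INVALID_PORT;

        /* Hot path: any number of readers may hold the lock concurrently. */
        int vf_query(void)
        {
                int ret = 0;

                rte_rwlock_read_lock(&vf_lock);
                if (vf_port != INVALID_PORT)
                        ret = do_query(vf_port);
                rte_rwlock_read_unlock(&vf_lock);
                return ret;
        }

        /* Control path: the writer excludes all readers while it publishes
         * or withdraws the port, so no explicit barrier is needed.
         */
        void vf_set_port(uint16_t port)
        {
                rte_rwlock_write_lock(&vf_lock);
                vf_port = port;
                rte_rwlock_write_unlock(&vf_lock);
        }
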
@@ -119,22 +107,18 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
 static void hn_vf_remove(struct hn_data *hv)
 {
 
-       rte_spinlock_lock(&hv->vf_lock);
-
        if (!hn_vf_attached(hv)) {
                PMD_DRV_LOG(ERR, "VF path not active");
        } else {
                /* Stop incoming packets from arriving on VF */
                hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
 
-               /* Stop transmission over VF */
-               hv->vf_port = HN_INVALID_PORT;
-               rte_smp_wmb();
-
                /* Give back ownership */
                rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);
+
+               /* Stop transmission over VF */
+               hv->vf_port = HN_INVALID_PORT;
        }
-       rte_spinlock_unlock(&hv->vf_lock);
 }
 
 /* Handle VF association message from host */
@@ -156,15 +140,27 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
                    vf_assoc->allocated ? "add to" : "remove from",
                    dev->data->port_id);
 
+       rte_rwlock_write_lock(&hv->vf_lock);
        hv->vf_present = vf_assoc->allocated;
 
-       if (dev->state != RTE_ETH_DEV_ATTACHED)
-               return;
+       if (dev->state == RTE_ETH_DEV_ATTACHED) {
+               if (vf_assoc->allocated)
+                       hn_vf_add(dev, hv);
+               else
+                       hn_vf_remove(hv);
+       }
+       rte_rwlock_write_unlock(&hv->vf_lock);
+}
 
-       if (vf_assoc->allocated)
-               hn_vf_add(dev, hv);
-       else
-               hn_vf_remove(hv);
+static void
+hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
+                    const struct rte_eth_desc_lim *vf_lim)
+{
+       lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
+       lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
+       lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
+       lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
+       lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max, lim->nb_mtu_seg_max);
 }
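hn_vf_merge_desc_lim() narrows the synthetic device's descriptor limits so
that whatever the application picks is also acceptable to the VF: upper
bounds take the minimum of the two devices (RTE_MIN), lower bounds and
alignment take the maximum (RTE_MAX). A worked example, with illustrative
numbers only:

        /* Synthetic device limits before the merge. */
        struct rte_eth_desc_lim lim = {
                .nb_max = 4096, .nb_min = 64, .nb_align = 32,
                .nb_seg_max = 40, .nb_mtu_seg_max = 40,
        };
        /* Limits reported by the VF. */
        const struct rte_eth_desc_lim vf_lim = {
                .nb_max = 1024, .nb_min = 128, .nb_align = 8,
                .nb_seg_max = 8, .nb_mtu_seg_max = 8,
        };

        hn_vf_merge_desc_lim(&lim, &vf_lim);
        /* lim is now { .nb_max = 1024, .nb_min = 128, .nb_align = 32,
         *              .nb_seg_max = 8, .nb_mtu_seg_max = 8 }, a range
         * both devices can satisfy.
         */
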
 
 /*
@@ -196,11 +192,13 @@ static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
                                      info->max_tx_queues);
        info->tx_offload_capa &= vf_info.tx_offload_capa;
        info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
+       hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);
 
        info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
                                       info->min_rx_bufsize);
        info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
                                       info->max_rx_pktlen);
+       hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);
 
        return 0;
 }
@@ -210,85 +208,11 @@ int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = hn_vf_info_merge(vf_dev, info);
-       rte_spinlock_unlock(&hv->vf_lock);
-       return ret;
-}
-
-int hn_vf_link_update(struct rte_eth_dev *dev,
-                     int wait_to_complete)
-{
-       struct hn_data *hv = dev->data->dev_private;
-       struct rte_eth_dev *vf_dev;
-       int ret = 0;
-
-       rte_spinlock_lock(&hv->vf_lock);
-       vf_dev = hn_get_vf_dev(hv);
-       if (vf_dev && vf_dev->dev_ops->link_update)
-               ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
-       rte_spinlock_unlock(&hv->vf_lock);
-
-       return ret;
-}
-
-/* called when VF has link state interrupts enabled */
-static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
-                          enum rte_eth_event_type event,
-                          void *cb_arg, void *out __rte_unused)
-{
-       struct rte_eth_dev *dev = cb_arg;
-
-       if (event != RTE_ETH_EVENT_INTR_LSC)
-               return 0;
-
-       /* if link state has changed pass on */
-       if (hn_dev_link_update(dev, 0) == 0)
-               return 0; /* no change */
-
-       return _rte_eth_dev_callback_process(dev,
-                                            RTE_ETH_EVENT_INTR_LSC,
-                                            NULL);
-}
-
-static int _hn_vf_configure(struct rte_eth_dev *dev,
-                           uint16_t vf_port,
-                           const struct rte_eth_conf *dev_conf)
-{
-       struct rte_eth_conf vf_conf = *dev_conf;
-       struct rte_eth_dev *vf_dev;
-       int ret;
-
-       vf_dev = &rte_eth_devices[vf_port];
-       if (dev_conf->intr_conf.lsc &&
-           (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
-               PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
-                           vf_port);
-               vf_conf.intr_conf.lsc = 1;
-       } else {
-               PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
-                           vf_port);
-               vf_conf.intr_conf.lsc = 0;
-       }
-
-       ret = rte_eth_dev_configure(vf_port,
-                                   dev->data->nb_rx_queues,
-                                   dev->data->nb_tx_queues,
-                                   &vf_conf);
-       if (ret) {
-               PMD_DRV_LOG(ERR,
-                           "VF configuration failed: %d", ret);
-       } else if (vf_conf.intr_conf.lsc) {
-               ret = rte_eth_dev_callback_register(vf_port,
-                                                   RTE_ETH_EVENT_INTR_LSC,
-                                                   hn_vf_lsc_event, dev);
-               if (ret)
-                       PMD_DRV_LOG(ERR,
-                                   "Failed to register LSC callback for VF %u",
-                                   vf_port);
-       }
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -300,12 +224,23 @@ int hn_vf_configure(struct rte_eth_dev *dev,
                    const struct rte_eth_conf *dev_conf)
 {
        struct hn_data *hv = dev->data->dev_private;
+       struct rte_eth_conf vf_conf = *dev_conf;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
-       if (hv->vf_port != HN_INVALID_PORT)
-               ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
-       rte_spinlock_unlock(&hv->vf_lock);
+       /* link state interrupt does not matter here. */
+       vf_conf.intr_conf.lsc = 0;
+
+       rte_rwlock_read_lock(&hv->vf_lock);
+       if (hv->vf_port != HN_INVALID_PORT) {
+               ret = rte_eth_dev_configure(hv->vf_port,
+                                           dev->data->nb_rx_queues,
+                                           dev->data->nb_tx_queues,
+                                           &vf_conf);
+               if (ret != 0)
+                       PMD_DRV_LOG(ERR,
+                                   "VF configuration failed: %d", ret);
+       }
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -315,11 +250,11 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
        struct rte_eth_dev *vf_dev;
        const uint32_t *ptypes = NULL;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
                ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 
        return ptypes;
 }
@@ -330,11 +265,11 @@ int hn_vf_start(struct rte_eth_dev *dev)
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_start(vf_dev->data->port_id);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -343,11 +278,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                rte_eth_dev_stop(vf_dev->data->port_id);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 /* If VF is present, then cascade configuration down */
@@ -355,11 +290,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
        {                                                       \
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
-               rte_spinlock_lock(&hv->vf_lock);                \
+               rte_rwlock_read_lock(&hv->vf_lock);             \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        func(vf_dev->data->port_id);            \
-               rte_spinlock_unlock(&hv->vf_lock);              \
+               rte_rwlock_read_unlock(&hv->vf_lock);           \
        }
 
 /* If VF is present, then cascade the call down and return its status */
@@ -368,11 +303,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
                int ret = 0;                                    \
-               rte_spinlock_lock(&hv->vf_lock);                \
+               rte_rwlock_read_lock(&hv->vf_lock);             \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        ret = func(vf_dev->data->port_id);      \
-               rte_spinlock_unlock(&hv->vf_lock);              \
+               rte_rwlock_read_unlock(&hv->vf_lock);           \
                return ret;                                     \
        }
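For reference, the RET_STATUS variant expands as follows for
hn_vf_stats_reset() below (expansion written out by hand, whitespace
adjusted):

        int hn_vf_stats_reset(struct rte_eth_dev *dev)
        {
                struct hn_data *hv = dev->data->dev_private;
                struct rte_eth_dev *vf_dev;
                int ret = 0;

                rte_rwlock_read_lock(&hv->vf_lock);
                vf_dev = hn_get_vf_dev(hv);
                if (vf_dev)
                        ret = rte_eth_stats_reset(vf_dev->data->port_id);
                rte_rwlock_read_unlock(&hv->vf_lock);
                return ret;
        }
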
 
@@ -386,13 +321,13 @@ void hn_vf_close(struct rte_eth_dev *dev)
        struct hn_data *hv = dev->data->dev_private;
        uint16_t vf_port;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_port = hv->vf_port;
        if (vf_port != HN_INVALID_PORT)
                rte_eth_dev_close(vf_port);
 
        hv->vf_port = HN_INVALID_PORT;
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_stats_reset(struct rte_eth_dev *dev)
@@ -400,14 +335,14 @@ int hn_vf_stats_reset(struct rte_eth_dev *dev)
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
 }
 
-void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
+       VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
 }
 
-void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
 {
-       VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
+       VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
 }
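These wrappers switch from void to int to follow the ethdev API, which now
reports a status from the allmulticast calls; using the RET_STATUS macro
variant lets failures from the VF propagate. An illustrative caller:

        int ret = hn_vf_allmulticast_enable(dev);

        if (ret != 0)
                PMD_DRV_LOG(ERR, "VF allmulticast enable failed: %d", ret);
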
 
 int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
@@ -428,12 +363,12 @@ int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
                                                   mc_addr_set, nb_mc_addr);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -446,13 +381,13 @@ int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, tx_conf);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -460,7 +395,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
 {
        struct rte_eth_dev *vf_dev;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
                void *subq = vf_dev->data->tx_queues[queue_id];
@@ -468,7 +403,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
                (*vf_dev->dev_ops->tx_queue_release)(subq);
        }
 
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
@@ -481,13 +416,13 @@ int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, rx_conf, mp);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -495,14 +430,14 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
 {
        struct rte_eth_dev *vf_dev;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
                void *subq = vf_dev->data->rx_queues[queue_id];
 
                (*vf_dev->dev_ops->rx_queue_release)(subq);
        }
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_stats_get(struct rte_eth_dev *dev,
@@ -512,11 +447,11 @@ int hn_vf_stats_get(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -528,12 +463,12 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int i, count = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                count = rte_eth_xstats_get_names(vf_dev->data->port_id,
                                                 names, n);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 
        /* add vf_ prefix to xstat names */
        if (names) {
@@ -557,12 +492,12 @@ int hn_vf_xstats_get(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int i, count = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                count = rte_eth_xstats_get(vf_dev->data->port_id,
                                           xstats + offset, n - offset);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 
        /* Offset IDs for VF stats */
        if (count > 0) {
@@ -579,13 +514,13 @@ int hn_vf_xstats_reset(struct rte_eth_dev *dev)
        struct rte_eth_dev *vf_dev;
        int ret;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_xstats_reset(vf_dev->data->port_id);
        else
                ret = -EINVAL;
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 
        return ret;
 }
@@ -597,11 +532,11 @@ int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rss_hash_update)
                ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 
        return ret;
 }
@@ -614,12 +549,12 @@ int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->reta_update)
                ret = vf_dev->dev_ops->reta_update(vf_dev,
                                                   reta_conf, reta_size);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 
        return ret;
 }