net/sfc: use action rules in tunnel offload jump rules
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 95fdb57..6cb8bb4 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -8,8 +8,8 @@
 #include <errno.h>
 #include <unistd.h>
 
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
 #include <rte_malloc.h>
@@ -37,7 +37,7 @@ static int avp_dev_create(struct rte_pci_device *pci_dev,
 
 static int avp_dev_configure(struct rte_eth_dev *dev);
 static int avp_dev_start(struct rte_eth_dev *dev);
-static void avp_dev_stop(struct rte_eth_dev *dev);
+static int avp_dev_stop(struct rte_eth_dev *dev);
 static int avp_dev_close(struct rte_eth_dev *dev);
 static int avp_dev_info_get(struct rte_eth_dev *dev,
                            struct rte_eth_dev_info *dev_info);
@@ -75,8 +75,8 @@ static uint16_t avp_xmit_pkts(void *tx_queue,
                              struct rte_mbuf **tx_pkts,
                              uint16_t nb_pkts);
 
-static void avp_dev_rx_queue_release(void *rxq);
-static void avp_dev_tx_queue_release(void *txq);
+static void avp_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void avp_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 static int avp_dev_stats_get(struct rte_eth_dev *dev,
                              struct rte_eth_stats *stats);
@@ -267,7 +267,7 @@ avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
                        break;
                }
 
-               if ((count < 1) && (retry == 0)) {
+               if (retry == 0) {
                        PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
                                    request->req_id);
                        ret = -ETIME;
@@ -974,6 +974,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* Check current migration status */
        if (avp_dev_migration_pending(eth_dev)) {
@@ -1204,17 +1205,17 @@ _avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
 {
        struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
+       if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->dst_addr) == 0)) {
                /* allow all packets destined to our address */
                return 0;
        }
 
-       if (likely(rte_is_broadcast_ether_addr(&eth->d_addr))) {
+       if (likely(rte_is_broadcast_ether_addr(&eth->dst_addr))) {
                /* allow all broadcast packets */
                return 0;
        }
 
-       if (likely(rte_is_multicast_ether_addr(&eth->d_addr))) {
+       if (likely(rte_is_multicast_ether_addr(&eth->dst_addr))) {
                /* allow all multicast packets */
                return 0;
        }
@@ -1925,18 +1926,11 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 }
 
 static void
-avp_dev_rx_queue_release(void *rx_queue)
+avp_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
-       struct avp_queue *rxq = (struct avp_queue *)rx_queue;
-       struct avp_dev *avp = rxq->avp;
-       struct rte_eth_dev_data *data = avp->dev_data;
-       unsigned int i;
-
-       for (i = 0; i < avp->num_rx_queues; i++) {
-               if (data->rx_queues[i] == rxq) {
-                       rte_free(data->rx_queues[i]);
-                       data->rx_queues[i] = NULL;
-               }
+       if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
+               rte_free(eth_dev->data->rx_queues[rx_queue_id]);
+               eth_dev->data->rx_queues[rx_queue_id] = NULL;
        }
 }
 
@@ -1956,18 +1950,11 @@ avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev)
 }
 
 static void
-avp_dev_tx_queue_release(void *tx_queue)
+avp_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 {
-       struct avp_queue *txq = (struct avp_queue *)tx_queue;
-       struct avp_dev *avp = txq->avp;
-       struct rte_eth_dev_data *data = avp->dev_data;
-       unsigned int i;
-
-       for (i = 0; i < avp->num_tx_queues; i++) {
-               if (data->tx_queues[i] == txq) {
-                       rte_free(data->tx_queues[i]);
-                       data->tx_queues[i] = NULL;
-               }
+       if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
+               rte_free(eth_dev->data->tx_queues[tx_queue_id]);
+               eth_dev->data->tx_queues[tx_queue_id] = NULL;
        }
 }
 
@@ -2075,7 +2062,7 @@ unlock:
        return ret;
 }
 
-static void
+static int
 avp_dev_stop(struct rte_eth_dev *eth_dev)
 {
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -2084,6 +2071,7 @@ avp_dev_stop(struct rte_eth_dev *eth_dev)
        rte_spinlock_lock(&avp->lock);
        if (avp->flags & AVP_F_DETACHED) {
                PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+               ret = -ENOTSUP;
                goto unlock;
        }
 
@@ -2099,6 +2087,7 @@ avp_dev_stop(struct rte_eth_dev *eth_dev)
 
 unlock:
        rte_spinlock_unlock(&avp->lock);
+       return ret;
 }
 
 static int
@@ -2306,4 +2295,4 @@ avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
 
 RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
-RTE_LOG_REGISTER(avp_logtype_driver, pmd.net.avp.driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(avp_logtype_driver, driver, NOTICE);