#include <errno.h>
#include <unistd.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
static int avp_dev_configure(struct rte_eth_dev *dev);
static int avp_dev_start(struct rte_eth_dev *dev);
-static void avp_dev_stop(struct rte_eth_dev *dev);
+static int avp_dev_stop(struct rte_eth_dev *dev);
static int avp_dev_close(struct rte_eth_dev *dev);
static int avp_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-static void avp_dev_rx_queue_release(void *rxq);
-static void avp_dev_tx_queue_release(void *txq);
+static void avp_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void avp_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static int avp_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
break;
}
- if ((count < 1) && (retry == 0)) {
+ if (retry == 0) {
PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
request->req_id);
ret = -ETIME;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Check current migration status */
if (avp_dev_migration_pending(eth_dev)) {
{
struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
- if (likely(_avp_cmp_ether_addr(&avp->ethaddr, ð->d_addr) == 0)) {
+ if (likely(_avp_cmp_ether_addr(&avp->ethaddr, ð->dst_addr) == 0)) {
/* allow all packets destined to our address */
return 0;
}
- if (likely(rte_is_broadcast_ether_addr(ð->d_addr))) {
+ if (likely(rte_is_broadcast_ether_addr(ð->dst_addr))) {
/* allow all broadcast packets */
return 0;
}
- if (likely(rte_is_multicast_ether_addr(ð->d_addr))) {
+ if (likely(rte_is_multicast_ether_addr(ð->dst_addr))) {
/* allow all multicast packets */
return 0;
}
}
static void
-avp_dev_rx_queue_release(void *rx_queue)
+avp_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- struct avp_queue *rxq = (struct avp_queue *)rx_queue;
- struct avp_dev *avp = rxq->avp;
- struct rte_eth_dev_data *data = avp->dev_data;
- unsigned int i;
-
- for (i = 0; i < avp->num_rx_queues; i++) {
- if (data->rx_queues[i] == rxq) {
- rte_free(data->rx_queues[i]);
- data->rx_queues[i] = NULL;
- }
+ if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
+ rte_free(eth_dev->data->rx_queues[rx_queue_id]);
+ eth_dev->data->rx_queues[rx_queue_id] = NULL;
}
}
}
static void
-avp_dev_tx_queue_release(void *tx_queue)
+avp_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
- struct avp_queue *txq = (struct avp_queue *)tx_queue;
- struct avp_dev *avp = txq->avp;
- struct rte_eth_dev_data *data = avp->dev_data;
- unsigned int i;
-
- for (i = 0; i < avp->num_tx_queues; i++) {
- if (data->tx_queues[i] == txq) {
- rte_free(data->tx_queues[i]);
- data->tx_queues[i] = NULL;
- }
+ if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
+ rte_free(eth_dev->data->tx_queues[tx_queue_id]);
+ eth_dev->data->tx_queues[tx_queue_id] = NULL;
}
}
return ret;
}
-static void
+static int
avp_dev_stop(struct rte_eth_dev *eth_dev)
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
rte_spinlock_lock(&avp->lock);
if (avp->flags & AVP_F_DETACHED) {
PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ ret = -ENOTSUP;
goto unlock;
}
unlock:
rte_spinlock_unlock(&avp->lock);
+ return ret;
}
static int
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
int ret;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
rte_spinlock_lock(&avp->lock);
if (avp->flags & AVP_F_DETACHED) {
PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
/* Register the AVP PMD with the PCI bus and publish its device id table. */
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
/*
 * Driver log type at default level NOTICE.  The SUFFIX macro derives the
 * full name ("pmd.net.avp.driver") from the component, replacing the older
 * RTE_LOG_REGISTER form that spelled the dotted name out explicitly.
 */
RTE_LOG_REGISTER_SUFFIX(avp_logtype_driver, driver, NOTICE);