X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Favp%2Favp_ethdev.c;h=bd5eba4a06ec4f376ee1c4a98e4edba1bbdc8e3c;hb=35b2d13fd6fdcbd191f2a30d74648faeb1186c65;hp=081a397055deb6d0178053451bb69bb830bcdfc0;hpb=ea37523dbc7b71e861b468dbedb46cbfcf6bd8fb;p=dpdk.git

diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 081a397055..bd5eba4a06 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (c) 2013-2017, Wind River Systems, Inc.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions are met:
- *
- *   1) Redistributions of source code must retain the above copyright notice,
- *      this list of conditions and the following disclaimer.
- *
- *   2) Redistributions in binary form must reproduce the above copyright notice,
- *      this list of conditions and the following disclaimer in the documentation
- *      and/or other materials provided with the distribution.
- *
- *   3) Neither the name of Wind River Systems nor the names of its contributors
- *      may be used to endorse or promote products derived from this software
- *      without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
  */

 #include <stdint.h>
@@ -36,17 +8,19 @@
 #include <errno.h>
 #include <unistd.h>

-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
-#include <rte_memzone.h>
 #include <rte_malloc.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_pci.h>
+#include <rte_bus_pci.h>
 #include <rte_ether.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
+#include <rte_spinlock.h>
 #include <rte_byteorder.h>
 #include <rte_dev.h>
 #include <rte_memory.h>
@@ -58,7 +32,10 @@
 
 #include "avp_logs.h"
 
+int avp_logtype_driver;
 
+static int avp_dev_create(struct rte_pci_device *pci_dev,
+			  struct rte_eth_dev *eth_dev);
 
 static int avp_dev_configure(struct rte_eth_dev *dev);
 static int avp_dev_start(struct rte_eth_dev *dev);
@@ -66,9 +43,8 @@ static void avp_dev_stop(struct rte_eth_dev *dev);
 static void avp_dev_close(struct rte_eth_dev *dev);
 static void avp_dev_info_get(struct rte_eth_dev *dev,
 			     struct rte_eth_dev_info *dev_info);
-static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static int avp_dev_link_update(struct rte_eth_dev *dev,
-			       __rte_unused int wait_to_complete);
+static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
 static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
 
@@ -104,18 +80,15 @@ static uint16_t avp_xmit_pkts(void *tx_queue,
 static void avp_dev_rx_queue_release(void *rxq);
 static void avp_dev_tx_queue_release(void *txq);
 
-static void avp_dev_stats_get(struct rte_eth_dev *dev,
+static int avp_dev_stats_get(struct rte_eth_dev *dev,
 			      struct rte_eth_stats *stats);
 static void avp_dev_stats_reset(struct rte_eth_dev *dev);
 
 
-#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
-
-
 #define AVP_MAX_RX_BURST 64
 #define AVP_MAX_TX_BURST 64
 #define AVP_MAX_MAC_ADDRS 1
-#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
+#define AVP_MIN_RX_BUFSIZE RTE_ETHER_MIN_LEN
 
 
 /*
@@ -173,6 +146,7 @@ static const struct eth_dev_ops avp_eth_dev_ops = {
 #define AVP_F_PROMISC (1 << 1)
 #define AVP_F_CONFIGURED (1 << 2)
 #define AVP_F_LINKUP (1 << 3)
+#define AVP_F_DETACHED (1 << 4)
 /**@} */
 
 /* Ethernet device validation marker */
@@ -185,11 +159,11 @@
 struct avp_dev {
 	uint32_t magic;	/**< Memory validation marker */
 	uint64_t device_id; /**< Unique system identifier */
-	struct ether_addr ethaddr; /**< Host specified MAC address */
+	struct rte_ether_addr ethaddr; /**< Host specified MAC address */
 	struct rte_eth_dev_data *dev_data;
 	/**< Back pointer to ethernet device data */
 	volatile uint32_t flags; /**< Device operational flags */
-	uint8_t port_id; /**< Ethernet port identifier */
+	uint16_t port_id; /**< Ethernet port identifier */
 	struct rte_mempool *pool; /**< pkt mbuf mempool */
 	unsigned int guest_mbuf_size; /**< local pool mbuf size */
 	unsigned int host_mbuf_size; /**< host mbuf size */
@@ -208,6 +182,9 @@ struct avp_dev {
 	struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
 	/**< To be freed mbufs queue */
 
+	/* mutual exclusion over the 'flags' and 'resp_q/req_q' fields */
+	rte_spinlock_t lock;
+
 	/* For request & response */
 	struct rte_avp_fifo *req_q; /**< Request queue */
 	struct rte_avp_fifo *resp_q; /**< Response queue */
@@ -383,9 +360,9 @@ avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
 /* translate from host physical address to guest virtual address */
 static void *
 avp_dev_translate_address(struct rte_eth_dev *eth_dev,
-			  phys_addr_t host_phys_addr)
+			  rte_iova_t host_phys_addr)
 {
-	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_mem_resource *resource;
 	struct rte_avp_memmap_info *info;
 	struct rte_avp_memmap *map;
@@ -406,7 +383,7 @@ avp_dev_translate_address(struct rte_eth_dev *eth_dev,
 		    (host_phys_addr < (map->phys_addr + map->length))) {
 			/* address is within this segment */
 			offset += (host_phys_addr - map->phys_addr);
-			addr = RTE_PTR_ADD(addr, offset);
+			addr = RTE_PTR_ADD(addr, (uintptr_t)offset);
 
 			PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
 				    host_phys_addr, addr);
@@ -438,7 +415,7 @@ avp_dev_version_check(uint32_t version)
 static int
 avp_dev_check_regions(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_avp_memmap_info *memmap;
 	struct rte_avp_device_info *info;
 	struct rte_mem_resource *resource;
@@ -495,6 +472,46 @@ avp_dev_check_regions(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static int
+avp_dev_detach(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	int ret;
+
+	PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
+		    eth_dev->data->port_id, avp->device_id);
+
+	rte_spinlock_lock(&avp->lock);
+
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(NOTICE, "port %u already detached\n",
+			    eth_dev->data->port_id);
+		ret = 0;
+		goto unlock;
+	}
+
+	/* shutdown the device first so the host stops sending us packets. */
+	ret = avp_dev_ctrl_shutdown(eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
+			    ret);
+		avp->flags &= ~AVP_F_DETACHED;
+		goto unlock;
+	}
+
+	avp->flags |= AVP_F_DETACHED;
+	rte_wmb();
+
+	/* wait for queues to acknowledge the presence of the detach flag */
+	rte_delay_ms(1);
+
+	ret = 0;
+
+unlock:
+	rte_spinlock_unlock(&avp->lock);
+	return ret;
+}
+
 static void
 _avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
@@ -534,7 +551,7 @@ _avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 static void
 _avp_set_queue_counts(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_avp_device_info *host_info;
 	void *addr;
@@ -563,6 +580,239 @@ _avp_set_queue_counts(struct rte_eth_dev *eth_dev)
 		    avp->num_tx_queues, avp->num_rx_queues);
 }
 
+static int
+avp_dev_attach(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_avp_device_config config;
+	unsigned int i;
+	int ret;
+
+	PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
+		    eth_dev->data->port_id, avp->device_id);
+
+	rte_spinlock_lock(&avp->lock);
+
+	if (!(avp->flags & AVP_F_DETACHED)) {
+		PMD_DRV_LOG(NOTICE, "port %u already attached\n",
+			    eth_dev->data->port_id);
+		ret = 0;
+		goto unlock;
+	}
+
+	/*
+	 * make sure that the detached flag is set prior to reconfiguring the
+	 * queues.
+	 */
+	avp->flags |= AVP_F_DETACHED;
+	rte_wmb();
+
+	/*
+	 * re-run the device create utility which will parse the new host info
+	 * and setup the AVP device queue pointers.
+	 */
+	ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
+			    ret);
+		goto unlock;
+	}
+
+	if (avp->flags & AVP_F_CONFIGURED) {
+		/*
+		 * Update the receive queue mapping to handle cases where the
+		 * source and destination hosts have different queue
+		 * requirements.  As long as the DETACHED flag is asserted the
+		 * queue table should not be referenced so it should be safe to
+		 * update it.
+		 */
+		_avp_set_queue_counts(eth_dev);
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+			_avp_set_rx_queue_mappings(eth_dev, i);
+
+		/*
+		 * Update the host with our config details so that it knows the
+		 * device is active.
+		 */
+		memset(&config, 0, sizeof(config));
+		config.device_id = avp->device_id;
+		config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
+		config.driver_version = AVP_DPDK_DRIVER_VERSION;
+		config.features = avp->features;
+		config.num_tx_queues = avp->num_tx_queues;
+		config.num_rx_queues = avp->num_rx_queues;
+		config.if_up = !!(avp->flags & AVP_F_LINKUP);
+
+		ret = avp_dev_ctrl_set_config(eth_dev, &config);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+				    ret);
+			goto unlock;
+		}
+	}
+
+	rte_wmb();
+	avp->flags &= ~AVP_F_DETACHED;
+
+	ret = 0;
+
+unlock:
+	rte_spinlock_unlock(&avp->lock);
+	return ret;
+}
+
+static void
+avp_dev_interrupt_handler(void *data)
+{
+	struct rte_eth_dev *eth_dev = data;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+	uint32_t status, value;
+	int ret;
+
+	if (registers == NULL)
+		rte_panic("no mapped MMIO register space\n");
+
+	/* read the interrupt status register
+	 * note: this register clears on read so all raised interrupts must be
+	 *    handled or remembered for later processing
+	 */
+	status = AVP_READ32(
+		RTE_PTR_ADD(registers,
+			    RTE_AVP_INTERRUPT_STATUS_OFFSET));
+
+	if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
+		/* handle interrupt based on current status */
+		value = AVP_READ32(
+			RTE_PTR_ADD(registers,
+				    RTE_AVP_MIGRATION_STATUS_OFFSET));
+		switch (value) {
+		case RTE_AVP_MIGRATION_DETACHED:
+			ret = avp_dev_detach(eth_dev);
+			break;
+		case RTE_AVP_MIGRATION_ATTACHED:
+			ret = avp_dev_attach(eth_dev);
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
+				    value);
+			ret = -EINVAL;
+		}
+
+		/* acknowledge the request by writing out our current status */
+		value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
+		AVP_WRITE32(value,
+			    RTE_PTR_ADD(registers,
+					RTE_AVP_MIGRATION_ACK_OFFSET));
+
+		PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
+	}
+
+	if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
+		PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
+			    status);
+
+	/* re-enable UIO interrupt handling */
+	ret = rte_intr_enable(&pci_dev->intr_handle);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
+			    ret);
+		/* continue */
+	}
+}
+
+static int
+avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+	int ret;
+
+	if (registers == NULL)
+		return -EINVAL;
+
+	/* enable UIO interrupt handling */
+	ret = rte_intr_enable(&pci_dev->intr_handle);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
+			    ret);
+		return ret;
+	}
+
+	/* inform the device that all interrupts are enabled */
+	AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
+		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+	return 0;
+}
+
+static int
+avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+	int ret;
+
+	if (registers == NULL)
+		return 0;
+
+	/* inform the device that all interrupts are disabled */
+	AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
+		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+	/* disable UIO interrupt handling */
+	ret = rte_intr_disable(&pci_dev->intr_handle);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
+			    ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	int ret;
+
+	/* register a callback handler with UIO for interrupt notifications */
+	ret = rte_intr_callback_register(&pci_dev->intr_handle,
+					 avp_dev_interrupt_handler,
+					 (void *)eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
+			    ret);
+		return ret;
+	}
+
+	/* enable interrupt processing */
+	return avp_dev_enable_interrupts(eth_dev);
+}
+
+static int
+avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+	uint32_t value;
+
+	if (registers == NULL)
+		return 0;
+
+	value = AVP_READ32(RTE_PTR_ADD(registers,
+				       RTE_AVP_MIGRATION_STATUS_OFFSET));
+	if (value == RTE_AVP_MIGRATION_DETACHED) {
+		/* migration is in progress; ack it if we have not already */
+		AVP_WRITE32(value,
+			    RTE_PTR_ADD(registers,
+					RTE_AVP_MIGRATION_ACK_OFFSET));
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * create a AVP device using the supplied device info by first translating it
  * to guest address space(s).
@@ -615,8 +865,9 @@ avp_dev_create(struct rte_pci_device *pci_dev,
 		avp->port_id = eth_dev->data->port_id;
 		avp->host_mbuf_size = host_info->mbuf_size;
 		avp->host_features = host_info->features;
+		rte_spinlock_init(&avp->lock);
 		memcpy(&avp->ethaddr.addr_bytes[0],
-		       host_info->ethaddr, ETHER_ADDR_LEN);
+		       host_info->ethaddr, RTE_ETHER_ADDR_LEN);
 		/* adjust max values to not exceed our max */
 		avp->max_tx_queues =
 			RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
@@ -704,7 +955,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
 	struct rte_pci_device *pci_dev;
 	int ret;
 
-	pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	eth_dev->dev_ops = &avp_eth_dev_ops;
 	eth_dev->rx_pkt_burst = &avp_recv_pkts;
 	eth_dev->tx_pkt_burst = &avp_xmit_pkts;
@@ -726,7 +977,11 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+	/* Check current migration status */
+	if (avp_dev_migration_pending(eth_dev)) {
+		PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
+		return -EBUSY;
+	}
 
 	/* Check BAR resources */
 	ret = avp_dev_check_regions(eth_dev);
@@ -736,6 +991,13 @@
 		return ret;
 	}
 
+	/* Enable interrupts */
+	ret = avp_dev_setup_interrupts(eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
+		return ret;
+	}
+
 	/* Handle each subtype */
 	ret = avp_dev_create(pci_dev, eth_dev);
 	if (ret < 0) {
@@ -744,15 +1006,16 @@
 	}
 
 	/* Allocate memory for storing MAC addresses */
-	eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
+	eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev",
+					       RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
-			    ETHER_ADDR_LEN);
+			    RTE_ETHER_ADDR_LEN);
 		return -ENOMEM;
 	}
 
 	/* Get a mac from device config */
-	ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);
+	rte_ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);
 
 	return 0;
 }
@@ -760,31 +1023,43 @@
 static int
 eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
 {
+	int ret;
+
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return -EPERM;
 
 	if (eth_dev->data == NULL)
 		return 0;
 
-	if (eth_dev->data->mac_addrs != NULL) {
-		rte_free(eth_dev->data->mac_addrs);
-		eth_dev->data->mac_addrs = NULL;
+	ret = avp_dev_disable_interrupts(eth_dev);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
+		return ret;
 	}
 
 	return 0;
 }
 
+static int
+eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		  struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
+					     eth_avp_dev_init);
+}
 
-static struct eth_driver rte_avp_pmd = {
-	{
-		.id_table = pci_id_avp_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
-		.probe = rte_eth_dev_pci_probe,
-		.remove = rte_eth_dev_pci_remove,
-	},
-	.eth_dev_init = eth_avp_dev_init,
-	.eth_dev_uninit = eth_avp_dev_uninit,
-	.dev_private_size = sizeof(struct avp_adapter),
+static int
+eth_avp_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev,
+					      eth_avp_dev_uninit);
+}
+
+static struct rte_pci_driver rte_avp_pmd = {
+	.id_table = pci_id_avp_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = eth_avp_pci_probe,
+	.remove = eth_avp_pci_remove,
 };
 
 static int
@@ -925,7 +1200,7 @@ avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 }
 
 static inline int
-_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
+_avp_cmp_ether_addr(struct rte_ether_addr *a, struct rte_ether_addr *b)
 {
 	uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
 	uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
@@ -935,19 +1210,19 @@ _avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
 static inline int
 _avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
 {
-	struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+	struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
 	if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
 		/* allow all packets destined to our address */
 		return 0;
 	}
 
-	if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
+	if (likely(rte_is_broadcast_ether_addr(&eth->d_addr))) {
 		/* allow all broadcast packets */
 		return 0;
 	}
 
-	if (likely(is_multicast_ether_addr(&eth->d_addr))) {
+	if (likely(rte_is_multicast_ether_addr(&eth->d_addr))) {
 		/* allow all multicast packets */
 		return 0;
 	}
@@ -1042,7 +1317,7 @@ avp_dev_copy_from_buffers(struct avp_dev *avp,
 	src_offset = 0;
 
 	if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
-		ol_flags = PKT_RX_VLAN_PKT;
+		ol_flags = PKT_RX_VLAN;
 		vlan_tci = pkt_buf->vlan_tci;
 	} else {
 		ol_flags = 0;
@@ -1127,6 +1402,11 @@ avp_recv_scattered_pkts(void *rx_queue,
 	unsigned int port_id;
 	unsigned int i;
 
+	if (unlikely(avp->flags & AVP_F_DETACHED)) {
+		/* VM live migration in progress */
+		return 0;
+	}
+
 	guest_mbuf_size = avp->guest_mbuf_size;
 	port_id = avp->port_id;
 	rx_q = avp->rx_q[rxq->queue_id];
@@ -1221,6 +1501,11 @@ avp_recv_pkts(void *rx_queue,
 	char *pkt_data;
 	unsigned int i;
 
+	if (unlikely(avp->flags & AVP_F_DETACHED)) {
+		/* VM live migration in progress */
+		return 0;
+	}
+
 	rx_q = avp->rx_q[rxq->queue_id];
 	free_q = avp->free_q[rxq->queue_id];
 
@@ -1290,7 +1575,7 @@ avp_recv_pkts(void *rx_queue,
 		m->port = avp->port_id;
 
 		if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
-			m->ol_flags = PKT_RX_VLAN_PKT;
+			m->ol_flags = PKT_RX_VLAN;
 			m->vlan_tci = pkt_buf->vlan_tci;
 		}
 
@@ -1428,6 +1713,13 @@ avp_xmit_scattered_pkts(void *tx_queue,
 	unsigned int i;
 
 	orig_nb_pkts = nb_pkts;
+	if (unlikely(avp->flags & AVP_F_DETACHED)) {
+		/* VM live migration in progress */
+		/* TODO ... buffer for X packets then drop? */
+		txq->errors += nb_pkts;
+		return 0;
+	}
+
 	tx_q = avp->tx_q[txq->queue_id];
 	alloc_q = avp->alloc_q[txq->queue_id];
 
@@ -1540,6 +1832,13 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	char *pkt_data;
 	unsigned int i;
 
+	if (unlikely(avp->flags & AVP_F_DETACHED)) {
+		/* VM live migration in progress */
+		/* TODO ... buffer for X packets then drop?! */
+		txq->errors++;
+		return 0;
+	}
+
 	tx_q = avp->tx_q[txq->queue_id];
 	alloc_q = avp->alloc_q[txq->queue_id];
 
@@ -1664,7 +1963,7 @@ avp_dev_tx_queue_release(void *tx_queue)
 static int
 avp_dev_configure(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_avp_device_info *host_info;
 	struct rte_avp_device_config config;
@@ -1672,6 +1971,13 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	void *addr;
 	int ret;
 
+	rte_spinlock_lock(&avp->lock);
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+		ret = -ENOTSUP;
+		goto unlock;
+	}
+
 	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
 	host_info = (struct rte_avp_device_info *)addr;
 
@@ -1681,7 +1987,12 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	mask = (ETH_VLAN_STRIP_MASK |
 		ETH_VLAN_FILTER_MASK |
 		ETH_VLAN_EXTEND_MASK);
-	avp_vlan_offload_set(eth_dev, mask);
+	ret = avp_vlan_offload_set(eth_dev, mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
+			    ret);
+		goto unlock;
+	}
 
 	/* update device config */
 	memset(&config, 0, sizeof(config));
@@ -1703,6 +2014,7 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	ret = 0;
 
 unlock:
+	rte_spinlock_unlock(&avp->lock);
 	return ret;
 }
 
@@ -1712,11 +2024,12 @@ avp_dev_start(struct rte_eth_dev *eth_dev)
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	int ret;
 
-	/* disable features that we do not support */
-	eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0;
-	eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
-	eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0;
-	eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0;
+	rte_spinlock_lock(&avp->lock);
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+		ret = -ENOTSUP;
+		goto unlock;
+	}
 
 	/* update link state */
 	ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
@@ -1732,6 +2045,7 @@ avp_dev_start(struct rte_eth_dev *eth_dev)
 	ret = 0;
 
 unlock:
+	rte_spinlock_unlock(&avp->lock);
 	return ret;
 }
 
@@ -1741,6 +2055,13 @@ avp_dev_stop(struct rte_eth_dev *eth_dev)
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	int ret;
 
+	rte_spinlock_lock(&avp->lock);
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+		goto unlock;
+	}
+
+	/* remember current link state */
 	avp->flags &= ~AVP_F_LINKUP;
 
 	/* update link state */
@@ -1749,6 +2070,9 @@ avp_dev_stop(struct rte_eth_dev *eth_dev)
 		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
 			    ret);
 	}
+
+unlock:
+	rte_spinlock_unlock(&avp->lock);
 }
 
 static void
@@ -1757,10 +2081,22 @@ avp_dev_close(struct rte_eth_dev *eth_dev)
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	int ret;
 
+	rte_spinlock_lock(&avp->lock);
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+		goto unlock;
+	}
+
 	/* remember current link state */
 	avp->flags &= ~AVP_F_LINKUP;
 	avp->flags &= ~AVP_F_CONFIGURED;
 
+	ret = avp_dev_disable_interrupts(eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
+		/* continue */
+	}
+
 	/* update device state */
 	ret = avp_dev_ctrl_shutdown(eth_dev);
 	if (ret < 0) {
@@ -1768,6 +2104,9 @@ avp_dev_close(struct rte_eth_dev *eth_dev)
 			    ret);
 		/* continue */
 	}
+
+unlock:
+	rte_spinlock_unlock(&avp->lock);
 }
 
 static int
@@ -1789,11 +2128,13 @@ avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 
+	rte_spinlock_lock(&avp->lock);
 	if ((avp->flags & AVP_F_PROMISC) == 0) {
 		avp->flags |= AVP_F_PROMISC;
 		PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
 			    eth_dev->data->port_id);
 	}
+	rte_spinlock_unlock(&avp->lock);
 }
 
 static void
@@ -1801,11 +2142,13 @@ avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 
+	rte_spinlock_lock(&avp->lock);
 	if ((avp->flags & AVP_F_PROMISC) != 0) {
 		avp->flags &= ~AVP_F_PROMISC;
 		PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
 			    eth_dev->data->port_id);
 	}
+	rte_spinlock_unlock(&avp->lock);
 }
 
 static void
@@ -1814,8 +2157,6 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 
-	dev_info->driver_name = "rte_avp_pmd";
-	dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
 	dev_info->max_rx_queues = avp->max_rx_queues;
 	dev_info->max_tx_queues = avp->max_tx_queues;
 	dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
@@ -1827,14 +2168,16 @@
 	}
 }
 
-static void
+static int
 avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+	uint64_t offloads = dev_conf->rxmode.offloads;
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-			if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
 			else
 				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -1844,17 +2187,19 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
 	}
+
+	return 0;
 }
 
-static void
+static int
 avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
@@ -1887,6 +2232,8 @@ avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
 			stats->q_errors[i] += txq->errors;
 		}
 	}
+
+	return 0;
 }
 
 static void
@@ -1916,5 +2263,12 @@ avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
 	}
 }
 
-RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
+
+RTE_INIT(avp_init_log)
+{
+	avp_logtype_driver = rte_log_register("pmd.net.avp.driver");
+	if (avp_logtype_driver >= 0)
+		rte_log_set_level(avp_logtype_driver, RTE_LOG_NOTICE);
+}