/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013-2017 Wind River Systems, Inc.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_io.h>

#include "rte_avp_common.h"
#include "rte_avp_fifo.h"

#include "avp_logs.h"

static int avp_dev_create(struct rte_pci_device *pci_dev,
			  struct rte_eth_dev *eth_dev);

static int avp_dev_configure(struct rte_eth_dev *dev);
static int avp_dev_start(struct rte_eth_dev *dev);
static void avp_dev_stop(struct rte_eth_dev *dev);
static int avp_dev_close(struct rte_eth_dev *dev);
static int avp_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info);
static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int avp_dev_promiscuous_disable(struct rte_eth_dev *dev);

static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t rx_queue_id,
				  uint16_t nb_rx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *pool);

static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t tx_queue_id,
				  uint16_t nb_tx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_txconf *tx_conf);

static uint16_t avp_recv_scattered_pkts(void *rx_queue,
					struct rte_mbuf **rx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_recv_pkts(void *rx_queue,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);

static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
					struct rte_mbuf **tx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_xmit_pkts(void *tx_queue,
			      struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts);

static void avp_dev_rx_queue_release(void *rxq);
static void avp_dev_tx_queue_release(void *txq);

static int avp_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);
static int avp_dev_stats_reset(struct rte_eth_dev *dev);

#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE RTE_ETHER_MIN_LEN

/*
 * Defines the number of microseconds to wait before checking the response
 * queue for completion.
 */
#define AVP_REQUEST_DELAY_USECS (5000)

/*
 * Defines the number of times to check the response queue for completion
 * before declaring a timeout.
 */
#define AVP_MAX_REQUEST_RETRY (100)
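
/*
 * Note: together these bound a control request to at most
 * AVP_REQUEST_DELAY_USECS * AVP_MAX_REQUEST_RETRY = 5000us * 100 = 500ms of
 * waiting before avp_dev_process_request() reports a timeout.
 */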

/* Defines the current PCI driver version number */
#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_avp_map[] = {
	{ .vendor_id = RTE_AVP_PCI_VENDOR_ID,
	  .device_id = RTE_AVP_PCI_DEVICE_ID,
	  .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
	  .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
	  .class_id = RTE_CLASS_ANY_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};

/*
 * dev_ops for avp, bare necessities for basic operation
 */
static const struct eth_dev_ops avp_eth_dev_ops = {
	.dev_configure       = avp_dev_configure,
	.dev_start           = avp_dev_start,
	.dev_stop            = avp_dev_stop,
	.dev_close           = avp_dev_close,
	.dev_infos_get       = avp_dev_info_get,
	.vlan_offload_set    = avp_vlan_offload_set,
	.stats_get           = avp_dev_stats_get,
	.stats_reset         = avp_dev_stats_reset,
	.link_update         = avp_dev_link_update,
	.promiscuous_enable  = avp_dev_promiscuous_enable,
	.promiscuous_disable = avp_dev_promiscuous_disable,
	.rx_queue_setup      = avp_dev_rx_queue_setup,
	.rx_queue_release    = avp_dev_rx_queue_release,
	.tx_queue_setup      = avp_dev_tx_queue_setup,
	.tx_queue_release    = avp_dev_tx_queue_release,
};

/**@{ AVP device flags */
#define AVP_F_PROMISC (1 << 1)
#define AVP_F_CONFIGURED (1 << 2)
#define AVP_F_LINKUP (1 << 3)
#define AVP_F_DETACHED (1 << 4)
/**@}*/

/* Ethernet device validation marker */
#define AVP_ETHDEV_MAGIC 0x92972862

/*
 * Defines the AVP device attributes which are attached to an RTE ethernet
 * device
 */
struct avp_dev {
	uint32_t magic; /**< Memory validation marker */
	uint64_t device_id; /**< Unique system identifier */
	struct rte_ether_addr ethaddr; /**< Host specified MAC address */
	struct rte_eth_dev_data *dev_data;
	/**< Back pointer to ethernet device data */
	volatile uint32_t flags; /**< Device operational flags */
	uint16_t port_id; /**< Ethernet port identifier */
	struct rte_mempool *pool; /**< pkt mbuf mempool */
	unsigned int guest_mbuf_size; /**< local pool mbuf size */
	unsigned int host_mbuf_size; /**< host mbuf size */
	unsigned int max_rx_pkt_len; /**< maximum receive unit */
	uint32_t host_features; /**< Supported feature bitmap */
	uint32_t features; /**< Enabled feature bitmap */
	unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
	unsigned int max_tx_queues; /**< Maximum number of transmit queues */
	unsigned int num_rx_queues; /**< Negotiated number of receive queues */
	unsigned int max_rx_queues; /**< Maximum number of receive queues */

	struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
	struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
	struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
	/**< Allocated mbufs queue */
	struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
	/**< To be freed mbufs queue */

	/* mutual exclusion over the 'flags' and 'resp_q/req_q' fields */
	rte_spinlock_t lock;

	/* For request & response */
	struct rte_avp_fifo *req_q; /**< Request queue */
	struct rte_avp_fifo *resp_q; /**< Response queue */
	void *host_sync_addr; /**< (host) Req/Resp Mem address */
	void *sync_addr; /**< Req/Resp Mem address */
	void *host_mbuf_addr; /**< (host) MBUF pool start address */
	void *mbuf_addr; /**< MBUF pool start address */
} __rte_cache_aligned;

/* RTE ethernet private data */
struct avp_adapter {
	struct avp_dev avp;
} __rte_cache_aligned;


/* 32-bit MMIO register write */
#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))

/* 32-bit MMIO register read */
#define AVP_READ32(_addr) rte_read32_relaxed((_addr))

/* Macro to cast the ethernet device private data to an AVP object */
#define AVP_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct avp_adapter *)adapter)->avp)

/*
 * Defines the structure of an AVP device queue for the purpose of handling the
 * receive and transmit burst callback functions
 */
struct avp_queue {
	struct rte_eth_dev_data *dev_data;
	/**< Backpointer to ethernet device data */
	struct avp_dev *avp; /**< Backpointer to AVP device */
	uint16_t queue_id;
	/**< Queue identifier used for indexing current queue */
	uint16_t queue_base;
	/**< Base queue identifier for queue servicing */
	uint16_t queue_limit;
	/**< Maximum queue identifier for queue servicing */
	uint64_t packets; /**< Number of packets handled */
	uint64_t bytes; /**< Number of bytes handled */
	uint64_t errors; /**< Number of errors */
};

/* send a request and wait for a response
 *
 * @warning must be called while holding the avp->lock spinlock.
 */
static int
avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
{
	unsigned int retry = AVP_MAX_REQUEST_RETRY;
	void *resp_addr = NULL;
	unsigned int count;
	int ret;

	PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);

	request->result = -ENOTSUP;

	/* Discard any stale responses before starting a new request */
	while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
		PMD_DRV_LOG(DEBUG, "Discarding stale response\n");

	rte_memcpy(avp->sync_addr, request, sizeof(*request));
	count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
	if (count < 1) {
		PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
			    request->req_id);
		ret = -EBUSY;
		goto done;
	}

	while (retry--) {
		/* wait for a response */
		usleep(AVP_REQUEST_DELAY_USECS);

		count = avp_fifo_count(avp->resp_q);
		if (count >= 1) {
			/* response received */
			break;
		}

		if ((count < 1) && (retry == 0)) {
			PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
				    request->req_id);
			ret = -ETIME;
			goto done;
		}
	}

	/* retrieve the response */
	count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
	if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
		PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
			    count, resp_addr, avp->host_sync_addr);
		ret = -ENODATA;
		goto done;
	}

	/* copy to user buffer */
	rte_memcpy(request, avp->sync_addr, sizeof(*request));
	ret = 0;

	PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
		    request->result, request->req_id);

done:
	return ret;
}

static int
avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a link state change request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
	request.if_up = state;

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

static int
avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
			struct rte_avp_device_config *config)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a configure request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_DEVICE;
	memcpy(&request.config, config, sizeof(request.config));

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

static int
avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a shutdown request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

/* translate from host mbuf virtual address to guest virtual address */
static inline void *
avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
{
	return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
				       (uintptr_t)avp->host_mbuf_addr),
			   (uintptr_t)avp->mbuf_addr);
}
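
/*
 * Worked example (addresses illustrative only): if the host mbuf pool starts
 * at host VA 0x7f0000000000 and the guest mapping of the same pool starts at
 * guest VA 0x400000000, then a host buffer at 0x7f0000001000 has offset
 * 0x1000 and translates to guest VA 0x400000000 + 0x1000 = 0x400001000.
 */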

/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
			  rte_iova_t host_phys_addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_mem_resource *resource;
	struct rte_avp_memmap_info *info;
	struct rte_avp_memmap *map;
	off_t offset;
	void *addr;
	unsigned int i;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
	resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
	info = (struct rte_avp_memmap_info *)resource->addr;

	offset = 0;
	for (i = 0; i < info->nb_maps; i++) {
		/* search all segments looking for a matching address */
		map = &info->maps[i];

		if ((host_phys_addr >= map->phys_addr) &&
		    (host_phys_addr < (map->phys_addr + map->length))) {
			/* address is within this segment */
			offset += (host_phys_addr - map->phys_addr);
			addr = RTE_PTR_ADD(addr, (uintptr_t)offset);

			PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
				    host_phys_addr, addr);
			break;
		}
		offset += map->length;
	}

	return addr;
}

/* verify that the incoming device version is compatible with our version */
static int
avp_dev_version_check(uint32_t version)
{
	uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
	uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);

	if (device <= driver) {
		/* the host driver version is less than or equal to ours */
		return 0;
	}

	return 1;
}

/* verify that memory regions have expected version and validation markers */
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_avp_memmap_info *memmap;
	struct rte_avp_device_info *info;
	struct rte_mem_resource *resource;
	unsigned int i;

	/* Dump resource info for debug */
	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
		resource = &pci_dev->mem_resource[i];
		if ((resource->phys_addr == 0) || (resource->len == 0))
			continue;

		PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
			    i, resource->phys_addr,
			    resource->len, resource->addr);

		switch (i) {
		case RTE_AVP_PCI_MEMMAP_BAR:
			memmap = (struct rte_avp_memmap_info *)resource->addr;
			if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
			    (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
				PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
					    memmap->magic, memmap->version);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_DEVICE_BAR:
			info = (struct rte_avp_device_info *)resource->addr;
			if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
			    avp_dev_version_check(info->version)) {
				PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
					    info->magic, info->version,
					    AVP_DPDK_DRIVER_VERSION);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MEMORY_BAR:
		case RTE_AVP_PCI_MMIO_BAR:
			if (resource->addr == NULL) {
				PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
					    i);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MSIX_BAR:
		default:
			/* no validation required */
			break;
		}
	}

	return 0;
}

static int
avp_dev_detach(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
		    eth_dev->data->port_id, avp->device_id);

	rte_spinlock_lock(&avp->lock);

	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(NOTICE, "port %u already detached\n",
			    eth_dev->data->port_id);
		ret = 0;
		goto unlock;
	}

	/* shutdown the device first so the host stops sending us packets. */
	ret = avp_dev_ctrl_shutdown(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
			    ret);
		avp->flags &= ~AVP_F_DETACHED;
		goto unlock;
	}

	avp->flags |= AVP_F_DETACHED;
	rte_wmb();

	/* wait for queues to acknowledge the presence of the detach flag */
	rte_delay_ms(1);

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static void
_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *rxq;
	uint16_t queue_count;
	uint16_t remainder;

	rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];

	/*
	 * Must map all AVP fifos as evenly as possible between the configured
	 * device queues.  Each device queue will service a subset of the AVP
	 * fifos.  If the fifos do not divide evenly among the device queues
	 * then the first set of device queues each service one extra fifo.
	 */
	queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
	remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
	if (rx_queue_id < remainder) {
		/* these queues must service one extra FIFO */
		rxq->queue_base = rx_queue_id * (queue_count + 1);
		rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
	} else {
		/* these queues service the regular number of FIFOs */
		rxq->queue_base = ((remainder * (queue_count + 1)) +
				   ((rx_queue_id - remainder) * queue_count));
		rxq->queue_limit = rxq->queue_base + queue_count - 1;
	}

	PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
		    rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);

	rxq->queue_id = rxq->queue_base;
}
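
/*
 * Worked example: with 5 AVP fifos and 2 configured device queues,
 * queue_count = 5 / 2 = 2 and remainder = 1, so rx queue 0 services fifos
 * 0-2 (one extra) and rx queue 1 services fifos 3-4.
 */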

static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	void *addr;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/*
	 * the transmit direction is not negotiated beyond respecting the max
	 * number of queues because the host can handle arbitrary guest tx
	 * queues (host rx queues).
	 */
	avp->num_tx_queues = eth_dev->data->nb_tx_queues;

	/*
	 * the receive direction is more restrictive.  The host requires a
	 * minimum number of guest rx queues (host tx queues) therefore
	 * negotiate a value that is at least as large as the host minimum
	 * requirement.  If the host and guest values are not identical then a
	 * mapping will be established in the receive_queue_setup function.
	 */
	avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
				     eth_dev->data->nb_rx_queues);

	PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
		    avp->num_tx_queues, avp->num_rx_queues);
}

static int
avp_dev_attach(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_config config;
	unsigned int i;
	int ret;

	PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
		    eth_dev->data->port_id, avp->device_id);

	rte_spinlock_lock(&avp->lock);

	if (!(avp->flags & AVP_F_DETACHED)) {
		PMD_DRV_LOG(NOTICE, "port %u already attached\n",
			    eth_dev->data->port_id);
		ret = 0;
		goto unlock;
	}

	/*
	 * make sure that the detached flag is set prior to reconfiguring the
	 * queues.
	 */
	avp->flags |= AVP_F_DETACHED;
	rte_wmb();

	/*
	 * re-run the device create utility which will parse the new host info
	 * and setup the AVP device queue pointers.
	 */
	ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
			    ret);
		goto unlock;
	}

	if (avp->flags & AVP_F_CONFIGURED) {
		/*
		 * Update the receive queue mapping to handle cases where the
		 * source and destination hosts have different queue
		 * requirements.  As long as the DETACHED flag is asserted the
		 * queue table should not be referenced so it should be safe to
		 * update it.
		 */
		_avp_set_queue_counts(eth_dev);
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
			_avp_set_rx_queue_mappings(eth_dev, i);

		/*
		 * Update the host with our config details so that it knows the
		 * device is active.
		 */
		memset(&config, 0, sizeof(config));
		config.device_id = avp->device_id;
		config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
		config.driver_version = AVP_DPDK_DRIVER_VERSION;
		config.features = avp->features;
		config.num_tx_queues = avp->num_tx_queues;
		config.num_rx_queues = avp->num_rx_queues;
		config.if_up = !!(avp->flags & AVP_F_LINKUP);

		ret = avp_dev_ctrl_set_config(eth_dev, &config);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
				    ret);
			goto unlock;
		}
	}

	rte_wmb();
	avp->flags &= ~AVP_F_DETACHED;

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static void
avp_dev_interrupt_handler(void *data)
{
	struct rte_eth_dev *eth_dev = data;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	uint32_t status, value;
	int ret;

	if (registers == NULL)
		rte_panic("no mapped MMIO register space\n");

	/* read the interrupt status register
	 * note: this register clears on read so all raised interrupts must be
	 * handled or remembered for later processing
	 */
	status = AVP_READ32(
		RTE_PTR_ADD(registers,
			    RTE_AVP_INTERRUPT_STATUS_OFFSET));

	if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
		/* handle interrupt based on current status */
		value = AVP_READ32(
			RTE_PTR_ADD(registers,
				    RTE_AVP_MIGRATION_STATUS_OFFSET));
		switch (value) {
		case RTE_AVP_MIGRATION_DETACHED:
			ret = avp_dev_detach(eth_dev);
			break;
		case RTE_AVP_MIGRATION_ATTACHED:
			ret = avp_dev_attach(eth_dev);
			break;
		default:
			PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
				    value);
			ret = -EINVAL;
		}

		/* acknowledge the request by writing out our current status */
		value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
		AVP_WRITE32(value,
			    RTE_PTR_ADD(registers,
					RTE_AVP_MIGRATION_ACK_OFFSET));

		PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
	}

	if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
		PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
			    status);

	/* re-enable UIO interrupt handling */
	ret = rte_intr_ack(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
			    ret);
		/* continue */
	}
}

static int
avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	int ret;

	if (registers == NULL)
		return -EINVAL;

	/* enable UIO interrupt handling */
	ret = rte_intr_enable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
			    ret);
		return ret;
	}

	/* inform the device that all interrupts are enabled */
	AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));

	return 0;
}

static int
avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	int ret;

	if (registers == NULL)
		return 0;

	/* inform the device that all interrupts are disabled */
	AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));

	/* disable UIO interrupt handling */
	ret = rte_intr_disable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int
avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;

	/* register a callback handler with UIO for interrupt notifications */
	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 avp_dev_interrupt_handler,
					 (void *)eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
			    ret);
		return ret;
	}

	/* enable interrupt processing */
	return avp_dev_enable_interrupts(eth_dev);
}

static int
avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	uint32_t value;

	if (registers == NULL)
		return 0;

	value = AVP_READ32(RTE_PTR_ADD(registers,
				       RTE_AVP_MIGRATION_STATUS_OFFSET));
	if (value == RTE_AVP_MIGRATION_DETACHED) {
		/* migration is in progress; ack it if we have not already */
		AVP_WRITE32(value,
			    RTE_PTR_ADD(registers,
					RTE_AVP_MIGRATION_ACK_OFFSET));
		return 1;
	}
	return 0;
}

/*
 * create an AVP device using the supplied device info by first translating it
 * to guest address space(s).
 */
static int
avp_dev_create(struct rte_pci_device *pci_dev,
	       struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_mem_resource *resource;
	unsigned int i;

	resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
	if (resource->addr == NULL) {
		PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
			    RTE_AVP_PCI_DEVICE_BAR);
		return -EFAULT;
	}
	host_info = (struct rte_avp_device_info *)resource->addr;

	if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
	    avp_dev_version_check(host_info->version)) {
		PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
			    host_info->magic, host_info->version,
			    AVP_DPDK_DRIVER_VERSION);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
		    RTE_AVP_GET_RELEASE_VERSION(host_info->version),
		    RTE_AVP_GET_MAJOR_VERSION(host_info->version),
		    RTE_AVP_GET_MINOR_VERSION(host_info->version));

	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
		    host_info->min_tx_queues, host_info->max_tx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
		    host_info->min_rx_queues, host_info->max_rx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
		    host_info->features);

	if (avp->magic != AVP_ETHDEV_MAGIC) {
		/*
		 * First time initialization (i.e., not during a VM
		 * migration)
		 */
		memset(avp, 0, sizeof(*avp));
		avp->magic = AVP_ETHDEV_MAGIC;
		avp->dev_data = eth_dev->data;
		avp->port_id = eth_dev->data->port_id;
		avp->host_mbuf_size = host_info->mbuf_size;
		avp->host_features = host_info->features;
		rte_spinlock_init(&avp->lock);
		memcpy(&avp->ethaddr.addr_bytes[0],
		       host_info->ethaddr, RTE_ETHER_ADDR_LEN);
		/* adjust max values to not exceed our max */
		avp->max_tx_queues =
			RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
		avp->max_rx_queues =
			RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
	} else {
		/* Re-attaching during migration */

		/* TODO... requires validation of host values */
		if ((host_info->features & avp->features) != avp->features) {
			PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
				    avp->features, host_info->features);
			/* this should not be possible; continue for now */
		}
	}

	/* the device id is allowed to change over migrations */
	avp->device_id = host_info->device_id;

	/* translate incoming host addresses to guest address space */
	PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
		    host_info->tx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
		    host_info->alloc_phys);
	for (i = 0; i < avp->max_tx_queues; i++) {
		avp->tx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->tx_phys + (i * host_info->tx_size));

		avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
			host_info->alloc_phys + (i * host_info->alloc_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
		    host_info->rx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
		    host_info->free_phys);
	for (i = 0; i < avp->max_rx_queues; i++) {
		avp->rx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->rx_phys + (i * host_info->rx_size));
		avp->free_q[i] = avp_dev_translate_address(eth_dev,
			host_info->free_phys + (i * host_info->free_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
		    host_info->req_phys);
	PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
		    host_info->resp_phys);
	PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
		    host_info->sync_phys);
	PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
		    host_info->mbuf_phys);
	avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
	avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
	avp->sync_addr =
		avp_dev_translate_address(eth_dev, host_info->sync_phys);
	avp->mbuf_addr =
		avp_dev_translate_address(eth_dev, host_info->mbuf_phys);

	/*
	 * store the host mbuf virtual address so that we can calculate
	 * relative offsets for each mbuf as they are processed
	 */
	avp->host_mbuf_addr = host_info->mbuf_va;
	avp->host_sync_addr = host_info->sync_va;

	/*
	 * store the maximum packet length that is supported by the host.
	 */
	avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
	PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
		    host_info->max_rx_pkt_len);

	return 0;
}

/*
 * This function is based on probe() function in avp_pci.c
 * It returns 0 on success.
 */
static int
eth_avp_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	eth_dev->dev_ops = &avp_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avp_recv_pkts;
	eth_dev->tx_pkt_burst = &avp_xmit_pkts;
	/* Let rte_eth_dev_close() release the port resources */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/*
		 * no setup required on secondary processes.  All data is saved
		 * in dev_private by the primary process.  All resources should
		 * be mapped to the same virtual address so all pointers should
		 * be valid.
		 */
		if (eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Check current migration status */
	if (avp_dev_migration_pending(eth_dev)) {
		PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
		return -EBUSY;
	}

	/* Check BAR resources */
	ret = avp_dev_check_regions(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
			    ret);
		return ret;
	}

	/* Enable interrupts */
	ret = avp_dev_setup_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
		return ret;
	}

	/* Handle each subtype */
	ret = avp_dev_create(pci_dev, eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
		return ret;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
			    RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Get a mac from device config */
	rte_ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);

	return 0;
}

static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (eth_dev->data == NULL)
		return 0;

	avp_dev_close(eth_dev);

	return 0;
}

static int
eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
					     eth_avp_dev_init);
}

static int
eth_avp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      eth_avp_dev_uninit);
}

static struct rte_pci_driver rte_avp_pmd = {
	.id_table = pci_id_avp_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_avp_pci_probe,
	.remove = eth_avp_pci_remove,
};

static int
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
			 struct avp_dev *avp)
{
	unsigned int max_rx_pkt_len;

	max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the guest MTU is greater than either the host or guest
		 * buffers then chained mbufs have to be enabled in the TX
		 * direction.  It is assumed that the application will not need
		 * to send packets larger than their max_rx_pkt_len (MRU).
		 */
		return 1;
	}

	if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the host MRU is greater than its own mbuf size or the
		 * guest mbuf size then chained mbufs have to be enabled in the
		 * RX direction.
		 */
		return 1;
	}

	return 0;
}

static int
avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t rx_queue_id,
		       uint16_t nb_rx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *pool)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct avp_queue *rxq;

	if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
			    rx_queue_id, eth_dev->data->nb_rx_queues);
		return -EINVAL;
	}

	/* Save mbuf pool pointer */
	avp->pool = pool;

	/* Save the local mbuf size */
	mbp_priv = rte_mempool_get_priv(pool);
	avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
	avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;

	if (avp_dev_enable_scattered(eth_dev, avp)) {
		if (!eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->data->scattered_rx = 1;
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
	}

	PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
		    avp->max_rx_pkt_len,
		    eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
		    avp->host_mbuf_size,
		    avp->guest_mbuf_size);

	/* allocate a queue object */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
		return -ENOMEM;
	}

	/* save back pointers to AVP and Ethernet devices */
	rxq->avp = avp;
	rxq->dev_data = eth_dev->data;
	eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;

	/* setup the queue receive mapping for the current queue. */
	_avp_set_rx_queue_mappings(eth_dev, rx_queue_id);

	PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);

	return 0;
}

static int
avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t tx_queue_id,
		       uint16_t nb_tx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *txq;

	if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
		PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
			    tx_queue_id, eth_dev->data->nb_tx_queues);
		return -EINVAL;
	}

	/* allocate a queue object */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
		return -ENOMEM;
	}

	/* only the configured set of transmit queues are used */
	txq->queue_id = tx_queue_id;
	txq->queue_base = tx_queue_id;
	txq->queue_limit = tx_queue_id;

	/* save back pointers to AVP and Ethernet devices */
	txq->avp = avp;
	txq->dev_data = eth_dev->data;
	eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;

	PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);

	return 0;
}

static inline int
_avp_cmp_ether_addr(struct rte_ether_addr *a, struct rte_ether_addr *b)
{
	uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
	uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
	return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
}
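
/*
 * Note: the comparison above views the two 6-byte MAC addresses as three
 * 16-bit words each; XOR yields the differing bits of each word and OR
 * combines them, so the result is zero if and only if all 48 bits match,
 * without any branches.
 */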

static inline int
_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
		/* allow all packets destined to our address */
		return 0;
	}

	if (likely(rte_is_broadcast_ether_addr(&eth->d_addr))) {
		/* allow all broadcast packets */
		return 0;
	}

	if (likely(rte_is_multicast_ether_addr(&eth->d_addr))) {
		/* allow all multicast packets */
		return 0;
	}

	if (avp->flags & AVP_F_PROMISC) {
		/* allow all packets when in promiscuous mode */
		return 0;
	}

	return -1;
}

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
static void
__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
{
	struct rte_avp_desc *first_buf;
	struct rte_avp_desc *pkt_buf;
	unsigned int pkt_len;
	unsigned int nb_segs;
	void *pkt_data;
	unsigned int i;

	first_buf = avp_dev_translate_buffer(avp, buf);

	i = 0;
	pkt_len = 0;
	nb_segs = first_buf->nb_segs;
	do {
		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		if (pkt_buf == NULL)
			rte_panic("bad buffer: segment %u has an invalid address %p\n",
				  i, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		if (pkt_data == NULL)
			rte_panic("bad buffer: segment %u has a NULL data pointer\n",
				  i);
		if (pkt_buf->data_len == 0)
			rte_panic("bad buffer: segment %u has 0 data length\n",
				  i);
		pkt_len += pkt_buf->data_len;
		nb_segs--;
		i++;

	} while (nb_segs && (buf = pkt_buf->next) != NULL);

	if (nb_segs != 0)
		rte_panic("bad buffer: expected %u segments found %u\n",
			  first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
	if (pkt_len != first_buf->pkt_len)
		rte_panic("bad buffer: expected length %u found %u\n",
			  first_buf->pkt_len, pkt_len);
}

#define avp_dev_buffer_sanity_check(a, b) \
	__avp_dev_buffer_sanity_check((a), (b))

#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */

#define avp_dev_buffer_sanity_check(a, b) do {} while (0)

#endif

/*
 * Copy a host buffer chain to a set of mbufs.  This function assumes that
 * there are exactly the required number of mbufs to copy all source bytes.
 */
static inline struct rte_mbuf *
avp_dev_copy_from_buffers(struct avp_dev *avp,
			  struct rte_avp_desc *buf,
			  struct rte_mbuf **mbufs,
			  unsigned int count)
{
	struct rte_mbuf *m_previous = NULL;
	struct rte_avp_desc *pkt_buf;
	unsigned int total_length = 0;
	unsigned int copy_length;
	unsigned int src_offset;
	struct rte_mbuf *m;
	uint16_t ol_flags;
	uint16_t vlan_tci;
	void *pkt_data;
	unsigned int i;

	avp_dev_buffer_sanity_check(avp, buf);

	/* setup the first source buffer */
	pkt_buf = avp_dev_translate_buffer(avp, buf);
	pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
	total_length = pkt_buf->pkt_len;
	src_offset = 0;

	if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
		ol_flags = PKT_RX_VLAN;
		vlan_tci = pkt_buf->vlan_tci;
	} else {
		ol_flags = 0;
		vlan_tci = 0;
	}

	for (i = 0; (i < count) && (buf != NULL); i++) {
		/* fill each destination buffer */
		m = mbufs[i];

		if (m_previous != NULL)
			m_previous->next = m;

		m_previous = m;

		do {
			/*
			 * Copy as many source buffers as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->guest_mbuf_size -
					       rte_pktmbuf_data_len(m)),
					      (pkt_buf->data_len -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       rte_pktmbuf_data_len(m)),
				   RTE_PTR_ADD(pkt_data, src_offset),
				   copy_length);
			rte_pktmbuf_data_len(m) += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == pkt_buf->data_len)) {
				/* need a new source buffer */
				buf = pkt_buf->next;
				if (buf != NULL) {
					pkt_buf = avp_dev_translate_buffer(
						avp, buf);
					pkt_data = avp_dev_translate_buffer(
						avp, pkt_buf->data);
					src_offset = 0;
				}
			}

			if (unlikely(rte_pktmbuf_data_len(m) ==
				     avp->guest_mbuf_size)) {
				/* need a new destination mbuf */
				break;
			}

		} while (buf != NULL);
	}

	m = mbufs[0];
	m->ol_flags = ol_flags;
	m->nb_segs = count;
	rte_pktmbuf_pkt_len(m) = total_length;
	m->vlan_tci = vlan_tci;

	__rte_mbuf_sanity_check(m, 1);

	return m;
}

static uint16_t
avp_recv_scattered_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	struct rte_avp_desc *buf;
	unsigned int count, avail, n;
	unsigned int guest_mbuf_size;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int buf_len;
	unsigned int port_id;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		return 0;
	}

	guest_mbuf_size = avp->guest_mbuf_size;
	port_id = avp->port_id;
	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   n, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (likely(i < n - 1)) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}
		buf = avp_bufs[i];

		/* Peek into the first buffer to determine the total length */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		buf_len = pkt_buf->pkt_len;

		/* Allocate enough mbufs to receive the entire packet */
		required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
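		/*
		 * The division above rounds up; e.g. a 3000 byte packet with
		 * a 2048 byte guest mbuf size needs (3000 + 2047) / 2048 = 2
		 * mbufs.
		 */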
		if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			continue;
		}

		/* Copy the data from the buffers to our mbufs */
		m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);

		/* finalize mbuf */
		m->port = port_id;

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += buf_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}

static uint16_t
avp_recv_pkts(void *rx_queue,
	      struct rte_mbuf **rx_pkts,
	      uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	unsigned int count, avail, n;
	unsigned int pkt_len;
	struct rte_mbuf *m;
	char *pkt_data;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		return 0;
	}

	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   n, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (likely(i < n - 1)) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust host pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = pkt_buf->pkt_len;

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_buf->nb_segs > 1))) {
			/*
			 * application should be using the scattered receive
			 * function; silently discard this packet
			 */
			rxq->errors++;
			continue;
		}

		/* allocate a new mbuf for this packet */
		m = rte_pktmbuf_alloc(avp->pool);
		if (unlikely(m == NULL)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			continue;
		}

		/* copy data out of the host buffer to our buffer */
		m->data_off = RTE_PKTMBUF_HEADROOM;
		rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);

		/* initialize the local mbuf */
		rte_pktmbuf_data_len(m) = pkt_len;
		rte_pktmbuf_pkt_len(m) = pkt_len;
		m->port = avp->port_id;

		if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
			m->ol_flags = PKT_RX_VLAN;
			m->vlan_tci = pkt_buf->vlan_tci;
		}

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += pkt_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}

/*
 * Copy a chained mbuf to a set of host buffers.  This function assumes that
 * there are sufficient destination buffers to contain the entire source
 * packet.
 */
static inline uint16_t
avp_dev_copy_to_buffers(struct avp_dev *avp,
			struct rte_mbuf *mbuf,
			struct rte_avp_desc **buffers,
			unsigned int count)
{
	struct rte_avp_desc *previous_buf = NULL;
	struct rte_avp_desc *first_buf = NULL;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_desc *buf;
	size_t total_length;
	struct rte_mbuf *m;
	size_t copy_length;
	size_t src_offset;
	char *pkt_data;
	unsigned int i;

	__rte_mbuf_sanity_check(mbuf, 1);

	m = mbuf;
	src_offset = 0;
	total_length = rte_pktmbuf_pkt_len(m);
	for (i = 0; (i < count) && (m != NULL); i++) {
		/* fill each destination buffer */
		buf = buffers[i];

		if (i < count - 1) {
			/* prefetch next entry while processing this one */
			pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);

		/* setup the buffer chain */
		if (previous_buf != NULL)
			previous_buf->next = buf;
		else
			first_buf = pkt_buf;

		previous_buf = pkt_buf;

		do {
			/*
			 * copy as many source mbuf segments as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->host_mbuf_size -
					       pkt_buf->data_len),
					      (rte_pktmbuf_data_len(m) -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
				   RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       src_offset),
				   copy_length);
			pkt_buf->data_len += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == rte_pktmbuf_data_len(m))) {
				/* need a new source buffer */
				m = m->next;
				src_offset = 0;
			}

			if (unlikely(pkt_buf->data_len ==
				     avp->host_mbuf_size)) {
				/* need a new destination buffer */
				break;
			}

		} while (m != NULL);
	}

	first_buf->nb_segs = count;
	first_buf->pkt_len = total_length;

	if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
		first_buf->vlan_tci = mbuf->vlan_tci;
	}

	avp_dev_buffer_sanity_check(avp, buffers[0]);

	return total_length;
}

static uint16_t
avp_xmit_scattered_pkts(void *tx_queue,
			struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
				       RTE_AVP_MAX_MBUF_SEGMENTS)] = {};
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	unsigned int orig_nb_pkts;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int segments;
	unsigned int tx_bytes;
	unsigned int i;

	orig_nb_pkts = nb_pkts;
	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		/* TODO ... buffer for X packets then drop? */
		txq->errors += nb_pkts;
		return 0;
	}

	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);
	if (unlikely(avail > (AVP_MAX_TX_BURST *
			      RTE_AVP_MAX_MBUF_SEGMENTS)))
		avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	nb_pkts = RTE_MIN(count, nb_pkts);

	/* determine how many packets will fit in the available buffers */
	count = 0;
	segments = 0;
	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		if (likely(i < (unsigned int)nb_pkts - 1)) {
			/* prefetch next entry while processing this one */
			rte_prefetch0(tx_pkts[i + 1]);
		}
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		if (unlikely((required == 0) ||
			     (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
			break;
		else if (unlikely(required + segments > avail))
			break;
		segments += required;
		count++;
	}
	nb_pkts = count;

	if (unlikely(nb_pkts == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += orig_nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   nb_pkts, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
	if (unlikely(n != segments)) {
		PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
			   "n=%u, segments=%u, orig=%u\n",
			   n, segments, orig_nb_pkts);
		txq->errors += orig_nb_pkts;
		return 0;
	}

	tx_bytes = 0;
	count = 0;
	for (i = 0; i < nb_pkts; i++) {
		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* determine how many buffers are required for this packet */
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		tx_bytes += avp_dev_copy_to_buffers(avp, m,
						    &avp_bufs[count], required);
		tx_bufs[i] = avp_bufs[count];
		count += required;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += nb_pkts;
	txq->bytes += tx_bytes;

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
	for (i = 0; i < nb_pkts; i++)
		avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
#endif

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
	if (unlikely(n != orig_nb_pkts))
		txq->errors += (orig_nb_pkts - n);

	return n;
}

static uint16_t
avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	struct rte_mbuf *m;
	unsigned int pkt_len;
	unsigned int tx_bytes;
	char *pkt_data;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		/* TODO ... buffer for X packets then drop?! */
		txq->errors += nb_pkts;
		return 0;
	}

	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);

	if (unlikely(count == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   count, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
	if (unlikely(n != count)) {
		txq->errors++;
		return 0;
	}

	tx_bytes = 0;
	for (i = 0; i < count; i++) {
		/* prefetch next entry while processing the current one */
		if (i < count - 1) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = rte_pktmbuf_pkt_len(m);

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_len > avp->host_mbuf_size))) {
			/*
			 * application should be using the scattered transmit
			 * function; send it truncated to avoid the performance
			 * hit of having to manage returning the already
			 * allocated buffer to the free list.  This should not
			 * happen since the application should have set the
			 * max_rx_pkt_len based on its MTU and it should be
			 * policing its own packet sizes.
			 */
			txq->errors++;
			pkt_len = RTE_MIN(avp->guest_mbuf_size,
					  avp->host_mbuf_size);
		}

		/* copy data out of our mbuf and into the AVP buffer */
		rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
		pkt_buf->pkt_len = pkt_len;
		pkt_buf->data_len = pkt_len;
		pkt_buf->nb_segs = 1;
		pkt_buf->next = NULL;

		if (m->ol_flags & PKT_TX_VLAN_PKT) {
			pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
			pkt_buf->vlan_tci = m->vlan_tci;
		}

		tx_bytes += pkt_len;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += count;
	txq->bytes += tx_bytes;

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);

	return n;
}

static void
avp_dev_rx_queue_release(void *rx_queue)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct avp_dev *avp = rxq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		if (data->rx_queues[i] == rxq) {
			rte_free(data->rx_queues[i]);
			data->rx_queues[i] = NULL;
		}
	}
}

static void
avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		if (data->rx_queues[i]) {
			rte_free(data->rx_queues[i]);
			data->rx_queues[i] = NULL;
		}
	}
}

static void
avp_dev_tx_queue_release(void *tx_queue)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct avp_dev *avp = txq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_tx_queues; i++) {
		if (data->tx_queues[i] == txq) {
			rte_free(data->tx_queues[i]);
			data->tx_queues[i] = NULL;
		}
	}
}

static void
avp_dev_tx_queue_release_all(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_tx_queues; i++) {
		if (data->tx_queues[i]) {
			rte_free(data->tx_queues[i]);
			data->tx_queues[i] = NULL;
		}
	}
}

static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_avp_device_config config;
	int mask = 0;
	void *addr;
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		ret = -ENOTSUP;
		goto unlock;
	}

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/* Setup required number of queues */
	_avp_set_queue_counts(eth_dev);

	mask = (ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);
	ret = avp_vlan_offload_set(eth_dev, mask);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	/* update device config */
	memset(&config, 0, sizeof(config));
	config.device_id = host_info->device_id;
	config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
	config.driver_version = AVP_DPDK_DRIVER_VERSION;
	config.features = avp->features;
	config.num_tx_queues = avp->num_tx_queues;
	config.num_rx_queues = avp->num_rx_queues;

	ret = avp_dev_ctrl_set_config(eth_dev, &config);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	avp->flags |= AVP_F_CONFIGURED;
	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static int
avp_dev_start(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		ret = -ENOTSUP;
		goto unlock;
	}

	/* update link state */
	ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	/* remember current link state */
	avp->flags |= AVP_F_LINKUP;

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static void
avp_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		goto unlock;
	}

	/* remember current link state */
	avp->flags &= ~AVP_F_LINKUP;

	/* update link state */
	ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
			    ret);
	}

unlock:
	rte_spinlock_unlock(&avp->lock);
}

static int
avp_dev_close(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		goto unlock;
	}

	/* remember current link state */
	avp->flags &= ~AVP_F_LINKUP;
	avp->flags &= ~AVP_F_CONFIGURED;

	ret = avp_dev_disable_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
		/* continue */
	}

	/* update device state */
	ret = avp_dev_ctrl_shutdown(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
			    ret);
		/* continue */
	}

	/* release dynamic storage for rx/tx queues */
	avp_dev_rx_queue_release_all(eth_dev);
	avp_dev_tx_queue_release_all(eth_dev);

unlock:
	rte_spinlock_unlock(&avp->lock);
	return 0;
}

static int
avp_dev_link_update(struct rte_eth_dev *eth_dev,
		    __rte_unused int wait_to_complete)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	link->link_speed = ETH_SPEED_NUM_10G;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_status = !!(avp->flags & AVP_F_LINKUP);
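	/*
	 * Note: AVP is a virtual device, so the 10G full-duplex values set
	 * above are nominal; only link_status reflects actual device state.
	 */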

	return -1;
}

static int
avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	rte_spinlock_lock(&avp->lock);
	if ((avp->flags & AVP_F_PROMISC) == 0) {
		avp->flags |= AVP_F_PROMISC;
		PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
			    eth_dev->data->port_id);
	}
	rte_spinlock_unlock(&avp->lock);

	return 0;
}

static int
avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	rte_spinlock_lock(&avp->lock);
	if ((avp->flags & AVP_F_PROMISC) != 0) {
		avp->flags &= ~AVP_F_PROMISC;
		PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
			    eth_dev->data->port_id);
	}
	rte_spinlock_unlock(&avp->lock);

	return 0;
}

static int
avp_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *dev_info)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	dev_info->max_rx_queues = avp->max_rx_queues;
	dev_info->max_tx_queues = avp->max_tx_queues;
	dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	}

	return 0;
}

static int
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
	uint64_t offloads = dev_conf->rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
			else
				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
		} else {
			PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
		}
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
	}

	return 0;
}

static int
avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		struct avp_queue *rxq = avp->dev_data->rx_queues[i];

		if (rxq) {
			stats->ipackets += rxq->packets;
			stats->ibytes += rxq->bytes;
			stats->ierrors += rxq->errors;

			if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
				stats->q_ipackets[i] += rxq->packets;
				stats->q_ibytes[i] += rxq->bytes;
				stats->q_errors[i] += rxq->errors;
			}
		}
	}

	for (i = 0; i < avp->num_tx_queues; i++) {
		struct avp_queue *txq = avp->dev_data->tx_queues[i];

		if (txq) {
			stats->opackets += txq->packets;
			stats->obytes += txq->bytes;
			stats->oerrors += txq->errors;

			if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
				stats->q_opackets[i] += txq->packets;
				stats->q_obytes[i] += txq->bytes;
			}
		}
	}

	return 0;
}

static int
avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		struct avp_queue *rxq = avp->dev_data->rx_queues[i];

		if (rxq) {
			rxq->bytes = 0;
			rxq->packets = 0;
			rxq->errors = 0;
		}
	}

	for (i = 0; i < avp->num_tx_queues; i++) {
		struct avp_queue *txq = avp->dev_data->tx_queues[i];

		if (txq) {
			txq->bytes = 0;
			txq->packets = 0;
			txq->errors = 0;
		}
	}

	return 0;
}

RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);

RTE_LOG_REGISTER(avp_logtype_driver, pmd.net.avp.driver, NOTICE);