4 * Copyright (c) 2013-2017, Wind River Systems, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * 1) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * 2) Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
16 * 3) Neither the name of Wind River Systems nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
39 #include <rte_ethdev.h>
40 #include <rte_ethdev_pci.h>
41 #include <rte_memcpy.h>
42 #include <rte_string_fns.h>
43 #include <rte_memzone.h>
44 #include <rte_malloc.h>
45 #include <rte_atomic.h>
46 #include <rte_branch_prediction.h>
48 #include <rte_ether.h>
49 #include <rte_common.h>
50 #include <rte_cycles.h>
51 #include <rte_spinlock.h>
52 #include <rte_byteorder.h>
54 #include <rte_memory.h>
58 #include "rte_avp_common.h"
59 #include "rte_avp_fifo.h"
/*
 * Forward declarations of the ethdev callback implementations that are
 * wired into avp_eth_dev_ops below, plus the rx/tx burst handlers that
 * are installed in eth_avp_dev_init() / avp_dev_rx_queue_setup().
 */
64 static int avp_dev_create(struct rte_pci_device *pci_dev,
65 struct rte_eth_dev *eth_dev);
67 static int avp_dev_configure(struct rte_eth_dev *dev);
68 static int avp_dev_start(struct rte_eth_dev *dev);
69 static void avp_dev_stop(struct rte_eth_dev *dev);
70 static void avp_dev_close(struct rte_eth_dev *dev);
71 static void avp_dev_info_get(struct rte_eth_dev *dev,
72 struct rte_eth_dev_info *dev_info);
73 static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
74 static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
75 static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
76 static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
/* per-queue setup/teardown callbacks */
78 static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
81 unsigned int socket_id,
82 const struct rte_eth_rxconf *rx_conf,
83 struct rte_mempool *pool);
85 static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
88 unsigned int socket_id,
89 const struct rte_eth_txconf *tx_conf);
/* burst handlers; the "scattered" variants support chained (multi-segment) mbufs */
91 static uint16_t avp_recv_scattered_pkts(void *rx_queue,
92 struct rte_mbuf **rx_pkts,
95 static uint16_t avp_recv_pkts(void *rx_queue,
96 struct rte_mbuf **rx_pkts,
99 static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
100 struct rte_mbuf **tx_pkts,
103 static uint16_t avp_xmit_pkts(void *tx_queue,
104 struct rte_mbuf **tx_pkts,
107 static void avp_dev_rx_queue_release(void *rxq);
108 static void avp_dev_tx_queue_release(void *txq);
/* statistics callbacks */
110 static void avp_dev_stats_get(struct rte_eth_dev *dev,
111 struct rte_eth_stats *stats);
112 static void avp_dev_stats_reset(struct rte_eth_dev *dev);
/* Convert an rte_eth_dev to its underlying rte_pci_device */
115 #define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
/* Upper bounds on a single rx/tx burst */
118 #define AVP_MAX_RX_BURST 64
119 #define AVP_MAX_TX_BURST 64
/* Only a single MAC address is supported per device */
120 #define AVP_MAX_MAC_ADDRS 1
121 #define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
125 * Defines the number of microseconds to wait before checking the response
126 * queue for completion.
128 #define AVP_REQUEST_DELAY_USECS (5000)
131 * Defines the number times to check the response queue for completion before
132 * declaring a timeout.
134 #define AVP_MAX_REQUEST_RETRY (100)
136 /* Defines the current PCI driver version number */
137 #define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION
140 * The set of PCI devices this driver supports
/* PCI match table; the zero vendor_id entry terminates the list */
142 static const struct rte_pci_id pci_id_avp_map[] = {
143 { .vendor_id = RTE_AVP_PCI_VENDOR_ID,
144 .device_id = RTE_AVP_PCI_DEVICE_ID,
145 .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
146 .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
147 .class_id = RTE_CLASS_ANY_ID,
150 { .vendor_id = 0, /* sentinel */
155 * dev_ops for avp, bare necessities for basic operation
/* ethdev callback table registered in eth_avp_dev_init() */
157 static const struct eth_dev_ops avp_eth_dev_ops = {
158 .dev_configure = avp_dev_configure,
159 .dev_start = avp_dev_start,
160 .dev_stop = avp_dev_stop,
161 .dev_close = avp_dev_close,
162 .dev_infos_get = avp_dev_info_get,
163 .vlan_offload_set = avp_vlan_offload_set,
164 .stats_get = avp_dev_stats_get,
165 .stats_reset = avp_dev_stats_reset,
166 .link_update = avp_dev_link_update,
167 .promiscuous_enable = avp_dev_promiscuous_enable,
168 .promiscuous_disable = avp_dev_promiscuous_disable,
169 .rx_queue_setup = avp_dev_rx_queue_setup,
170 .rx_queue_release = avp_dev_rx_queue_release,
171 .tx_queue_setup = avp_dev_tx_queue_setup,
172 .tx_queue_release = avp_dev_tx_queue_release,
175 /**@{ AVP device flags */
/* promiscuous mode requested by the application */
176 #define AVP_F_PROMISC (1 << 1)
/* dev_configure has completed successfully */
177 #define AVP_F_CONFIGURED (1 << 2)
/* link is administratively up */
178 #define AVP_F_LINKUP (1 << 3)
/* device detached from the host (VM migration in progress) */
179 #define AVP_F_DETACHED (1 << 4)
182 /* Ethernet device validation marker */
183 #define AVP_ETHDEV_MAGIC 0x92972862
186 * Defines the AVP device attributes which are attached to an RTE ethernet
190 uint32_t magic; /**< Memory validation marker */
191 uint64_t device_id; /**< Unique system identifier */
192 struct ether_addr ethaddr; /**< Host specified MAC address */
193 struct rte_eth_dev_data *dev_data;
194 /**< Back pointer to ethernet device data */
195 volatile uint32_t flags; /**< Device operational flags */
196 uint8_t port_id; /**< Ethernet port identifier */
197 struct rte_mempool *pool; /**< pkt mbuf mempool */
198 unsigned int guest_mbuf_size; /**< local pool mbuf size */
199 unsigned int host_mbuf_size; /**< host mbuf size */
200 unsigned int max_rx_pkt_len; /**< maximum receive unit */
201 uint32_t host_features; /**< Supported feature bitmap */
202 uint32_t features; /**< Enabled feature bitmap */
203 unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
204 unsigned int max_tx_queues; /**< Maximum number of transmit queues */
205 unsigned int num_rx_queues; /**< Negotiated number of receive queues */
206 unsigned int max_rx_queues; /**< Maximum number of receive queues */
/* per-direction shared-memory fifos, translated into guest address space
 * by avp_dev_create()
 */
208 struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
209 struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
210 struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
211 /**< Allocated mbufs queue */
212 struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
213 /**< To be freed mbufs queue */
215 /* mutual exclusion over the 'flag' and 'resp_q/req_q' fields */
/* NOTE(review): the spinlock taken as avp->lock elsewhere in this file is
 * declared here; it serializes detach/attach against control requests.
 */
218 /* For request & response */
219 struct rte_avp_fifo *req_q; /**< Request queue */
220 struct rte_avp_fifo *resp_q; /**< Response queue */
221 void *host_sync_addr; /**< (host) Req/Resp Mem address */
222 void *sync_addr; /**< Req/Resp Mem address */
223 void *host_mbuf_addr; /**< (host) MBUF pool start address */
224 void *mbuf_addr; /**< MBUF pool start address */
225 } __rte_cache_aligned;
227 /* RTE ethernet private data */
/* wrapper stored in eth_dev->data->dev_private; holds the avp_dev state */
230 } __rte_cache_aligned;
233 /* 32-bit MMIO register write */
234 #define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))
236 /* 32-bit MMIO register read */
237 #define AVP_READ32(_addr) rte_read32_relaxed((_addr))
239 /* Macro to cast the ethernet device private data to a AVP object */
240 #define AVP_DEV_PRIVATE_TO_HW(adapter) \
241 (&((struct avp_adapter *)adapter)->avp)
244 * Defines the structure of a AVP device queue for the purpose of handling the
245 * receive and transmit burst callback functions
248 struct rte_eth_dev_data *dev_data;
249 /**< Backpointer to ethernet device data */
250 struct avp_dev *avp; /**< Backpointer to AVP device */
/* one ethdev queue may service several AVP fifos in the range
 * [queue_base, queue_limit]; queue_id is the round-robin cursor
 */
252 /**< Queue identifier used for indexing current queue */
254 /**< Base queue identifier for queue servicing */
255 uint16_t queue_limit;
256 /**< Maximum queue identifier for queue servicing */
263 /* send a request and wait for a response
265 * @warning must be called while holding the avp->lock spinlock.
268 avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
270 unsigned int retry = AVP_MAX_REQUEST_RETRY;
271 void *resp_addr = NULL;
275 PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);
/* default result in case the host never fills one in */
277 request->result = -ENOTSUP;
279 /* Discard any stale responses before starting a new request */
280 while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
281 PMD_DRV_LOG(DEBUG, "Discarding stale response\n");
/* the request body travels through the shared sync buffer; only the
 * host-visible pointer to that buffer is enqueued on the request fifo
 */
283 rte_memcpy(avp->sync_addr, request, sizeof(*request));
284 count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
286 PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
/* poll for a response, sleeping between attempts, up to
 * AVP_MAX_REQUEST_RETRY * AVP_REQUEST_DELAY_USECS total
 */
293 /* wait for a response */
294 usleep(AVP_REQUEST_DELAY_USECS);
296 count = avp_fifo_count(avp->resp_q);
298 /* response received */
302 if ((count < 1) && (retry == 0)) {
303 PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
310 /* retrieve the response */
/* a valid response must echo back our own sync buffer pointer */
311 count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
312 if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
313 PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
314 count, resp_addr, avp->host_sync_addr);
319 /* copy to user buffer */
320 rte_memcpy(request, avp->sync_addr, sizeof(*request));
323 PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
324 request->result, request->req_id);
331 avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
333 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
334 struct rte_avp_request request;
337 /* setup a link state change request */
338 memset(&request, 0, sizeof(request));
339 request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
340 request.if_up = state;
342 ret = avp_dev_process_request(avp, &request);
344 return ret == 0 ? request.result : ret;
348 avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
349 struct rte_avp_device_config *config)
351 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
352 struct rte_avp_request request;
355 /* setup a configure request */
356 memset(&request, 0, sizeof(request));
357 request.req_id = RTE_AVP_REQ_CFG_DEVICE;
358 memcpy(&request.config, config, sizeof(request.config));
360 ret = avp_dev_process_request(avp, &request);
362 return ret == 0 ? request.result : ret;
366 avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
368 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
369 struct rte_avp_request request;
372 /* setup a shutdown request */
373 memset(&request, 0, sizeof(request));
374 request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;
376 ret = avp_dev_process_request(avp, &request);
378 return ret == 0 ? request.result : ret;
381 /* translate from host mbuf virtual address to guest virtual address */
383 avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
385 return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
386 (uintptr_t)avp->host_mbuf_addr),
387 (uintptr_t)avp->mbuf_addr);
390 /* translate from host physical address to guest virtual address */
392 avp_dev_translate_address(struct rte_eth_dev *eth_dev,
393 phys_addr_t host_phys_addr)
395 struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
396 struct rte_mem_resource *resource;
397 struct rte_avp_memmap_info *info;
398 struct rte_avp_memmap *map;
403 addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
404 resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
405 info = (struct rte_avp_memmap_info *)resource->addr;
408 for (i = 0; i < info->nb_maps; i++) {
409 /* search all segments looking for a matching address */
410 map = &info->maps[i];
412 if ((host_phys_addr >= map->phys_addr) &&
413 (host_phys_addr < (map->phys_addr + map->length))) {
414 /* address is within this segment */
415 offset += (host_phys_addr - map->phys_addr);
416 addr = RTE_PTR_ADD(addr, offset);
418 PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
419 host_phys_addr, addr);
423 offset += map->length;
429 /* verify that the incoming device version is compatible with our version */
431 avp_dev_version_check(uint32_t version)
433 uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
434 uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);
436 if (device <= driver) {
437 /* the host driver version is less than or equal to ours */
444 /* verify that memory regions have expected version and validation markers */
446 avp_dev_check_regions(struct rte_eth_dev *eth_dev)
448 struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
449 struct rte_avp_memmap_info *memmap;
450 struct rte_avp_device_info *info;
451 struct rte_mem_resource *resource;
454 /* Dump resource info for debug */
455 for (i = 0; i < PCI_MAX_RESOURCE; i++) {
456 resource = &pci_dev->mem_resource[i];
/* unmapped/empty BARs are skipped */
457 if ((resource->phys_addr == 0) || (resource->len == 0))
460 PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
461 i, resource->phys_addr,
462 resource->len, resource->addr);
/* per-BAR validation: each BAR type carries its own magic/version */
465 case RTE_AVP_PCI_MEMMAP_BAR:
466 memmap = (struct rte_avp_memmap_info *)resource->addr;
467 if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
468 (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
469 PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
470 memmap->magic, memmap->version);
475 case RTE_AVP_PCI_DEVICE_BAR:
476 info = (struct rte_avp_device_info *)resource->addr;
/* device BAR is checked against the driver's stripped version */
477 if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
478 avp_dev_version_check(info->version)) {
479 PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
480 info->magic, info->version,
481 AVP_DPDK_DRIVER_VERSION);
/* memory/MMIO BARs only need to be mapped into our address space */
486 case RTE_AVP_PCI_MEMORY_BAR:
487 case RTE_AVP_PCI_MMIO_BAR:
488 if (resource->addr == NULL) {
489 PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
495 case RTE_AVP_PCI_MSIX_BAR:
497 /* no validation required */
/* Detach the device from the host ahead of a VM migration; called from the
 * interrupt handler.  Takes avp->lock for the duration.
 */
506 avp_dev_detach(struct rte_eth_dev *eth_dev)
508 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
511 PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
512 eth_dev->data->port_id, avp->device_id);
514 rte_spinlock_lock(&avp->lock);
/* detaching twice is a no-op */
516 if (avp->flags & AVP_F_DETACHED) {
517 PMD_DRV_LOG(NOTICE, "port %u already detached\n",
518 eth_dev->data->port_id);
523 /* shutdown the device first so the host stops sending us packets. */
524 ret = avp_dev_ctrl_shutdown(eth_dev);
526 PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
/* on shutdown failure, clear the flag so the device keeps running */
528 avp->flags &= ~AVP_F_DETACHED;
532 avp->flags |= AVP_F_DETACHED;
535 /* wait for queues to acknowledge the presence of the detach flag */
541 rte_spinlock_unlock(&avp->lock);
546 _avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
548 struct avp_dev *avp =
549 AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
550 struct avp_queue *rxq;
551 uint16_t queue_count;
554 rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];
557 * Must map all AVP fifos as evenly as possible between the configured
558 * device queues. Each device queue will service a subset of the AVP
559 * fifos. If there is an odd number of device queues the first set of
560 * device queues will get the extra AVP fifos.
562 queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
563 remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
564 if (rx_queue_id < remainder) {
565 /* these queues must service one extra FIFO */
566 rxq->queue_base = rx_queue_id * (queue_count + 1);
567 rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
569 /* these queues service the regular number of FIFO */
570 rxq->queue_base = ((remainder * (queue_count + 1)) +
571 ((rx_queue_id - remainder) * queue_count));
572 rxq->queue_limit = rxq->queue_base + queue_count - 1;
575 PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
576 rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);
578 rxq->queue_id = rxq->queue_base;
582 _avp_set_queue_counts(struct rte_eth_dev *eth_dev)
584 struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
585 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
586 struct rte_avp_device_info *host_info;
589 addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
590 host_info = (struct rte_avp_device_info *)addr;
593 * the transmit direction is not negotiated beyond respecting the max
594 * number of queues because the host can handle arbitrary guest tx
595 * queues (host rx queues).
597 avp->num_tx_queues = eth_dev->data->nb_tx_queues;
600 * the receive direction is more restrictive. The host requires a
601 * minimum number of guest rx queues (host tx queues) therefore
602 * negotiate a value that is at least as large as the host minimum
603 * requirement. If the host and guest values are not identical then a
604 * mapping will be established in the receive_queue_setup function.
606 avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
607 eth_dev->data->nb_rx_queues);
609 PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
610 avp->num_tx_queues, avp->num_rx_queues);
/* Re-attach the device after a VM migration; counterpart of
 * avp_dev_detach().  Takes avp->lock for the duration.
 */
614 avp_dev_attach(struct rte_eth_dev *eth_dev)
616 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
617 struct rte_avp_device_config config;
621 PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
622 eth_dev->data->port_id, avp->device_id);
624 rte_spinlock_lock(&avp->lock);
/* attaching an already-attached device is a no-op */
626 if (!(avp->flags & AVP_F_DETACHED)) {
627 PMD_DRV_LOG(NOTICE, "port %u already attached\n",
628 eth_dev->data->port_id);
634 * make sure that the detached flag is set prior to reconfiguring the
637 avp->flags |= AVP_F_DETACHED;
641 * re-run the device create utility which will parse the new host info
642 * and setup the AVP device queue pointers.
644 ret = avp_dev_create(AVP_DEV_TO_PCI(eth_dev), eth_dev);
646 PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
/* only re-negotiate queues if the device was configured before the
 * migration started
 */
651 if (avp->flags & AVP_F_CONFIGURED) {
653 * Update the receive queue mapping to handle cases where the
654 * source and destination hosts have different queue
655 * requirements. As long as the DETACHED flag is asserted the
656 * queue table should not be referenced so it should be safe to
659 _avp_set_queue_counts(eth_dev);
660 for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
661 _avp_set_rx_queue_mappings(eth_dev, i);
664 * Update the host with our config details so that it knows the
667 memset(&config, 0, sizeof(config));
668 config.device_id = avp->device_id;
669 config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
670 config.driver_version = AVP_DPDK_DRIVER_VERSION;
671 config.features = avp->features;
672 config.num_tx_queues = avp->num_tx_queues;
673 config.num_rx_queues = avp->num_rx_queues;
/* re-assert the pre-migration link state */
674 config.if_up = !!(avp->flags & AVP_F_LINKUP);
676 ret = avp_dev_ctrl_set_config(eth_dev, &config);
678 PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
/* attach complete: datapath may reference the queues again */
685 avp->flags &= ~AVP_F_DETACHED;
690 rte_spinlock_unlock(&avp->lock);
/* UIO interrupt callback registered in avp_dev_setup_interrupts(); handles
 * host migration notifications delivered through the MMIO BAR.
 */
695 avp_dev_interrupt_handler(void *data)
697 struct rte_eth_dev *eth_dev = data;
698 struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
699 void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
700 uint32_t status, value;
703 if (registers == NULL)
704 rte_panic("no mapped MMIO register space\n");
706 /* read the interrupt status register
707 * note: this register clears on read so all raised interrupts must be
708 * handled or remembered for later processing
711 RTE_PTR_ADD(registers,
712 RTE_AVP_INTERRUPT_STATUS_OFFSET));
714 if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
715 /* handle interrupt based on current status */
717 RTE_PTR_ADD(registers,
718 RTE_AVP_MIGRATION_STATUS_OFFSET));
720 case RTE_AVP_MIGRATION_DETACHED:
721 ret = avp_dev_detach(eth_dev);
723 case RTE_AVP_MIGRATION_ATTACHED:
724 ret = avp_dev_attach(eth_dev);
727 PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
732 /* acknowledge the request by writing out our current status */
/* on failure, report RTE_AVP_MIGRATION_ERROR instead of echoing status */
733 value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
735 RTE_PTR_ADD(registers,
736 RTE_AVP_MIGRATION_ACK_OFFSET));
738 PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
/* any other raised bits are unexpected; log and continue */
741 if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
742 PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
745 /* re-enable UIO interrupt handling */
746 ret = rte_intr_enable(&pci_dev->intr_handle);
748 PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
755 avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
757 struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
758 void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
761 if (registers == NULL)
764 /* enable UIO interrupt handling */
765 ret = rte_intr_enable(&pci_dev->intr_handle);
767 PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
772 /* inform the device that all interrupts are enabled */
773 AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
774 RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
780 avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
782 struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
783 void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
786 if (registers == NULL)
789 /* inform the device that all interrupts are disabled */
790 AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
791 RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
793 /* enable UIO interrupt handling */
794 ret = rte_intr_disable(&pci_dev->intr_handle);
796 PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
805 avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
807 struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
810 /* register a callback handler with UIO for interrupt notifications */
811 ret = rte_intr_callback_register(&pci_dev->intr_handle,
812 avp_dev_interrupt_handler,
815 PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
820 /* enable interrupt processing */
821 return avp_dev_enable_interrupts(eth_dev);
825 avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
827 struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
828 void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
831 if (registers == NULL)
834 value = AVP_READ32(RTE_PTR_ADD(registers,
835 RTE_AVP_MIGRATION_STATUS_OFFSET));
836 if (value == RTE_AVP_MIGRATION_DETACHED) {
837 /* migration is in progress; ack it if we have not already */
839 RTE_PTR_ADD(registers,
840 RTE_AVP_MIGRATION_ACK_OFFSET));
847 * create a AVP device using the supplied device info by first translating it
848 * to guest address space(s).
851 avp_dev_create(struct rte_pci_device *pci_dev,
852 struct rte_eth_dev *eth_dev)
854 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
855 struct rte_avp_device_info *host_info;
856 struct rte_mem_resource *resource;
/* the host publishes its device info in the device BAR */
859 resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
860 if (resource->addr == NULL) {
861 PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
862 RTE_AVP_PCI_DEVICE_BAR);
865 host_info = (struct rte_avp_device_info *)resource->addr;
867 if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
868 avp_dev_version_check(host_info->version)) {
869 PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
870 host_info->magic, host_info->version,
871 AVP_DPDK_DRIVER_VERSION);
875 PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
876 RTE_AVP_GET_RELEASE_VERSION(host_info->version),
877 RTE_AVP_GET_MAJOR_VERSION(host_info->version),
878 RTE_AVP_GET_MINOR_VERSION(host_info->version));
880 PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
881 host_info->min_tx_queues, host_info->max_tx_queues);
882 PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
883 host_info->min_rx_queues, host_info->max_rx_queues);
884 PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
885 host_info->features);
/* the magic marker distinguishes first-time init from a re-create
 * performed during a VM migration (avp_dev_attach)
 */
887 if (avp->magic != AVP_ETHDEV_MAGIC) {
889 * First time initialization (i.e., not during a VM
892 memset(avp, 0, sizeof(*avp));
893 avp->magic = AVP_ETHDEV_MAGIC;
894 avp->dev_data = eth_dev->data;
895 avp->port_id = eth_dev->data->port_id;
896 avp->host_mbuf_size = host_info->mbuf_size;
897 avp->host_features = host_info->features;
898 rte_spinlock_init(&avp->lock);
899 memcpy(&avp->ethaddr.addr_bytes[0],
900 host_info->ethaddr, ETHER_ADDR_LEN);
901 /* adjust max values to not exceed our max */
903 RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
905 RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
907 /* Re-attaching during migration */
909 /* TODO... requires validation of host values */
910 if ((host_info->features & avp->features) != avp->features) {
911 PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
912 avp->features, host_info->features);
913 /* this should not be possible; continue for now */
917 /* the device id is allowed to change over migrations */
918 avp->device_id = host_info->device_id;
920 /* translate incoming host addresses to guest address space */
921 PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
923 PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
924 host_info->alloc_phys);
/* host fifos are laid out contiguously per queue index */
925 for (i = 0; i < avp->max_tx_queues; i++) {
926 avp->tx_q[i] = avp_dev_translate_address(eth_dev,
927 host_info->tx_phys + (i * host_info->tx_size));
929 avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
930 host_info->alloc_phys + (i * host_info->alloc_size));
933 PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
935 PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
936 host_info->free_phys);
937 for (i = 0; i < avp->max_rx_queues; i++) {
938 avp->rx_q[i] = avp_dev_translate_address(eth_dev,
939 host_info->rx_phys + (i * host_info->rx_size));
940 avp->free_q[i] = avp_dev_translate_address(eth_dev,
941 host_info->free_phys + (i * host_info->free_size));
944 PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
945 host_info->req_phys);
946 PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
947 host_info->resp_phys);
948 PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
949 host_info->sync_phys);
950 PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
951 host_info->mbuf_phys);
952 avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
953 avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
955 avp_dev_translate_address(eth_dev, host_info->sync_phys);
957 avp_dev_translate_address(eth_dev, host_info->mbuf_phys);
960 * store the host mbuf virtual address so that we can calculate
961 * relative offsets for each mbuf as they are processed
963 avp->host_mbuf_addr = host_info->mbuf_va;
964 avp->host_sync_addr = host_info->sync_va;
967 * store the maximum packet length that is supported by the host.
969 avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
970 PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
971 host_info->max_rx_pkt_len);
977 * This function is based on probe() function in avp_pci.c
978 * It returns 0 on success.
981 eth_avp_dev_init(struct rte_eth_dev *eth_dev)
983 struct avp_dev *avp =
984 AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
985 struct rte_pci_device *pci_dev;
988 pci_dev = AVP_DEV_TO_PCI(eth_dev);
989 eth_dev->dev_ops = &avp_eth_dev_ops;
/* default to the non-scattered burst handlers; queue setup may switch
 * to the scattered variants later
 */
990 eth_dev->rx_pkt_burst = &avp_recv_pkts;
991 eth_dev->tx_pkt_burst = &avp_xmit_pkts;
993 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
995 * no setup required on secondary processes. All data is saved
996 * in dev_private by the primary process. All resource should
997 * be mapped to the same virtual address so all pointers should
/* secondary must mirror the primary's choice of burst handlers */
1000 if (eth_dev->data->scattered_rx) {
1001 PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
1002 eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
1003 eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
1008 rte_eth_copy_pci_info(eth_dev, pci_dev);
1010 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1012 /* Check current migration status */
/* refuse to init mid-migration; the host owns the device right now */
1013 if (avp_dev_migration_pending(eth_dev)) {
1014 PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
1018 /* Check BAR resources */
1019 ret = avp_dev_check_regions(eth_dev);
1021 PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
1026 /* Enable interrupts */
1027 ret = avp_dev_setup_interrupts(eth_dev);
1029 PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
1033 /* Handle each subtype */
1034 ret = avp_dev_create(pci_dev, eth_dev);
1036 PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
1040 /* Allocate memory for storing MAC addresses */
1041 eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
1042 if (eth_dev->data->mac_addrs == NULL) {
1043 PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
1048 /* Get a mac from device config */
1049 ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);
1055 eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
1059 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1062 if (eth_dev->data == NULL)
1065 ret = avp_dev_disable_interrupts(eth_dev);
1067 PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
1071 if (eth_dev->data->mac_addrs != NULL) {
1072 rte_free(eth_dev->data->mac_addrs);
1073 eth_dev->data->mac_addrs = NULL;
1080 eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1081 struct rte_pci_device *pci_dev)
1083 struct rte_eth_dev *eth_dev;
1086 eth_dev = rte_eth_dev_pci_allocate(pci_dev,
1087 sizeof(struct avp_adapter));
1088 if (eth_dev == NULL)
1091 ret = eth_avp_dev_init(eth_dev);
1093 rte_eth_dev_pci_release(eth_dev);
1099 eth_avp_pci_remove(struct rte_pci_device *pci_dev)
1101 return rte_eth_dev_pci_generic_remove(pci_dev,
1102 eth_avp_dev_uninit);
/* PCI driver definition registered with the EAL */
1105 static struct rte_pci_driver rte_avp_pmd = {
1106 .id_table = pci_id_avp_map,
1107 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1108 .probe = eth_avp_pci_probe,
1109 .remove = eth_avp_pci_remove,
1113 avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
1114 struct avp_dev *avp)
1116 unsigned int max_rx_pkt_len;
1118 max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
1120 if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
1121 (max_rx_pkt_len > avp->host_mbuf_size)) {
1123 * If the guest MTU is greater than either the host or guest
1124 * buffers then chained mbufs have to be enabled in the TX
1125 * direction. It is assumed that the application will not need
1126 * to send packets larger than their max_rx_pkt_len (MRU).
1131 if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
1132 (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
1134 * If the host MRU is greater than its own mbuf size or the
1135 * guest mbuf size then chained mbufs have to be enabled in the
/* ethdev rx_queue_setup callback: allocate an avp_queue, record the mbuf
 * pool, switch to scattered burst handlers when needed, and map AVP fifos
 * onto this queue.
 */
1145 avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
1146 uint16_t rx_queue_id,
1147 uint16_t nb_rx_desc,
1148 unsigned int socket_id,
1149 const struct rte_eth_rxconf *rx_conf,
1150 struct rte_mempool *pool)
1152 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1153 struct rte_pktmbuf_pool_private *mbp_priv;
1154 struct avp_queue *rxq;
1156 if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
1157 PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
1158 rx_queue_id, eth_dev->data->nb_rx_queues);
1162 /* Save mbuf pool pointer */
1165 /* Save the local mbuf size */
/* usable payload per mbuf is the data room less the headroom */
1166 mbp_priv = rte_mempool_get_priv(pool);
1167 avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
1168 avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;
/* once any queue needs chained mbufs, both burst directions switch */
1170 if (avp_dev_enable_scattered(eth_dev, avp)) {
1171 if (!eth_dev->data->scattered_rx) {
1172 PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
1173 eth_dev->data->scattered_rx = 1;
1174 eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
1175 eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
1179 PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
1180 avp->max_rx_pkt_len,
1181 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
1182 avp->host_mbuf_size,
1183 avp->guest_mbuf_size);
1185 /* allocate a queue object */
1186 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
1187 RTE_CACHE_LINE_SIZE, socket_id);
1189 PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
1193 /* save back pointers to AVP and Ethernet devices */
1195 rxq->dev_data = eth_dev->data;
1196 eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;
1198 /* setup the queue receive mapping for the current queue. */
1199 _avp_set_rx_queue_mappings(eth_dev, rx_queue_id);
1201 PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);
/*
 * Ethdev tx_queue_setup callback.
 *
 * Validates the queue id and allocates a queue object.  Unlike the rx
 * path, tx queues map one-to-one onto hardware queues, so queue_base and
 * queue_limit both equal the queue id.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int
avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t tx_queue_id,
		       uint16_t nb_tx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *txq;

	if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
		PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
			    tx_queue_id, eth_dev->data->nb_tx_queues);
		return -EINVAL;
	}

	/* allocate a queue object */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
		return -ENOMEM;
	}

	/* only the configured set of transmit queues are used */
	txq->queue_id = tx_queue_id;
	txq->queue_base = tx_queue_id;
	txq->queue_limit = tx_queue_id;

	/* save back pointers to AVP and Ethernet devices */
	txq->avp = avp;
	txq->dev_data = eth_dev->data;
	eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;

	PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);

	return 0;
}
1250 _avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
1252 uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
1253 uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
1254 return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
1258 _avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
1260 struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
1262 if (likely(_avp_cmp_ether_addr(&avp->ethaddr, ð->d_addr) == 0)) {
1263 /* allow all packets destined to our address */
1267 if (likely(is_broadcast_ether_addr(ð->d_addr))) {
1268 /* allow all broadcast packets */
1272 if (likely(is_multicast_ether_addr(ð->d_addr))) {
1273 /* allow all multicast packets */
1277 if (avp->flags & AVP_F_PROMISC) {
1278 /* allow all packets when in promiscuous mode */
#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
/*
 * Debug-only validation of a host buffer chain.
 *
 * Walks the descriptor chain (translating each host address into a guest
 * pointer) and panics if any segment has an invalid address, a NULL data
 * pointer or a zero data length, or if the chain's segment count or total
 * length disagrees with the header of the first descriptor.  Compiled in
 * only when RTE_LIBRTE_AVP_DEBUG_BUFFERS is defined.
 */
static inline void
__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
{
	struct rte_avp_desc *first_buf;
	struct rte_avp_desc *pkt_buf;
	unsigned int pkt_len;
	unsigned int nb_segs;
	void *pkt_data;
	unsigned int i;

	first_buf = avp_dev_translate_buffer(avp, buf);

	i = 0;
	pkt_len = 0;
	nb_segs = first_buf->nb_segs;
	do {
		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		if (pkt_buf == NULL)
			rte_panic("bad buffer: segment %u has an invalid address %p\n",
				  i, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		if (pkt_data == NULL)
			rte_panic("bad buffer: segment %u has a NULL data pointer\n",
				  i);
		if (pkt_buf->data_len == 0)
			rte_panic("bad buffer: segment %u has 0 data length\n",
				  i);
		pkt_len += pkt_buf->data_len;
		nb_segs--;
		i++;
	} while (nb_segs && (buf = pkt_buf->next) != NULL);

	if (nb_segs != 0)
		rte_panic("bad buffer: expected %u segments found %u\n",
			  first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
	if (pkt_len != first_buf->pkt_len)
		rte_panic("bad buffer: expected length %u found %u\n",
			  first_buf->pkt_len, pkt_len);
}

#define avp_dev_buffer_sanity_check(a, b) \
	__avp_dev_buffer_sanity_check((a), (b))

#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */

/* compiled out in non-debug builds */
#define avp_dev_buffer_sanity_check(a, b) do {} while (0)

#endif
/*
 * Copy a host buffer chain to a set of mbufs.  This function assumes that
 * there are exactly the required number of mbufs to copy all source bytes;
 * the caller sizes the mbufs[] array from the packet length.
 *
 * Returns the head mbuf of the assembled chain with pkt_len, ol_flags and
 * vlan_tci populated from the first host descriptor.
 */
static inline struct rte_mbuf *
avp_dev_copy_from_buffers(struct avp_dev *avp,
			  struct rte_avp_desc *buf,
			  struct rte_mbuf **mbufs,
			  unsigned int count)
{
	struct rte_mbuf *m_previous = NULL;
	struct rte_avp_desc *pkt_buf;
	unsigned int total_length = 0;
	unsigned int copy_length;
	unsigned int src_offset;
	struct rte_mbuf *m;
	uint16_t ol_flags;
	uint16_t vlan_tci;
	void *pkt_data;
	unsigned int i;

	avp_dev_buffer_sanity_check(avp, buf);

	/* setup the first source buffer */
	pkt_buf = avp_dev_translate_buffer(avp, buf);
	pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
	total_length = pkt_buf->pkt_len;
	src_offset = 0;

	/* VLAN metadata travels in the first descriptor only */
	if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
		ol_flags = PKT_RX_VLAN_PKT;
		vlan_tci = pkt_buf->vlan_tci;
	} else {
		ol_flags = 0;
		vlan_tci = 0;
	}

	for (i = 0; (i < count) && (buf != NULL); i++) {
		/* fill each destination buffer */
		m = mbufs[i];

		if (m_previous != NULL)
			m_previous->next = m;

		m_previous = m;

		do {
			/*
			 * Copy as many source buffers as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->guest_mbuf_size -
					       rte_pktmbuf_data_len(m)),
					      (pkt_buf->data_len -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       rte_pktmbuf_data_len(m)),
				   RTE_PTR_ADD(pkt_data, src_offset),
				   copy_length);
			rte_pktmbuf_data_len(m) += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == pkt_buf->data_len)) {
				/* need a new source buffer */
				buf = pkt_buf->next;
				if (buf != NULL) {
					pkt_buf = avp_dev_translate_buffer(
						avp, buf);
					pkt_data = avp_dev_translate_buffer(
						avp, pkt_buf->data);
					src_offset = 0;
				}
			}

			if (unlikely(rte_pktmbuf_data_len(m) ==
				     avp->guest_mbuf_size)) {
				/* need a new destination mbuf */
				break;
			}

		} while (buf != NULL);
	}

	/* finalize the head mbuf of the chain */
	m = mbufs[0];
	m->ol_flags = ol_flags;
	m->nb_segs = count;
	rte_pktmbuf_pkt_len(m) = total_length;
	m->vlan_tci = vlan_tci;

	__rte_mbuf_sanity_check(m, 1);

	return m;
}
/*
 * Scattered (chained-mbuf) receive burst function.
 *
 * Pulls up to nb_pkts host descriptors from the rx FIFO, copies each
 * packet into a freshly allocated chain of guest mbufs, applies the
 * software MAC filter, and returns the host descriptors to the free FIFO.
 * Returns the number of mbufs written to rx_pkts.
 */
static uint16_t
avp_recv_scattered_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	struct rte_avp_desc *buf;
	unsigned int count, avail, n;
	unsigned int guest_mbuf_size;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int buf_len;
	unsigned int port_id;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		return 0;
	}

	guest_mbuf_size = avp->guest_mbuf_size;
	port_id = avp->port_id;
	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service (round-robin over the mapped range) */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	/* count is reused below as the output index into rx_pkts[] */
	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i + 1 < n) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}
		buf = avp_bufs[i];

		/* Peek into the first buffer to determine the total length */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		buf_len = pkt_buf->pkt_len;

		/* Allocate enough mbufs to receive the entire packet */
		required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
		if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			continue;
		}

		/* Copy the data from the buffers to our mbufs */
		m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);

		/* finalize mbuf */
		m->port = port_id;

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += buf_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}
/*
 * Non-scattered receive burst function (single descriptor per packet).
 *
 * Pulls host descriptors from the rx FIFO, copies each single-segment
 * packet into one guest mbuf, applies the software MAC filter, and returns
 * the host descriptors to the free FIFO.  Packets that require chaining
 * are counted as errors and dropped, since the device should have been
 * configured with the scattered burst functions in that case.
 * Returns the number of mbufs written to rx_pkts.
 */
static uint16_t
avp_recv_pkts(void *rx_queue,
	      struct rte_mbuf **rx_pkts,
	      uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	unsigned int count, avail, n;
	unsigned int pkt_len;
	struct rte_mbuf *m;
	char *pkt_data;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		return 0;
	}

	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service (round-robin over the mapped range) */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	/* count is reused below as the output index into rx_pkts[] */
	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i + 1 < n) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust host pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = pkt_buf->pkt_len;

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_buf->nb_segs > 1))) {
			/*
			 * application should be using the scattered receive
			 * function since it was configured with a scattered
			 * mbuf pool
			 */
			rxq->errors++;
			continue;
		}

		/* process each packet to be transmitted */
		m = rte_pktmbuf_alloc(avp->pool);
		if (unlikely(m == NULL)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			continue;
		}

		/* copy data out of the host buffer to our buffer */
		m->data_off = RTE_PKTMBUF_HEADROOM;
		rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);

		/* initialize the local mbuf */
		rte_pktmbuf_data_len(m) = pkt_len;
		rte_pktmbuf_pkt_len(m) = pkt_len;
		m->port = avp->port_id;

		if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
			m->ol_flags = PKT_RX_VLAN_PKT;
			m->vlan_tci = pkt_buf->vlan_tci;
		}

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += pkt_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}
/*
 * Copy a chained mbuf to a set of host buffers.  This function assumes that
 * there are sufficient destination buffers to contain the entire source
 * packet; the caller sizes the buffers[] array from the packet length.
 *
 * Returns the total number of bytes copied (the packet length).
 */
static inline uint16_t
avp_dev_copy_to_buffers(struct avp_dev *avp,
			struct rte_mbuf *mbuf,
			struct rte_avp_desc **buffers,
			unsigned int count)
{
	struct rte_avp_desc *previous_buf = NULL;
	struct rte_avp_desc *first_buf = NULL;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_desc *buf;
	size_t total_length;
	struct rte_mbuf *m;
	size_t copy_length;
	size_t src_offset;
	char *pkt_data;
	unsigned int i;

	__rte_mbuf_sanity_check(mbuf, 1);

	m = mbuf;
	src_offset = 0;
	total_length = rte_pktmbuf_pkt_len(m);
	for (i = 0; (i < count) && (m != NULL); i++) {
		/* fill each destination buffer */
		buf = buffers[i];

		if (i < count - 1) {
			/* prefetch next entry while processing this one */
			pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);

		/* setup the buffer chain */
		if (previous_buf != NULL)
			previous_buf->next = buf;
		else
			first_buf = pkt_buf;

		previous_buf = pkt_buf;

		do {
			/*
			 * copy as many source mbuf segments as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->host_mbuf_size -
					       pkt_buf->data_len),
					      (rte_pktmbuf_data_len(m) -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
				   RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       src_offset),
				   copy_length);
			pkt_buf->data_len += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == rte_pktmbuf_data_len(m))) {
				/* need a new source buffer */
				m = m->next;
				src_offset = 0;
			}

			if (unlikely(pkt_buf->data_len ==
				     avp->host_mbuf_size)) {
				/* need a new destination buffer */
				break;
			}

		} while (m != NULL);
	}

	/* the header of the first descriptor describes the whole chain */
	first_buf->nb_segs = count;
	first_buf->pkt_len = total_length;

	if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
		first_buf->vlan_tci = mbuf->vlan_tci;
	}

	avp_dev_buffer_sanity_check(avp, buffers[0]);

	return total_length;
}
/*
 * Scattered (chained-mbuf) transmit burst function.
 *
 * Computes how many host buffers each mbuf chain needs, truncates the
 * burst to what the alloc FIFO and tx FIFO can absorb, copies the packets
 * into host buffers and enqueues them.  Packets that cannot be attempted
 * are accounted in txq->errors.  Returns the number of packets enqueued.
 */
static uint16_t
avp_xmit_scattered_pkts(void *tx_queue,
			struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
				       RTE_AVP_MAX_MBUF_SEGMENTS)];
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	unsigned int orig_nb_pkts;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int segments;
	unsigned int tx_bytes;
	unsigned int i;

	orig_nb_pkts = nb_pkts;
	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		/* TODO ... buffer for X packets then drop? */
		txq->errors += nb_pkts;
		return 0;
	}

	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);
	if (unlikely(avail > (AVP_MAX_TX_BURST *
			      RTE_AVP_MAX_MBUF_SEGMENTS)))
		avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	nb_pkts = RTE_MIN(count, nb_pkts);

	/* determine how many packets will fit in the available buffers */
	count = 0;
	segments = 0;
	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		if (likely(i < (unsigned int)nb_pkts - 1)) {
			/* prefetch next entry while processing this one */
			rte_prefetch0(tx_pkts[i + 1]);
		}
		/* ceil(pkt_len / host_mbuf_size) host buffers needed */
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		if (unlikely((required == 0) ||
			     (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
			break;
		else if (unlikely(required + segments > avail))
			break;
		segments += required;
		count++;
	}
	nb_pkts = count;

	if (unlikely(nb_pkts == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += orig_nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   nb_pkts, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
	if (unlikely(n != segments)) {
		PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
			   "n=%u, segments=%u, orig=%u\n",
			   n, segments, orig_nb_pkts);
		txq->errors += orig_nb_pkts;
		return 0;
	}

	tx_bytes = 0;
	count = 0;
	for (i = 0; i < nb_pkts; i++) {
		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* determine how many buffers are required for this packet */
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		tx_bytes += avp_dev_copy_to_buffers(avp, m,
						    &avp_bufs[count], required);
		tx_bufs[i] = avp_bufs[count];
		count += required;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += nb_pkts;
	txq->bytes += tx_bytes;

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
	for (i = 0; i < nb_pkts; i++)
		avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
#endif

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
	if (unlikely(n != orig_nb_pkts))
		txq->errors += (orig_nb_pkts - n);

	return n;
}
/*
 * Non-scattered transmit burst function (one host buffer per packet).
 *
 * Copies each mbuf into a single host buffer and enqueues it on the tx
 * FIFO.  Oversized packets are truncated (and counted as errors) rather
 * than chained; applications needing larger packets should be running the
 * scattered burst functions.  Returns the number of packets enqueued.
 */
static uint16_t
avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	struct rte_mbuf *m;
	unsigned int pkt_len;
	unsigned int tx_bytes;
	char *pkt_data;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		/* TODO ... buffer for X packets then drop?! */
		txq->errors++;
		return 0;
	}

	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);

	if (unlikely(count == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   count, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
	if (unlikely(n != count)) {
		txq->errors++;
		return 0;
	}

	tx_bytes = 0;
	for (i = 0; i < count; i++) {
		/* prefetch next entry while processing the current one */
		if (i < count - 1) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = rte_pktmbuf_pkt_len(m);

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_len > avp->host_mbuf_size))) {
			/*
			 * application should be using the scattered transmit
			 * function; send it truncated to avoid the performance
			 * hit of having to manage returning the already
			 * allocated buffer to the free list.  This should not
			 * happen since the application should have set the
			 * max_rx_pkt_len based on its MTU and it should be
			 * policing its own packet sizes.
			 */
			txq->errors++;
			pkt_len = RTE_MIN(avp->guest_mbuf_size,
					  avp->host_mbuf_size);
		}

		/* copy data out of our mbuf and into the AVP buffer */
		rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
		pkt_buf->pkt_len = pkt_len;
		pkt_buf->data_len = pkt_len;
		pkt_buf->nb_segs = 1;
		pkt_buf->next = NULL;

		if (m->ol_flags & PKT_TX_VLAN_PKT) {
			pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
			pkt_buf->vlan_tci = m->vlan_tci;
		}

		tx_bytes += pkt_len;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += count;
	txq->bytes += tx_bytes;

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);

	return n;
}
1983 avp_dev_rx_queue_release(void *rx_queue)
1985 struct avp_queue *rxq = (struct avp_queue *)rx_queue;
1986 struct avp_dev *avp = rxq->avp;
1987 struct rte_eth_dev_data *data = avp->dev_data;
1990 for (i = 0; i < avp->num_rx_queues; i++) {
1991 if (data->rx_queues[i] == rxq)
1992 data->rx_queues[i] = NULL;
1997 avp_dev_tx_queue_release(void *tx_queue)
1999 struct avp_queue *txq = (struct avp_queue *)tx_queue;
2000 struct avp_dev *avp = txq->avp;
2001 struct rte_eth_dev_data *data = avp->dev_data;
2004 for (i = 0; i < avp->num_tx_queues; i++) {
2005 if (data->tx_queues[i] == txq)
2006 data->tx_queues[i] = NULL;
/*
 * Ethdev dev_configure callback.
 *
 * Under the device spinlock (and refused during VM live migration):
 * derives the queue counts, applies the VLAN offload configuration, and
 * sends the resulting device configuration to the host via the control
 * channel.  Sets AVP_F_CONFIGURED on success.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_avp_device_config config;
	int mask = 0;
	void *addr;
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		ret = -ENOTSUP;
		goto unlock;
	}

	/* host device info lives in the device BAR */
	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/* Setup required number of queues */
	_avp_set_queue_counts(eth_dev);

	mask = (ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);
	avp_vlan_offload_set(eth_dev, mask);

	/* update device config */
	memset(&config, 0, sizeof(config));
	config.device_id = host_info->device_id;
	config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
	config.driver_version = AVP_DPDK_DRIVER_VERSION;
	config.features = avp->features;
	config.num_tx_queues = avp->num_tx_queues;
	config.num_rx_queues = avp->num_rx_queues;

	ret = avp_dev_ctrl_set_config(eth_dev, &config);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	avp->flags |= AVP_F_CONFIGURED;
	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}
/*
 * Ethdev dev_start callback.
 *
 * Under the device spinlock (and refused during VM live migration):
 * clears rxmode offload flags this PMD does not support, asks the host to
 * bring the link up, and records the link state in avp->flags.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int
avp_dev_start(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		ret = -ENOTSUP;
		goto unlock;
	}

	/* disable features that we do not support */
	eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0;
	eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
	eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0;
	eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0;

	/* update link state */
	ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	/* remember current link state */
	avp->flags |= AVP_F_LINKUP;

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}
/*
 * Ethdev dev_stop callback.
 *
 * Under the device spinlock (and a no-op during VM live migration):
 * clears the local link-up flag and asks the host to take the link down.
 * A host-side failure is logged but not propagated (the callback is void).
 */
static void
avp_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		goto unlock;
	}

	/* remember current link state */
	avp->flags &= ~AVP_F_LINKUP;

	/* update link state */
	ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
			    ret);
	}

unlock:
	rte_spinlock_unlock(&avp->lock);
}
/*
 * Ethdev dev_close callback.
 *
 * Under the device spinlock (and a no-op during VM live migration):
 * clears the link-up and configured flags, disables device interrupts and
 * requests a host-side shutdown.  Failures are logged and the teardown
 * continues, since there is no way to report errors from a void callback.
 */
static void
avp_dev_close(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		goto unlock;
	}

	/* remember current link state */
	avp->flags &= ~AVP_F_LINKUP;
	avp->flags &= ~AVP_F_CONFIGURED;

	ret = avp_dev_disable_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
		/* continue with the close anyway */
	}

	/* update device state */
	ret = avp_dev_ctrl_shutdown(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
			    ret);
		/* continue with the close anyway */
	}

unlock:
	rte_spinlock_unlock(&avp->lock);
}
2161 avp_dev_link_update(struct rte_eth_dev *eth_dev,
2162 __rte_unused int wait_to_complete)
2164 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2165 struct rte_eth_link *link = ð_dev->data->dev_link;
2167 link->link_speed = ETH_SPEED_NUM_10G;
2168 link->link_duplex = ETH_LINK_FULL_DUPLEX;
2169 link->link_status = !!(avp->flags & AVP_F_LINKUP);
2175 avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
2177 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2179 rte_spinlock_lock(&avp->lock);
2180 if ((avp->flags & AVP_F_PROMISC) == 0) {
2181 avp->flags |= AVP_F_PROMISC;
2182 PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
2183 eth_dev->data->port_id);
2185 rte_spinlock_unlock(&avp->lock);
2189 avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
2191 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2193 rte_spinlock_lock(&avp->lock);
2194 if ((avp->flags & AVP_F_PROMISC) != 0) {
2195 avp->flags &= ~AVP_F_PROMISC;
2196 PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
2197 eth_dev->data->port_id);
2199 rte_spinlock_unlock(&avp->lock);
2203 avp_dev_info_get(struct rte_eth_dev *eth_dev,
2204 struct rte_eth_dev_info *dev_info)
2206 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2208 dev_info->driver_name = "rte_avp_pmd";
2209 dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
2210 dev_info->max_rx_queues = avp->max_rx_queues;
2211 dev_info->max_tx_queues = avp->max_tx_queues;
2212 dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
2213 dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
2214 dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
2215 if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
2216 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
2217 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
/*
 * Ethdev vlan_offload_set callback.
 *
 * VLAN stripping is the only offload the host may support; it is toggled
 * by mirroring the rxmode request into avp->features when the host
 * advertises RTE_AVP_FEATURE_VLAN_OFFLOAD.  Filter and extend requests
 * are never supported and are only logged when asked for.
 */
static void
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
			if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
			else
				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
		} else {
			PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
		}
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
	}
}
2249 avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
2251 struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2254 for (i = 0; i < avp->num_rx_queues; i++) {
2255 struct avp_queue *rxq = avp->dev_data->rx_queues[i];
2258 stats->ipackets += rxq->packets;
2259 stats->ibytes += rxq->bytes;
2260 stats->ierrors += rxq->errors;
2262 stats->q_ipackets[i] += rxq->packets;
2263 stats->q_ibytes[i] += rxq->bytes;
2264 stats->q_errors[i] += rxq->errors;
2268 for (i = 0; i < avp->num_tx_queues; i++) {
2269 struct avp_queue *txq = avp->dev_data->tx_queues[i];
2272 stats->opackets += txq->packets;
2273 stats->obytes += txq->bytes;
2274 stats->oerrors += txq->errors;
2276 stats->q_opackets[i] += txq->packets;
2277 stats->q_obytes[i] += txq->bytes;
2278 stats->q_errors[i] += txq->errors;
/*
 * Ethdev stats_reset callback.
 *
 * Zeroes the per-queue software counters for every configured rx and tx
 * queue; slots that were never set up (NULL) are skipped.
 */
static void
avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		struct avp_queue *rxq = avp->dev_data->rx_queues[i];

		if (rxq) {
			rxq->bytes = 0;
			rxq->packets = 0;
			rxq->errors = 0;
		}
	}

	for (i = 0; i < avp->num_tx_queues; i++) {
		struct avp_queue *txq = avp->dev_data->tx_queues[i];

		if (txq) {
			txq->bytes = 0;
			txq->packets = 0;
			txq->errors = 0;
		}
	}
}
/* Register the AVP PMD with the EAL under the name "net_avp" and export
 * its PCI ID table for driver binding tools.
 */
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);