/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2013-2017, Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1) Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2) Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3) Neither the name of Wind River Systems nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_io.h>

#include "rte_avp_common.h"
#include "rte_avp_fifo.h"

#include "avp_logs.h"
static int avp_dev_create(struct rte_pci_device *pci_dev,
			  struct rte_eth_dev *eth_dev);

static int avp_dev_configure(struct rte_eth_dev *dev);
static int avp_dev_start(struct rte_eth_dev *dev);
static void avp_dev_stop(struct rte_eth_dev *dev);
static void avp_dev_close(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev,
			       __rte_unused int wait_to_complete);
static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);

static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t rx_queue_id,
				  uint16_t nb_rx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *pool);

static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t tx_queue_id,
				  uint16_t nb_tx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_txconf *tx_conf);

static uint16_t avp_recv_scattered_pkts(void *rx_queue,
					struct rte_mbuf **rx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_recv_pkts(void *rx_queue,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);

static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
					struct rte_mbuf **tx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_xmit_pkts(void *tx_queue,
			      struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts);

static void avp_dev_rx_queue_release(void *rxq);
static void avp_dev_tx_queue_release(void *txq);

static void avp_dev_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats);
static void avp_dev_stats_reset(struct rte_eth_dev *dev);
#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)

#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
/*
 * Defines the number of microseconds to wait before checking the response
 * queue for completion.
 */
#define AVP_REQUEST_DELAY_USECS (5000)

/*
 * Defines the number of times to check the response queue for completion
 * before declaring a timeout.
 */
#define AVP_MAX_REQUEST_RETRY (100)
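/*
 * Illustrative timing note (not from the original source): with the values
 * above, avp_dev_process_request() polls the response queue every 5000 us
 * and retries up to 100 times, i.e. it waits roughly 500 ms in total before
 * declaring that a request has timed out.
 */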
/* Defines the current PCI driver version number */
#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_avp_map[] = {
	{ .vendor_id = RTE_AVP_PCI_VENDOR_ID,
	  .device_id = RTE_AVP_PCI_DEVICE_ID,
	  .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
	  .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
	  .class_id = RTE_CLASS_ANY_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};
/*
 * dev_ops for avp, bare necessities for basic operation
 */
static const struct eth_dev_ops avp_eth_dev_ops = {
	.dev_configure = avp_dev_configure,
	.dev_start = avp_dev_start,
	.dev_stop = avp_dev_stop,
	.dev_close = avp_dev_close,
	.dev_infos_get = avp_dev_info_get,
	.vlan_offload_set = avp_vlan_offload_set,
	.stats_get = avp_dev_stats_get,
	.stats_reset = avp_dev_stats_reset,
	.link_update = avp_dev_link_update,
	.promiscuous_enable = avp_dev_promiscuous_enable,
	.promiscuous_disable = avp_dev_promiscuous_disable,
	.rx_queue_setup = avp_dev_rx_queue_setup,
	.rx_queue_release = avp_dev_rx_queue_release,
	.tx_queue_setup = avp_dev_tx_queue_setup,
	.tx_queue_release = avp_dev_tx_queue_release,
};
/**@{ AVP device flags */
#define AVP_F_PROMISC (1 << 1)
#define AVP_F_CONFIGURED (1 << 2)
#define AVP_F_LINKUP (1 << 3)
#define AVP_F_DETACHED (1 << 4)
/**@} */

/* Ethernet device validation marker */
#define AVP_ETHDEV_MAGIC 0x92972862
/*
 * Defines the AVP device attributes which are attached to an RTE ethernet
 * device
 */
struct avp_dev {
	uint32_t magic; /**< Memory validation marker */
	uint64_t device_id; /**< Unique system identifier */
	struct ether_addr ethaddr; /**< Host specified MAC address */
	struct rte_eth_dev_data *dev_data;
	/**< Back pointer to ethernet device data */
	volatile uint32_t flags; /**< Device operational flags */
	uint8_t port_id; /**< Ethernet port identifier */
	struct rte_mempool *pool; /**< pkt mbuf mempool */
	unsigned int guest_mbuf_size; /**< local pool mbuf size */
	unsigned int host_mbuf_size; /**< host mbuf size */
	unsigned int max_rx_pkt_len; /**< maximum receive unit */
	uint32_t host_features; /**< Supported feature bitmap */
	uint32_t features; /**< Enabled feature bitmap */
	unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
	unsigned int max_tx_queues; /**< Maximum number of transmit queues */
	unsigned int num_rx_queues; /**< Negotiated number of receive queues */
	unsigned int max_rx_queues; /**< Maximum number of receive queues */

	struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
	struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
	struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
	/**< Allocated mbufs queue */
	struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
	/**< To be freed mbufs queue */

	/* mutual exclusion over the 'flag' and 'resp_q/req_q' fields */
	rte_spinlock_t lock;

	/* For request & response */
	struct rte_avp_fifo *req_q; /**< Request queue */
	struct rte_avp_fifo *resp_q; /**< Response queue */
	void *host_sync_addr; /**< (host) Req/Resp Mem address */
	void *sync_addr; /**< Req/Resp Mem address */
	void *host_mbuf_addr; /**< (host) MBUF pool start address */
	void *mbuf_addr; /**< MBUF pool start address */
} __rte_cache_aligned;
/* RTE ethernet private data */
struct avp_adapter {
	struct avp_dev avp;
} __rte_cache_aligned;
/* 32-bit MMIO register write */
#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))

/* 32-bit MMIO register read */
#define AVP_READ32(_addr) rte_read32_relaxed((_addr))

/* Macro to cast the ethernet device private data to an AVP object */
#define AVP_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct avp_adapter *)adapter)->avp)
/*
 * Defines the structure of an AVP device queue for the purpose of handling the
 * receive and transmit burst callback functions
 */
struct avp_queue {
	struct rte_eth_dev_data *dev_data;
	/**< Backpointer to ethernet device data */
	struct avp_dev *avp; /**< Backpointer to AVP device */
	uint16_t queue_id;
	/**< Queue identifier used for indexing current queue */
	uint16_t queue_base;
	/**< Base queue identifier for queue servicing */
	uint16_t queue_limit;
	/**< Maximum queue identifier for queue servicing */

	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};
/* send a request and wait for a response
 *
 * @warning must be called while holding the avp->lock spinlock.
 */
static int
avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
{
	unsigned int retry = AVP_MAX_REQUEST_RETRY;
	void *resp_addr = NULL;
	unsigned int count;
	int ret;

	PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);

	request->result = -ENOTSUP;

	/* Discard any stale responses before starting a new request */
	while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
		PMD_DRV_LOG(DEBUG, "Discarding stale response\n");

	rte_memcpy(avp->sync_addr, request, sizeof(*request));
	count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
	if (count < 1) {
		PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
			    request->req_id);
		ret = -EBUSY;
		goto done;
	}

	while (retry--) {
		/* wait for a response */
		usleep(AVP_REQUEST_DELAY_USECS);

		count = avp_fifo_count(avp->resp_q);
		if (count >= 1) {
			/* response received */
			break;
		}

		if ((count < 1) && (retry == 0)) {
			PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
				    request->req_id);
			ret = -ETIME;
			goto done;
		}
	}

	/* retrieve the response */
	count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
	if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
		PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
			    count, resp_addr, avp->host_sync_addr);
		ret = -ENODATA;
		goto done;
	}

	/* copy to user buffer */
	rte_memcpy(request, avp->sync_addr, sizeof(*request));
	ret = 0;

	PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
		    request->result, request->req_id);

done:
	return ret;
}
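/*
 * Usage sketch (illustrative only; the avp_dev_ctrl_*() helpers below all
 * follow this pattern):
 *
 *	struct rte_avp_request request;
 *	int ret;
 *
 *	memset(&request, 0, sizeof(request));
 *	request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
 *	request.if_up = 1;
 *	ret = avp_dev_process_request(avp, &request);
 *	// on ret == 0, request.result carries the host's status code
 *
 * The caller chain must already hold avp->lock, as warned above.
 */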
static int
avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a link state change request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
	request.if_up = state;

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}
static int
avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
			struct rte_avp_device_config *config)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a configure request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_DEVICE;
	memcpy(&request.config, config, sizeof(request.config));

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}
static int
avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a shutdown request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}
/* translate from host mbuf virtual address to guest virtual address */
static inline void *
avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
{
	return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
				       (uintptr_t)avp->host_mbuf_addr),
			   (uintptr_t)avp->mbuf_addr);
}
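/*
 * Illustration with hypothetical addresses: if the host maps its mbuf pool
 * at host_mbuf_addr = 0x7f0000000000 and the guest sees the same memory at
 * mbuf_addr = 0x400000000, a host pointer 0x7f0000001000 translates to
 * 0x400000000 + (0x7f0000001000 - 0x7f0000000000) = 0x400000001000. Only
 * the offset relative to the start of the shared pool is significant.
 */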
/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
			  phys_addr_t host_phys_addr)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct rte_mem_resource *resource;
	struct rte_avp_memmap_info *info;
	struct rte_avp_memmap *map;
	off_t offset;
	void *addr;
	unsigned int i;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
	resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
	info = (struct rte_avp_memmap_info *)resource->addr;

	offset = 0;
	for (i = 0; i < info->nb_maps; i++) {
		/* search all segments looking for a matching address */
		map = &info->maps[i];

		if ((host_phys_addr >= map->phys_addr) &&
		    (host_phys_addr < (map->phys_addr + map->length))) {
			/* address is within this segment */
			offset += (host_phys_addr - map->phys_addr);
			addr = RTE_PTR_ADD(addr, offset);

			PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
				    host_phys_addr, addr);
			break;
		}
		offset += map->length;
	}

	return addr;
}
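/*
 * Illustration with a hypothetical memmap of two segments:
 *	maps[0] = { .phys_addr = 0x10000000, .length = 0x100000 }
 *	maps[1] = { .phys_addr = 0x30000000, .length = 0x100000 }
 * A host physical address of 0x30000100 misses maps[0] (offset advances by
 * 0x100000) and hits maps[1], so the function returns the memory BAR
 * address plus 0x100000 + (0x30000100 - 0x30000000). The segments are
 * assumed to be packed back-to-back in the memory BAR.
 */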
/* verify that the incoming device version is compatible with our version */
static int
avp_dev_version_check(uint32_t version)
{
	uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
	uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);

	if (device <= driver) {
		/* the host driver version is less than or equal to ours */
		return 0;
	}

	return 1;
}
/* verify that memory regions have expected version and validation markers */
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct rte_avp_memmap_info *memmap;
	struct rte_avp_device_info *info;
	struct rte_mem_resource *resource;
	unsigned int i;

	/* Dump resource info for debug */
	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
		resource = &pci_dev->mem_resource[i];
		if ((resource->phys_addr == 0) || (resource->len == 0))
			continue;

		PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
			    i, resource->phys_addr,
			    resource->len, resource->addr);

		switch (i) {
		case RTE_AVP_PCI_MEMMAP_BAR:
			memmap = (struct rte_avp_memmap_info *)resource->addr;
			if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
			    (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
				PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
					    memmap->magic, memmap->version);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_DEVICE_BAR:
			info = (struct rte_avp_device_info *)resource->addr;
			if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
			    avp_dev_version_check(info->version)) {
				PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
					    info->magic, info->version,
					    AVP_DPDK_DRIVER_VERSION);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MEMORY_BAR:
		case RTE_AVP_PCI_MMIO_BAR:
			if (resource->addr == NULL) {
				PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
					    i);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MSIX_BAR:
		default:
			/* no validation required */
			break;
		}
	}

	return 0;
}
static int
avp_dev_detach(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
		    eth_dev->data->port_id, avp->device_id);

	rte_spinlock_lock(&avp->lock);

	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(NOTICE, "port %u already detached\n",
			    eth_dev->data->port_id);
		ret = 0;
		goto unlock;
	}

	/* shutdown the device first so the host stops sending us packets. */
	ret = avp_dev_ctrl_shutdown(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
			    ret);
		avp->flags &= ~AVP_F_DETACHED;
		goto unlock;
	}

	avp->flags |= AVP_F_DETACHED;
	rte_wmb();

	/* wait for queues to acknowledge the presence of the detach flag */
	rte_delay_ms(1);

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}
static void
_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *rxq;
	uint16_t queue_count;
	uint16_t remainder;

	rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];

	/*
	 * Must map all AVP fifos as evenly as possible between the configured
	 * device queues.  Each device queue will service a subset of the AVP
	 * fifos.  If there is an odd number of device queues the first set of
	 * device queues will get the extra AVP fifos.
	 */
	queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
	remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
	if (rx_queue_id < remainder) {
		/* these queues must service one extra FIFO */
		rxq->queue_base = rx_queue_id * (queue_count + 1);
		rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
	} else {
		/* these queues service the regular number of FIFO */
		rxq->queue_base = ((remainder * (queue_count + 1)) +
				   ((rx_queue_id - remainder) * queue_count));
		rxq->queue_limit = rxq->queue_base + queue_count - 1;
	}

	PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
		    rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);

	rxq->queue_id = rxq->queue_base;
}
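/*
 * Worked example (illustrative values): with avp->num_rx_queues = 5 AVP
 * fifos spread over nb_rx_queues = 2 device queues, queue_count = 2 and
 * remainder = 1, so device queue 0 services fifos 0-2 (base 0, limit 2)
 * and device queue 1 services fifos 3-4 (base 3, limit 4).
 */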
static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	void *addr;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/*
	 * the transmit direction is not negotiated beyond respecting the max
	 * number of queues because the host can handle arbitrary guest tx
	 * queues (host rx queues).
	 */
	avp->num_tx_queues = eth_dev->data->nb_tx_queues;

	/*
	 * the receive direction is more restrictive.  The host requires a
	 * minimum number of guest rx queues (host tx queues) therefore
	 * negotiate a value that is at least as large as the host minimum
	 * requirement.  If the host and guest values are not identical then a
	 * mapping will be established in the receive_queue_setup function.
	 */
	avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
				     eth_dev->data->nb_rx_queues);

	PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
		    avp->num_tx_queues, avp->num_rx_queues);
}
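/*
 * Example (hypothetical values): if the host reports min_rx_queues = 4 but
 * the application configured only nb_rx_queues = 2, num_rx_queues becomes
 * 4 and _avp_set_rx_queue_mappings() later spreads the 4 AVP fifos across
 * the 2 device queues, 2 fifos each.
 */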
static int
avp_dev_attach(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_config config;
	unsigned int i;
	int ret;

	PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
		    eth_dev->data->port_id, avp->device_id);

	rte_spinlock_lock(&avp->lock);

	if (!(avp->flags & AVP_F_DETACHED)) {
		PMD_DRV_LOG(NOTICE, "port %u already attached\n",
			    eth_dev->data->port_id);
		ret = 0;
		goto unlock;
	}

	/*
	 * make sure that the detached flag is set prior to reconfiguring the
	 * queues.
	 */
	avp->flags |= AVP_F_DETACHED;
	rte_wmb();

	/*
	 * re-run the device create utility which will parse the new host info
	 * and setup the AVP device queue pointers.
	 */
	ret = avp_dev_create(AVP_DEV_TO_PCI(eth_dev), eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
			    ret);
		goto unlock;
	}

	if (avp->flags & AVP_F_CONFIGURED) {
		/*
		 * Update the receive queue mapping to handle cases where the
		 * source and destination hosts have different queue
		 * requirements.  As long as the DETACHED flag is asserted the
		 * queue table should not be referenced so it should be safe to
		 * update it.
		 */
		_avp_set_queue_counts(eth_dev);
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
			_avp_set_rx_queue_mappings(eth_dev, i);

		/*
		 * Update the host with our config details so that it knows the
		 * device is active.
		 */
		memset(&config, 0, sizeof(config));
		config.device_id = avp->device_id;
		config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
		config.driver_version = AVP_DPDK_DRIVER_VERSION;
		config.features = avp->features;
		config.num_tx_queues = avp->num_tx_queues;
		config.num_rx_queues = avp->num_rx_queues;
		config.if_up = !!(avp->flags & AVP_F_LINKUP);

		ret = avp_dev_ctrl_set_config(eth_dev, &config);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
				    ret);
			goto unlock;
		}
	}

	rte_wmb();
	avp->flags &= ~AVP_F_DETACHED;

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}
static void
avp_dev_interrupt_handler(void *data)
{
	struct rte_eth_dev *eth_dev = data;
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	uint32_t status, value;
	int ret;

	if (registers == NULL)
		rte_panic("no mapped MMIO register space\n");

	/* read the interrupt status register
	 * note: this register clears on read so all raised interrupts must be
	 *    handled or remembered for later processing
	 */
	status = AVP_READ32(
		RTE_PTR_ADD(registers,
			    RTE_AVP_INTERRUPT_STATUS_OFFSET));

	if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
		/* handle interrupt based on current status */
		value = AVP_READ32(
			RTE_PTR_ADD(registers,
				    RTE_AVP_MIGRATION_STATUS_OFFSET));
		switch (value) {
		case RTE_AVP_MIGRATION_DETACHED:
			ret = avp_dev_detach(eth_dev);
			break;
		case RTE_AVP_MIGRATION_ATTACHED:
			ret = avp_dev_attach(eth_dev);
			break;
		default:
			PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
				    value);
			ret = -EINVAL;
		}

		/* acknowledge the request by writing out our current status */
		value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
		AVP_WRITE32(value,
			    RTE_PTR_ADD(registers,
					RTE_AVP_MIGRATION_ACK_OFFSET));

		PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
	}

	if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
		PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
			    status);

	/* re-enable UIO interrupt handling */
	ret = rte_intr_enable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
			    ret);
		/* continue */
	}
}
static int
avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	int ret;

	if (registers == NULL)
		return -EINVAL;

	/* enable UIO interrupt handling */
	ret = rte_intr_enable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
			    ret);
		return ret;
	}

	/* inform the device that all interrupts are enabled */
	AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));

	return 0;
}
static int
avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	int ret;

	if (registers == NULL)
		return 0;

	/* inform the device that all interrupts are disabled */
	AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));

	/* disable UIO interrupt handling */
	ret = rte_intr_disable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
			    ret);
		return ret;
	}

	return 0;
}
static int
avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	int ret;

	/* register a callback handler with UIO for interrupt notifications */
	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 avp_dev_interrupt_handler,
					 (void *)eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
			    ret);
		return ret;
	}

	/* enable interrupt processing */
	return avp_dev_enable_interrupts(eth_dev);
}
static int
avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	uint32_t value;

	if (registers == NULL)
		return 0;

	value = AVP_READ32(RTE_PTR_ADD(registers,
				       RTE_AVP_MIGRATION_STATUS_OFFSET));
	if (value == RTE_AVP_MIGRATION_DETACHED) {
		/* migration is in progress; ack it if we have not already */
		AVP_WRITE32(value,
			    RTE_PTR_ADD(registers,
					RTE_AVP_MIGRATION_ACK_OFFSET));
		return 1;
	}
	return 0;
}
/*
 * create an AVP device using the supplied device info by first translating it
 * to guest address space(s).
 */
static int
avp_dev_create(struct rte_pci_device *pci_dev,
	       struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_mem_resource *resource;
	unsigned int i;

	resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
	if (resource->addr == NULL) {
		PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
			    RTE_AVP_PCI_DEVICE_BAR);
		return -EFAULT;
	}
	host_info = (struct rte_avp_device_info *)resource->addr;

	if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
	    avp_dev_version_check(host_info->version)) {
		PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
			    host_info->magic, host_info->version,
			    AVP_DPDK_DRIVER_VERSION);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
		    RTE_AVP_GET_RELEASE_VERSION(host_info->version),
		    RTE_AVP_GET_MAJOR_VERSION(host_info->version),
		    RTE_AVP_GET_MINOR_VERSION(host_info->version));

	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
		    host_info->min_tx_queues, host_info->max_tx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
		    host_info->min_rx_queues, host_info->max_rx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
		    host_info->features);

	if (avp->magic != AVP_ETHDEV_MAGIC) {
		/*
		 * First time initialization (i.e., not during a VM
		 * migration)
		 */
		memset(avp, 0, sizeof(*avp));
		avp->magic = AVP_ETHDEV_MAGIC;
		avp->dev_data = eth_dev->data;
		avp->port_id = eth_dev->data->port_id;
		avp->host_mbuf_size = host_info->mbuf_size;
		avp->host_features = host_info->features;
		rte_spinlock_init(&avp->lock);
		memcpy(&avp->ethaddr.addr_bytes[0],
		       host_info->ethaddr, ETHER_ADDR_LEN);
		/* adjust max values to not exceed our max */
		avp->max_tx_queues =
			RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
		avp->max_rx_queues =
			RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
	} else {
		/* Re-attaching during migration */

		/* TODO... requires validation of host values */
		if ((host_info->features & avp->features) != avp->features) {
			PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
				    avp->features, host_info->features);
			/* this should not be possible; continue for now */
		}
	}

	/* the device id is allowed to change over migrations */
	avp->device_id = host_info->device_id;

	/* translate incoming host addresses to guest address space */
	PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
		    host_info->tx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
		    host_info->alloc_phys);
	for (i = 0; i < avp->max_tx_queues; i++) {
		avp->tx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->tx_phys + (i * host_info->tx_size));

		avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
			host_info->alloc_phys + (i * host_info->alloc_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
		    host_info->rx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
		    host_info->free_phys);
	for (i = 0; i < avp->max_rx_queues; i++) {
		avp->rx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->rx_phys + (i * host_info->rx_size));
		avp->free_q[i] = avp_dev_translate_address(eth_dev,
			host_info->free_phys + (i * host_info->free_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
		    host_info->req_phys);
	PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
		    host_info->resp_phys);
	PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
		    host_info->sync_phys);
	PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
		    host_info->mbuf_phys);
	avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
	avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
	avp->sync_addr =
		avp_dev_translate_address(eth_dev, host_info->sync_phys);
	avp->mbuf_addr =
		avp_dev_translate_address(eth_dev, host_info->mbuf_phys);

	/*
	 * store the host mbuf virtual address so that we can calculate
	 * relative offsets for each mbuf as they are processed
	 */
	avp->host_mbuf_addr = host_info->mbuf_va;
	avp->host_sync_addr = host_info->sync_va;

	/*
	 * store the maximum packet length that is supported by the host.
	 */
	avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
	PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
		    host_info->max_rx_pkt_len);

	return 0;
}
/*
 * This function is based on probe() function in avp_pci.c
 * It returns 0 on success.
 */
static int
eth_avp_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = AVP_DEV_TO_PCI(eth_dev);
	eth_dev->dev_ops = &avp_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avp_recv_pkts;
	eth_dev->tx_pkt_burst = &avp_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/*
		 * no setup required on secondary processes.  All data is saved
		 * in dev_private by the primary process.  All resource should
		 * be mapped to the same virtual address so all pointers should
		 * be valid.
		 */
		if (eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Check current migration status */
	if (avp_dev_migration_pending(eth_dev)) {
		PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
		return -EBUSY;
	}

	/* Check BAR resources */
	ret = avp_dev_check_regions(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
			    ret);
		return ret;
	}

	/* Enable interrupts */
	ret = avp_dev_setup_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
		return ret;
	}

	/* Handle each subtype */
	ret = avp_dev_create(pci_dev, eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
		return ret;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
			    ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Get a mac from device config */
	ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);

	return 0;
}
static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (eth_dev->data == NULL)
		return -EINVAL;

	ret = avp_dev_disable_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
		return ret;
	}

	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	return 0;
}
static int
eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	eth_dev = rte_eth_dev_pci_allocate(pci_dev,
					   sizeof(struct avp_adapter));
	if (eth_dev == NULL)
		return -ENOMEM;

	ret = eth_avp_dev_init(eth_dev);
	if (ret)
		rte_eth_dev_pci_release(eth_dev);

	return ret;
}

static int
eth_avp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      eth_avp_dev_uninit);
}
static struct rte_pci_driver rte_avp_pmd = {
	.id_table = pci_id_avp_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_avp_pci_probe,
	.remove = eth_avp_pci_remove,
};
static int
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
			 struct avp_dev *avp)
{
	unsigned int max_rx_pkt_len;

	max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the guest MTU is greater than either the host or guest
		 * buffers then chained mbufs have to be enabled in the TX
		 * direction.  It is assumed that the application will not need
		 * to send packets larger than their max_rx_pkt_len (MRU).
		 */
		return 1;
	}

	if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the host MRU is greater than its own mbuf size or the
		 * guest mbuf size then chained mbufs have to be enabled in the
		 * RX direction.
		 */
		return 1;
	}

	return 0;
}
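/*
 * Example (hypothetical sizes): with guest_mbuf_size = host_mbuf_size =
 * 2048 and a configured max_rx_pkt_len of 9000, the first test fires and
 * the scattered (chained mbuf) receive/transmit handlers are installed;
 * with max_rx_pkt_len = 1500 and a host MRU that also fits in 2048 bytes,
 * both tests fail and the single-buffer fast paths remain in place.
 */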
static int
avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t rx_queue_id,
		       uint16_t nb_rx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *pool)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct avp_queue *rxq;

	if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
			    rx_queue_id, eth_dev->data->nb_rx_queues);
		return -EINVAL;
	}

	/* Save mbuf pool pointer */
	avp->pool = pool;

	/* Save the local mbuf size */
	mbp_priv = rte_mempool_get_priv(pool);
	avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
	avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;

	if (avp_dev_enable_scattered(eth_dev, avp)) {
		if (!eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->data->scattered_rx = 1;
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
	}

	PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
		    avp->max_rx_pkt_len,
		    eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
		    avp->host_mbuf_size,
		    avp->guest_mbuf_size);

	/* allocate a queue object */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
		return -ENOMEM;
	}

	/* save back pointers to AVP and Ethernet devices */
	rxq->avp = avp;
	rxq->dev_data = eth_dev->data;
	eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;

	/* setup the queue receive mapping for the current queue. */
	_avp_set_rx_queue_mappings(eth_dev, rx_queue_id);

	PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);

	return 0;
}
static int
avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t tx_queue_id,
		       uint16_t nb_tx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *txq;

	if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
		PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
			    tx_queue_id, eth_dev->data->nb_tx_queues);
		return -EINVAL;
	}

	/* allocate a queue object */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
		return -ENOMEM;
	}

	/* only the configured set of transmit queues are used */
	txq->queue_id = tx_queue_id;
	txq->queue_base = tx_queue_id;
	txq->queue_limit = tx_queue_id;

	/* save back pointers to AVP and Ethernet devices */
	txq->avp = avp;
	txq->dev_data = eth_dev->data;
	eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;

	PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);

	return 0;
}
static inline int
_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
{
	uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
	uint16_t *_b = (uint16_t *)&b->addr_bytes[0];

	return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
}
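/*
 * Note: the expression above views the 6-byte MAC address as three 16-bit
 * words and ORs together their XOR differences, so the result is zero if
 * and only if all six bytes match; this avoids a byte-wise loop and any
 * early-exit branches on the receive fast path.
 */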
static inline int
_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
{
	struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
		/* allow all packets destined to our address */
		return 0;
	}

	if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
		/* allow all broadcast packets */
		return 0;
	}

	if (likely(is_multicast_ether_addr(&eth->d_addr))) {
		/* allow all multicast packets */
		return 0;
	}

	if (avp->flags & AVP_F_PROMISC) {
		/* allow all packets when in promiscuous mode */
		return 0;
	}

	return -1;
}
#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
static inline void
__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
{
	struct rte_avp_desc *first_buf;
	struct rte_avp_desc *pkt_buf;
	unsigned int pkt_len;
	unsigned int nb_segs;
	void *pkt_data;
	unsigned int i;

	first_buf = avp_dev_translate_buffer(avp, buf);

	i = 0;
	pkt_len = 0;
	nb_segs = first_buf->nb_segs;
	do {
		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		if (pkt_buf == NULL)
			rte_panic("bad buffer: segment %u has an invalid address %p\n",
				  i, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		if (pkt_data == NULL)
			rte_panic("bad buffer: segment %u has a NULL data pointer\n",
				  i);
		if (pkt_buf->data_len == 0)
			rte_panic("bad buffer: segment %u has 0 data length\n",
				  i);
		pkt_len += pkt_buf->data_len;
		nb_segs--;
		i++;

	} while (nb_segs && (buf = pkt_buf->next) != NULL);

	if (nb_segs != 0)
		rte_panic("bad buffer: expected %u segments found %u\n",
			  first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
	if (pkt_len != first_buf->pkt_len)
		rte_panic("bad buffer: expected length %u found %u\n",
			  first_buf->pkt_len, pkt_len);
}

#define avp_dev_buffer_sanity_check(a, b) \
	__avp_dev_buffer_sanity_check((a), (b))

#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */

#define avp_dev_buffer_sanity_check(a, b) do {} while (0)

#endif
/*
 * Copy a host buffer chain to a set of mbufs.  This function assumes that
 * there are exactly the required number of mbufs to copy all source bytes.
 */
static inline struct rte_mbuf *
avp_dev_copy_from_buffers(struct avp_dev *avp,
			  struct rte_avp_desc *buf,
			  struct rte_mbuf **mbufs,
			  unsigned int count)
{
	struct rte_mbuf *m_previous = NULL;
	struct rte_avp_desc *pkt_buf;
	unsigned int total_length = 0;
	unsigned int copy_length;
	unsigned int src_offset;
	struct rte_mbuf *m;
	uint16_t ol_flags;
	uint16_t vlan_tci;
	void *pkt_data;
	unsigned int i;

	avp_dev_buffer_sanity_check(avp, buf);

	/* setup the first source buffer */
	pkt_buf = avp_dev_translate_buffer(avp, buf);
	pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
	total_length = pkt_buf->pkt_len;
	src_offset = 0;

	if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
		ol_flags = PKT_RX_VLAN_PKT;
		vlan_tci = pkt_buf->vlan_tci;
	} else {
		ol_flags = 0;
		vlan_tci = 0;
	}

	for (i = 0; (i < count) && (buf != NULL); i++) {
		/* fill each destination buffer */
		m = mbufs[i];

		if (m_previous != NULL)
			m_previous->next = m;

		m_previous = m;

		do {
			/*
			 * Copy as many source buffers as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->guest_mbuf_size -
					       rte_pktmbuf_data_len(m)),
					      (pkt_buf->data_len -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       rte_pktmbuf_data_len(m)),
				   RTE_PTR_ADD(pkt_data, src_offset),
				   copy_length);
			rte_pktmbuf_data_len(m) += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == pkt_buf->data_len)) {
				/* need a new source buffer */
				buf = pkt_buf->next;
				if (buf != NULL) {
					pkt_buf = avp_dev_translate_buffer(
						avp, buf);
					pkt_data = avp_dev_translate_buffer(
						avp, pkt_buf->data);
					src_offset = 0;
				}
			}

			if (unlikely(rte_pktmbuf_data_len(m) ==
				     avp->guest_mbuf_size)) {
				/* need a new destination mbuf */
				break;
			}

		} while (buf != NULL);
	}

	m = mbufs[0];
	m->ol_flags = ol_flags;
	m->nb_segs = count;
	rte_pktmbuf_pkt_len(m) = total_length;
	m->vlan_tci = vlan_tci;

	__rte_mbuf_sanity_check(m, 1);

	return m;
}
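/*
 * Sizing note (illustrative): callers are expected to compute the exact
 * mbuf count with a ceiling division, e.g. for a 5000 byte host buffer
 * chain and a 2048 byte guest mbuf data area:
 *	required = (5000 + 2048 - 1) / 2048 = 3 mbufs
 * which is precisely the assumption stated above.
 */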
static uint16_t
avp_recv_scattered_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	struct rte_avp_desc *buf;
	unsigned int count, avail, n;
	unsigned int guest_mbuf_size;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int buf_len;
	unsigned int port_id;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		return 0;
	}

	guest_mbuf_size = avp->guest_mbuf_size;
	port_id = avp->port_id;
	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i + 1 < n) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}
		buf = avp_bufs[i];

		/* Peek into the first buffer to determine the total length */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		buf_len = pkt_buf->pkt_len;

		/* Allocate enough mbufs to receive the entire packet */
		required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
		if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			break;
		}

		/* Copy the data from the buffers to our mbufs */
		m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);

		/* finalize mbuf */
		m->port = port_id;

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += buf_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}
static uint16_t
avp_recv_pkts(void *rx_queue,
	      struct rte_mbuf **rx_pkts,
	      uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	unsigned int count, avail, n;
	unsigned int pkt_len;
	struct rte_mbuf *m;
	char *pkt_data;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		return 0;
	}

	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i < n - 1) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust host pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = pkt_buf->pkt_len;

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_buf->nb_segs > 1))) {
			/*
			 * application should be using the scattered receive
			 * function for larger packets
			 */
			rxq->errors++;
			continue;
		}

		/* process each packet to be transmitted */
		m = rte_pktmbuf_alloc(avp->pool);
		if (unlikely(m == NULL)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			continue;
		}

		/* copy data out of the host buffer to our buffer */
		m->data_off = RTE_PKTMBUF_HEADROOM;
		rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);

		/* initialize the local mbuf */
		rte_pktmbuf_data_len(m) = pkt_len;
		rte_pktmbuf_pkt_len(m) = pkt_len;
		m->port = avp->port_id;

		if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
			m->ol_flags = PKT_RX_VLAN_PKT;
			m->vlan_tci = pkt_buf->vlan_tci;
		}

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += pkt_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}
/*
 * Copy a chained mbuf to a set of host buffers.  This function assumes that
 * there are sufficient destination buffers to contain the entire source
 * packet.
 */
static inline uint16_t
avp_dev_copy_to_buffers(struct avp_dev *avp,
			struct rte_mbuf *mbuf,
			struct rte_avp_desc **buffers,
			unsigned int count)
{
	struct rte_avp_desc *previous_buf = NULL;
	struct rte_avp_desc *first_buf = NULL;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_desc *buf;
	size_t total_length;
	struct rte_mbuf *m;
	size_t copy_length;
	size_t src_offset;
	char *pkt_data;
	unsigned int i;

	__rte_mbuf_sanity_check(mbuf, 1);

	m = mbuf;
	src_offset = 0;
	total_length = rte_pktmbuf_pkt_len(m);
	for (i = 0; (i < count) && (m != NULL); i++) {
		/* fill each destination buffer */
		buf = buffers[i];

		if (i < count - 1) {
			/* prefetch next entry while processing this one */
			pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);

		/* setup the buffer chain */
		if (previous_buf != NULL)
			previous_buf->next = buf;
		else
			first_buf = pkt_buf;

		previous_buf = pkt_buf;

		do {
			/*
			 * copy as many source mbuf segments as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->host_mbuf_size -
					       pkt_buf->data_len),
					      (rte_pktmbuf_data_len(m) -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
				   RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       src_offset),
				   copy_length);
			pkt_buf->data_len += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == rte_pktmbuf_data_len(m))) {
				/* need a new source buffer */
				m = m->next;
				src_offset = 0;
			}

			if (unlikely(pkt_buf->data_len ==
				     avp->host_mbuf_size)) {
				/* need a new destination buffer */
				break;
			}

		} while (m != NULL);
	}

	first_buf->nb_segs = count;
	first_buf->pkt_len = total_length;

	if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
		first_buf->vlan_tci = mbuf->vlan_tci;
	}

	avp_dev_buffer_sanity_check(avp, buffers[0]);

	return total_length;
}
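/*
 * Sizing note (illustrative): the transmit paths perform the mirror
 * calculation, e.g. a 3000 byte mbuf chain with host_mbuf_size = 2048
 * needs (3000 + 2048 - 1) / 2048 = 2 host buffers; the caller must pass
 * exactly that count so that first_buf->nb_segs is set correctly.
 */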
static uint16_t
avp_xmit_scattered_pkts(void *tx_queue,
			struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
				       RTE_AVP_MAX_MBUF_SEGMENTS)];
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	unsigned int orig_nb_pkts;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int segments;
	unsigned int tx_bytes;
	unsigned int i;

	orig_nb_pkts = nb_pkts;
	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		/* TODO ... buffer for X packets then drop? */
		txq->errors += nb_pkts;
		return 0;
	}

	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);
	if (unlikely(avail > (AVP_MAX_TX_BURST *
			      RTE_AVP_MAX_MBUF_SEGMENTS)))
		avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	nb_pkts = RTE_MIN(count, nb_pkts);

	/* determine how many packets will fit in the available buffers */
	count = 0;
	segments = 0;
	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		if (likely(i < (unsigned int)nb_pkts - 1)) {
			/* prefetch next entry while processing this one */
			rte_prefetch0(tx_pkts[i + 1]);
		}
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		if (unlikely((required == 0) ||
			     (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
			break;
		else if (unlikely(required + segments > avail))
			break;
		segments += required;
		count++;
	}
	nb_pkts = count;

	if (unlikely(nb_pkts == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += orig_nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   nb_pkts, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
	if (unlikely(n != segments)) {
		PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
			   "n=%u, segments=%u, orig=%u\n",
			   n, segments, orig_nb_pkts);
		txq->errors += orig_nb_pkts;
		return 0;
	}

	tx_bytes = 0;
	count = 0;
	for (i = 0; i < nb_pkts; i++) {
		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* determine how many buffers are required for this packet */
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		tx_bytes += avp_dev_copy_to_buffers(avp, m,
						    &avp_bufs[count], required);
		tx_bufs[i] = avp_bufs[count];
		count += required;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += nb_pkts;
	txq->bytes += tx_bytes;

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
	for (i = 0; i < nb_pkts; i++)
		avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
#endif

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
	if (unlikely(n != orig_nb_pkts))
		txq->errors += (orig_nb_pkts - n);

	return n;
}
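/*
 * Example of the admission loop above (hypothetical numbers): with
 * avail = 4 free host buffers and three 3000 byte packets each requiring
 * 2 buffers, packets 0 and 1 are admitted (segments reaches 4) and the
 * third packet stops the loop because 2 + 4 > 4; nb_pkts is trimmed to 2
 * and the shortfall versus the original burst is counted in txq->errors
 * after the fifo put.
 */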
static uint16_t
avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	struct rte_mbuf *m;
	unsigned int pkt_len;
	unsigned int tx_bytes;
	char *pkt_data;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		/* TODO ... buffer for X packets then drop?! */
		txq->errors++;
		return 0;
	}

	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);

	if (unlikely(count == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   count, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
	if (unlikely(n != count)) {
		txq->errors++;
		return 0;
	}

	tx_bytes = 0;
	for (i = 0; i < count; i++) {
		/* prefetch next entry while processing the current one */
		if (i < count - 1) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = rte_pktmbuf_pkt_len(m);

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_len > avp->host_mbuf_size))) {
			/*
			 * application should be using the scattered transmit
			 * function; send it truncated to avoid the performance
			 * hit of having to manage returning the already
			 * allocated buffer to the free list.  This should not
			 * happen since the application should have set the
			 * max_rx_pkt_len based on its MTU and it should be
			 * policing its own packet sizes.
			 */
			txq->errors++;
			pkt_len = RTE_MIN(avp->guest_mbuf_size,
					  avp->host_mbuf_size);
		}

		/* copy data out of our mbuf and into the AVP buffer */
		rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
		pkt_buf->pkt_len = pkt_len;
		pkt_buf->data_len = pkt_len;
		pkt_buf->nb_segs = 1;
		pkt_buf->next = NULL;

		if (m->ol_flags & PKT_TX_VLAN_PKT) {
			pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
			pkt_buf->vlan_tci = m->vlan_tci;
		}

		tx_bytes += pkt_len;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += count;
	txq->bytes += tx_bytes;

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);

	return n;
}
static void
avp_dev_rx_queue_release(void *rx_queue)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct avp_dev *avp = rxq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		if (data->rx_queues[i] == rxq)
			data->rx_queues[i] = NULL;
	}
}

static void
avp_dev_tx_queue_release(void *tx_queue)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct avp_dev *avp = txq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_tx_queues; i++) {
		if (data->tx_queues[i] == txq)
			data->tx_queues[i] = NULL;
	}
}
static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_avp_device_config config;
	int mask = 0;
	void *addr;
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		ret = -ENOTSUP;
		goto unlock;
	}

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/* Setup required number of queues */
	_avp_set_queue_counts(eth_dev);

	mask = (ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);
	avp_vlan_offload_set(eth_dev, mask);

	/* update device config */
	memset(&config, 0, sizeof(config));
	config.device_id = host_info->device_id;
	config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
	config.driver_version = AVP_DPDK_DRIVER_VERSION;
	config.features = avp->features;
	config.num_tx_queues = avp->num_tx_queues;
	config.num_rx_queues = avp->num_rx_queues;

	ret = avp_dev_ctrl_set_config(eth_dev, &config);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	avp->flags |= AVP_F_CONFIGURED;
	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}
static int
avp_dev_start(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		ret = -ENOTSUP;
		goto unlock;
	}

	/* disable features that we do not support */
	eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0;
	eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
	eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0;
	eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0;

	/* update link state */
	ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	/* remember current link state */
	avp->flags |= AVP_F_LINKUP;

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}
static void
avp_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		goto unlock;
	}

	/* remember current link state */
	avp->flags &= ~AVP_F_LINKUP;

	/* update link state */
	ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
			    ret);
	}

unlock:
	rte_spinlock_unlock(&avp->lock);
}
static void
avp_dev_close(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		goto unlock;
	}

	/* remember current link state */
	avp->flags &= ~AVP_F_LINKUP;
	avp->flags &= ~AVP_F_CONFIGURED;

	ret = avp_dev_disable_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
		/* continue */
	}

	/* update device state */
	ret = avp_dev_ctrl_shutdown(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
			    ret);
		/* continue */
	}

unlock:
	rte_spinlock_unlock(&avp->lock);
}
static int
avp_dev_link_update(struct rte_eth_dev *eth_dev,
		    __rte_unused int wait_to_complete)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	link->link_speed = ETH_SPEED_NUM_10G;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_status = !!(avp->flags & AVP_F_LINKUP);

	return -1;
}
static void
avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	rte_spinlock_lock(&avp->lock);
	if ((avp->flags & AVP_F_PROMISC) == 0) {
		avp->flags |= AVP_F_PROMISC;
		PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
			    eth_dev->data->port_id);
	}
	rte_spinlock_unlock(&avp->lock);
}

static void
avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	rte_spinlock_lock(&avp->lock);
	if ((avp->flags & AVP_F_PROMISC) != 0) {
		avp->flags &= ~AVP_F_PROMISC;
		PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
			    eth_dev->data->port_id);
	}
	rte_spinlock_unlock(&avp->lock);
}
static void
avp_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *dev_info)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	dev_info->driver_name = "rte_avp_pmd";
	dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	dev_info->max_rx_queues = avp->max_rx_queues;
	dev_info->max_tx_queues = avp->max_tx_queues;
	dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	}
}
static void
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
			if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
			else
				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
		} else {
			PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
		}
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
	}
}
static void
avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		struct avp_queue *rxq = avp->dev_data->rx_queues[i];

		if (rxq) {
			stats->ipackets += rxq->packets;
			stats->ibytes += rxq->bytes;
			stats->ierrors += rxq->errors;

			if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
				stats->q_ipackets[i] += rxq->packets;
				stats->q_ibytes[i] += rxq->bytes;
				stats->q_errors[i] += rxq->errors;
			}
		}
	}

	for (i = 0; i < avp->num_tx_queues; i++) {
		struct avp_queue *txq = avp->dev_data->tx_queues[i];

		if (txq) {
			stats->opackets += txq->packets;
			stats->obytes += txq->bytes;
			stats->oerrors += txq->errors;

			if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
				stats->q_opackets[i] += txq->packets;
				stats->q_obytes[i] += txq->bytes;
				stats->q_errors[i] += txq->errors;
			}
		}
	}
}
static void
avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		struct avp_queue *rxq = avp->dev_data->rx_queues[i];

		if (rxq) {
			rxq->bytes = 0;
			rxq->packets = 0;
			rxq->errors = 0;
		}
	}

	for (i = 0; i < avp->num_tx_queues; i++) {
		struct avp_queue *txq = avp->dev_data->tx_queues[i];

		if (txq) {
			txq->bytes = 0;
			txq->packets = 0;
			txq->errors = 0;
		}
	}
}
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);