/*
 * Copyright (c) 2013-2017, Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1) Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2) Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3) Neither the name of Wind River Systems nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_byteorder.h>
#include <rte_eal.h>
#include <rte_memory.h>
#include <rte_io.h>

#include "rte_avp_common.h"
#include "rte_avp_fifo.h"

#include "avp_logs.h"

static int avp_dev_configure(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev,
			       __rte_unused int wait_to_complete);
static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t rx_queue_id,
				  uint16_t nb_rx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *pool);

static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t tx_queue_id,
				  uint16_t nb_tx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_txconf *tx_conf);

static uint16_t avp_recv_scattered_pkts(void *rx_queue,
					struct rte_mbuf **rx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_recv_pkts(void *rx_queue,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);

static void avp_dev_rx_queue_release(void *rxq);
static void avp_dev_tx_queue_release(void *txq);

#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)

#define AVP_MAX_RX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN

/*
 * Defines the number of microseconds to wait before checking the response
 * queue for completion.
 */
#define AVP_REQUEST_DELAY_USECS (5000)

/*
 * Defines the number of times to check the response queue for completion
 * before declaring a timeout.
 */
#define AVP_MAX_REQUEST_RETRY (100)

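/*
 * Taken together, the two values above bound the worst-case wait in
 * avp_dev_process_request() at AVP_MAX_REQUEST_RETRY *
 * AVP_REQUEST_DELAY_USECS = 100 * 5000us = 500ms before a request is
 * declared timed out.
 */
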
/* Defines the current PCI driver version number */
#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_avp_map[] = {
	{ .vendor_id = RTE_AVP_PCI_VENDOR_ID,
	  .device_id = RTE_AVP_PCI_DEVICE_ID,
	  .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
	  .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
	  .class_id = RTE_CLASS_ANY_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};

/*
 * dev_ops for avp, bare necessities for basic operation
 */
static const struct eth_dev_ops avp_eth_dev_ops = {
	.dev_configure = avp_dev_configure,
	.dev_infos_get = avp_dev_info_get,
	.vlan_offload_set = avp_vlan_offload_set,
	.link_update = avp_dev_link_update,
	.rx_queue_setup = avp_dev_rx_queue_setup,
	.rx_queue_release = avp_dev_rx_queue_release,
	.tx_queue_setup = avp_dev_tx_queue_setup,
	.tx_queue_release = avp_dev_tx_queue_release,
};

/**@{ AVP device flags */
#define AVP_F_PROMISC (1 << 1)
#define AVP_F_CONFIGURED (1 << 2)
#define AVP_F_LINKUP (1 << 3)
/**@} */

/* Ethernet device validation marker */
#define AVP_ETHDEV_MAGIC 0x92972862

/*
 * Defines the AVP device attributes which are attached to an RTE ethernet
 * device
 */
struct avp_dev {
	uint32_t magic; /**< Memory validation marker */
	uint64_t device_id; /**< Unique system identifier */
	struct ether_addr ethaddr; /**< Host specified MAC address */
	struct rte_eth_dev_data *dev_data;
	/**< Back pointer to ethernet device data */
	volatile uint32_t flags; /**< Device operational flags */
	uint8_t port_id; /**< Ethernet port identifier */
	struct rte_mempool *pool; /**< pkt mbuf mempool */
	unsigned int guest_mbuf_size; /**< local pool mbuf size */
	unsigned int host_mbuf_size; /**< host mbuf size */
	unsigned int max_rx_pkt_len; /**< maximum receive unit */
	uint32_t host_features; /**< Supported feature bitmap */
	uint32_t features; /**< Enabled feature bitmap */
	unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
	unsigned int max_tx_queues; /**< Maximum number of transmit queues */
	unsigned int num_rx_queues; /**< Negotiated number of receive queues */
	unsigned int max_rx_queues; /**< Maximum number of receive queues */

	struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
	struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
	struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
	/**< Allocated mbufs queue */
	struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
	/**< To be freed mbufs queue */

	/* For request & response */
	struct rte_avp_fifo *req_q; /**< Request queue */
	struct rte_avp_fifo *resp_q; /**< Response queue */
	void *host_sync_addr; /**< (host) Req/Resp Mem address */
	void *sync_addr; /**< Req/Resp Mem address */
	void *host_mbuf_addr; /**< (host) MBUF pool start address */
	void *mbuf_addr; /**< MBUF pool start address */
} __rte_cache_aligned;

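/*
 * Usage note for the fifo sets above: the receive path below consumes
 * descriptors from rx_q[] and recycles them to the host through free_q[];
 * tx_q[] and alloc_q[] form the mirror-image pair for the transmit path
 * (not shown in this extract).
 */
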
/* RTE ethernet private data */
struct avp_adapter {
	struct avp_dev avp;
} __rte_cache_aligned;

/* 32-bit MMIO register write */
#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))

/* 32-bit MMIO register read */
#define AVP_READ32(_addr) rte_read32_relaxed((_addr))

/* Macro to cast the ethernet device private data to an AVP object */
#define AVP_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct avp_adapter *)adapter)->avp)

/*
 * Defines the structure of an AVP device queue for the purpose of handling the
 * receive and transmit burst callback functions
 */
struct avp_queue {
	struct rte_eth_dev_data *dev_data;
	/**< Backpointer to ethernet device data */
	struct avp_dev *avp; /**< Backpointer to AVP device */
	uint16_t queue_id;
	/**< Queue identifier used for indexing current queue */
	uint16_t queue_base;
	/**< Base queue identifier for queue servicing */
	uint16_t queue_limit;
	/**< Maximum queue identifier for queue servicing */
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

/* send a request and wait for a response
 *
 * @warning must be called while holding the avp->lock spinlock.
 */
static int
avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
{
	unsigned int retry = AVP_MAX_REQUEST_RETRY;
	void *resp_addr = NULL;
	unsigned int count;
	int ret;

	PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);

	request->result = -ENOTSUP;

	/* Discard any stale responses before starting a new request */
	while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
		PMD_DRV_LOG(DEBUG, "Discarding stale response\n");

	rte_memcpy(avp->sync_addr, request, sizeof(*request));
	count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
	if (count < 1) {
		PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
			    request->req_id);
		ret = -EBUSY;
		goto done;
	}

	while (retry--) {
		/* wait for a response */
		usleep(AVP_REQUEST_DELAY_USECS);

		count = avp_fifo_count(avp->resp_q);
		if (count >= 1) {
			/* response received */
			break;
		}

		if ((count < 1) && (retry == 0)) {
			PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
				    request->req_id);
			ret = -ETIME;
			goto done;
		}
	}

	/* retrieve the response */
	count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
	if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
		PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
			    count, resp_addr, avp->host_sync_addr);
		ret = -ENODATA;
		goto done;
	}

	/* copy to user buffer */
	rte_memcpy(request, avp->sync_addr, sizeof(*request));
	ret = 0;

	PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
		    request->result, request->req_id);

done:
	return ret;
}

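/*
 * The exchange above works on a single shared "sync" area: the request is
 * copied into avp->sync_addr (the guest mapping), the host's pointer to the
 * same memory (avp->host_sync_addr) is enqueued on req_q as a token, and the
 * host signals completion by pushing that same pointer back on resp_q after
 * overwriting the sync area with the response.
 */
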
static int
avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
			struct rte_avp_device_config *config)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a configure request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_DEVICE;
	memcpy(&request.config, config, sizeof(request.config));

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

/* translate from host mbuf virtual address to guest virtual address */
static inline void *
avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
{
	return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
				       (uintptr_t)avp->host_mbuf_addr),
			   (uintptr_t)avp->mbuf_addr);
}

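/*
 * Example of the arithmetic above, with illustrative addresses only: if the
 * host mbuf pool lives at host VA 0x7f0000000000 and the guest maps the same
 * memory at 0x400000000000, a host pointer 0x7f0000001000 translates to
 * 0x400000001000, i.e. guest_va = host_va - host_mbuf_addr + mbuf_addr.
 */
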
/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
			  phys_addr_t host_phys_addr)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct rte_mem_resource *resource;
	struct rte_avp_memmap_info *info;
	struct rte_avp_memmap *map;
	off_t offset;
	void *addr;
	unsigned int i;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
	resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
	info = (struct rte_avp_memmap_info *)resource->addr;

	offset = 0;
	for (i = 0; i < info->nb_maps; i++) {
		/* search all segments looking for a matching address */
		map = &info->maps[i];

		if ((host_phys_addr >= map->phys_addr) &&
			(host_phys_addr < (map->phys_addr + map->length))) {
			/* address is within this segment */
			offset += (host_phys_addr - map->phys_addr);
			addr = RTE_PTR_ADD(addr, offset);

			PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
				    host_phys_addr, addr);
			break;
		}
		offset += map->length;
	}

	return addr;
}

/* verify that the incoming device version is compatible with our version */
static int
avp_dev_version_check(uint32_t version)
{
	uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
	uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);

	if (device <= driver) {
		/* the host driver version is less than or equal to ours */
		return 0;
	}

	return 1;
}

/* verify that memory regions have expected version and validation markers */
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct rte_avp_memmap_info *memmap;
	struct rte_avp_device_info *info;
	struct rte_mem_resource *resource;
	unsigned int i;

	/* Dump resource info for debug */
	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
		resource = &pci_dev->mem_resource[i];
		if ((resource->phys_addr == 0) || (resource->len == 0))
			continue;

		PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
			    i, resource->phys_addr,
			    resource->len, resource->addr);

		switch (i) {
		case RTE_AVP_PCI_MEMMAP_BAR:
			memmap = (struct rte_avp_memmap_info *)resource->addr;
			if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
			    (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
				PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
					    memmap->magic, memmap->version);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_DEVICE_BAR:
			info = (struct rte_avp_device_info *)resource->addr;
			if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
			    avp_dev_version_check(info->version)) {
				PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
					    info->magic, info->version,
					    AVP_DPDK_DRIVER_VERSION);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MEMORY_BAR:
		case RTE_AVP_PCI_MMIO_BAR:
			if (resource->addr == NULL) {
				PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
					    i);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MSIX_BAR:
		default:
			/* no validation required */
			break;
		}
	}

	return 0;
}

static void
_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *rxq;
	uint16_t queue_count;
	uint16_t remainder;

	rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];

	/*
	 * Must map all AVP fifos as evenly as possible between the configured
	 * device queues. Each device queue will service a subset of the AVP
	 * fifos. If the fifo count does not divide evenly among the device
	 * queues then the first "remainder" device queues will each service
	 * one extra AVP fifo.
	 */
	queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
	remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
	if (rx_queue_id < remainder) {
		/* these queues must service one extra FIFO */
		rxq->queue_base = rx_queue_id * (queue_count + 1);
		rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
	} else {
		/* these queues service the regular number of FIFOs */
		rxq->queue_base = ((remainder * (queue_count + 1)) +
				   ((rx_queue_id - remainder) * queue_count));
		rxq->queue_limit = rxq->queue_base + queue_count - 1;
	}

	PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
		    rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);

	rxq->queue_id = rxq->queue_base;
}

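/*
 * Worked example of the mapping above, with illustrative counts: for 5 AVP
 * fifos shared by 2 device queues, queue_count = 5 / 2 = 2 and remainder = 1,
 * so device queue 0 services fifos 0-2 (one extra) and device queue 1
 * services fifos 3-4.
 */
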
static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	void *addr;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/*
	 * the transmit direction is not negotiated beyond respecting the max
	 * number of queues because the host can handle arbitrary guest tx
	 * queues (host rx queues).
	 */
	avp->num_tx_queues = eth_dev->data->nb_tx_queues;

	/*
	 * the receive direction is more restrictive. The host requires a
	 * minimum number of guest rx queues (host tx queues) therefore
	 * negotiate a value that is at least as large as the host minimum
	 * requirement. If the host and guest values are not identical then a
	 * mapping will be established in the receive_queue_setup function.
	 */
	avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
				     eth_dev->data->nb_rx_queues);

	PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
		    avp->num_tx_queues, avp->num_rx_queues);
}

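/*
 * Example of the negotiation above, with illustrative values: if the host
 * advertises min_rx_queues = 4 but the application configured only 2 rx
 * queues, num_rx_queues becomes 4 and _avp_set_rx_queue_mappings() later
 * spreads those 4 fifos across the 2 device queues.
 */
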
/*
 * create an AVP device using the supplied device info by first translating it
 * to guest address space(s).
 */
static int
avp_dev_create(struct rte_pci_device *pci_dev,
	       struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_mem_resource *resource;
	unsigned int i;

	resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
	if (resource->addr == NULL) {
		PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
			    RTE_AVP_PCI_DEVICE_BAR);
		return -EFAULT;
	}
	host_info = (struct rte_avp_device_info *)resource->addr;

	if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
	    avp_dev_version_check(host_info->version)) {
		PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
			    host_info->magic, host_info->version,
			    AVP_DPDK_DRIVER_VERSION);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
		    RTE_AVP_GET_RELEASE_VERSION(host_info->version),
		    RTE_AVP_GET_MAJOR_VERSION(host_info->version),
		    RTE_AVP_GET_MINOR_VERSION(host_info->version));

	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
		    host_info->min_tx_queues, host_info->max_tx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
		    host_info->min_rx_queues, host_info->max_rx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
		    host_info->features);

	if (avp->magic != AVP_ETHDEV_MAGIC) {
		/*
		 * First time initialization (i.e., not during a VM
		 * migration)
		 */
		memset(avp, 0, sizeof(*avp));
		avp->magic = AVP_ETHDEV_MAGIC;
		avp->dev_data = eth_dev->data;
		avp->port_id = eth_dev->data->port_id;
		avp->host_mbuf_size = host_info->mbuf_size;
		avp->host_features = host_info->features;
		memcpy(&avp->ethaddr.addr_bytes[0],
		       host_info->ethaddr, ETHER_ADDR_LEN);
		/* adjust max values to not exceed our max */
		avp->max_tx_queues =
			RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
		avp->max_rx_queues =
			RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
	} else {
		/* Re-attaching during migration */

		/* TODO... requires validation of host values */
		if ((host_info->features & avp->features) != avp->features) {
			PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
				    avp->features, host_info->features);
			/* this should not be possible; continue for now */
		}
	}

	/* the device id is allowed to change over migrations */
	avp->device_id = host_info->device_id;

	/* translate incoming host addresses to guest address space */
	PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
		    host_info->tx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
		    host_info->alloc_phys);
	for (i = 0; i < avp->max_tx_queues; i++) {
		avp->tx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->tx_phys + (i * host_info->tx_size));

		avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
			host_info->alloc_phys + (i * host_info->alloc_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
		    host_info->rx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
		    host_info->free_phys);
	for (i = 0; i < avp->max_rx_queues; i++) {
		avp->rx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->rx_phys + (i * host_info->rx_size));
		avp->free_q[i] = avp_dev_translate_address(eth_dev,
			host_info->free_phys + (i * host_info->free_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
		    host_info->req_phys);
	PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
		    host_info->resp_phys);
	PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
		    host_info->sync_phys);
	PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
		    host_info->mbuf_phys);
	avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
	avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
	avp->sync_addr =
		avp_dev_translate_address(eth_dev, host_info->sync_phys);
	avp->mbuf_addr =
		avp_dev_translate_address(eth_dev, host_info->mbuf_phys);

	/*
	 * store the host mbuf virtual address so that we can calculate
	 * relative offsets for each mbuf as they are processed
	 */
	avp->host_mbuf_addr = host_info->mbuf_va;
	avp->host_sync_addr = host_info->sync_va;

	/*
	 * store the maximum packet length that is supported by the host.
	 */
	avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
	PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
		    host_info->max_rx_pkt_len);

	return 0;
}

/*
 * This function is based on probe() function in avp_pci.c
 * It returns 0 on success.
 */
static int
eth_avp_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = AVP_DEV_TO_PCI(eth_dev);
	eth_dev->dev_ops = &avp_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avp_recv_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/*
		 * no setup required on secondary processes. All data is saved
		 * in dev_private by the primary process. All resources should
		 * be mapped to the same virtual address so all pointers should
		 * be valid.
		 */
		if (eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
		}
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Check BAR resources */
	ret = avp_dev_check_regions(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
			    ret);
		return ret;
	}

	/* Handle each subtype */
	ret = avp_dev_create(pci_dev, eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
		return ret;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
			    ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Get a mac from device config */
	ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);

	return 0;
}

static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (eth_dev->data == NULL)
		return 0;

	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	return 0;
}

static struct eth_driver rte_avp_pmd = {
	.pci_drv = {
		.id_table = pci_id_avp_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_avp_dev_init,
	.eth_dev_uninit = eth_avp_dev_uninit,
	.dev_private_size = sizeof(struct avp_adapter),
};

static int
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
			 struct avp_dev *avp)
{
	unsigned int max_rx_pkt_len;

	max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the guest MTU is greater than either the host or guest
		 * buffers then chained mbufs have to be enabled in the TX
		 * direction. It is assumed that the application will not need
		 * to send packets larger than their max_rx_pkt_len (MRU).
		 */
		return 1;
	}

	if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the host MRU is greater than its own mbuf size or the
		 * guest mbuf size then chained mbufs have to be enabled in the
		 * RX direction.
		 */
		return 1;
	}

	return 0;
}

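/*
 * Example of the checks above, with illustrative sizes: a configured
 * max_rx_pkt_len of 9000 against 2048-byte guest and host mbufs triggers the
 * first test, so the scattered (chained-mbuf) receive path is selected.
 */
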
static int
avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t rx_queue_id,
		       uint16_t nb_rx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *pool)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct avp_queue *rxq;

	if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
			    rx_queue_id, eth_dev->data->nb_rx_queues);
		return -EINVAL;
	}

	/* Save mbuf pool pointer */
	avp->pool = pool;

	/* Save the local mbuf size */
	mbp_priv = rte_mempool_get_priv(pool);
	avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
	avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;

	if (avp_dev_enable_scattered(eth_dev, avp)) {
		if (!eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->data->scattered_rx = 1;
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
		}
	}

	PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
		    avp->max_rx_pkt_len,
		    eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
		    avp->host_mbuf_size,
		    avp->guest_mbuf_size);

	/* allocate a queue object */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
		return -ENOMEM;
	}

	/* save back pointers to AVP and Ethernet devices */
	rxq->avp = avp;
	rxq->dev_data = eth_dev->data;
	eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;

	/* setup the queue receive mapping for the current queue. */
	_avp_set_rx_queue_mappings(eth_dev, rx_queue_id);

	PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);

	return 0;
}

static int
avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t tx_queue_id,
		       uint16_t nb_tx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *txq;

	if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
		PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
			    tx_queue_id, eth_dev->data->nb_tx_queues);
		return -EINVAL;
	}

	/* allocate a queue object */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
		return -ENOMEM;
	}

	/* only the configured set of transmit queues are used */
	txq->queue_id = tx_queue_id;
	txq->queue_base = tx_queue_id;
	txq->queue_limit = tx_queue_id;

	/* save back pointers to AVP and Ethernet devices */
	txq->avp = avp;
	txq->dev_data = eth_dev->data;
	eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;

	PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);

	return 0;
}

static inline int
_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
{
	uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
	uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
	return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
}

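/*
 * The comparison above treats the 6-byte MAC address as three 16-bit words
 * and ORs the XOR of each pair, so it returns zero only when all six bytes
 * match; it assumes struct ether_addr is at least 2-byte aligned.
 */
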
static inline int
_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
{
	struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
		/* allow all packets destined to our address */
		return 0;
	}

	if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
		/* allow all broadcast packets */
		return 0;
	}

	if (likely(is_multicast_ether_addr(&eth->d_addr))) {
		/* allow all multicast packets */
		return 0;
	}

	if (avp->flags & AVP_F_PROMISC) {
		/* allow all packets when in promiscuous mode */
		return 0;
	}

	return -1;
}

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
static inline void
__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
{
	struct rte_avp_desc *first_buf;
	struct rte_avp_desc *pkt_buf;
	unsigned int pkt_len;
	unsigned int nb_segs;
	void *pkt_data;
	unsigned int i;

	first_buf = avp_dev_translate_buffer(avp, buf);

	i = 0;
	pkt_len = 0;
	nb_segs = first_buf->nb_segs;
	do {
		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		if (pkt_buf == NULL)
			rte_panic("bad buffer: segment %u has an invalid address %p\n",
				  i, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		if (pkt_data == NULL)
			rte_panic("bad buffer: segment %u has a NULL data pointer\n",
				  i);
		if (pkt_buf->data_len == 0)
			rte_panic("bad buffer: segment %u has 0 data length\n",
				  i);
		pkt_len += pkt_buf->data_len;
		nb_segs--;
		i++;
	} while (nb_segs && (buf = pkt_buf->next) != NULL);

	if (nb_segs != 0)
		rte_panic("bad buffer: expected %u segments found %u\n",
			  first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
	if (pkt_len != first_buf->pkt_len)
		rte_panic("bad buffer: expected length %u found %u\n",
			  first_buf->pkt_len, pkt_len);
}

#define avp_dev_buffer_sanity_check(a, b) \
	__avp_dev_buffer_sanity_check((a), (b))

#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */

#define avp_dev_buffer_sanity_check(a, b) do {} while (0)

#endif

/*
 * Copy a host buffer chain to a set of mbufs. This function assumes that
 * there are exactly the required number of mbufs to copy all source bytes.
 */
static inline struct rte_mbuf *
avp_dev_copy_from_buffers(struct avp_dev *avp,
			  struct rte_avp_desc *buf,
			  struct rte_mbuf **mbufs,
			  unsigned int count)
{
	struct rte_mbuf *m_previous = NULL;
	struct rte_avp_desc *pkt_buf;
	unsigned int total_length = 0;
	unsigned int copy_length;
	unsigned int src_offset;
	struct rte_mbuf *m;
	uint64_t ol_flags;
	uint16_t vlan_tci;
	void *pkt_data;
	unsigned int i;

	avp_dev_buffer_sanity_check(avp, buf);

	/* setup the first source buffer */
	pkt_buf = avp_dev_translate_buffer(avp, buf);
	pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
	total_length = pkt_buf->pkt_len;
	src_offset = 0;

	if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
		ol_flags = PKT_RX_VLAN_PKT;
		vlan_tci = pkt_buf->vlan_tci;
	} else {
		ol_flags = 0;
		vlan_tci = 0;
	}

	for (i = 0; (i < count) && (buf != NULL); i++) {
		/* fill each destination buffer */
		m = mbufs[i];

		if (m_previous != NULL)
			m_previous->next = m;

		m_previous = m;

		do {
			/*
			 * Copy as many source buffers as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->guest_mbuf_size -
					       rte_pktmbuf_data_len(m)),
					      (pkt_buf->data_len -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       rte_pktmbuf_data_len(m)),
				   RTE_PTR_ADD(pkt_data, src_offset),
				   copy_length);
			rte_pktmbuf_data_len(m) += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == pkt_buf->data_len)) {
				/* need a new source buffer */
				buf = pkt_buf->next;
				if (buf != NULL) {
					pkt_buf = avp_dev_translate_buffer(
						avp, buf);
					pkt_data = avp_dev_translate_buffer(
						avp, pkt_buf->data);
					src_offset = 0;
				}
			}

			if (unlikely(rte_pktmbuf_data_len(m) ==
				     avp->guest_mbuf_size)) {
				/* need a new destination mbuf */
				break;
			}

		} while (buf != NULL);
	}

	m = mbufs[0];
	m->ol_flags = ol_flags;
	m->nb_segs = count;
	rte_pktmbuf_pkt_len(m) = total_length;
	m->vlan_tci = vlan_tci;

	__rte_mbuf_sanity_check(m, 1);

	return m;
}

static uint16_t
avp_recv_scattered_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	struct rte_avp_desc *buf;
	unsigned int count, avail, n;
	unsigned int guest_mbuf_size;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int buf_len;
	unsigned int port_id;
	unsigned int i;

	guest_mbuf_size = avp->guest_mbuf_size;
	port_id = avp->port_id;
	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i + 1 < n) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}
		buf = avp_bufs[i];

		/* Peek into the first buffer to determine the total length */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		buf_len = pkt_buf->pkt_len;

		/* Allocate enough mbufs to receive the entire packet */
		required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
		if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			continue;
		}

		/* Copy the data from the buffers to our mbufs */
		m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);

		/* finalize mbuf */
		m->port = port_id;

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += buf_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}

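/*
 * Note on the "required" computation above: it is a ceiling division, e.g.
 * a 3000-byte host packet with a 2048-byte guest mbuf data room needs
 * (3000 + 2048 - 1) / 2048 = 2 mbufs (sizes illustrative only).
 */
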
static uint16_t
avp_recv_pkts(void *rx_queue,
	      struct rte_mbuf **rx_pkts,
	      uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	unsigned int count, avail, n;
	unsigned int pkt_len;
	struct rte_mbuf *m;
	void *pkt_data;
	unsigned int i;

	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i + 1 < n) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust host pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = pkt_buf->pkt_len;

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_buf->nb_segs > 1))) {
			/*
			 * application should be using the scattered receive
			 * function because this packet requires chained mbufs
			 */
			rxq->errors++;
			continue;
		}

		/* allocate a local mbuf for the received packet */
		m = rte_pktmbuf_alloc(avp->pool);
		if (unlikely(m == NULL)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			continue;
		}

		/* copy data out of the host buffer to our buffer */
		m->data_off = RTE_PKTMBUF_HEADROOM;
		rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);

		/* initialize the local mbuf */
		rte_pktmbuf_data_len(m) = pkt_len;
		rte_pktmbuf_pkt_len(m) = pkt_len;
		m->port = avp->port_id;

		if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
			m->ol_flags = PKT_RX_VLAN_PKT;
			m->vlan_tci = pkt_buf->vlan_tci;
		}

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += pkt_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}

static void
avp_dev_rx_queue_release(void *rx_queue)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct avp_dev *avp = rxq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		if (data->rx_queues[i] == rxq)
			data->rx_queues[i] = NULL;
	}
}

static void
avp_dev_tx_queue_release(void *tx_queue)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct avp_dev *avp = txq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_tx_queues; i++) {
		if (data->tx_queues[i] == txq)
			data->tx_queues[i] = NULL;
	}
}

static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_avp_device_config config;
	int mask = 0;
	void *addr;
	int ret;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/* Setup required number of queues */
	_avp_set_queue_counts(eth_dev);

	mask = (ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);
	avp_vlan_offload_set(eth_dev, mask);

	/* update device config */
	memset(&config, 0, sizeof(config));
	config.device_id = host_info->device_id;
	config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
	config.driver_version = AVP_DPDK_DRIVER_VERSION;
	config.features = avp->features;
	config.num_tx_queues = avp->num_tx_queues;
	config.num_rx_queues = avp->num_rx_queues;

	ret = avp_dev_ctrl_set_config(eth_dev, &config);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
			    ret);
		return ret;
	}

	avp->flags |= AVP_F_CONFIGURED;

	return 0;
}

static int
avp_dev_link_update(struct rte_eth_dev *eth_dev,
		    __rte_unused int wait_to_complete)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	link->link_speed = ETH_SPEED_NUM_10G;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_status = !!(avp->flags & AVP_F_LINKUP);

	return -1;
}

static void
avp_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *dev_info)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	dev_info->driver_name = "rte_avp_pmd";
	dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	dev_info->max_rx_queues = avp->max_rx_queues;
	dev_info->max_tx_queues = avp->max_tx_queues;
	dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	}
}

static void
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
			if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
			else
				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
		} else {
			PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
		}
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
	}
}

RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);