/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2013-2017, Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1) Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2) Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3) Neither the name of Wind River Systems nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_eal.h>
#include <rte_io.h>

#include "rte_avp_common.h"
#include "rte_avp_fifo.h"

#include "avp_logs.h"
static int avp_dev_configure(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev,
			       __rte_unused int wait_to_complete);
static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);

static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t rx_queue_id,
				  uint16_t nb_rx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *pool);

static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t tx_queue_id,
				  uint16_t nb_tx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_txconf *tx_conf);

static uint16_t avp_recv_scattered_pkts(void *rx_queue,
					struct rte_mbuf **rx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_recv_pkts(void *rx_queue,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);

static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
					struct rte_mbuf **tx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_xmit_pkts(void *tx_queue,
			      struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts);

static void avp_dev_rx_queue_release(void *rxq);
static void avp_dev_tx_queue_release(void *txq);

static void avp_dev_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats);
static void avp_dev_stats_reset(struct rte_eth_dev *dev);
#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)


#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN

/*
 * Defines the number of microseconds to wait before checking the response
 * queue for completion.
 */
#define AVP_REQUEST_DELAY_USECS (5000)

/*
 * Defines the number of times to check the response queue for completion
 * before declaring a timeout.
 */
#define AVP_MAX_REQUEST_RETRY (100)
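/*
 * Taken together, the two values above bound the control-plane wait: a
 * request polls the response queue every 5000us and gives up after 100
 * polls, i.e. a worst case of 5000us * 100 = 500ms before
 * avp_dev_process_request() reports a timeout.
 */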
/* Defines the current PCI driver version number */
#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_avp_map[] = {
	{ .vendor_id = RTE_AVP_PCI_VENDOR_ID,
	  .device_id = RTE_AVP_PCI_DEVICE_ID,
	  .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
	  .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
	  .class_id = RTE_CLASS_ANY_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};
/*
 * dev_ops for avp, bare necessities for basic operation
 */
static const struct eth_dev_ops avp_eth_dev_ops = {
	.dev_configure       = avp_dev_configure,
	.dev_infos_get       = avp_dev_info_get,
	.vlan_offload_set    = avp_vlan_offload_set,
	.stats_get           = avp_dev_stats_get,
	.stats_reset         = avp_dev_stats_reset,
	.link_update         = avp_dev_link_update,
	.promiscuous_enable  = avp_dev_promiscuous_enable,
	.promiscuous_disable = avp_dev_promiscuous_disable,
	.rx_queue_setup      = avp_dev_rx_queue_setup,
	.rx_queue_release    = avp_dev_rx_queue_release,
	.tx_queue_setup      = avp_dev_tx_queue_setup,
	.tx_queue_release    = avp_dev_tx_queue_release,
};
/**@{ AVP device flags */
#define AVP_F_PROMISC (1 << 1)
#define AVP_F_CONFIGURED (1 << 2)
#define AVP_F_LINKUP (1 << 3)
/**@} */
/* Ethernet device validation marker */
#define AVP_ETHDEV_MAGIC 0x92972862
/*
 * Defines the AVP device attributes which are attached to an RTE ethernet
 * device's private data structure.
 */
struct avp_dev {
	uint32_t magic; /**< Memory validation marker */
	uint64_t device_id; /**< Unique system identifier */
	struct ether_addr ethaddr; /**< Host specified MAC address */
	struct rte_eth_dev_data *dev_data;
	/**< Back pointer to ethernet device data */
	volatile uint32_t flags; /**< Device operational flags */
	uint8_t port_id; /**< Ethernet port identifier */
	struct rte_mempool *pool; /**< pkt mbuf mempool */
	unsigned int guest_mbuf_size; /**< local pool mbuf size */
	unsigned int host_mbuf_size; /**< host mbuf size */
	unsigned int max_rx_pkt_len; /**< maximum receive unit */
	uint32_t host_features; /**< Supported feature bitmap */
	uint32_t features; /**< Enabled feature bitmap */
	unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
	unsigned int max_tx_queues; /**< Maximum number of transmit queues */
	unsigned int num_rx_queues; /**< Negotiated number of receive queues */
	unsigned int max_rx_queues; /**< Maximum number of receive queues */

	struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
	struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
	struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
	/**< Allocated mbufs queue */
	struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
	/**< To be freed mbufs queue */

	/* For request & response */
	struct rte_avp_fifo *req_q; /**< Request queue */
	struct rte_avp_fifo *resp_q; /**< Response queue */
	void *host_sync_addr; /**< (host) Req/Resp Mem address */
	void *sync_addr; /**< Req/Resp Mem address */
	void *host_mbuf_addr; /**< (host) MBUF pool start address */
	void *mbuf_addr; /**< MBUF pool start address */
} __rte_cache_aligned;
/* RTE ethernet private data */
struct avp_adapter {
	struct avp_dev avp;
} __rte_cache_aligned;
/* 32-bit MMIO register write */
#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))

/* 32-bit MMIO register read */
#define AVP_READ32(_addr) rte_read32_relaxed((_addr))

/* Macro to cast the ethernet device private data to an AVP object */
#define AVP_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct avp_adapter *)adapter)->avp)
/*
 * Defines the structure of an AVP device queue for the purpose of handling the
 * receive and transmit burst callback functions
 */
struct avp_queue {
	struct rte_eth_dev_data *dev_data;
	/**< Backpointer to ethernet device data */
	struct avp_dev *avp; /**< Backpointer to AVP device */
	uint16_t queue_id;
	/**< Queue identifier used for indexing current queue */
	uint16_t queue_base;
	/**< Base queue identifier for queue servicing */
	uint16_t queue_limit;
	/**< Maximum queue identifier for queue servicing */
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};
/* send a request and wait for a response
 *
 * @warning must be called while holding the avp->lock spinlock.
 */
static int
avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
{
	unsigned int retry = AVP_MAX_REQUEST_RETRY;
	void *resp_addr = NULL;
	unsigned int count;
	int ret;

	PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);

	request->result = -ENOTSUP;

	/* Discard any stale responses before starting a new request */
	while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
		PMD_DRV_LOG(DEBUG, "Discarding stale response\n");

	rte_memcpy(avp->sync_addr, request, sizeof(*request));
	count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
	if (count < 1) {
		PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
			    request->req_id);
		ret = -EBUSY;
		goto done;
	}

	while (retry--) {
		/* wait for a response */
		usleep(AVP_REQUEST_DELAY_USECS);

		count = avp_fifo_count(avp->resp_q);
		if (count >= 1) {
			/* response received */
			break;
		}

		if ((count < 1) && (retry == 0)) {
			PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
				    request->req_id);
			ret = -ETIME;
			goto done;
		}
	}

	/* retrieve the response */
	count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
	if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
		PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
			    count, resp_addr, avp->host_sync_addr);
		ret = -ENODATA;
		goto done;
	}

	/* copy to user buffer */
	rte_memcpy(request, avp->sync_addr, sizeof(*request));
	ret = 0;

	PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
		    request->result, request->req_id);

done:
	return ret;
}
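/*
 * Illustrative caller sketch (not part of the driver): issuing a request
 * while honouring the locking warning above. The surrounding spinlock
 * calls are an assumption about how a caller would serialize control
 * traffic; the request id used here is the real RTE_AVP_REQ_CFG_DEVICE.
 *
 * @code
 *	struct rte_avp_request request;
 *	int ret;
 *
 *	memset(&request, 0, sizeof(request));
 *	request.req_id = RTE_AVP_REQ_CFG_DEVICE;
 *	rte_spinlock_lock(&avp->lock);
 *	ret = avp_dev_process_request(avp, &request);
 *	rte_spinlock_unlock(&avp->lock);
 *	if (ret == 0)
 *		ret = request.result;
 * @endcode
 *
 * avp_dev_ctrl_set_config() below is the in-tree instance of this pattern.
 */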
static int
avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
			struct rte_avp_device_config *config)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a configure request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_DEVICE;
	memcpy(&request.config, config, sizeof(request.config));

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}
/* translate from host mbuf virtual address to guest virtual address */
static inline void *
avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
{
	return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
				       (uintptr_t)avp->host_mbuf_addr),
			   (uintptr_t)avp->mbuf_addr);
}
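/*
 * Worked example (illustrative addresses): if the host maps its mbuf pool
 * at host_mbuf_addr = 0x7f0000000000 and the guest maps the same shared
 * memory at mbuf_addr = 0x400000000, then a host pointer 0x7f0000001000
 * translates to 0x400000000 + 0x1000 = 0x400001000; the translation only
 * rebases the pointer, offsets within the pool are preserved.
 */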
/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
			  phys_addr_t host_phys_addr)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct rte_mem_resource *resource;
	struct rte_avp_memmap_info *info;
	struct rte_avp_memmap *map;
	off_t offset;
	void *addr;
	unsigned int i;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
	resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
	info = (struct rte_avp_memmap_info *)resource->addr;

	offset = 0;
	for (i = 0; i < info->nb_maps; i++) {
		/* search all segments looking for a matching address */
		map = &info->maps[i];

		if ((host_phys_addr >= map->phys_addr) &&
		    (host_phys_addr < (map->phys_addr + map->length))) {
			/* address is within this segment */
			offset += (host_phys_addr - map->phys_addr);
			addr = RTE_PTR_ADD(addr, offset);

			PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
				    host_phys_addr, addr);

			return addr;
		}
		offset += map->length;
	}

	return NULL;
}
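/*
 * Worked example (illustrative segments): with
 *	maps[0] = { .phys_addr = 0x10000000, .length = 0x100000 }
 *	maps[1] = { .phys_addr = 0x30000000, .length = 0x100000 }
 * a lookup of 0x30000400 misses segment 0 (offset grows to 0x100000) and
 * hits segment 1, yielding the memory BAR base plus 0x100000 + 0x400;
 * i.e. host segments are treated as if packed back to back in the BAR.
 */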
/* verify that the incoming device version is compatible with our version */
static int
avp_dev_version_check(uint32_t version)
{
	uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
	uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);

	if (device <= driver) {
		/* the host driver version is less than or equal to ours */
		return 0;
	}

	return 1;
}
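/*
 * Example (assuming RTE_AVP_STRIP_MINOR_VERSION() keeps only the release
 * and major fields): a host at v2.1.3 and a driver at v2.1.0 strip to the
 * same value and are accepted, whereas a host at v3.0.0 strips to a larger
 * value than any v2.x driver and is rejected.
 */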
/* verify that memory regions have expected version and validation markers */
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct rte_avp_memmap_info *memmap;
	struct rte_avp_device_info *info;
	struct rte_mem_resource *resource;
	unsigned int i;

	/* Dump resource info for debug */
	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
		resource = &pci_dev->mem_resource[i];
		if ((resource->phys_addr == 0) || (resource->len == 0))
			continue;

		PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
			    i, resource->phys_addr,
			    resource->len, resource->addr);

		switch (i) {
		case RTE_AVP_PCI_MEMMAP_BAR:
			memmap = (struct rte_avp_memmap_info *)resource->addr;
			if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
			    (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
				PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
					    memmap->magic, memmap->version);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_DEVICE_BAR:
			info = (struct rte_avp_device_info *)resource->addr;
			if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
			    avp_dev_version_check(info->version)) {
				PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
					    info->magic, info->version,
					    AVP_DPDK_DRIVER_VERSION);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MEMORY_BAR:
		case RTE_AVP_PCI_MMIO_BAR:
			if (resource->addr == NULL) {
				PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
					    i);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MSIX_BAR:
		default:
			/* no validation required */
			break;
		}
	}

	return 0;
}
static void
_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *rxq;
	uint16_t queue_count;
	uint16_t remainder;

	rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];

	/*
	 * Must map all AVP fifos as evenly as possible between the configured
	 * device queues.  Each device queue will service a subset of the AVP
	 * fifos.  If the fifos do not divide evenly among the device queues
	 * then the first "remainder" device queues each service one extra
	 * AVP fifo.
	 */
	queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
	remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
	if (rx_queue_id < remainder) {
		/* these queues must service one extra FIFO */
		rxq->queue_base = rx_queue_id * (queue_count + 1);
		rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
	} else {
		/* these queues service the regular number of FIFOs */
		rxq->queue_base = ((remainder * (queue_count + 1)) +
				   ((rx_queue_id - remainder) * queue_count));
		rxq->queue_limit = rxq->queue_base + queue_count - 1;
	}

	PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
		    rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);

	rxq->queue_id = rxq->queue_base;
}
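/*
 * Worked example: 5 AVP fifos (avp->num_rx_queues) over 2 device queues
 * (nb_rx_queues) gives queue_count = 2 and remainder = 1, so device queue
 * 0 services fifos 0-2 (base 0, limit 2) and device queue 1 services
 * fifos 3-4 (base 3, limit 4); the burst handlers then rotate queue_id
 * between base and limit on successive polls.
 */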
static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	void *addr;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/*
	 * the transmit direction is not negotiated beyond respecting the max
	 * number of queues because the host can handle arbitrary guest tx
	 * queues (host rx queues).
	 */
	avp->num_tx_queues = eth_dev->data->nb_tx_queues;

	/*
	 * the receive direction is more restrictive.  The host requires a
	 * minimum number of guest rx queues (host tx queues) therefore
	 * negotiate a value that is at least as large as the host minimum
	 * requirement.  If the host and guest values are not identical then a
	 * mapping will be established in the receive_queue_setup function.
	 */
	avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
				     eth_dev->data->nb_rx_queues);

	PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
		    avp->num_tx_queues, avp->num_rx_queues);
}
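/*
 * Worked example: if the host requires min_rx_queues = 4 but the
 * application configured nb_rx_queues = 2, the driver negotiates
 * num_rx_queues = RTE_MAX(4, 2) = 4 AVP fifos and relies on
 * _avp_set_rx_queue_mappings() to spread the 4 fifos over the 2 device
 * queues (2 fifos each).
 */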
/*
 * create an AVP device using the supplied device info by first translating it
 * to guest address space(s).
 */
static int
avp_dev_create(struct rte_pci_device *pci_dev,
	       struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_mem_resource *resource;
	unsigned int i;

	resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
	if (resource->addr == NULL) {
		PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
			    RTE_AVP_PCI_DEVICE_BAR);
		return -EFAULT;
	}
	host_info = (struct rte_avp_device_info *)resource->addr;

	if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
	    avp_dev_version_check(host_info->version)) {
		PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
			    host_info->magic, host_info->version,
			    AVP_DPDK_DRIVER_VERSION);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
		    RTE_AVP_GET_RELEASE_VERSION(host_info->version),
		    RTE_AVP_GET_MAJOR_VERSION(host_info->version),
		    RTE_AVP_GET_MINOR_VERSION(host_info->version));

	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
		    host_info->min_tx_queues, host_info->max_tx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
		    host_info->min_rx_queues, host_info->max_rx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
		    host_info->features);

	if (avp->magic != AVP_ETHDEV_MAGIC) {
		/*
		 * First time initialization (i.e., not during a VM
		 * migration)
		 */
		memset(avp, 0, sizeof(*avp));
		avp->magic = AVP_ETHDEV_MAGIC;
		avp->dev_data = eth_dev->data;
		avp->port_id = eth_dev->data->port_id;
		avp->host_mbuf_size = host_info->mbuf_size;
		avp->host_features = host_info->features;
		memcpy(&avp->ethaddr.addr_bytes[0],
		       host_info->ethaddr, ETHER_ADDR_LEN);
		/* adjust max values to not exceed our max */
		avp->max_tx_queues =
			RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
		avp->max_rx_queues =
			RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
	} else {
		/* Re-attaching during migration */

		/* TODO... requires validation of host values */
		if ((host_info->features & avp->features) != avp->features) {
			PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
				    avp->features, host_info->features);
			/* this should not be possible; continue for now */
		}
	}

	/* the device id is allowed to change over migrations */
	avp->device_id = host_info->device_id;

	/* translate incoming host addresses to guest address space */
	PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
		    host_info->tx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
		    host_info->alloc_phys);
	for (i = 0; i < avp->max_tx_queues; i++) {
		avp->tx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->tx_phys + (i * host_info->tx_size));

		avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
			host_info->alloc_phys + (i * host_info->alloc_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
		    host_info->rx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
		    host_info->free_phys);
	for (i = 0; i < avp->max_rx_queues; i++) {
		avp->rx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->rx_phys + (i * host_info->rx_size));
		avp->free_q[i] = avp_dev_translate_address(eth_dev,
			host_info->free_phys + (i * host_info->free_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
		    host_info->req_phys);
	PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
		    host_info->resp_phys);
	PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
		    host_info->sync_phys);
	PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
		    host_info->mbuf_phys);
	avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
	avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
	avp->sync_addr =
		avp_dev_translate_address(eth_dev, host_info->sync_phys);
	avp->mbuf_addr =
		avp_dev_translate_address(eth_dev, host_info->mbuf_phys);

	/*
	 * store the host mbuf virtual address so that we can calculate
	 * relative offsets for each mbuf as they are processed
	 */
	avp->host_mbuf_addr = host_info->mbuf_va;
	avp->host_sync_addr = host_info->sync_va;

	/*
	 * store the maximum packet length that is supported by the host.
	 */
	avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
	PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
		    host_info->max_rx_pkt_len);

	return 0;
}
/*
 * This function is based on the probe() function in avp_pci.c.
 * It returns 0 on success.
 */
static int
eth_avp_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = AVP_DEV_TO_PCI(eth_dev);
	eth_dev->dev_ops = &avp_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avp_recv_pkts;
	eth_dev->tx_pkt_burst = &avp_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/*
		 * no setup required on secondary processes.  All data is
		 * saved in dev_private by the primary process.  All resources
		 * should be mapped to the same virtual address so all
		 * pointers should be valid.
		 */
		if (eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Check BAR resources */
	ret = avp_dev_check_regions(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
			    ret);
		return ret;
	}

	/* Handle each subtype */
	ret = avp_dev_create(pci_dev, eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
		return ret;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
			    ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Get a mac from device config */
	ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);

	return 0;
}
static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (eth_dev->data == NULL)
		return 0;

	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	return 0;
}
static struct eth_driver rte_avp_pmd = {
	{
		.id_table = pci_id_avp_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_avp_dev_init,
	.eth_dev_uninit = eth_avp_dev_uninit,
	.dev_private_size = sizeof(struct avp_adapter),
};
static int
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
			 struct avp_dev *avp)
{
	unsigned int max_rx_pkt_len;

	max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the guest MTU is greater than either the host or guest
		 * buffers then chained mbufs have to be enabled in the TX
		 * direction.  It is assumed that the application will not need
		 * to send packets larger than their max_rx_pkt_len (MRU).
		 */
		return 1;
	}

	if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the host MRU is greater than its own mbuf size or the
		 * guest mbuf size then chained mbufs have to be enabled in
		 * the RX direction.
		 */
		return 1;
	}

	return 0;
}
static int
avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t rx_queue_id,
		       uint16_t nb_rx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *pool)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct avp_queue *rxq;

	if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
			    rx_queue_id, eth_dev->data->nb_rx_queues);
		return -EINVAL;
	}

	/* Save mbuf pool pointer */
	avp->pool = pool;

	/* Save the local mbuf size */
	mbp_priv = rte_mempool_get_priv(pool);
	avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
	avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;

	if (avp_dev_enable_scattered(eth_dev, avp)) {
		if (!eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->data->scattered_rx = 1;
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
	}

	PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
		    avp->max_rx_pkt_len,
		    eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
		    avp->host_mbuf_size,
		    avp->guest_mbuf_size);

	/* allocate a queue object */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
		return -ENOMEM;
	}

	/* save back pointers to AVP and Ethernet devices */
	rxq->avp = avp;
	rxq->dev_data = eth_dev->data;
	eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;

	/* setup the queue receive mapping for the current queue. */
	_avp_set_rx_queue_mappings(eth_dev, rx_queue_id);

	PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);

	return 0;
}
static int
avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t tx_queue_id,
		       uint16_t nb_tx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *txq;

	if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
		PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
			    tx_queue_id, eth_dev->data->nb_tx_queues);
		return -EINVAL;
	}

	/* allocate a queue object */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
		return -ENOMEM;
	}

	/* only the configured set of transmit queues are used */
	txq->queue_id = tx_queue_id;
	txq->queue_base = tx_queue_id;
	txq->queue_limit = tx_queue_id;

	/* save back pointers to AVP and Ethernet devices */
	txq->avp = avp;
	txq->dev_data = eth_dev->data;
	eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;

	PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);

	return 0;
}
static inline int
_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
{
	uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
	uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
	return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
}
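/*
 * The comparator views the six MAC bytes as three 16-bit words and ORs
 * together the XOR of each pair, so the result is zero if and only if all
 * six bytes match, without a branch per byte; e.g. 00:11:22:33:44:55 vs
 * 00:11:22:33:44:56 leaves a non-zero residue from the last word.
 */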
static inline int
_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
{
	struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
		/* allow all packets destined to our address */
		return 0;
	}

	if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
		/* allow all broadcast packets */
		return 0;
	}

	if (likely(is_multicast_ether_addr(&eth->d_addr))) {
		/* allow all multicast packets */
		return 0;
	}

	if (avp->flags & AVP_F_PROMISC) {
		/* allow all packets when in promiscuous mode */
		return 0;
	}

	return -1;
}
#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
static inline void
__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
{
	struct rte_avp_desc *first_buf;
	struct rte_avp_desc *pkt_buf;
	unsigned int pkt_len;
	unsigned int nb_segs;
	void *pkt_data;
	unsigned int i;

	first_buf = avp_dev_translate_buffer(avp, buf);

	i = 0;
	pkt_len = 0;
	nb_segs = first_buf->nb_segs;
	do {
		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		if (pkt_buf == NULL)
			rte_panic("bad buffer: segment %u has an invalid address %p\n",
				  i, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		if (pkt_data == NULL)
			rte_panic("bad buffer: segment %u has a NULL data pointer\n",
				  i);
		if (pkt_buf->data_len == 0)
			rte_panic("bad buffer: segment %u has 0 data length\n",
				  i);
		pkt_len += pkt_buf->data_len;
		nb_segs--;
		i++;

	} while (nb_segs && (buf = pkt_buf->next) != NULL);

	if (nb_segs != 0)
		rte_panic("bad buffer: expected %u segments found %u\n",
			  first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
	if (pkt_len != first_buf->pkt_len)
		rte_panic("bad buffer: expected length %u found %u\n",
			  first_buf->pkt_len, pkt_len);
}

#define avp_dev_buffer_sanity_check(a, b) \
	__avp_dev_buffer_sanity_check((a), (b))

#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */

#define avp_dev_buffer_sanity_check(a, b) do {} while (0)

#endif
/*
 * Copy a host buffer chain to a set of mbufs.  This function assumes that
 * exactly the required number of mbufs is available to copy all source
 * bytes.
 */
static inline struct rte_mbuf *
avp_dev_copy_from_buffers(struct avp_dev *avp,
			  struct rte_avp_desc *buf,
			  struct rte_mbuf **mbufs,
			  unsigned int count)
{
	struct rte_mbuf *m_previous = NULL;
	struct rte_avp_desc *pkt_buf;
	unsigned int total_length = 0;
	unsigned int copy_length;
	unsigned int src_offset;
	struct rte_mbuf *m;
	uint16_t ol_flags;
	uint16_t vlan_tci;
	void *pkt_data;
	unsigned int i;

	avp_dev_buffer_sanity_check(avp, buf);

	/* setup the first source buffer */
	pkt_buf = avp_dev_translate_buffer(avp, buf);
	pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
	total_length = pkt_buf->pkt_len;
	src_offset = 0;

	if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
		ol_flags = PKT_RX_VLAN_PKT;
		vlan_tci = pkt_buf->vlan_tci;
	} else {
		ol_flags = 0;
		vlan_tci = 0;
	}

	for (i = 0; (i < count) && (buf != NULL); i++) {
		/* fill each destination buffer */
		m = mbufs[i];

		if (m_previous != NULL)
			m_previous->next = m;

		m_previous = m;

		do {
			/*
			 * Copy as many source buffers as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->guest_mbuf_size -
					       rte_pktmbuf_data_len(m)),
					      (pkt_buf->data_len -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       rte_pktmbuf_data_len(m)),
				   RTE_PTR_ADD(pkt_data, src_offset),
				   copy_length);
			rte_pktmbuf_data_len(m) += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == pkt_buf->data_len)) {
				/* need a new source buffer */
				buf = pkt_buf->next;
				if (buf != NULL) {
					pkt_buf = avp_dev_translate_buffer(
						avp, buf);
					pkt_data = avp_dev_translate_buffer(
						avp, pkt_buf->data);
					src_offset = 0;
				}
			}

			if (unlikely(rte_pktmbuf_data_len(m) ==
				     avp->guest_mbuf_size)) {
				/* need a new destination mbuf */
				break;
			}

		} while (buf != NULL);
	}

	m = mbufs[0];
	m->ol_flags = ol_flags;
	m->nb_segs = count;
	rte_pktmbuf_pkt_len(m) = total_length;
	m->vlan_tci = vlan_tci;

	__rte_mbuf_sanity_check(m, 1);

	return m;
}
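/*
 * Callers size the destination mbuf array with a ceiling division, e.g. a
 * 3000-byte host chain copied into mbufs with 2048 bytes of room needs
 * required = (3000 + 2048 - 1) / 2048 = 2 mbufs; see the computation of
 * "required" in avp_recv_scattered_pkts() below.
 */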
static uint16_t
avp_recv_scattered_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	struct rte_avp_desc *buf;
	unsigned int count, avail, n;
	unsigned int guest_mbuf_size;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int buf_len;
	unsigned int port_id;
	unsigned int i;

	guest_mbuf_size = avp->guest_mbuf_size;
	port_id = avp->port_id;
	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i + 1 < n) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}
		buf = avp_bufs[i];

		/* Peek into the first buffer to determine the total length */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		buf_len = pkt_buf->pkt_len;

		/* Allocate enough mbufs to receive the entire packet */
		required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
		if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			continue;
		}

		/* Copy the data from the buffers to our mbufs */
		m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);

		/* finalize mbuf */
		m->port = port_id;

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += buf_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}
static uint16_t
avp_recv_pkts(void *rx_queue,
	      struct rte_mbuf **rx_pkts,
	      uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	unsigned int count, avail, n;
	unsigned int pkt_len;
	struct rte_mbuf *m;
	char *pkt_data;
	unsigned int i;

	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i + 1 < n) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust host pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = pkt_buf->pkt_len;

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_buf->nb_segs > 1))) {
			/*
			 * application should be using the scattered receive
			 * function; receive it truncated to avoid the
			 * performance hit of having to manage returning the
			 * already allocated buffer to the free list.
			 */
			pkt_len = RTE_MIN(avp->guest_mbuf_size, pkt_len);
		}

		/* process each packet to be received */
		m = rte_pktmbuf_alloc(avp->pool);
		if (unlikely(m == NULL)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			continue;
		}

		/* copy data out of the host buffer to our buffer */
		m->data_off = RTE_PKTMBUF_HEADROOM;
		rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);

		/* initialize the local mbuf */
		rte_pktmbuf_data_len(m) = pkt_len;
		rte_pktmbuf_pkt_len(m) = pkt_len;
		m->port = avp->port_id;

		if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
			m->ol_flags = PKT_RX_VLAN_PKT;
			m->vlan_tci = pkt_buf->vlan_tci;
		}

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += pkt_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}
/*
 * Copy a chained mbuf to a set of host buffers.  This function assumes that
 * there are sufficient destination buffers to contain the entire source
 * packet.
 */
static inline uint16_t
avp_dev_copy_to_buffers(struct avp_dev *avp,
			struct rte_mbuf *mbuf,
			struct rte_avp_desc **buffers,
			unsigned int count)
{
	struct rte_avp_desc *previous_buf = NULL;
	struct rte_avp_desc *first_buf = NULL;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_desc *buf;
	size_t total_length;
	struct rte_mbuf *m;
	size_t copy_length;
	size_t src_offset;
	char *pkt_data;
	unsigned int i;

	__rte_mbuf_sanity_check(mbuf, 1);

	m = mbuf;
	src_offset = 0;
	total_length = rte_pktmbuf_pkt_len(m);
	for (i = 0; (i < count) && (m != NULL); i++) {
		/* fill each destination buffer */
		buf = buffers[i];

		if (i < count - 1) {
			/* prefetch next entry while processing this one */
			pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);

		/* setup the buffer chain */
		if (previous_buf != NULL)
			previous_buf->next = buf;
		else
			first_buf = pkt_buf;

		previous_buf = pkt_buf;

		do {
			/*
			 * copy as many source mbuf segments as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->host_mbuf_size -
					       pkt_buf->data_len),
					      (rte_pktmbuf_data_len(m) -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
				   RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       src_offset),
				   copy_length);
			pkt_buf->data_len += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == rte_pktmbuf_data_len(m))) {
				/* need a new source buffer */
				m = m->next;
				src_offset = 0;
			}

			if (unlikely(pkt_buf->data_len ==
				     avp->host_mbuf_size)) {
				/* need a new destination buffer */
				break;
			}

		} while (m != NULL);
	}

	first_buf->nb_segs = count;
	first_buf->pkt_len = total_length;

	if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
		first_buf->vlan_tci = mbuf->vlan_tci;
	}

	avp_dev_buffer_sanity_check(avp, buffers[0]);

	return total_length;
}
static uint16_t
avp_xmit_scattered_pkts(void *tx_queue,
			struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
				       RTE_AVP_MAX_MBUF_SEGMENTS)];
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	unsigned int orig_nb_pkts;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int segments;
	unsigned int tx_bytes;
	unsigned int i;

	orig_nb_pkts = nb_pkts;
	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);
	if (unlikely(avail > (AVP_MAX_TX_BURST *
			      RTE_AVP_MAX_MBUF_SEGMENTS)))
		avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	nb_pkts = RTE_MIN(count, nb_pkts);

	/* determine how many packets will fit in the available buffers */
	count = 0;
	segments = 0;
	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		if (likely(i < (unsigned int)nb_pkts - 1)) {
			/* prefetch next entry while processing this one */
			rte_prefetch0(tx_pkts[i + 1]);
		}
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		if (unlikely((required == 0) ||
			     (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
			break;
		else if (unlikely(required + segments > avail))
			break;
		segments += required;
		count++;
	}
	nb_pkts = count;

	if (unlikely(nb_pkts == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += orig_nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   nb_pkts, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
	if (unlikely(n != segments)) {
		PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
			   "n=%u, segments=%u, orig=%u\n",
			   n, segments, orig_nb_pkts);
		txq->errors += orig_nb_pkts;
		return 0;
	}

	tx_bytes = 0;
	count = 0;
	for (i = 0; i < nb_pkts; i++) {
		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* determine how many buffers are required for this packet */
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		tx_bytes += avp_dev_copy_to_buffers(avp, m,
						    &avp_bufs[count], required);
		tx_bufs[i] = avp_bufs[count];
		count += required;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += nb_pkts;
	txq->bytes += tx_bytes;

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
	for (i = 0; i < nb_pkts; i++)
		avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
#endif

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
	if (unlikely(n != orig_nb_pkts))
		txq->errors += (orig_nb_pkts - n);

	return n;
}
static uint16_t
avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	struct rte_mbuf *m;
	unsigned int pkt_len;
	unsigned int tx_bytes;
	char *pkt_data;
	unsigned int i;

	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);

	if (unlikely(count == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   count, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
	if (unlikely(n != count)) {
		txq->errors++;
		return 0;
	}

	tx_bytes = 0;
	for (i = 0; i < count; i++) {
		/* prefetch next entry while processing the current one */
		if (i < count - 1) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = rte_pktmbuf_pkt_len(m);

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_len > avp->host_mbuf_size))) {
			/*
			 * application should be using the scattered transmit
			 * function; send it truncated to avoid the performance
			 * hit of having to manage returning the already
			 * allocated buffer to the free list.  This should not
			 * happen since the application should have set the
			 * max_rx_pkt_len based on its MTU and it should be
			 * policing its own packet sizes.
			 */
			txq->errors++;
			pkt_len = RTE_MIN(avp->guest_mbuf_size,
					  avp->host_mbuf_size);
		}

		/* copy data out of our mbuf and into the AVP buffer */
		rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
		pkt_buf->pkt_len = pkt_len;
		pkt_buf->data_len = pkt_len;
		pkt_buf->nb_segs = 1;
		pkt_buf->next = NULL;

		if (m->ol_flags & PKT_TX_VLAN_PKT) {
			pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
			pkt_buf->vlan_tci = m->vlan_tci;
		}

		tx_bytes += pkt_len;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += count;
	txq->bytes += tx_bytes;

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);

	return n;
}
static void
avp_dev_rx_queue_release(void *rx_queue)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct avp_dev *avp = rxq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		if (data->rx_queues[i] == rxq)
			data->rx_queues[i] = NULL;
	}
}
static void
avp_dev_tx_queue_release(void *tx_queue)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct avp_dev *avp = txq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_tx_queues; i++) {
		if (data->tx_queues[i] == txq)
			data->tx_queues[i] = NULL;
	}
}
static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_avp_device_config config;
	int mask = 0;
	void *addr;
	int ret;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/* Setup required number of queues */
	_avp_set_queue_counts(eth_dev);

	mask = (ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);
	avp_vlan_offload_set(eth_dev, mask);

	/* update device config */
	memset(&config, 0, sizeof(config));
	config.device_id = host_info->device_id;
	config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
	config.driver_version = AVP_DPDK_DRIVER_VERSION;
	config.features = avp->features;
	config.num_tx_queues = avp->num_tx_queues;
	config.num_rx_queues = avp->num_rx_queues;

	ret = avp_dev_ctrl_set_config(eth_dev, &config);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
			    ret);
		return ret;
	}

	avp->flags |= AVP_F_CONFIGURED;

	return 0;
}
static int
avp_dev_link_update(struct rte_eth_dev *eth_dev,
		    __rte_unused int wait_to_complete)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	link->link_speed = ETH_SPEED_NUM_10G;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_status = !!(avp->flags & AVP_F_LINKUP);

	return -1;
}
static void
avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if ((avp->flags & AVP_F_PROMISC) == 0) {
		avp->flags |= AVP_F_PROMISC;
		PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
			    eth_dev->data->port_id);
	}
}
static void
avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if ((avp->flags & AVP_F_PROMISC) != 0) {
		avp->flags &= ~AVP_F_PROMISC;
		PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
			    eth_dev->data->port_id);
	}
}
static void
avp_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *dev_info)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	dev_info->driver_name = "rte_avp_pmd";
	dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	dev_info->max_rx_queues = avp->max_rx_queues;
	dev_info->max_tx_queues = avp->max_tx_queues;
	dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	}
}
static void
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
			if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
			else
				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
		} else {
			PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
		}
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
	}
}
static void
avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		struct avp_queue *rxq = avp->dev_data->rx_queues[i];

		if (rxq) {
			stats->ipackets += rxq->packets;
			stats->ibytes += rxq->bytes;
			stats->ierrors += rxq->errors;

			if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
				stats->q_ipackets[i] += rxq->packets;
				stats->q_ibytes[i] += rxq->bytes;
				stats->q_errors[i] += rxq->errors;
			}
		}
	}

	for (i = 0; i < avp->num_tx_queues; i++) {
		struct avp_queue *txq = avp->dev_data->tx_queues[i];

		if (txq) {
			stats->opackets += txq->packets;
			stats->obytes += txq->bytes;
			stats->oerrors += txq->errors;

			if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
				stats->q_opackets[i] += txq->packets;
				stats->q_obytes[i] += txq->bytes;
				stats->q_errors[i] += txq->errors;
			}
		}
	}
}
static void
avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		struct avp_queue *rxq = avp->dev_data->rx_queues[i];

		if (rxq) {
			rxq->bytes = 0;
			rxq->packets = 0;
			rxq->errors = 0;
		}
	}

	for (i = 0; i < avp->num_tx_queues; i++) {
		struct avp_queue *txq = avp->dev_data->tx_queues[i];

		if (txq) {
			txq->bytes = 0;
			txq->packets = 0;
			txq->errors = 0;
		}
	}
}
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);