/*
 * Copyright (c) 2013-2017, Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1) Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2) Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3) Neither the name of Wind River Systems nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_io.h>
#include <rte_eal.h>
#include <rte_memory.h>

#include "rte_avp_common.h"
#include "rte_avp_fifo.h"

#include "avp_logs.h"
static int avp_dev_configure(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev,
			       __rte_unused int wait_to_complete);
#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)

#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
/*
 * Defines the number of microseconds to wait before checking the response
 * queue for completion.
 */
#define AVP_REQUEST_DELAY_USECS (5000)
/*
 * Defines the number of times to check the response queue for completion
 * before declaring a timeout.
 */
#define AVP_MAX_REQUEST_RETRY (100)
/* Defines the current PCI driver version number */
#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_avp_map[] = {
	{ .vendor_id = RTE_AVP_PCI_VENDOR_ID,
	  .device_id = RTE_AVP_PCI_DEVICE_ID,
	  .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
	  .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
	  .class_id = RTE_CLASS_ANY_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};
/*
 * dev_ops for avp, bare necessities for basic operation
 */
static const struct eth_dev_ops avp_eth_dev_ops = {
	.dev_configure = avp_dev_configure,
	.dev_infos_get = avp_dev_info_get,
	.vlan_offload_set = avp_vlan_offload_set,
	.link_update = avp_dev_link_update,
};
/**@{ AVP device flags */
#define AVP_F_PROMISC (1 << 1)
#define AVP_F_CONFIGURED (1 << 2)
#define AVP_F_LINKUP (1 << 3)
/**@} */
/* Ethernet device validation marker */
#define AVP_ETHDEV_MAGIC 0x92972862
/*
 * Defines the AVP device attributes which are attached to an RTE ethernet
 * device
 */
struct avp_dev {
	uint32_t magic; /**< Memory validation marker */
	uint64_t device_id; /**< Unique system identifier */
	struct ether_addr ethaddr; /**< Host specified MAC address */
	struct rte_eth_dev_data *dev_data;
	/**< Back pointer to ethernet device data */
	volatile uint32_t flags; /**< Device operational flags */
	uint8_t port_id; /**< Ethernet port identifier */
	struct rte_mempool *pool; /**< pkt mbuf mempool */
	unsigned int guest_mbuf_size; /**< local pool mbuf size */
	unsigned int host_mbuf_size; /**< host mbuf size */
	unsigned int max_rx_pkt_len; /**< maximum receive unit */
	uint32_t host_features; /**< Supported feature bitmap */
	uint32_t features; /**< Enabled feature bitmap */
	unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
	unsigned int max_tx_queues; /**< Maximum number of transmit queues */
	unsigned int num_rx_queues; /**< Negotiated number of receive queues */
	unsigned int max_rx_queues; /**< Maximum number of receive queues */

	struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
	struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
	struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
	/**< Allocated mbufs queue */
	struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
	/**< To be freed mbufs queue */

	/* Assumed field: a lock serializing request/response processing, as
	 * implied by the "must be called while holding the avp->lock
	 * spinlock" note on avp_dev_process_request().
	 */
	rte_spinlock_t lock;

	/* For request & response */
	struct rte_avp_fifo *req_q; /**< Request queue */
	struct rte_avp_fifo *resp_q; /**< Response queue */
	void *host_sync_addr; /**< (host) Req/Resp Mem address */
	void *sync_addr; /**< Req/Resp Mem address */
	void *host_mbuf_addr; /**< (host) MBUF pool start address */
	void *mbuf_addr; /**< MBUF pool start address */
} __rte_cache_aligned;
/* RTE ethernet private data */
struct avp_adapter {
	struct avp_dev avp;
} __rte_cache_aligned;
/* 32-bit MMIO register write */
#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))

/* 32-bit MMIO register read */
#define AVP_READ32(_addr) rte_read32_relaxed((_addr))

/* Macro to cast the ethernet device private data to an AVP object */
#define AVP_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct avp_adapter *)adapter)->avp)
/*
 * Defines the structure of an AVP device queue for the purpose of handling the
 * receive and transmit burst callback functions
 */
struct avp_queue {
	struct rte_eth_dev_data *dev_data;
	/**< Backpointer to ethernet device data */
	struct avp_dev *avp; /**< Backpointer to AVP device */
	uint16_t queue_id;
	/**< Queue identifier used for indexing current queue */
	uint16_t queue_base;
	/**< Base queue identifier for queue servicing */
	uint16_t queue_limit;
	/**< Maximum queue identifier for queue servicing */
};
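
/*
 * Note: queue_base and queue_limit bound the range of host queues serviced
 * by a single guest queue when the negotiated queue counts differ; see the
 * negotiation notes in _avp_set_queue_counts() below.
 */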
/* send a request and wait for a response
 *
 * @warning must be called while holding the avp->lock spinlock.
 */
static int
avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
{
	unsigned int retry = AVP_MAX_REQUEST_RETRY;
	void *resp_addr = NULL;
	unsigned int count;

	PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);

	request->result = -ENOTSUP;

	/* Discard any stale responses before starting a new request */
	while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
		PMD_DRV_LOG(DEBUG, "Discarding stale response\n");

	rte_memcpy(avp->sync_addr, request, sizeof(*request));
	count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
	if (count < 1) {
		PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
			    request->req_id);
		return -EBUSY;
	}

	while (retry--) {
		/* wait for a response */
		usleep(AVP_REQUEST_DELAY_USECS);

		count = avp_fifo_count(avp->resp_q);
		if (count >= 1) {
			/* response received */
			break;
		}

		if ((count < 1) && (retry == 0)) {
			PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
				    request->req_id);
			return -ETIME;
		}
	}

	/* retrieve the response */
	count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
	if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
		PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
			    count, resp_addr, avp->host_sync_addr);
		return -ENODATA;
	}

	/* copy to user buffer */
	rte_memcpy(request, avp->sync_addr, sizeof(*request));

	PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
		    request->result, request->req_id);

	return 0;
}
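
/*
 * Pushes a device configuration to the host by issuing an
 * RTE_AVP_REQ_CFG_DEVICE control request; returns the host's result code
 * when the request exchange itself succeeds.
 */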
static int
avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
			struct rte_avp_device_config *config)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a configure request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_DEVICE;
	memcpy(&request.config, config, sizeof(request.config));

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}
/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
			  phys_addr_t host_phys_addr)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct rte_mem_resource *resource;
	struct rte_avp_memmap_info *info;
	struct rte_avp_memmap *map;
	off_t offset;
	void *addr;
	unsigned int i;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
	resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
	info = (struct rte_avp_memmap_info *)resource->addr;

	offset = 0;
	for (i = 0; i < info->nb_maps; i++) {
		/* search all segments looking for a matching address */
		map = &info->maps[i];

		if ((host_phys_addr >= map->phys_addr) &&
		    (host_phys_addr < (map->phys_addr + map->length))) {
			/* address is within this segment */
			offset += (host_phys_addr - map->phys_addr);
			addr = RTE_PTR_ADD(addr, offset);

			PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
				    host_phys_addr, addr);
			break;
		}
		offset += map->length;
	}

	return addr;
}
/* verify that the incoming device version is compatible with our version */
static int
avp_dev_version_check(uint32_t version)
{
	uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
	uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);

	if (device <= driver) {
		/* the host driver version is less than or equal to ours */
		return 0;
	}

	return 1;
}
/* verify that memory regions have expected version and validation markers */
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct rte_avp_memmap_info *memmap;
	struct rte_avp_device_info *info;
	struct rte_mem_resource *resource;
	unsigned int i;

	/* Dump resource info for debug */
	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
		resource = &pci_dev->mem_resource[i];
		if ((resource->phys_addr == 0) || (resource->len == 0))
			continue;

		PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
			    i, resource->phys_addr,
			    resource->len, resource->addr);

		switch (i) {
		case RTE_AVP_PCI_MEMMAP_BAR:
			memmap = (struct rte_avp_memmap_info *)resource->addr;
			if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
			    (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
				PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
					    memmap->magic, memmap->version);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_DEVICE_BAR:
			info = (struct rte_avp_device_info *)resource->addr;
			if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
			    avp_dev_version_check(info->version)) {
				PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
					    info->magic, info->version,
					    AVP_DPDK_DRIVER_VERSION);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MEMORY_BAR:
		case RTE_AVP_PCI_MMIO_BAR:
			if (resource->addr == NULL) {
				PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
					    i);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MSIX_BAR:
		default:
			/* no validation required */
			break;
		}
	}

	return 0;
}
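
/*
 * Negotiates the number of transmit and receive queues based on the
 * application's requested counts and the limits advertised by the host.
 */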
static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	void *addr;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/*
	 * the transmit direction is not negotiated beyond respecting the max
	 * number of queues because the host can handle arbitrary guest tx
	 * queues (host rx queues).
	 */
	avp->num_tx_queues = eth_dev->data->nb_tx_queues;

	/*
	 * the receive direction is more restrictive. The host requires a
	 * minimum number of guest rx queues (host tx queues) therefore
	 * negotiate a value that is at least as large as the host minimum
	 * requirement. If the host and guest values are not identical then a
	 * mapping will be established in the receive_queue_setup function.
	 */
	avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
				     eth_dev->data->nb_rx_queues);

	PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
		    avp->num_tx_queues, avp->num_rx_queues);
}
/*
 * create an AVP device using the supplied device info by first translating it
 * to guest address space(s).
 */
static int
avp_dev_create(struct rte_pci_device *pci_dev,
	       struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_mem_resource *resource;
	unsigned int i;

	resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
	if (resource->addr == NULL) {
		PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
			    RTE_AVP_PCI_DEVICE_BAR);
		return -EFAULT;
	}

	host_info = (struct rte_avp_device_info *)resource->addr;

	if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
	    avp_dev_version_check(host_info->version)) {
		PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
			    host_info->magic, host_info->version,
			    AVP_DPDK_DRIVER_VERSION);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
		    RTE_AVP_GET_RELEASE_VERSION(host_info->version),
		    RTE_AVP_GET_MAJOR_VERSION(host_info->version),
		    RTE_AVP_GET_MINOR_VERSION(host_info->version));

	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
		    host_info->min_tx_queues, host_info->max_tx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
		    host_info->min_rx_queues, host_info->max_rx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
		    host_info->features);
	if (avp->magic != AVP_ETHDEV_MAGIC) {
		/*
		 * First time initialization (i.e., not during a VM
		 * migration)
		 */
		memset(avp, 0, sizeof(*avp));
		avp->magic = AVP_ETHDEV_MAGIC;
		avp->dev_data = eth_dev->data;
		avp->port_id = eth_dev->data->port_id;
		avp->host_mbuf_size = host_info->mbuf_size;
		avp->host_features = host_info->features;
		memcpy(&avp->ethaddr.addr_bytes[0],
		       host_info->ethaddr, ETHER_ADDR_LEN);
		/* adjust max values to not exceed our max */
		avp->max_tx_queues =
			RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
		avp->max_rx_queues =
			RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
	} else {
		/* Re-attaching during migration */

		/* TODO... requires validation of host values */
		if ((host_info->features & avp->features) != avp->features) {
			PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
				    avp->features, host_info->features);
			/* this should not be possible; continue for now */
		}
	}

	/* the device id is allowed to change over migrations */
	avp->device_id = host_info->device_id;
	/* translate incoming host addresses to guest address space */
	PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
		    host_info->tx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
		    host_info->alloc_phys);
	for (i = 0; i < avp->max_tx_queues; i++) {
		avp->tx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->tx_phys + (i * host_info->tx_size));

		avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
			host_info->alloc_phys + (i * host_info->alloc_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
		    host_info->rx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
		    host_info->free_phys);
	for (i = 0; i < avp->max_rx_queues; i++) {
		avp->rx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->rx_phys + (i * host_info->rx_size));
		avp->free_q[i] = avp_dev_translate_address(eth_dev,
			host_info->free_phys + (i * host_info->free_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
		    host_info->req_phys);
	PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
		    host_info->resp_phys);
	PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
		    host_info->sync_phys);
	PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
		    host_info->mbuf_phys);
	avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
	avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
	avp->sync_addr =
		avp_dev_translate_address(eth_dev, host_info->sync_phys);
	avp->mbuf_addr =
		avp_dev_translate_address(eth_dev, host_info->mbuf_phys);
	/*
	 * store the host mbuf virtual address so that we can calculate
	 * relative offsets for each mbuf as they are processed
	 */
	avp->host_mbuf_addr = host_info->mbuf_va;
	avp->host_sync_addr = host_info->sync_va;

	/*
	 * store the maximum packet length that is supported by the host.
	 */
	avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
	PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
		    host_info->max_rx_pkt_len);

	return 0;
}
/*
 * This function is based on probe() function in avp_pci.c
 * It returns 0 on success.
 */
static int
eth_avp_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = AVP_DEV_TO_PCI(eth_dev);
	eth_dev->dev_ops = &avp_eth_dev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/*
		 * no setup required on secondary processes. All data is saved
		 * in dev_private by the primary process. All resources should
		 * be mapped to the same virtual address so all pointers should
		 * be valid.
		 */
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Check BAR resources */
	ret = avp_dev_check_regions(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
			    ret);
		return ret;
	}

	/* Handle each subtype */
	ret = avp_dev_create(pci_dev, eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
		return ret;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
			    ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Get a mac from device config */
	ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);

	return 0;
}
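
/*
 * Releases the resources allocated by eth_avp_dev_init() when the ethernet
 * device is torn down; only meaningful in the primary process.
 */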
static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (eth_dev->data == NULL)
		return 0;

	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	return 0;
}
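
/*
 * PCI driver instance; wires the generic ethdev PCI probe/remove helpers to
 * the AVP-specific init/uninit callbacks and per-port private data size.
 */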
static struct eth_driver rte_avp_pmd = {
	.pci_drv = {
		.id_table = pci_id_avp_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_avp_dev_init,
	.eth_dev_uninit = eth_avp_dev_uninit,
	.dev_private_size = sizeof(struct avp_adapter),
};
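
/*
 * Configures the device by negotiating queue counts with the host, applying
 * the requested VLAN offload settings, and pushing the resulting
 * configuration to the host via a control request.
 */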
static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_avp_device_config config;
	int mask;
	void *addr;
	int ret;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/* Setup required number of queues */
	_avp_set_queue_counts(eth_dev);

	mask = (ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);
	avp_vlan_offload_set(eth_dev, mask);

	/* update device config */
	memset(&config, 0, sizeof(config));
	config.device_id = host_info->device_id;
	config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
	config.driver_version = AVP_DPDK_DRIVER_VERSION;
	config.features = avp->features;
	config.num_tx_queues = avp->num_tx_queues;
	config.num_rx_queues = avp->num_rx_queues;

	ret = avp_dev_ctrl_set_config(eth_dev, &config);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
			    ret);
		return ret;
	}

	avp->flags |= AVP_F_CONFIGURED;

	return 0;
}
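
/*
 * Reports a fixed 10G full-duplex link whose status is derived from the
 * AVP_F_LINKUP device flag rather than queried from the host.
 */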
static int
avp_dev_link_update(struct rte_eth_dev *eth_dev,
		    __rte_unused int wait_to_complete)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	link->link_speed = ETH_SPEED_NUM_10G;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_status = !!(avp->flags & AVP_F_LINKUP);

	return -1;
}
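
/*
 * Fills in the device capabilities advertised to applications, including the
 * queue limits and VLAN offload capabilities negotiated with the host.
 */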
static void
avp_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *dev_info)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	dev_info->driver_name = "rte_avp_pmd";
	dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	dev_info->max_rx_queues = avp->max_rx_queues;
	dev_info->max_tx_queues = avp->max_tx_queues;
	dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	}
}
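
/*
 * Applies the VLAN offload configuration requested through the rxmode flags;
 * only VLAN stripping is supported, and only when the host advertises
 * RTE_AVP_FEATURE_VLAN_OFFLOAD.
 */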
static void
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
			if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
			else
				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
		} else {
			PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
		}
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
	}
}
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);