net/avp: add device start and stop operations
drivers/net/avp/avp_ethdev.c
/*
 *   BSD LICENSE
 *
 * Copyright (c) 2013-2017, Wind River Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1) Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2) Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3) Neither the name of Wind River Systems nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_byteorder.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_io.h>

#include "rte_avp_common.h"
#include "rte_avp_fifo.h"

#include "avp_logs.h"



static int avp_dev_configure(struct rte_eth_dev *dev);
static int avp_dev_start(struct rte_eth_dev *dev);
static void avp_dev_stop(struct rte_eth_dev *dev);
static void avp_dev_close(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev,
                               __rte_unused int wait_to_complete);
static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);

static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                  uint16_t rx_queue_id,
                                  uint16_t nb_rx_desc,
                                  unsigned int socket_id,
                                  const struct rte_eth_rxconf *rx_conf,
                                  struct rte_mempool *pool);

static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
                                  uint16_t tx_queue_id,
                                  uint16_t nb_tx_desc,
                                  unsigned int socket_id,
                                  const struct rte_eth_txconf *tx_conf);

static uint16_t avp_recv_scattered_pkts(void *rx_queue,
                                        struct rte_mbuf **rx_pkts,
                                        uint16_t nb_pkts);

static uint16_t avp_recv_pkts(void *rx_queue,
                              struct rte_mbuf **rx_pkts,
                              uint16_t nb_pkts);

static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
                                        struct rte_mbuf **tx_pkts,
                                        uint16_t nb_pkts);

static uint16_t avp_xmit_pkts(void *tx_queue,
                              struct rte_mbuf **tx_pkts,
                              uint16_t nb_pkts);

static void avp_dev_rx_queue_release(void *rxq);
static void avp_dev_tx_queue_release(void *txq);

static void avp_dev_stats_get(struct rte_eth_dev *dev,
                              struct rte_eth_stats *stats);
static void avp_dev_stats_reset(struct rte_eth_dev *dev);


#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)


#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN


/*
 * Defines the number of microseconds to wait before checking the response
 * queue for completion.
 */
#define AVP_REQUEST_DELAY_USECS (5000)

/*
 * Defines the number of times to check the response queue for completion
 * before declaring a timeout.
 */
#define AVP_MAX_REQUEST_RETRY (100)
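/*
 * Together these bound the worst-case wait for a host response:
 * AVP_MAX_REQUEST_RETRY * AVP_REQUEST_DELAY_USECS = 100 * 5000us, or
 * roughly 500ms of polling before a request is declared timed out.
 */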

/* Defines the current PCI driver version number */
#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_avp_map[] = {
        { .vendor_id = RTE_AVP_PCI_VENDOR_ID,
          .device_id = RTE_AVP_PCI_DEVICE_ID,
          .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
          .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
          .class_id = RTE_CLASS_ANY_ID,
        },

        { .vendor_id = 0, /* sentinel */
        },
};

/*
 * dev_ops for avp, bare necessities for basic operation
 */
static const struct eth_dev_ops avp_eth_dev_ops = {
        .dev_configure       = avp_dev_configure,
        .dev_start           = avp_dev_start,
        .dev_stop            = avp_dev_stop,
        .dev_close           = avp_dev_close,
        .dev_infos_get       = avp_dev_info_get,
        .vlan_offload_set    = avp_vlan_offload_set,
        .stats_get           = avp_dev_stats_get,
        .stats_reset         = avp_dev_stats_reset,
        .link_update         = avp_dev_link_update,
        .promiscuous_enable  = avp_dev_promiscuous_enable,
        .promiscuous_disable = avp_dev_promiscuous_disable,
        .rx_queue_setup      = avp_dev_rx_queue_setup,
        .rx_queue_release    = avp_dev_rx_queue_release,
        .tx_queue_setup      = avp_dev_tx_queue_setup,
        .tx_queue_release    = avp_dev_tx_queue_release,
};

/**@{ AVP device flags */
#define AVP_F_PROMISC (1 << 1)
#define AVP_F_CONFIGURED (1 << 2)
#define AVP_F_LINKUP (1 << 3)
/**@} */

/* Ethernet device validation marker */
#define AVP_ETHDEV_MAGIC 0x92972862

/*
 * Defines the AVP device attributes which are attached to an RTE ethernet
 * device
 */
struct avp_dev {
        uint32_t magic; /**< Memory validation marker */
        uint64_t device_id; /**< Unique system identifier */
        struct ether_addr ethaddr; /**< Host specified MAC address */
        struct rte_eth_dev_data *dev_data;
        /**< Back pointer to ethernet device data */
        volatile uint32_t flags; /**< Device operational flags */
        uint8_t port_id; /**< Ethernet port identifier */
        struct rte_mempool *pool; /**< pkt mbuf mempool */
        unsigned int guest_mbuf_size; /**< local pool mbuf size */
        unsigned int host_mbuf_size; /**< host mbuf size */
        unsigned int max_rx_pkt_len; /**< maximum receive unit */
        uint32_t host_features; /**< Supported feature bitmap */
        uint32_t features; /**< Enabled feature bitmap */
        unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
        unsigned int max_tx_queues; /**< Maximum number of transmit queues */
        unsigned int num_rx_queues; /**< Negotiated number of receive queues */
        unsigned int max_rx_queues; /**< Maximum number of receive queues */

        struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
        struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
        struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
        /**< Allocated mbufs queue */
        struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
        /**< To be freed mbufs queue */

        /* For request & response */
        struct rte_avp_fifo *req_q; /**< Request queue */
        struct rte_avp_fifo *resp_q; /**< Response queue */
        void *host_sync_addr; /**< (host) Req/Resp Mem address */
        void *sync_addr; /**< Req/Resp Mem address */
        void *host_mbuf_addr; /**< (host) MBUF pool start address */
        void *mbuf_addr; /**< MBUF pool start address */
} __rte_cache_aligned;

/* RTE ethernet private data */
struct avp_adapter {
        struct avp_dev avp;
} __rte_cache_aligned;


/* 32-bit MMIO register write */
#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))

/* 32-bit MMIO register read */
#define AVP_READ32(_addr) rte_read32_relaxed((_addr))

/* Macro to cast the ethernet device private data to an AVP object */
#define AVP_DEV_PRIVATE_TO_HW(adapter) \
        (&((struct avp_adapter *)adapter)->avp)

/*
 * Defines the structure of an AVP device queue for the purpose of handling the
 * receive and transmit burst callback functions
 */
struct avp_queue {
        struct rte_eth_dev_data *dev_data;
        /**< Backpointer to ethernet device data */
        struct avp_dev *avp; /**< Backpointer to AVP device */
        uint16_t queue_id;
        /**< Queue identifier used for indexing current queue */
        uint16_t queue_base;
        /**< Base queue identifier for queue servicing */
        uint16_t queue_limit;
        /**< Maximum queue identifier for queue servicing */

        uint64_t packets;
        uint64_t bytes;
        uint64_t errors;
};

/* send a request and wait for a response
 *
 * @warning must be called while holding the avp->lock spinlock.
 */
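/*
 * The exchange is a simple mailbox: the request is copied into the shared
 * sync area, the host-relative pointer to that area is pushed onto the
 * request FIFO, and the response FIFO is then polled until the host echoes
 * the same pointer back, at which point the result is copied out again.
 */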
static int
avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
{
        unsigned int retry = AVP_MAX_REQUEST_RETRY;
        void *resp_addr = NULL;
        unsigned int count;
        int ret;

        PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);

        request->result = -ENOTSUP;

        /* Discard any stale responses before starting a new request */
        while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
                PMD_DRV_LOG(DEBUG, "Discarding stale response\n");

        rte_memcpy(avp->sync_addr, request, sizeof(*request));
        count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
        if (count < 1) {
                PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
                            request->req_id);
                ret = -EBUSY;
                goto done;
        }

        while (retry--) {
                /* wait for a response */
                usleep(AVP_REQUEST_DELAY_USECS);

                count = avp_fifo_count(avp->resp_q);
                if (count >= 1) {
                        /* response received */
                        break;
                }

                if ((count < 1) && (retry == 0)) {
                        PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
                                    request->req_id);
                        ret = -ETIME;
                        goto done;
                }
        }

        /* retrieve the response */
        count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
        if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
                PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
                            count, resp_addr, avp->host_sync_addr);
                ret = -ENODATA;
                goto done;
        }

        /* copy to user buffer */
        rte_memcpy(request, avp->sync_addr, sizeof(*request));
        ret = 0;

        PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
                    request->result, request->req_id);

done:
        return ret;
}

static int
avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_avp_request request;
        int ret;

        /* setup a link state change request */
        memset(&request, 0, sizeof(request));
        request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
        request.if_up = state;

        ret = avp_dev_process_request(avp, &request);

        return ret == 0 ? request.result : ret;
}

static int
avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
                        struct rte_avp_device_config *config)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_avp_request request;
        int ret;

        /* setup a configure request */
        memset(&request, 0, sizeof(request));
        request.req_id = RTE_AVP_REQ_CFG_DEVICE;
        memcpy(&request.config, config, sizeof(request.config));

        ret = avp_dev_process_request(avp, &request);

        return ret == 0 ? request.result : ret;
}

static int
avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_avp_request request;
        int ret;

        /* setup a shutdown request */
        memset(&request, 0, sizeof(request));
        request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;

        ret = avp_dev_process_request(avp, &request);

        return ret == 0 ? request.result : ret;
}

/* translate from host mbuf virtual address to guest virtual address */
static inline void *
avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
{
        return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
                                       (uintptr_t)avp->host_mbuf_addr),
                           (uintptr_t)avp->mbuf_addr);
}
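/*
 * Illustration of the translation above with made-up addresses: if the
 * host mapped its mbuf pool at 0x80001000 and the guest sees the same pool
 * at 0x40001000, a host pointer of 0x80001230 translates to 0x40001230,
 * i.e. the same offset into the shared pool.
 */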

/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
                          phys_addr_t host_phys_addr)
{
        struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
        struct rte_mem_resource *resource;
        struct rte_avp_memmap_info *info;
        struct rte_avp_memmap *map;
        off_t offset;
        void *addr;
        unsigned int i;

        addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
        resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
        info = (struct rte_avp_memmap_info *)resource->addr;

        offset = 0;
        for (i = 0; i < info->nb_maps; i++) {
                /* search all segments looking for a matching address */
                map = &info->maps[i];

                if ((host_phys_addr >= map->phys_addr) &&
                        (host_phys_addr < (map->phys_addr + map->length))) {
                        /* address is within this segment */
                        offset += (host_phys_addr - map->phys_addr);
                        addr = RTE_PTR_ADD(addr, offset);

                        PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
                                    host_phys_addr, addr);

                        return addr;
                }
                offset += map->length;
        }

        return NULL;
}
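/*
 * Note on the arithmetic above: the memmap BAR is assumed to describe the
 * host segments in the order they are packed into the guest memory BAR,
 * so the running 'offset' is the BAR-relative start of the matching
 * segment plus the offset of the address within it.
 */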

/* verify that the incoming device version is compatible with our version */
static int
avp_dev_version_check(uint32_t version)
{
        uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
        uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);

        if (device <= driver) {
                /* the host driver version is less than or equal to ours */
                return 0;
        }

        return 1;
}
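/*
 * Since the minor version is stripped before comparing, hosts that differ
 * from this driver only at the minor (bug-fix) level are treated as
 * compatible; only a host with a newer release/major version is rejected.
 */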

/* verify that memory regions have expected version and validation markers */
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
        struct rte_avp_memmap_info *memmap;
        struct rte_avp_device_info *info;
        struct rte_mem_resource *resource;
        unsigned int i;

        /* Dump resource info for debug */
        for (i = 0; i < PCI_MAX_RESOURCE; i++) {
                resource = &pci_dev->mem_resource[i];
                if ((resource->phys_addr == 0) || (resource->len == 0))
                        continue;

                PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
                            i, resource->phys_addr,
                            resource->len, resource->addr);

                switch (i) {
                case RTE_AVP_PCI_MEMMAP_BAR:
                        memmap = (struct rte_avp_memmap_info *)resource->addr;
                        if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
                            (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
                                PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
                                            memmap->magic, memmap->version);
                                return -EINVAL;
                        }
                        break;

                case RTE_AVP_PCI_DEVICE_BAR:
                        info = (struct rte_avp_device_info *)resource->addr;
                        if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
                            avp_dev_version_check(info->version)) {
                                PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
                                            info->magic, info->version,
                                            AVP_DPDK_DRIVER_VERSION);
                                return -EINVAL;
                        }
                        break;

                case RTE_AVP_PCI_MEMORY_BAR:
                case RTE_AVP_PCI_MMIO_BAR:
                        if (resource->addr == NULL) {
                                PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
                                            i);
                                return -EINVAL;
                        }
                        break;

                case RTE_AVP_PCI_MSIX_BAR:
                default:
                        /* no validation required */
                        break;
                }
        }

        return 0;
}

static void
_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct avp_dev *avp =
                AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct avp_queue *rxq;
        uint16_t queue_count;
        uint16_t remainder;

        rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];

        /*
         * Must map all AVP fifos as evenly as possible between the configured
         * device queues.  Each device queue will service a subset of the AVP
         * fifos. If there is an odd number of device queues the first set of
         * device queues will get the extra AVP fifos.
         */
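        /*
         * Worked example (hypothetical values): with 5 AVP fifos and 2
         * device queues, queue_count = 2 and remainder = 1, so device
         * queue 0 services fifos 0-2 and device queue 1 services fifos
         * 3-4.
         */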
        queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
        remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
        if (rx_queue_id < remainder) {
                /* these queues must service one extra FIFO */
                rxq->queue_base = rx_queue_id * (queue_count + 1);
                rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
        } else {
                /* these queues service the regular number of FIFOs */
                rxq->queue_base = ((remainder * (queue_count + 1)) +
                                   ((rx_queue_id - remainder) * queue_count));
                rxq->queue_limit = rxq->queue_base + queue_count - 1;
        }

        PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
                    rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);

        rxq->queue_id = rxq->queue_base;
}

static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_avp_device_info *host_info;
        void *addr;

        addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
        host_info = (struct rte_avp_device_info *)addr;

        /*
         * the transmit direction is not negotiated beyond respecting the max
         * number of queues because the host can handle arbitrary guest tx
         * queues (host rx queues).
         */
        avp->num_tx_queues = eth_dev->data->nb_tx_queues;

        /*
         * the receive direction is more restrictive.  The host requires a
         * minimum number of guest rx queues (host tx queues) therefore
         * negotiate a value that is at least as large as the host minimum
         * requirement.  If the host and guest values are not identical then a
         * mapping will be established in the receive_queue_setup function.
         */
        avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
                                     eth_dev->data->nb_rx_queues);

        PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
                    avp->num_tx_queues, avp->num_rx_queues);
}

/*
 * create an AVP device using the supplied device info by first translating it
 * to guest address space(s).
 */
static int
avp_dev_create(struct rte_pci_device *pci_dev,
               struct rte_eth_dev *eth_dev)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_avp_device_info *host_info;
        struct rte_mem_resource *resource;
        unsigned int i;

        resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
        if (resource->addr == NULL) {
                PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
                            RTE_AVP_PCI_DEVICE_BAR);
                return -EFAULT;
        }
        host_info = (struct rte_avp_device_info *)resource->addr;

        if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
                avp_dev_version_check(host_info->version)) {
                PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
                            host_info->magic, host_info->version,
                            AVP_DPDK_DRIVER_VERSION);
                return -EINVAL;
        }

        PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
                    RTE_AVP_GET_RELEASE_VERSION(host_info->version),
                    RTE_AVP_GET_MAJOR_VERSION(host_info->version),
                    RTE_AVP_GET_MINOR_VERSION(host_info->version));

        PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
                    host_info->min_tx_queues, host_info->max_tx_queues);
        PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
                    host_info->min_rx_queues, host_info->max_rx_queues);
        PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
                    host_info->features);

        if (avp->magic != AVP_ETHDEV_MAGIC) {
                /*
                 * First time initialization (i.e., not during a VM
                 * migration)
                 */
                memset(avp, 0, sizeof(*avp));
                avp->magic = AVP_ETHDEV_MAGIC;
                avp->dev_data = eth_dev->data;
                avp->port_id = eth_dev->data->port_id;
                avp->host_mbuf_size = host_info->mbuf_size;
                avp->host_features = host_info->features;
                memcpy(&avp->ethaddr.addr_bytes[0],
                       host_info->ethaddr, ETHER_ADDR_LEN);
                /* adjust max values to not exceed our max */
                avp->max_tx_queues =
                        RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
                avp->max_rx_queues =
                        RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
        } else {
                /* Re-attaching during migration */

                /* TODO... requires validation of host values */
                if ((host_info->features & avp->features) != avp->features) {
                        PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
                                    avp->features, host_info->features);
                        /* this should not be possible; continue for now */
                }
        }

        /* the device id is allowed to change over migrations */
        avp->device_id = host_info->device_id;

        /* translate incoming host addresses to guest address space */
        PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
                    host_info->tx_phys);
        PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
                    host_info->alloc_phys);
        for (i = 0; i < avp->max_tx_queues; i++) {
                avp->tx_q[i] = avp_dev_translate_address(eth_dev,
                        host_info->tx_phys + (i * host_info->tx_size));

                avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
                        host_info->alloc_phys + (i * host_info->alloc_size));
        }

        PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
                    host_info->rx_phys);
        PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
                    host_info->free_phys);
        for (i = 0; i < avp->max_rx_queues; i++) {
                avp->rx_q[i] = avp_dev_translate_address(eth_dev,
                        host_info->rx_phys + (i * host_info->rx_size));
                avp->free_q[i] = avp_dev_translate_address(eth_dev,
                        host_info->free_phys + (i * host_info->free_size));
        }

        PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
                    host_info->req_phys);
        PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
                    host_info->resp_phys);
        PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
                    host_info->sync_phys);
        PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
                    host_info->mbuf_phys);
        avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
        avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
        avp->sync_addr =
                avp_dev_translate_address(eth_dev, host_info->sync_phys);
        avp->mbuf_addr =
                avp_dev_translate_address(eth_dev, host_info->mbuf_phys);

        /*
         * store the host mbuf virtual address so that we can calculate
         * relative offsets for each mbuf as they are processed
         */
        avp->host_mbuf_addr = host_info->mbuf_va;
        avp->host_sync_addr = host_info->sync_va;

        /*
         * store the maximum packet length that is supported by the host.
         */
        avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
        PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
                                host_info->max_rx_pkt_len);

        return 0;
}

/*
 * This function is based on the probe() function in avp_pci.c
 * It returns 0 on success.
 */
static int
eth_avp_dev_init(struct rte_eth_dev *eth_dev)
{
        struct avp_dev *avp =
                AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_pci_device *pci_dev;
        int ret;

        pci_dev = AVP_DEV_TO_PCI(eth_dev);
        eth_dev->dev_ops = &avp_eth_dev_ops;
        eth_dev->rx_pkt_burst = &avp_recv_pkts;
        eth_dev->tx_pkt_burst = &avp_xmit_pkts;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                /*
                 * no setup required on secondary processes.  All data is saved
                 * in dev_private by the primary process. All resources should
                 * be mapped to the same virtual address so all pointers should
                 * be valid.
                 */
                if (eth_dev->data->scattered_rx) {
                        PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
                        eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
                        eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
                }
                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

        /* Check BAR resources */
        ret = avp_dev_check_regions(eth_dev);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
                            ret);
                return ret;
        }

        /* Handle each subtype */
        ret = avp_dev_create(pci_dev, eth_dev);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
                return ret;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
                            ETHER_ADDR_LEN);
                return -ENOMEM;
        }

        /* Get a mac from device config */
        ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);

        return 0;
}

static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        if (eth_dev->data == NULL)
                return 0;

        if (eth_dev->data->mac_addrs != NULL) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
        }

        return 0;
}


static struct eth_driver rte_avp_pmd = {
        {
                .id_table = pci_id_avp_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
                .probe = rte_eth_dev_pci_probe,
                .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_avp_dev_init,
        .eth_dev_uninit = eth_avp_dev_uninit,
        .dev_private_size = sizeof(struct avp_adapter),
};

static int
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
                         struct avp_dev *avp)
{
        unsigned int max_rx_pkt_len;

        max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

        if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
            (max_rx_pkt_len > avp->host_mbuf_size)) {
                /*
                 * If the guest MTU is greater than either the host or guest
                 * mbuf size then chained mbufs have to be enabled in the TX
                 * direction.  It is assumed that the application will not need
                 * to send packets larger than their max_rx_pkt_len (MRU).
                 */
                return 1;
        }

        if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
            (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
                /*
                 * If the host MRU is greater than its own mbuf size or the
                 * guest mbuf size then chained mbufs have to be enabled in the
                 * RX direction.
                 */
                return 1;
        }

        return 0;
}
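/*
 * For illustration (hypothetical values): with 2048-byte host and guest
 * mbufs, a 9000-byte max_rx_pkt_len forces chained mbufs and the scattered
 * receive/transmit handlers are installed; with a 1500-byte limit every
 * packet fits in a single buffer and the simple handlers remain in place.
 */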

static int
avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                       uint16_t rx_queue_id,
                       uint16_t nb_rx_desc,
                       unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *pool)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_pktmbuf_pool_private *mbp_priv;
        struct avp_queue *rxq;

        if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
                PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
                            rx_queue_id, eth_dev->data->nb_rx_queues);
                return -EINVAL;
        }

        /* Save mbuf pool pointer */
        avp->pool = pool;

        /* Save the local mbuf size */
        mbp_priv = rte_mempool_get_priv(pool);
        avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
        avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;

        if (avp_dev_enable_scattered(eth_dev, avp)) {
                if (!eth_dev->data->scattered_rx) {
                        PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
                        eth_dev->data->scattered_rx = 1;
                        eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
                        eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
                }
        }

        PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
                    avp->max_rx_pkt_len,
                    eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
                    avp->host_mbuf_size,
                    avp->guest_mbuf_size);

        /* allocate a queue object */
        rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
                return -ENOMEM;
        }

        /* save back pointers to AVP and Ethernet devices */
        rxq->avp = avp;
        rxq->dev_data = eth_dev->data;
        eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;

        /* setup the queue receive mapping for the current queue. */
        _avp_set_rx_queue_mappings(eth_dev, rx_queue_id);

        PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);

        (void)nb_rx_desc;
        (void)rx_conf;
        return 0;
}

static int
avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
                       uint16_t tx_queue_id,
                       uint16_t nb_tx_desc,
                       unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct avp_queue *txq;

        if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
                PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
                            tx_queue_id, eth_dev->data->nb_tx_queues);
                return -EINVAL;
        }

        /* allocate a queue object */
        txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
                return -ENOMEM;
        }

        /* only the configured set of transmit queues are used */
        txq->queue_id = tx_queue_id;
        txq->queue_base = tx_queue_id;
        txq->queue_limit = tx_queue_id;

        /* save back pointers to AVP and Ethernet devices */
        txq->avp = avp;
        txq->dev_data = eth_dev->data;
        eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;

        PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);

        (void)nb_tx_desc;
        (void)tx_conf;
        return 0;
}

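/*
 * Compare two Ethernet addresses as three 16-bit words; returns zero when
 * the addresses are equal (memcmp-style) and non-zero otherwise.
 */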
static inline int
_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
{
        uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
        uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
        return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
}

static inline int
_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
{
        struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

        if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
                /* allow all packets destined to our address */
                return 0;
        }

        if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
                /* allow all broadcast packets */
                return 0;
        }

        if (likely(is_multicast_ether_addr(&eth->d_addr))) {
                /* allow all multicast packets */
                return 0;
        }

        if (avp->flags & AVP_F_PROMISC) {
                /* allow all packets when in promiscuous mode */
                return 0;
        }

        return -1;
}

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
static inline void
__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
{
        struct rte_avp_desc *first_buf;
        struct rte_avp_desc *pkt_buf;
        unsigned int pkt_len;
        unsigned int nb_segs;
        void *pkt_data;
        unsigned int i;

        first_buf = avp_dev_translate_buffer(avp, buf);

        i = 0;
        pkt_len = 0;
        nb_segs = first_buf->nb_segs;
        do {
                /* Adjust pointers for guest addressing */
                pkt_buf = avp_dev_translate_buffer(avp, buf);
                if (pkt_buf == NULL)
                        rte_panic("bad buffer: segment %u has an invalid address %p\n",
                                  i, buf);
                pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
                if (pkt_data == NULL)
                        rte_panic("bad buffer: segment %u has a NULL data pointer\n",
                                  i);
                if (pkt_buf->data_len == 0)
                        rte_panic("bad buffer: segment %u has 0 data length\n",
                                  i);
                pkt_len += pkt_buf->data_len;
                nb_segs--;
                i++;

        } while (nb_segs && (buf = pkt_buf->next) != NULL);

        if (nb_segs != 0)
                rte_panic("bad buffer: expected %u segments found %u\n",
                          first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
        if (pkt_len != first_buf->pkt_len)
                rte_panic("bad buffer: expected length %u found %u\n",
                          first_buf->pkt_len, pkt_len);
}

#define avp_dev_buffer_sanity_check(a, b) \
        __avp_dev_buffer_sanity_check((a), (b))

#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */

#define avp_dev_buffer_sanity_check(a, b) do {} while (0)

#endif

/*
 * Copy a host buffer chain to a set of mbufs.  This function assumes that
 * there are exactly the required number of mbufs to copy all source bytes.
 */
static inline struct rte_mbuf *
avp_dev_copy_from_buffers(struct avp_dev *avp,
                          struct rte_avp_desc *buf,
                          struct rte_mbuf **mbufs,
                          unsigned int count)
{
        struct rte_mbuf *m_previous = NULL;
        struct rte_avp_desc *pkt_buf;
        unsigned int total_length = 0;
        unsigned int copy_length;
        unsigned int src_offset;
        struct rte_mbuf *m;
        uint16_t ol_flags;
        uint16_t vlan_tci;
        void *pkt_data;
        unsigned int i;

        avp_dev_buffer_sanity_check(avp, buf);

        /* setup the first source buffer */
        pkt_buf = avp_dev_translate_buffer(avp, buf);
        pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
        total_length = pkt_buf->pkt_len;
        src_offset = 0;

        if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
                ol_flags = PKT_RX_VLAN_PKT;
                vlan_tci = pkt_buf->vlan_tci;
        } else {
                ol_flags = 0;
                vlan_tci = 0;
        }

        for (i = 0; (i < count) && (buf != NULL); i++) {
                /* fill each destination buffer */
                m = mbufs[i];

                if (m_previous != NULL)
                        m_previous->next = m;

                m_previous = m;

                do {
                        /*
                         * Copy as many source buffers as will fit in the
                         * destination buffer.
                         */
                        copy_length = RTE_MIN((avp->guest_mbuf_size -
                                               rte_pktmbuf_data_len(m)),
                                              (pkt_buf->data_len -
                                               src_offset));
                        rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
                                               rte_pktmbuf_data_len(m)),
                                   RTE_PTR_ADD(pkt_data, src_offset),
                                   copy_length);
                        rte_pktmbuf_data_len(m) += copy_length;
                        src_offset += copy_length;

                        if (likely(src_offset == pkt_buf->data_len)) {
                                /* need a new source buffer */
                                buf = pkt_buf->next;
                                if (buf != NULL) {
                                        pkt_buf = avp_dev_translate_buffer(
                                                avp, buf);
                                        pkt_data = avp_dev_translate_buffer(
                                                avp, pkt_buf->data);
                                        src_offset = 0;
                                }
                        }

                        if (unlikely(rte_pktmbuf_data_len(m) ==
                                     avp->guest_mbuf_size)) {
                                /* need a new destination mbuf */
                                break;
                        }

                } while (buf != NULL);
        }

        m = mbufs[0];
        m->ol_flags = ol_flags;
        m->nb_segs = count;
        rte_pktmbuf_pkt_len(m) = total_length;
        m->vlan_tci = vlan_tci;

        __rte_mbuf_sanity_check(m, 1);

        return m;
}

static uint16_t
avp_recv_scattered_pkts(void *rx_queue,
                        struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
{
        struct avp_queue *rxq = (struct avp_queue *)rx_queue;
        struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
        struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
        struct avp_dev *avp = rxq->avp;
        struct rte_avp_desc *pkt_buf;
        struct rte_avp_fifo *free_q;
        struct rte_avp_fifo *rx_q;
        struct rte_avp_desc *buf;
        unsigned int count, avail, n;
        unsigned int guest_mbuf_size;
        struct rte_mbuf *m;
        unsigned int required;
        unsigned int buf_len;
        unsigned int port_id;
        unsigned int i;

        guest_mbuf_size = avp->guest_mbuf_size;
        port_id = avp->port_id;
        rx_q = avp->rx_q[rxq->queue_id];
        free_q = avp->free_q[rxq->queue_id];

        /* setup next queue to service */
        rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
                (rxq->queue_id + 1) : rxq->queue_base;

        /* determine how many slots are available in the free queue */
        count = avp_fifo_free_count(free_q);

        /* determine how many packets are available in the rx queue */
        avail = avp_fifo_count(rx_q);

        /* determine how many packets can be received */
        count = RTE_MIN(count, avail);
        count = RTE_MIN(count, nb_pkts);
        count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

        if (unlikely(count == 0)) {
                /* no free buffers, or no buffers on the rx queue */
                return 0;
        }

        /* retrieve pending packets */
        n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
        PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
                   count, rx_q);

        count = 0;
        for (i = 0; i < n; i++) {
                /* prefetch next entry while processing current one */
                if (i + 1 < n) {
                        pkt_buf = avp_dev_translate_buffer(avp,
                                                           avp_bufs[i + 1]);
                        rte_prefetch0(pkt_buf);
                }
                buf = avp_bufs[i];

                /* Peek into the first buffer to determine the total length */
                pkt_buf = avp_dev_translate_buffer(avp, buf);
                buf_len = pkt_buf->pkt_len;

                /* Allocate enough mbufs to receive the entire packet */
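                /* ceiling division: e.g. (sizes hypothetical) a 3000-byte
                 * packet with 2048-byte guest mbufs requires two mbufs
                 */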
                required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
                if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
                        rxq->dev_data->rx_mbuf_alloc_failed++;
                        continue;
                }

                /* Copy the data from the buffers to our mbufs */
                m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);

                /* finalize mbuf */
                m->port = port_id;

                if (_avp_mac_filter(avp, m) != 0) {
                        /* silently discard packets not destined to our MAC */
                        rte_pktmbuf_free(m);
                        continue;
                }

                /* return new mbuf to caller */
                rx_pkts[count++] = m;
                rxq->bytes += buf_len;
        }

        rxq->packets += count;

        /* return the buffers to the free queue */
        avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

        return count;
}


static uint16_t
avp_recv_pkts(void *rx_queue,
              struct rte_mbuf **rx_pkts,
              uint16_t nb_pkts)
{
        struct avp_queue *rxq = (struct avp_queue *)rx_queue;
        struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
        struct avp_dev *avp = rxq->avp;
        struct rte_avp_desc *pkt_buf;
        struct rte_avp_fifo *free_q;
        struct rte_avp_fifo *rx_q;
        unsigned int count, avail, n;
        unsigned int pkt_len;
        struct rte_mbuf *m;
        char *pkt_data;
        unsigned int i;

        rx_q = avp->rx_q[rxq->queue_id];
        free_q = avp->free_q[rxq->queue_id];

        /* setup next queue to service */
        rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
                (rxq->queue_id + 1) : rxq->queue_base;

        /* determine how many slots are available in the free queue */
        count = avp_fifo_free_count(free_q);

        /* determine how many packets are available in the rx queue */
        avail = avp_fifo_count(rx_q);

        /* determine how many packets can be received */
        count = RTE_MIN(count, avail);
        count = RTE_MIN(count, nb_pkts);
        count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

        if (unlikely(count == 0)) {
                /* no free buffers, or no buffers on the rx queue */
                return 0;
        }

        /* retrieve pending packets */
        n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
        PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
                   count, rx_q);

        count = 0;
        for (i = 0; i < n; i++) {
                /* prefetch next entry while processing current one */
                if (i < n - 1) {
                        pkt_buf = avp_dev_translate_buffer(avp,
                                                           avp_bufs[i + 1]);
                        rte_prefetch0(pkt_buf);
                }

                /* Adjust host pointers for guest addressing */
                pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
                pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
                pkt_len = pkt_buf->pkt_len;

                if (unlikely((pkt_len > avp->guest_mbuf_size) ||
                             (pkt_buf->nb_segs > 1))) {
                        /*
                         * application should be using the scattered receive
                         * function
                         */
                        rxq->errors++;
                        continue;
                }

                /* allocate a new mbuf for the received packet */
                m = rte_pktmbuf_alloc(avp->pool);
                if (unlikely(m == NULL)) {
                        rxq->dev_data->rx_mbuf_alloc_failed++;
                        continue;
                }

                /* copy data out of the host buffer to our buffer */
                m->data_off = RTE_PKTMBUF_HEADROOM;
                rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);

                /* initialize the local mbuf */
                rte_pktmbuf_data_len(m) = pkt_len;
                rte_pktmbuf_pkt_len(m) = pkt_len;
                m->port = avp->port_id;

                if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
                        m->ol_flags = PKT_RX_VLAN_PKT;
                        m->vlan_tci = pkt_buf->vlan_tci;
                }

                if (_avp_mac_filter(avp, m) != 0) {
                        /* silently discard packets not destined to our MAC */
                        rte_pktmbuf_free(m);
                        continue;
                }

                /* return new mbuf to caller */
                rx_pkts[count++] = m;
                rxq->bytes += pkt_len;
        }

        rxq->packets += count;

        /* return the buffers to the free queue */
        avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

        return count;
}

/*
 * Copy a chained mbuf to a set of host buffers.  This function assumes that
 * there are sufficient destination buffers to contain the entire source
 * packet.
 */
static inline uint16_t
avp_dev_copy_to_buffers(struct avp_dev *avp,
                        struct rte_mbuf *mbuf,
                        struct rte_avp_desc **buffers,
                        unsigned int count)
{
        struct rte_avp_desc *previous_buf = NULL;
        struct rte_avp_desc *first_buf = NULL;
        struct rte_avp_desc *pkt_buf;
        struct rte_avp_desc *buf;
        size_t total_length;
        struct rte_mbuf *m;
        size_t copy_length;
        size_t src_offset;
        char *pkt_data;
        unsigned int i;

        __rte_mbuf_sanity_check(mbuf, 1);

        m = mbuf;
        src_offset = 0;
        total_length = rte_pktmbuf_pkt_len(m);
        for (i = 0; (i < count) && (m != NULL); i++) {
                /* fill each destination buffer */
                buf = buffers[i];

                if (i < count - 1) {
                        /* prefetch next entry while processing this one */
                        pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
                        rte_prefetch0(pkt_buf);
                }

                /* Adjust pointers for guest addressing */
                pkt_buf = avp_dev_translate_buffer(avp, buf);
                pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);

                /* setup the buffer chain */
                if (previous_buf != NULL)
                        previous_buf->next = buf;
                else
                        first_buf = pkt_buf;

                previous_buf = pkt_buf;

                do {
                        /*
                         * copy as many source mbuf segments as will fit in the
                         * destination buffer.
                         */
                        copy_length = RTE_MIN((avp->host_mbuf_size -
                                               pkt_buf->data_len),
                                              (rte_pktmbuf_data_len(m) -
                                               src_offset));
                        rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
                                   RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
                                               src_offset),
                                   copy_length);
                        pkt_buf->data_len += copy_length;
                        src_offset += copy_length;

                        if (likely(src_offset == rte_pktmbuf_data_len(m))) {
                                /* need a new source buffer */
                                m = m->next;
                                src_offset = 0;
                        }

                        if (unlikely(pkt_buf->data_len ==
                                     avp->host_mbuf_size)) {
                                /* need a new destination buffer */
                                break;
                        }

                } while (m != NULL);
        }

        first_buf->nb_segs = count;
        first_buf->pkt_len = total_length;

        if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
                first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
                first_buf->vlan_tci = mbuf->vlan_tci;
        }

        avp_dev_buffer_sanity_check(avp, buffers[0]);

        return total_length;
}
1408
1409
1410 static uint16_t
1411 avp_xmit_scattered_pkts(void *tx_queue,
1412                         struct rte_mbuf **tx_pkts,
1413                         uint16_t nb_pkts)
1414 {
1415         struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
1416                                        RTE_AVP_MAX_MBUF_SEGMENTS)];
1417         struct avp_queue *txq = (struct avp_queue *)tx_queue;
1418         struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
1419         struct avp_dev *avp = txq->avp;
1420         struct rte_avp_fifo *alloc_q;
1421         struct rte_avp_fifo *tx_q;
1422         unsigned int count, avail, n;
1423         unsigned int orig_nb_pkts;
1424         struct rte_mbuf *m;
1425         unsigned int required;
1426         unsigned int segments;
1427         unsigned int tx_bytes;
1428         unsigned int i;
1429
1430         orig_nb_pkts = nb_pkts;
1431         tx_q = avp->tx_q[txq->queue_id];
1432         alloc_q = avp->alloc_q[txq->queue_id];
1433
1434         /* limit the number of transmitted packets to the max burst size */
1435         if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
1436                 nb_pkts = AVP_MAX_TX_BURST;
1437
1438         /* determine how many buffers are available to copy into */
1439         avail = avp_fifo_count(alloc_q);
1440         if (unlikely(avail > (AVP_MAX_TX_BURST *
1441                               RTE_AVP_MAX_MBUF_SEGMENTS)))
1442                 avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;
1443
1444         /* determine how many slots are available in the transmit queue */
1445         count = avp_fifo_free_count(tx_q);
1446
1447         /* determine how many packets can be sent */
1448         nb_pkts = RTE_MIN(count, nb_pkts);
1449
1450         /* determine how many packets will fit in the available buffers */
1451         count = 0;
1452         segments = 0;
1453         for (i = 0; i < nb_pkts; i++) {
1454                 m = tx_pkts[i];
1455                 if (likely(i < (unsigned int)nb_pkts - 1)) {
1456                         /* prefetch next entry while processing this one */
1457                         rte_prefetch0(tx_pkts[i + 1]);
1458                 }
1459                 required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
1460                         avp->host_mbuf_size;
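                /*
                 * This is a ceiling division: e.g. with host_mbuf_size ==
                 * 2048, a 3000 byte packet needs 2 buffers and a 2048
                 * byte packet needs exactly 1.
                 */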
1461
1462                 if (unlikely((required == 0) ||
1463                              (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
1464                         break;
1465                 else if (unlikely(required + segments > avail))
1466                         break;
1467                 segments += required;
1468                 count++;
1469         }
1470         nb_pkts = count;
1471
1472         if (unlikely(nb_pkts == 0)) {
1473                 /* no available buffers, or no space on the tx queue */
1474                 txq->errors += orig_nb_pkts;
1475                 return 0;
1476         }
1477
1478         PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
1479                    nb_pkts, tx_q);
1480
1481         /* retrieve sufficient send buffers */
1482         n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
1483         if (unlikely(n != segments)) {
1484                 PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
1485                            "n=%u, segments=%u, orig=%u\n",
1486                            n, segments, orig_nb_pkts);
1487                 txq->errors += orig_nb_pkts;
1488                 return 0;
1489         }
1490
1491         tx_bytes = 0;
1492         count = 0;
1493         for (i = 0; i < nb_pkts; i++) {
1494                 /* process each packet to be transmitted */
1495                 m = tx_pkts[i];
1496
1497                 /* determine how many buffers are required for this packet */
1498                 required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
1499                         avp->host_mbuf_size;
1500
1501                 tx_bytes += avp_dev_copy_to_buffers(avp, m,
1502                                                     &avp_bufs[count], required);
1503                 tx_bufs[i] = avp_bufs[count];
1504                 count += required;
1505
1506                 /* free the original mbuf */
1507                 rte_pktmbuf_free(m);
1508         }
1509
1510         txq->packets += nb_pkts;
1511         txq->bytes += tx_bytes;
1512
1513 #ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
1514         for (i = 0; i < nb_pkts; i++)
1515                 avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
1516 #endif
1517
1518         /* send the packets */
1519         n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
1520         if (unlikely(n != orig_nb_pkts))
1521                 txq->errors += (orig_nb_pkts - n);
1522
1523         return n;
1524 }
1525
1526
1527 static uint16_t
1528 avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1529 {
1530         struct avp_queue *txq = (struct avp_queue *)tx_queue;
1531         struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
1532         struct avp_dev *avp = txq->avp;
1533         struct rte_avp_desc *pkt_buf;
1534         struct rte_avp_fifo *alloc_q;
1535         struct rte_avp_fifo *tx_q;
1536         unsigned int count, avail, n;
1537         struct rte_mbuf *m;
1538         unsigned int pkt_len;
1539         unsigned int tx_bytes;
1540         char *pkt_data;
1541         unsigned int i;
1542
1543         tx_q = avp->tx_q[txq->queue_id];
1544         alloc_q = avp->alloc_q[txq->queue_id];
1545
1546         /* limit the number of transmitted packets to the max burst size */
1547         if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
1548                 nb_pkts = AVP_MAX_TX_BURST;
1549
1550         /* determine how many buffers are available to copy into */
1551         avail = avp_fifo_count(alloc_q);
1552
1553         /* determine how many slots are available in the transmit queue */
1554         count = avp_fifo_free_count(tx_q);
1555
1556         /* determine how many packets can be sent */
1557         count = RTE_MIN(count, avail);
1558         count = RTE_MIN(count, nb_pkts);
1559
1560         if (unlikely(count == 0)) {
1561                 /* no available buffers, or no space on the tx queue */
1562                 txq->errors += nb_pkts;
1563                 return 0;
1564         }
1565
1566         PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
1567                    count, tx_q);
1568
1569         /* retrieve sufficient send buffers */
1570         n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
1571         if (unlikely(n != count)) {
1572                 txq->errors++;
1573                 return 0;
1574         }
1575
1576         tx_bytes = 0;
1577         for (i = 0; i < count; i++) {
1578                 /* prefetch next entry while processing the current one */
1579                 if (i < count - 1) {
1580                         pkt_buf = avp_dev_translate_buffer(avp,
1581                                                            avp_bufs[i + 1]);
1582                         rte_prefetch0(pkt_buf);
1583                 }
1584
1585                 /* process each packet to be transmitted */
1586                 m = tx_pkts[i];
1587
1588                 /* Adjust pointers for guest addressing */
1589                 pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
1590                 pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
1591                 pkt_len = rte_pktmbuf_pkt_len(m);
1592
1593                 if (unlikely((pkt_len > avp->guest_mbuf_size) ||
1594                                          (pkt_len > avp->host_mbuf_size))) {
1595                         /*
1596                          * The application should be using the scattered
1597                          * transmit function; send the packet truncated to
1598                          * avoid the overhead of returning the already
1599                          * allocated buffer to the free list.  This should
1600                          * not happen, because the application should have
1601                          * set max_rx_pkt_len based on its MTU and should
1602                          * be policing its own packet sizes.
1603                          */
1604                         txq->errors++;
1605                         pkt_len = RTE_MIN(avp->guest_mbuf_size,
1606                                           avp->host_mbuf_size);
1607                 }
1608
1609                 /* copy data out of our mbuf and into the AVP buffer */
1610                 rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
1611                 pkt_buf->pkt_len = pkt_len;
1612                 pkt_buf->data_len = pkt_len;
1613                 pkt_buf->nb_segs = 1;
1614                 pkt_buf->next = NULL;
1615
1616                 if (m->ol_flags & PKT_TX_VLAN_PKT) {
1617                         pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
1618                         pkt_buf->vlan_tci = m->vlan_tci;
1619                 }
1620
1621                 tx_bytes += pkt_len;
1622
1623                 /* free the original mbuf */
1624                 rte_pktmbuf_free(m);
1625         }
1626
1627         txq->packets += count;
1628         txq->bytes += tx_bytes;
1629
1630         /* send the packets */
1631         n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);
1632
1633         return n;
1634 }
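
/*
 * Illustrative only: both transmit variants are reached through the
 * standard ethdev burst API, e.g.:
 *
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, mbufs, nb_pkts);
 *
 * Which of the two is installed as the device's tx_pkt_burst handler is
 * decided during device and queue setup elsewhere in this file.
 */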
1635
1636 static void
1637 avp_dev_rx_queue_release(void *rx_queue)
1638 {
1639         struct avp_queue *rxq = (struct avp_queue *)rx_queue;
1640         struct avp_dev *avp = rxq->avp;
1641         struct rte_eth_dev_data *data = avp->dev_data;
1642         unsigned int i;
1643
1644         for (i = 0; i < avp->num_rx_queues; i++) {
1645                 if (data->rx_queues[i] == rxq)
1646                         data->rx_queues[i] = NULL;
1647         }
1648 }
1649
1650 static void
1651 avp_dev_tx_queue_release(void *tx_queue)
1652 {
1653         struct avp_queue *txq = (struct avp_queue *)tx_queue;
1654         struct avp_dev *avp = txq->avp;
1655         struct rte_eth_dev_data *data = avp->dev_data;
1656         unsigned int i;
1657
1658         for (i = 0; i < avp->num_tx_queues; i++) {
1659                 if (data->tx_queues[i] == txq)
1660                         data->tx_queues[i] = NULL;
1661         }
1662 }
1663
1664 static int
1665 avp_dev_configure(struct rte_eth_dev *eth_dev)
1666 {
1667         struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
1668         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1669         struct rte_avp_device_info *host_info;
1670         struct rte_avp_device_config config;
1671         int mask = 0;
1672         void *addr;
1673         int ret;
1674
1675         addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
1676         host_info = (struct rte_avp_device_info *)addr;
1677
1678         /* Set up the required number of queues */
1679         _avp_set_queue_counts(eth_dev);
1680
1681         mask = (ETH_VLAN_STRIP_MASK |
1682                 ETH_VLAN_FILTER_MASK |
1683                 ETH_VLAN_EXTEND_MASK);
1684         avp_vlan_offload_set(eth_dev, mask);
1685
1686         /* update device config */
1687         memset(&config, 0, sizeof(config));
1688         config.device_id = host_info->device_id;
1689         config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
1690         config.driver_version = AVP_DPDK_DRIVER_VERSION;
1691         config.features = avp->features;
1692         config.num_tx_queues = avp->num_tx_queues;
1693         config.num_rx_queues = avp->num_rx_queues;
1694
1695         ret = avp_dev_ctrl_set_config(eth_dev, &config);
1696         if (ret < 0) {
1697                 PMD_DRV_LOG(ERR, "Config request rejected by host, ret=%d\n",
1698                             ret);
1699                 goto unlock;
1700         }
1701
1702         avp->flags |= AVP_F_CONFIGURED;
1703         ret = 0;
1704
1705 unlock:
1706         return ret;
1707 }
1708
1709 static int
1710 avp_dev_start(struct rte_eth_dev *eth_dev)
1711 {
1712         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1713         int ret;
1714
1715         /* disable features that we do not support */
1716         eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0;
1717         eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
1718         eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0;
1719         eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0;
1720
1721         /* update link state */
1722         ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
1723         if (ret < 0) {
1724                 PMD_DRV_LOG(ERR, "Link state change rejected by host, ret=%d\n",
1725                             ret);
1726                 goto unlock;
1727         }
1728
1729         /* remember current link state */
1730         avp->flags |= AVP_F_LINKUP;
1731
1732         ret = 0;
1733
1734 unlock:
1735         return ret;
1736 }
1737
1738 static void
1739 avp_dev_stop(struct rte_eth_dev *eth_dev)
1740 {
1741         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1742         int ret;
1743
1744         avp->flags &= ~AVP_F_LINKUP;
1745
1746         /* update link state */
1747         ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
1748         if (ret < 0) {
1749                 PMD_DRV_LOG(ERR, "Link state change rejected by host, ret=%d\n",
1750                             ret);
1751         }
1752 }
1753
1754 static void
1755 avp_dev_close(struct rte_eth_dev *eth_dev)
1756 {
1757         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1758         int ret;
1759
1760         /* clear the link-up and configured state */
1761         avp->flags &= ~AVP_F_LINKUP;
1762         avp->flags &= ~AVP_F_CONFIGURED;
1763
1764         /* update device state */
1765         ret = avp_dev_ctrl_shutdown(eth_dev);
1766         if (ret < 0) {
1767                 PMD_DRV_LOG(ERR, "Device shutdown rejected by host, ret=%d\n",
1768                             ret);
1769                 /* continue */
1770         }
1771 }
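
/*
 * Illustrative application-side sequence (standard ethdev API; one rx and
 * one tx queue assumed) that exercises the callbacks above:
 *
 *     struct rte_eth_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     if (rte_eth_dev_configure(port_id, 1, 1, &conf) == 0 &&
 *         rte_eth_dev_start(port_id) == 0) {
 *             ...
 *             rte_eth_dev_stop(port_id);
 *             rte_eth_dev_close(port_id);
 *     }
 *
 * configure maps to avp_dev_configure(), start to avp_dev_start(), stop to
 * avp_dev_stop() and close to avp_dev_close().
 */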
1772
1773 static int
1774 avp_dev_link_update(struct rte_eth_dev *eth_dev,
1775                                         __rte_unused int wait_to_complete)
1776 {
1777         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1778         struct rte_eth_link *link = &eth_dev->data->dev_link;
1779
1780         link->link_speed = ETH_SPEED_NUM_10G;
1781         link->link_duplex = ETH_LINK_FULL_DUPLEX;
1782         link->link_status = !!(avp->flags & AVP_F_LINKUP);
1783
1784         return -1;
1785 }
1786
1787 static void
1788 avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1789 {
1790         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1791
1792         if ((avp->flags & AVP_F_PROMISC) == 0) {
1793                 avp->flags |= AVP_F_PROMISC;
1794                 PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
1795                             eth_dev->data->port_id);
1796         }
1797 }
1798
1799 static void
1800 avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
1801 {
1802         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1803
1804         if ((avp->flags & AVP_F_PROMISC) != 0) {
1805                 avp->flags &= ~AVP_F_PROMISC;
1806                 PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
1807                             eth_dev->data->port_id);
1808         }
1809 }
1810
1811 static void
1812 avp_dev_info_get(struct rte_eth_dev *eth_dev,
1813                  struct rte_eth_dev_info *dev_info)
1814 {
1815         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1816
1817         dev_info->driver_name = "rte_avp_pmd";
1818         dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1819         dev_info->max_rx_queues = avp->max_rx_queues;
1820         dev_info->max_tx_queues = avp->max_tx_queues;
1821         dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
1822         dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
1823         dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
1824         if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
1825                 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1826                 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
1827         }
1828 }
1829
1830 static void
1831 avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
1832 {
1833         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1834
1835         if (mask & ETH_VLAN_STRIP_MASK) {
1836                 if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
1837                         if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
1838                                 avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
1839                         else
1840                                 avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
1841                 } else {
1842                         PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
1843                 }
1844         }
1845
1846         if (mask & ETH_VLAN_FILTER_MASK) {
1847                 if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
1848                         PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
1849         }
1850
1851         if (mask & ETH_VLAN_EXTEND_MASK) {
1852                 if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
1853                         PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
1854         }
1855 }
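
/*
 * Illustrative only: VLAN stripping is requested through the rxmode flags
 * before the device is configured, e.g.:
 *
 *     struct rte_eth_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.rxmode.hw_vlan_strip = 1;
 *     rte_eth_dev_configure(port_id, 1, 1, &conf);
 *
 * avp_dev_configure() then invokes this callback with the strip, filter
 * and extend masks; stripping is honoured only when the host advertises
 * RTE_AVP_FEATURE_VLAN_OFFLOAD.
 */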
1856
1857 static void
1858 avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
1859 {
1860         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1861         unsigned int i;
1862
1863         for (i = 0; i < avp->num_rx_queues; i++) {
1864                 struct avp_queue *rxq = avp->dev_data->rx_queues[i];
1865
1866                 if (rxq) {
1867                         stats->ipackets += rxq->packets;
1868                         stats->ibytes += rxq->bytes;
1869                         stats->ierrors += rxq->errors;
1870
1871                         stats->q_ipackets[i] += rxq->packets;
1872                         stats->q_ibytes[i] += rxq->bytes;
1873                         stats->q_errors[i] += rxq->errors;
1874                 }
1875         }
1876
1877         for (i = 0; i < avp->num_tx_queues; i++) {
1878                 struct avp_queue *txq = avp->dev_data->tx_queues[i];
1879
1880                 if (txq) {
1881                         stats->opackets += txq->packets;
1882                         stats->obytes += txq->bytes;
1883                         stats->oerrors += txq->errors;
1884
1885                         stats->q_opackets[i] += txq->packets;
1886                         stats->q_obytes[i] += txq->bytes;
1887                         stats->q_errors[i] += txq->errors;
1888                 }
1889         }
1890 }
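
/*
 * Illustrative only: these counters are read through the standard stats
 * API, e.g.:
 *
 *     struct rte_eth_stats stats;
 *
 *     rte_eth_stats_get(port_id, &stats);
 *     printf("rx=%" PRIu64 " tx=%" PRIu64 "\n",
 *            stats.ipackets, stats.opackets);
 */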
1891
1892 static void
1893 avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
1894 {
1895         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1896         unsigned int i;
1897
1898         for (i = 0; i < avp->num_rx_queues; i++) {
1899                 struct avp_queue *rxq = avp->dev_data->rx_queues[i];
1900
1901                 if (rxq) {
1902                         rxq->bytes = 0;
1903                         rxq->packets = 0;
1904                         rxq->errors = 0;
1905                 }
1906         }
1907
1908         for (i = 0; i < avp->num_tx_queues; i++) {
1909                 struct avp_queue *txq = avp->dev_data->tx_queues[i];
1910
1911                 if (txq) {
1912                         txq->bytes = 0;
1913                         txq->packets = 0;
1914                         txq->errors = 0;
1915                 }
1916         }
1917 }
1918
1919 RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd.pci_drv);
1920 RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);