[dpdk.git] / drivers / net / avp / avp_ethdev.c
1 /*
2  *   BSD LICENSE
3  *
4  * Copyright (c) 2013-2017, Wind River Systems, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1) Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2) Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * 3) Neither the name of Wind River Systems nor the names of its contributors
17  * may be used to endorse or promote products derived from this software
18  * without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <stdint.h>
34 #include <string.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <unistd.h>
38
39 #include <rte_ethdev.h>
40 #include <rte_memcpy.h>
41 #include <rte_string_fns.h>
42 #include <rte_memzone.h>
43 #include <rte_malloc.h>
44 #include <rte_atomic.h>
45 #include <rte_branch_prediction.h>
46 #include <rte_pci.h>
47 #include <rte_ether.h>
48 #include <rte_common.h>
49 #include <rte_cycles.h>
50 #include <rte_byteorder.h>
51 #include <rte_dev.h>
52 #include <rte_memory.h>
53 #include <rte_eal.h>
54 #include <rte_io.h>
55
56 #include "rte_avp_common.h"
57 #include "rte_avp_fifo.h"
58
59 #include "avp_logs.h"
60
61
62
63 static int avp_dev_configure(struct rte_eth_dev *dev);
64 static void avp_dev_info_get(struct rte_eth_dev *dev,
65                              struct rte_eth_dev_info *dev_info);
66 static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
67 static int avp_dev_link_update(struct rte_eth_dev *dev,
68                                __rte_unused int wait_to_complete);
69 static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
70 static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
71
72 static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
73                                   uint16_t rx_queue_id,
74                                   uint16_t nb_rx_desc,
75                                   unsigned int socket_id,
76                                   const struct rte_eth_rxconf *rx_conf,
77                                   struct rte_mempool *pool);
78
79 static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
80                                   uint16_t tx_queue_id,
81                                   uint16_t nb_tx_desc,
82                                   unsigned int socket_id,
83                                   const struct rte_eth_txconf *tx_conf);
84
85 static uint16_t avp_recv_scattered_pkts(void *rx_queue,
86                                         struct rte_mbuf **rx_pkts,
87                                         uint16_t nb_pkts);
88
89 static uint16_t avp_recv_pkts(void *rx_queue,
90                               struct rte_mbuf **rx_pkts,
91                               uint16_t nb_pkts);
92
93 static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
94                                         struct rte_mbuf **tx_pkts,
95                                         uint16_t nb_pkts);
96
97 static uint16_t avp_xmit_pkts(void *tx_queue,
98                               struct rte_mbuf **tx_pkts,
99                               uint16_t nb_pkts);
100
101 static void avp_dev_rx_queue_release(void *rxq);
102 static void avp_dev_tx_queue_release(void *txq);
103
104 static void avp_dev_stats_get(struct rte_eth_dev *dev,
105                               struct rte_eth_stats *stats);
106 static void avp_dev_stats_reset(struct rte_eth_dev *dev);
107
108
109 #define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
110
111
112 #define AVP_MAX_RX_BURST 64
113 #define AVP_MAX_TX_BURST 64
114 #define AVP_MAX_MAC_ADDRS 1
115 #define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
116
117
118 /*
119  * Defines the number of microseconds to wait before checking the response
120  * queue for completion.
121  */
122 #define AVP_REQUEST_DELAY_USECS (5000)
123
124 /*
125  * Defines the number of times to check the response queue for completion before
126  * declaring a timeout.
127  */
128 #define AVP_MAX_REQUEST_RETRY (100)
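/*
 * Together, the two values above bound the worst-case wait for a host
 * response at AVP_MAX_REQUEST_RETRY * AVP_REQUEST_DELAY_USECS
 * = 100 * 5000us = 500ms.
 */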
129
130 /* Defines the current PCI driver version number */
131 #define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION
132
133 /*
134  * The set of PCI devices this driver supports
135  */
136 static const struct rte_pci_id pci_id_avp_map[] = {
137         { .vendor_id = RTE_AVP_PCI_VENDOR_ID,
138           .device_id = RTE_AVP_PCI_DEVICE_ID,
139           .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
140           .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
141           .class_id = RTE_CLASS_ANY_ID,
142         },
143
144         { .vendor_id = 0, /* sentinel */
145         },
146 };
147
148 /*
149  * dev_ops for avp, bare necessities for basic operation
150  */
151 static const struct eth_dev_ops avp_eth_dev_ops = {
152         .dev_configure       = avp_dev_configure,
153         .dev_infos_get       = avp_dev_info_get,
154         .vlan_offload_set    = avp_vlan_offload_set,
155         .stats_get           = avp_dev_stats_get,
156         .stats_reset         = avp_dev_stats_reset,
157         .link_update         = avp_dev_link_update,
158         .promiscuous_enable  = avp_dev_promiscuous_enable,
159         .promiscuous_disable = avp_dev_promiscuous_disable,
160         .rx_queue_setup      = avp_dev_rx_queue_setup,
161         .rx_queue_release    = avp_dev_rx_queue_release,
162         .tx_queue_setup      = avp_dev_tx_queue_setup,
163         .tx_queue_release    = avp_dev_tx_queue_release,
164 };
165
166 /**@{ AVP device flags */
167 #define AVP_F_PROMISC (1 << 1)
168 #define AVP_F_CONFIGURED (1 << 2)
169 #define AVP_F_LINKUP (1 << 3)
170 /**@} */
171
172 /* Ethernet device validation marker */
173 #define AVP_ETHDEV_MAGIC 0x92972862
174
175 /*
176  * Defines the AVP device attributes which are attached to an RTE ethernet
177  * device
178  */
179 struct avp_dev {
180         uint32_t magic; /**< Memory validation marker */
181         uint64_t device_id; /**< Unique system identifier */
182         struct ether_addr ethaddr; /**< Host specified MAC address */
183         struct rte_eth_dev_data *dev_data;
184         /**< Back pointer to ethernet device data */
185         volatile uint32_t flags; /**< Device operational flags */
186         uint8_t port_id; /**< Ethernet port identifier */
187         struct rte_mempool *pool; /**< pkt mbuf mempool */
188         unsigned int guest_mbuf_size; /**< local pool mbuf size */
189         unsigned int host_mbuf_size; /**< host mbuf size */
190         unsigned int max_rx_pkt_len; /**< maximum receive unit */
191         uint32_t host_features; /**< Supported feature bitmap */
192         uint32_t features; /**< Enabled feature bitmap */
193         unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
194         unsigned int max_tx_queues; /**< Maximum number of transmit queues */
195         unsigned int num_rx_queues; /**< Negotiated number of receive queues */
196         unsigned int max_rx_queues; /**< Maximum number of receive queues */
197
198         struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
199         struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
200         struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
201         /**< Allocated mbufs queue */
202         struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
203         /**< To be freed mbufs queue */
204
205         /* For request & response */
206         struct rte_avp_fifo *req_q; /**< Request queue */
207         struct rte_avp_fifo *resp_q; /**< Response queue */
208         void *host_sync_addr; /**< (host) Req/Resp Mem address */
209         void *sync_addr; /**< Req/Resp Mem address */
210         void *host_mbuf_addr; /**< (host) MBUF pool start address */
211         void *mbuf_addr; /**< MBUF pool start address */
212 } __rte_cache_aligned;
213
214 /* RTE ethernet private data */
215 struct avp_adapter {
216         struct avp_dev avp;
217 } __rte_cache_aligned;
218
219
220 /* 32-bit MMIO register write */
221 #define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))
222
223 /* 32-bit MMIO register read */
224 #define AVP_READ32(_addr) rte_read32_relaxed((_addr))
225
226 /* Macro to cast the ethernet device private data to an AVP object */
227 #define AVP_DEV_PRIVATE_TO_HW(adapter) \
228         (&((struct avp_adapter *)adapter)->avp)
229
230 /*
231  * Defines the structure of an AVP device queue for the purpose of handling the
232  * receive and transmit burst callback functions
233  */
234 struct avp_queue {
235         struct rte_eth_dev_data *dev_data;
236         /**< Backpointer to ethernet device data */
237         struct avp_dev *avp; /**< Backpointer to AVP device */
238         uint16_t queue_id;
239         /**< Queue identifier used for indexing current queue */
240         uint16_t queue_base;
241         /**< Base queue identifier for queue servicing */
242         uint16_t queue_limit;
243         /**< Maximum queue identifier for queue servicing */
244
245         uint64_t packets;
246         uint64_t bytes;
247         uint64_t errors;
248 };
249
250 /* send a request and wait for a response
251  *
252  * @warning must be called while holding the avp->lock spinlock.
253  */
254 static int
255 avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
256 {
257         unsigned int retry = AVP_MAX_REQUEST_RETRY;
258         void *resp_addr = NULL;
259         unsigned int count;
260         int ret;
261
262         PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);
263
264         request->result = -ENOTSUP;
265
266         /* Discard any stale responses before starting a new request */
267         while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
268                 PMD_DRV_LOG(DEBUG, "Discarding stale response\n");
269
270         rte_memcpy(avp->sync_addr, request, sizeof(*request));
271         count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
272         if (count < 1) {
273                 PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
274                             request->req_id);
275                 ret = -EBUSY;
276                 goto done;
277         }
278
279         while (retry--) {
280                 /* wait for a response */
281                 usleep(AVP_REQUEST_DELAY_USECS);
282
283                 count = avp_fifo_count(avp->resp_q);
284                 if (count >= 1) {
285                         /* response received */
286                         break;
287                 }
288
289                 if ((count < 1) && (retry == 0)) {
290                         PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
291                                     request->req_id);
292                         ret = -ETIME;
293                         goto done;
294                 }
295         }
296
297         /* retrieve the response */
298         count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
299         if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
300                 PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
301                             count, resp_addr, avp->host_sync_addr);
302                 ret = -ENODATA;
303                 goto done;
304         }
305
306         /* copy to user buffer */
307         rte_memcpy(request, avp->sync_addr, sizeof(*request));
308         ret = 0;
309
310         PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
311                     request->result, request->req_id);
312
313 done:
314         return ret;
315 }
316
317 static int
318 avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
319                         struct rte_avp_device_config *config)
320 {
321         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
322         struct rte_avp_request request;
323         int ret;
324
325         /* setup a configure request */
326         memset(&request, 0, sizeof(request));
327         request.req_id = RTE_AVP_REQ_CFG_DEVICE;
328         memcpy(&request.config, config, sizeof(request.config));
329
330         ret = avp_dev_process_request(avp, &request);
331
332         return ret == 0 ? request.result : ret;
333 }
334
335 /* translate from host mbuf virtual address to guest virtual address */
336 static inline void *
337 avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
338 {
339         return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
340                                        (uintptr_t)avp->host_mbuf_addr),
341                            (uintptr_t)avp->mbuf_addr);
342 }
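/*
 * A minimal illustrative sketch (not part of the driver) of the same
 * offset-preserving arithmetic as avp_dev_translate_buffer() above, written
 * as a standalone helper with hypothetical names: because the host pool and
 * its guest mapping share one layout, translation is just a base swap.
 */
static inline void *
example_translate_buffer(void *host_ptr, void *host_base, void *guest_base)
{
        /* offset of the object within the host mapping */
        uintptr_t offset = (uintptr_t)host_ptr - (uintptr_t)host_base;

        /* the guest sees the same pool at guest_base with the same layout */
        return RTE_PTR_ADD(guest_base, offset);
}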
343
344 /* translate from host physical address to guest virtual address */
345 static void *
346 avp_dev_translate_address(struct rte_eth_dev *eth_dev,
347                           phys_addr_t host_phys_addr)
348 {
349         struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
350         struct rte_mem_resource *resource;
351         struct rte_avp_memmap_info *info;
352         struct rte_avp_memmap *map;
353         off_t offset;
354         void *addr;
355         unsigned int i;
356
357         addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
358         resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
359         info = (struct rte_avp_memmap_info *)resource->addr;
360
361         offset = 0;
362         for (i = 0; i < info->nb_maps; i++) {
363                 /* search all segments looking for a matching address */
364                 map = &info->maps[i];
365
366                 if ((host_phys_addr >= map->phys_addr) &&
367                         (host_phys_addr < (map->phys_addr + map->length))) {
368                         /* address is within this segment */
369                         offset += (host_phys_addr - map->phys_addr);
370                         addr = RTE_PTR_ADD(addr, offset);
371
372                         PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
373                                     host_phys_addr, addr);
374
375                         return addr;
376                 }
377                 offset += map->length;
378         }
379
380         return NULL;
381 }
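/*
 * Worked example for the segment walk above, using hypothetical map
 * contents: given maps[0] = {phys 0x10000000, length 0x100000} and
 * maps[1] = {phys 0x20000000, length 0x100000}, translating host physical
 * address 0x20004000 misses maps[0] (offset grows to 0x100000), matches
 * maps[1], and returns the MEMORY BAR address plus 0x100000 + 0x4000.
 */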
382
383 /* verify that the incoming device version is compatible with our version */
384 static int
385 avp_dev_version_check(uint32_t version)
386 {
387         uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
388         uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);
389
390         if (device <= driver) {
391                 /* the host driver version is less than or equal to ours */
392                 return 0;
393         }
394
395         return 1;
396 }
397
398 /* verify that memory regions have expected version and validation markers */
399 static int
400 avp_dev_check_regions(struct rte_eth_dev *eth_dev)
401 {
402         struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
403         struct rte_avp_memmap_info *memmap;
404         struct rte_avp_device_info *info;
405         struct rte_mem_resource *resource;
406         unsigned int i;
407
408         /* Dump resource info for debug */
409         for (i = 0; i < PCI_MAX_RESOURCE; i++) {
410                 resource = &pci_dev->mem_resource[i];
411                 if ((resource->phys_addr == 0) || (resource->len == 0))
412                         continue;
413
414                 PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
415                             i, resource->phys_addr,
416                             resource->len, resource->addr);
417
418                 switch (i) {
419                 case RTE_AVP_PCI_MEMMAP_BAR:
420                         memmap = (struct rte_avp_memmap_info *)resource->addr;
421                         if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
422                             (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
423                                 PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
424                                             memmap->magic, memmap->version);
425                                 return -EINVAL;
426                         }
427                         break;
428
429                 case RTE_AVP_PCI_DEVICE_BAR:
430                         info = (struct rte_avp_device_info *)resource->addr;
431                         if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
432                             avp_dev_version_check(info->version)) {
433                                 PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
434                                             info->magic, info->version,
435                                             AVP_DPDK_DRIVER_VERSION);
436                                 return -EINVAL;
437                         }
438                         break;
439
440                 case RTE_AVP_PCI_MEMORY_BAR:
441                 case RTE_AVP_PCI_MMIO_BAR:
442                         if (resource->addr == NULL) {
443                                 PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
444                                             i);
445                                 return -EINVAL;
446                         }
447                         break;
448
449                 case RTE_AVP_PCI_MSIX_BAR:
450                 default:
451                         /* no validation required */
452                         break;
453                 }
454         }
455
456         return 0;
457 }
458
459 static void
460 _avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
461 {
462         struct avp_dev *avp =
463                 AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
464         struct avp_queue *rxq;
465         uint16_t queue_count;
466         uint16_t remainder;
467
468         rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];
469
470         /*
471          * Must map all AVP fifos as evenly as possible between the configured
472          * device queues.  Each device queue will service a subset of the AVP
473          * fifos.  If the fifos do not divide evenly among the device queues,
474          * the first "remainder" device queues each service one extra fifo.
475          */
476         queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
477         remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
478         if (rx_queue_id < remainder) {
479                 /* these queues must service one extra FIFO */
480                 rxq->queue_base = rx_queue_id * (queue_count + 1);
481                 rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
482         } else {
483                 /* these queues service the regular number of FIFOs */
484                 rxq->queue_base = ((remainder * (queue_count + 1)) +
485                                    ((rx_queue_id - remainder) * queue_count));
486                 rxq->queue_limit = rxq->queue_base + queue_count - 1;
487         }
488
489         PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
490                     rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);
491
492         rxq->queue_id = rxq->queue_base;
493 }
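/*
 * Worked example for the mapping above, using hypothetical counts: with
 * avp->num_rx_queues = 5 AVP FIFOs spread over nb_rx_queues = 2 device
 * queues, queue_count = 2 and remainder = 1.  Device queue 0 falls below
 * the remainder and services FIFOs 0..2; device queue 1 services FIFOs
 * 3..4.  Each receive call then round-robins queue_id through that range.
 */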
494
495 static void
496 _avp_set_queue_counts(struct rte_eth_dev *eth_dev)
497 {
498         struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
499         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
500         struct rte_avp_device_info *host_info;
501         void *addr;
502
503         addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
504         host_info = (struct rte_avp_device_info *)addr;
505
506         /*
507          * the transmit direction is not negotiated beyond respecting the max
508          * number of queues because the host can handle arbitrary guest tx
509          * queues (host rx queues).
510          */
511         avp->num_tx_queues = eth_dev->data->nb_tx_queues;
512
513         /*
514          * the receive direction is more restrictive.  The host requires a
515          * minimum number of guest rx queues (host tx queues) therefore
516          * negotiate a value that is at least as large as the host minimum
517          * requirement.  If the host and guest values are not identical then a
518          * mapping will be established in the receive_queue_setup function.
519          */
520         avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
521                                      eth_dev->data->nb_rx_queues);
522
523         PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
524                     avp->num_tx_queues, avp->num_rx_queues);
525 }
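/*
 * Example with hypothetical values: if the host reports min_rx_queues = 4
 * but the application configured only nb_rx_queues = 2, the negotiated
 * avp->num_rx_queues becomes 4, and _avp_set_rx_queue_mappings() later
 * maps two AVP FIFOs onto each device queue.
 */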
526
527 /*
528  * create an AVP device using the supplied device info by first translating it
529  * to guest address space(s).
530  */
531 static int
532 avp_dev_create(struct rte_pci_device *pci_dev,
533                struct rte_eth_dev *eth_dev)
534 {
535         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
536         struct rte_avp_device_info *host_info;
537         struct rte_mem_resource *resource;
538         unsigned int i;
539
540         resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
541         if (resource->addr == NULL) {
542                 PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
543                             RTE_AVP_PCI_DEVICE_BAR);
544                 return -EFAULT;
545         }
546         host_info = (struct rte_avp_device_info *)resource->addr;
547
548         if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
549                 avp_dev_version_check(host_info->version)) {
550                 PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
551                             host_info->magic, host_info->version,
552                             AVP_DPDK_DRIVER_VERSION);
553                 return -EINVAL;
554         }
555
556         PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
557                     RTE_AVP_GET_RELEASE_VERSION(host_info->version),
558                     RTE_AVP_GET_MAJOR_VERSION(host_info->version),
559                     RTE_AVP_GET_MINOR_VERSION(host_info->version));
560
561         PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
562                     host_info->min_tx_queues, host_info->max_tx_queues);
563         PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
564                     host_info->min_rx_queues, host_info->max_rx_queues);
565         PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
566                     host_info->features);
567
568         if (avp->magic != AVP_ETHDEV_MAGIC) {
569                 /*
570                  * First time initialization (i.e., not during a VM
571                  * migration)
572                  */
573                 memset(avp, 0, sizeof(*avp));
574                 avp->magic = AVP_ETHDEV_MAGIC;
575                 avp->dev_data = eth_dev->data;
576                 avp->port_id = eth_dev->data->port_id;
577                 avp->host_mbuf_size = host_info->mbuf_size;
578                 avp->host_features = host_info->features;
579                 memcpy(&avp->ethaddr.addr_bytes[0],
580                        host_info->ethaddr, ETHER_ADDR_LEN);
581                 /* adjust max values to not exceed our max */
582                 avp->max_tx_queues =
583                         RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
584                 avp->max_rx_queues =
585                         RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
586         } else {
587                 /* Re-attaching during migration */
588
589                 /* TODO... requires validation of host values */
590                 if ((host_info->features & avp->features) != avp->features) {
591                         PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
592                                     avp->features, host_info->features);
593                         /* this should not be possible; continue for now */
594                 }
595         }
596
597         /* the device id is allowed to change over migrations */
598         avp->device_id = host_info->device_id;
599
600         /* translate incoming host addresses to guest address space */
601         PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
602                     host_info->tx_phys);
603         PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
604                     host_info->alloc_phys);
605         for (i = 0; i < avp->max_tx_queues; i++) {
606                 avp->tx_q[i] = avp_dev_translate_address(eth_dev,
607                         host_info->tx_phys + (i * host_info->tx_size));
608
609                 avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
610                         host_info->alloc_phys + (i * host_info->alloc_size));
611         }
612
613         PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
614                     host_info->rx_phys);
615         PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
616                     host_info->free_phys);
617         for (i = 0; i < avp->max_rx_queues; i++) {
618                 avp->rx_q[i] = avp_dev_translate_address(eth_dev,
619                         host_info->rx_phys + (i * host_info->rx_size));
620                 avp->free_q[i] = avp_dev_translate_address(eth_dev,
621                         host_info->free_phys + (i * host_info->free_size));
622         }
623
624         PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
625                     host_info->req_phys);
626         PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
627                     host_info->resp_phys);
628         PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
629                     host_info->sync_phys);
630         PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
631                     host_info->mbuf_phys);
632         avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
633         avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
634         avp->sync_addr =
635                 avp_dev_translate_address(eth_dev, host_info->sync_phys);
636         avp->mbuf_addr =
637                 avp_dev_translate_address(eth_dev, host_info->mbuf_phys);
638
639         /*
640          * store the host mbuf virtual address so that we can calculate
641          * relative offsets for each mbuf as they are processed
642          */
643         avp->host_mbuf_addr = host_info->mbuf_va;
644         avp->host_sync_addr = host_info->sync_va;
645
646         /*
647          * store the maximum packet length that is supported by the host.
648          */
649         avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
650         PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
651                                 host_info->max_rx_pkt_len);
652
653         return 0;
654 }
655
656 /*
657  * This function is based on probe() function in avp_pci.c
658  * It returns 0 on success.
659  */
660 static int
661 eth_avp_dev_init(struct rte_eth_dev *eth_dev)
662 {
663         struct avp_dev *avp =
664                 AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
665         struct rte_pci_device *pci_dev;
666         int ret;
667
668         pci_dev = AVP_DEV_TO_PCI(eth_dev);
669         eth_dev->dev_ops = &avp_eth_dev_ops;
670         eth_dev->rx_pkt_burst = &avp_recv_pkts;
671         eth_dev->tx_pkt_burst = &avp_xmit_pkts;
672
673         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
674                 /*
675                  * no setup required on secondary processes.  All data is saved
676                  * in dev_private by the primary process.  All resources should
677                  * be mapped to the same virtual addresses as in the primary
678                  * process, so all pointers remain valid.
679                  */
680                 if (eth_dev->data->scattered_rx) {
681                         PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
682                         eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
683                         eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
684                 }
685                 return 0;
686         }
687
688         rte_eth_copy_pci_info(eth_dev, pci_dev);
689
690         eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
691
692         /* Check BAR resources */
693         ret = avp_dev_check_regions(eth_dev);
694         if (ret < 0) {
695                 PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
696                             ret);
697                 return ret;
698         }
699
700         /* Handle each subtype */
701         ret = avp_dev_create(pci_dev, eth_dev);
702         if (ret < 0) {
703                 PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
704                 return ret;
705         }
706
707         /* Allocate memory for storing MAC addresses */
708         eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
709         if (eth_dev->data->mac_addrs == NULL) {
710                 PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
711                             ETHER_ADDR_LEN);
712                 return -ENOMEM;
713         }
714
715         /* Get a mac from device config */
716         ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);
717
718         return 0;
719 }
720
721 static int
722 eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
723 {
724         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
725                 return -EPERM;
726
727         if (eth_dev->data == NULL)
728                 return 0;
729
730         if (eth_dev->data->mac_addrs != NULL) {
731                 rte_free(eth_dev->data->mac_addrs);
732                 eth_dev->data->mac_addrs = NULL;
733         }
734
735         return 0;
736 }
737
738
739 static struct eth_driver rte_avp_pmd = {
740         {
741                 .id_table = pci_id_avp_map,
742                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
743                 .probe = rte_eth_dev_pci_probe,
744                 .remove = rte_eth_dev_pci_remove,
745         },
746         .eth_dev_init = eth_avp_dev_init,
747         .eth_dev_uninit = eth_avp_dev_uninit,
748         .dev_private_size = sizeof(struct avp_adapter),
749 };
750
751 static int
752 avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
753                          struct avp_dev *avp)
754 {
755         unsigned int max_rx_pkt_len;
756
757         max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
758
759         if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
760             (max_rx_pkt_len > avp->host_mbuf_size)) {
761                 /*
762                  * If the guest MTU is greater than either the host or guest
763                  * buffers then chained mbufs have to be enabled in the TX
764                  * direction.  It is assumed that the application will not need
765                  * to send packets larger than their max_rx_pkt_len (MRU).
766                  */
767                 return 1;
768         }
769
770         if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
771             (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
772                 /*
773                  * If the host MRU is greater than its own mbuf size or the
774                  * guest mbuf size then chained mbufs have to be enabled in the
775                  * RX direction.
776                  */
777                 return 1;
778         }
779
780         return 0;
781 }
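/*
 * Example with hypothetical sizes for the decision above: with
 * guest_mbuf_size = host_mbuf_size = 2048, a configured max_rx_pkt_len of
 * 1500 keeps the simple burst handlers, while a jumbo setting of 9216
 * exceeds both buffer sizes and forces the scattered (chained mbuf)
 * receive and transmit handlers.
 */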
782
783 static int
784 avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
785                        uint16_t rx_queue_id,
786                        uint16_t nb_rx_desc,
787                        unsigned int socket_id,
788                        const struct rte_eth_rxconf *rx_conf,
789                        struct rte_mempool *pool)
790 {
791         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
792         struct rte_pktmbuf_pool_private *mbp_priv;
793         struct avp_queue *rxq;
794
795         if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
796                 PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
797                             rx_queue_id, eth_dev->data->nb_rx_queues);
798                 return -EINVAL;
799         }
800
801         /* Save mbuf pool pointer */
802         avp->pool = pool;
803
804         /* Save the local mbuf size */
805         mbp_priv = rte_mempool_get_priv(pool);
806         avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
807         avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;
808
809         if (avp_dev_enable_scattered(eth_dev, avp)) {
810                 if (!eth_dev->data->scattered_rx) {
811                         PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
812                         eth_dev->data->scattered_rx = 1;
813                         eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
814                         eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
815                 }
816         }
817
818         PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
819                     avp->max_rx_pkt_len,
820                     eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
821                     avp->host_mbuf_size,
822                     avp->guest_mbuf_size);
823
824         /* allocate a queue object */
825         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
826                                  RTE_CACHE_LINE_SIZE, socket_id);
827         if (rxq == NULL) {
828                 PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
829                 return -ENOMEM;
830         }
831
832         /* save back pointers to AVP and Ethernet devices */
833         rxq->avp = avp;
834         rxq->dev_data = eth_dev->data;
835         eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;
836
837         /* setup the queue receive mapping for the current queue. */
838         _avp_set_rx_queue_mappings(eth_dev, rx_queue_id);
839
840         PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);
841
842         (void)nb_rx_desc;
843         (void)rx_conf;
844         return 0;
845 }
846
847 static int
848 avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
849                        uint16_t tx_queue_id,
850                        uint16_t nb_tx_desc,
851                        unsigned int socket_id,
852                        const struct rte_eth_txconf *tx_conf)
853 {
854         struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
855         struct avp_queue *txq;
856
857         if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
858                 PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
859                             tx_queue_id, eth_dev->data->nb_tx_queues);
860                 return -EINVAL;
861         }
862
863         /* allocate a queue object */
864         txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
865                                  RTE_CACHE_LINE_SIZE, socket_id);
866         if (txq == NULL) {
867                 PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
868                 return -ENOMEM;
869         }
870
871         /* only the configured set of transmit queues are used */
872         txq->queue_id = tx_queue_id;
873         txq->queue_base = tx_queue_id;
874         txq->queue_limit = tx_queue_id;
875
876         /* save back pointers to AVP and Ethernet devices */
877         txq->avp = avp;
878         txq->dev_data = eth_dev->data;
879         eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;
880
881         PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);
882
883         (void)nb_tx_desc;
884         (void)tx_conf;
885         return 0;
886 }
887
888 static inline int
889 _avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
890 {
891         uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
892         uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
893         return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
894 }
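/*
 * Note (added for clarity): the three 16-bit XOR/OR terms above cover all
 * six bytes, so the result is zero if and only if the two MAC addresses
 * are identical.  This relies on the addresses being at least 2-byte
 * aligned, which holds for both avp->ethaddr and the header of a received
 * mbuf with the default headroom.
 */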
895
896 static inline int
897 _avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
898 {
899         struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
900
901         if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
902                 /* allow all packets destined for our address */
903                 return 0;
904         }
905
906         if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
907                 /* allow all broadcast packets */
908                 return 0;
909         }
910
911         if (likely(is_multicast_ether_addr(&eth->d_addr))) {
912                 /* allow all multicast packets */
913                 return 0;
914         }
915
916         if (avp->flags & AVP_F_PROMISC) {
917                 /* allow all packets when in promiscuous mode */
918                 return 0;
919         }
920
921         return -1;
922 }
923
924 #ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
925 static inline void
926 __avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
927 {
928         struct rte_avp_desc *first_buf;
929         struct rte_avp_desc *pkt_buf;
930         unsigned int pkt_len;
931         unsigned int nb_segs;
932         void *pkt_data;
933         unsigned int i;
934
935         first_buf = avp_dev_translate_buffer(avp, buf);
936
937         i = 0;
938         pkt_len = 0;
939         nb_segs = first_buf->nb_segs;
940         do {
941                 /* Adjust pointers for guest addressing */
942                 pkt_buf = avp_dev_translate_buffer(avp, buf);
943                 if (pkt_buf == NULL)
944                         rte_panic("bad buffer: segment %u has an invalid address %p\n",
945                                   i, buf);
946                 pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
947                 if (pkt_data == NULL)
948                         rte_panic("bad buffer: segment %u has a NULL data pointer\n",
949                                   i);
950                 if (pkt_buf->data_len == 0)
951                         rte_panic("bad buffer: segment %u has 0 data length\n",
952                                   i);
953                 pkt_len += pkt_buf->data_len;
954                 nb_segs--;
955                 i++;
956
957         } while (nb_segs && (buf = pkt_buf->next) != NULL);
958
959         if (nb_segs != 0)
960                 rte_panic("bad buffer: expected %u segments found %u\n",
961                           first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
962         if (pkt_len != first_buf->pkt_len)
963                 rte_panic("bad buffer: expected length %u found %u\n",
964                           first_buf->pkt_len, pkt_len);
965 }
966
967 #define avp_dev_buffer_sanity_check(a, b) \
968         __avp_dev_buffer_sanity_check((a), (b))
969
970 #else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */
971
972 #define avp_dev_buffer_sanity_check(a, b) do {} while (0)
973
974 #endif
975
976 /*
977  * Copy a host buffer chain to a set of mbufs.  This function assumes that
978  * there are exactly the required number of mbufs to copy all source bytes.
979  */
980 static inline struct rte_mbuf *
981 avp_dev_copy_from_buffers(struct avp_dev *avp,
982                           struct rte_avp_desc *buf,
983                           struct rte_mbuf **mbufs,
984                           unsigned int count)
985 {
986         struct rte_mbuf *m_previous = NULL;
987         struct rte_avp_desc *pkt_buf;
988         unsigned int total_length = 0;
989         unsigned int copy_length;
990         unsigned int src_offset;
991         struct rte_mbuf *m;
992         uint16_t ol_flags;
993         uint16_t vlan_tci;
994         void *pkt_data;
995         unsigned int i;
996
997         avp_dev_buffer_sanity_check(avp, buf);
998
999         /* setup the first source buffer */
1000         pkt_buf = avp_dev_translate_buffer(avp, buf);
1001         pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
1002         total_length = pkt_buf->pkt_len;
1003         src_offset = 0;
1004
1005         if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
1006                 ol_flags = PKT_RX_VLAN_PKT;
1007                 vlan_tci = pkt_buf->vlan_tci;
1008         } else {
1009                 ol_flags = 0;
1010                 vlan_tci = 0;
1011         }
1012
1013         for (i = 0; (i < count) && (buf != NULL); i++) {
1014                 /* fill each destination buffer */
1015                 m = mbufs[i];
1016
1017                 if (m_previous != NULL)
1018                         m_previous->next = m;
1019
1020                 m_previous = m;
1021
1022                 do {
1023                         /*
1024                          * Copy as many source buffers as will fit in the
1025                          * destination buffer.
1026                          */
1027                         copy_length = RTE_MIN((avp->guest_mbuf_size -
1028                                                rte_pktmbuf_data_len(m)),
1029                                               (pkt_buf->data_len -
1030                                                src_offset));
1031                         rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
1032                                                rte_pktmbuf_data_len(m)),
1033                                    RTE_PTR_ADD(pkt_data, src_offset),
1034                                    copy_length);
1035                         rte_pktmbuf_data_len(m) += copy_length;
1036                         src_offset += copy_length;
1037
1038                         if (likely(src_offset == pkt_buf->data_len)) {
1039                                 /* need a new source buffer */
1040                                 buf = pkt_buf->next;
1041                                 if (buf != NULL) {
1042                                         pkt_buf = avp_dev_translate_buffer(
1043                                                 avp, buf);
1044                                         pkt_data = avp_dev_translate_buffer(
1045                                                 avp, pkt_buf->data);
1046                                         src_offset = 0;
1047                                 }
1048                         }
1049
1050                         if (unlikely(rte_pktmbuf_data_len(m) ==
1051                                      avp->guest_mbuf_size)) {
1052                                 /* need a new destination mbuf */
1053                                 break;
1054                         }
1055
1056                 } while (buf != NULL);
1057         }
1058
1059         m = mbufs[0];
1060         m->ol_flags = ol_flags;
1061         m->nb_segs = count;
1062         rte_pktmbuf_pkt_len(m) = total_length;
1063         m->vlan_tci = vlan_tci;
1064
1065         __rte_mbuf_sanity_check(m, 1);
1066
1067         return m;
1068 }
1069
1070 static uint16_t
1071 avp_recv_scattered_pkts(void *rx_queue,
1072                         struct rte_mbuf **rx_pkts,
1073                         uint16_t nb_pkts)
1074 {
1075         struct avp_queue *rxq = (struct avp_queue *)rx_queue;
1076         struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
1077         struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
1078         struct avp_dev *avp = rxq->avp;
1079         struct rte_avp_desc *pkt_buf;
1080         struct rte_avp_fifo *free_q;
1081         struct rte_avp_fifo *rx_q;
1082         struct rte_avp_desc *buf;
1083         unsigned int count, avail, n;
1084         unsigned int guest_mbuf_size;
1085         struct rte_mbuf *m;
1086         unsigned int required;
1087         unsigned int buf_len;
1088         unsigned int port_id;
1089         unsigned int i;
1090
1091         guest_mbuf_size = avp->guest_mbuf_size;
1092         port_id = avp->port_id;
1093         rx_q = avp->rx_q[rxq->queue_id];
1094         free_q = avp->free_q[rxq->queue_id];
1095
1096         /* setup next queue to service */
1097         rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
1098                 (rxq->queue_id + 1) : rxq->queue_base;
1099
1100         /* determine how many slots are available in the free queue */
1101         count = avp_fifo_free_count(free_q);
1102
1103         /* determine how many packets are available in the rx queue */
1104         avail = avp_fifo_count(rx_q);
1105
1106         /* determine how many packets can be received */
1107         count = RTE_MIN(count, avail);
1108         count = RTE_MIN(count, nb_pkts);
1109         count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
1110
1111         if (unlikely(count == 0)) {
1112                 /* no free buffers, or no buffers on the rx queue */
1113                 return 0;
1114         }
1115
1116         /* retrieve pending packets */
1117         n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
1118         PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
1119                    count, rx_q);
1120
1121         count = 0;
1122         for (i = 0; i < n; i++) {
1123                 /* prefetch next entry while processing current one */
1124                 if (i + 1 < n) {
1125                         pkt_buf = avp_dev_translate_buffer(avp,
1126                                                            avp_bufs[i + 1]);
1127                         rte_prefetch0(pkt_buf);
1128                 }
1129                 buf = avp_bufs[i];
1130
1131                 /* Peek into the first buffer to determine the total length */
1132                 pkt_buf = avp_dev_translate_buffer(avp, buf);
1133                 buf_len = pkt_buf->pkt_len;
1134
1135                 /* Allocate enough mbufs to receive the entire packet */
1136                 required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
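                /*
                 * ceiling division: e.g. a hypothetical buf_len of 3000
                 * with guest_mbuf_size = 2048 requires 2 mbufs
                 */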
1137                 if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
1138                         rxq->dev_data->rx_mbuf_alloc_failed++;
1139                         continue;
1140                 }
1141
1142                 /* Copy the data from the buffers to our mbufs */
1143                 m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);
1144
1145                 /* finalize mbuf */
1146                 m->port = port_id;
1147
1148                 if (_avp_mac_filter(avp, m) != 0) {
1149                         /* silently discard packets not destined for our MAC */
1150                         rte_pktmbuf_free(m);
1151                         continue;
1152                 }
1153
1154                 /* return new mbuf to caller */
1155                 rx_pkts[count++] = m;
1156                 rxq->bytes += buf_len;
1157         }
1158
1159         rxq->packets += count;
1160
1161         /* return the buffers to the free queue */
1162         avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
1163
1164         return count;
1165 }
1166
1167
1168 static uint16_t
1169 avp_recv_pkts(void *rx_queue,
1170               struct rte_mbuf **rx_pkts,
1171               uint16_t nb_pkts)
1172 {
1173         struct avp_queue *rxq = (struct avp_queue *)rx_queue;
1174         struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
1175         struct avp_dev *avp = rxq->avp;
1176         struct rte_avp_desc *pkt_buf;
1177         struct rte_avp_fifo *free_q;
1178         struct rte_avp_fifo *rx_q;
1179         unsigned int count, avail, n;
1180         unsigned int pkt_len;
1181         struct rte_mbuf *m;
1182         char *pkt_data;
1183         unsigned int i;
1184
1185         rx_q = avp->rx_q[rxq->queue_id];
1186         free_q = avp->free_q[rxq->queue_id];
1187
1188         /* setup next queue to service */
1189         rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
1190                 (rxq->queue_id + 1) : rxq->queue_base;
1191
1192         /* determine how many slots are available in the free queue */
1193         count = avp_fifo_free_count(free_q);
1194
1195         /* determine how many packets are available in the rx queue */
1196         avail = avp_fifo_count(rx_q);
1197
1198         /* determine how many packets can be received */
1199         count = RTE_MIN(count, avail);
1200         count = RTE_MIN(count, nb_pkts);
1201         count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
1202
1203         if (unlikely(count == 0)) {
1204                 /* no free buffers, or no buffers on the rx queue */
1205                 return 0;
1206         }
1207
1208         /* retrieve pending packets */
1209         n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
1210         PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
1211                    count, rx_q);
1212
1213         count = 0;
1214         for (i = 0; i < n; i++) {
1215                 /* prefetch next entry while processing current one */
1216                 if (i < n - 1) {
1217                         pkt_buf = avp_dev_translate_buffer(avp,
1218                                                            avp_bufs[i + 1]);
1219                         rte_prefetch0(pkt_buf);
1220                 }
1221
1222                 /* Adjust host pointers for guest addressing */
1223                 pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
1224                 pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
1225                 pkt_len = pkt_buf->pkt_len;
1226
1227                 if (unlikely((pkt_len > avp->guest_mbuf_size) ||
1228                              (pkt_buf->nb_segs > 1))) {
1229                         /*
1230                          * application should be using the scattered receive
1231                          * function
1232                          */
1233                         rxq->errors++;
1234                         continue;
1235                 }
1236
1237                 /* allocate a new mbuf to hold the received packet */
1238                 m = rte_pktmbuf_alloc(avp->pool);
1239                 if (unlikely(m == NULL)) {
1240                         rxq->dev_data->rx_mbuf_alloc_failed++;
1241                         continue;
1242                 }
1243
1244                 /* copy data out of the host buffer to our buffer */
1245                 m->data_off = RTE_PKTMBUF_HEADROOM;
1246                 rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);
1247
1248                 /* initialize the local mbuf */
1249                 rte_pktmbuf_data_len(m) = pkt_len;
1250                 rte_pktmbuf_pkt_len(m) = pkt_len;
1251                 m->port = avp->port_id;
1252
1253                 if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
1254                         m->ol_flags = PKT_RX_VLAN_PKT;
1255                         m->vlan_tci = pkt_buf->vlan_tci;
1256                 }
1257
1258                 if (_avp_mac_filter(avp, m) != 0) {
1259                         /* silently discard packets not destined for our MAC */
1260                         rte_pktmbuf_free(m);
1261                         continue;
1262                 }
1263
1264                 /* return new mbuf to caller */
1265                 rx_pkts[count++] = m;
1266                 rxq->bytes += pkt_len;
1267         }
1268
1269         rxq->packets += count;
1270
1271         /* return the buffers to the free queue */
1272         avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
1273
1274         return count;
1275 }
1276
1277 /*
1278  * Copy a chained mbuf to a set of host buffers.  This function assumes that
1279  * there are sufficient destination buffers to contain the entire source
1280  * packet.
1281  */
1282 static inline uint16_t
1283 avp_dev_copy_to_buffers(struct avp_dev *avp,
                        struct rte_mbuf *mbuf,
                        struct rte_avp_desc **buffers,
                        unsigned int count)
{
        struct rte_avp_desc *previous_buf = NULL;
        struct rte_avp_desc *first_buf = NULL;
        struct rte_avp_desc *pkt_buf;
        struct rte_avp_desc *buf;
        size_t total_length;
        struct rte_mbuf *m;
        size_t copy_length;
        size_t src_offset;
        char *pkt_data;
        unsigned int i;

        __rte_mbuf_sanity_check(mbuf, 1);

        m = mbuf;
        src_offset = 0;
        total_length = rte_pktmbuf_pkt_len(m);
        for (i = 0; (i < count) && (m != NULL); i++) {
                /* fill each destination buffer */
                buf = buffers[i];

                if (i < count - 1) {
                        /* prefetch next entry while processing this one */
                        pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
                        rte_prefetch0(pkt_buf);
                }

                /* Adjust pointers for guest addressing */
                pkt_buf = avp_dev_translate_buffer(avp, buf);
                pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);

                /* setup the buffer chain */
                if (previous_buf != NULL)
                        previous_buf->next = buf;
                else
                        first_buf = pkt_buf;

                previous_buf = pkt_buf;

                do {
                        /*
                         * copy as many source mbuf segments as will fit in the
                         * destination buffer.
                         */
                        copy_length = RTE_MIN((avp->host_mbuf_size -
                                               pkt_buf->data_len),
                                              (rte_pktmbuf_data_len(m) -
                                               src_offset));
                        rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
                                   RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
                                               src_offset),
                                   copy_length);
                        pkt_buf->data_len += copy_length;
                        src_offset += copy_length;

                        if (likely(src_offset == rte_pktmbuf_data_len(m))) {
                                /* need a new source buffer */
                                m = m->next;
                                src_offset = 0;
                        }

                        if (unlikely(pkt_buf->data_len ==
                                     avp->host_mbuf_size)) {
                                /* need a new destination buffer */
                                break;
                        }

                } while (m != NULL);
        }

        first_buf->nb_segs = count;
        first_buf->pkt_len = total_length;

        if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
                first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
                first_buf->vlan_tci = mbuf->vlan_tci;
        }

        avp_dev_buffer_sanity_check(avp, buffers[0]);

        return total_length;
}

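/*
 * Illustrative sketch (not part of the driver): both transmit paths size
 * their buffer requests with a ceiling division of the packet length over
 * the host buffer size, i.e.
 *
 *     required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
 *             avp->host_mbuf_size;
 *
 * so, assuming 2048 byte host buffers, a 3000 byte packet needs
 * (3000 + 2047) / 2048 = 2 buffers.  avp_dev_copy_to_buffers() above then
 * walks the source mbuf chain and the destination buffer list in lock
 * step, starting a new destination buffer whenever the current one fills
 * to avp->host_mbuf_size.
 */
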
static uint16_t
avp_xmit_scattered_pkts(void *tx_queue,
                        struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
{
        struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
                                       RTE_AVP_MAX_MBUF_SEGMENTS)];
        struct avp_queue *txq = (struct avp_queue *)tx_queue;
        struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
        struct avp_dev *avp = txq->avp;
        struct rte_avp_fifo *alloc_q;
        struct rte_avp_fifo *tx_q;
        unsigned int count, avail, n;
        unsigned int orig_nb_pkts;
        struct rte_mbuf *m;
        unsigned int required;
        unsigned int segments;
        unsigned int tx_bytes;
        unsigned int i;

        orig_nb_pkts = nb_pkts;
        tx_q = avp->tx_q[txq->queue_id];
        alloc_q = avp->alloc_q[txq->queue_id];

        /* limit the number of transmitted packets to the max burst size */
        if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
                nb_pkts = AVP_MAX_TX_BURST;

        /* determine how many buffers are available to copy into */
        avail = avp_fifo_count(alloc_q);
        if (unlikely(avail > (AVP_MAX_TX_BURST *
                              RTE_AVP_MAX_MBUF_SEGMENTS)))
                avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;

        /* determine how many slots are available in the transmit queue */
        count = avp_fifo_free_count(tx_q);

        /* determine how many packets can be sent */
        nb_pkts = RTE_MIN(count, nb_pkts);

        /* determine how many packets will fit in the available buffers */
        count = 0;
        segments = 0;
        for (i = 0; i < nb_pkts; i++) {
                m = tx_pkts[i];
                if (likely(i < (unsigned int)nb_pkts - 1)) {
                        /* prefetch next entry while processing this one */
                        rte_prefetch0(tx_pkts[i + 1]);
                }
                required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
                        avp->host_mbuf_size;

                if (unlikely((required == 0) ||
                             (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
                        break;
                else if (unlikely(required + segments > avail))
                        break;
                segments += required;
                count++;
        }
        nb_pkts = count;

        if (unlikely(nb_pkts == 0)) {
                /* no available buffers, or no space on the tx queue */
                txq->errors += orig_nb_pkts;
                return 0;
        }

        PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
                   nb_pkts, tx_q);

        /* retrieve sufficient send buffers */
        n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
        if (unlikely(n != segments)) {
                PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
                           "n=%u, segments=%u, orig=%u\n",
                           n, segments, orig_nb_pkts);
                txq->errors += orig_nb_pkts;
                return 0;
        }

        tx_bytes = 0;
        count = 0;
        for (i = 0; i < nb_pkts; i++) {
                /* process each packet to be transmitted */
                m = tx_pkts[i];

                /* determine how many buffers are required for this packet */
                required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
                        avp->host_mbuf_size;

                tx_bytes += avp_dev_copy_to_buffers(avp, m,
                                                    &avp_bufs[count], required);
                tx_bufs[i] = avp_bufs[count];
                count += required;

                /* free the original mbuf */
                rte_pktmbuf_free(m);
        }

        txq->packets += nb_pkts;
        txq->bytes += tx_bytes;

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
        for (i = 0; i < nb_pkts; i++)
                avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
#endif

        /* send the packets */
        n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
        if (unlikely(n != orig_nb_pkts))
                txq->errors += (orig_nb_pkts - n);

        return n;
}

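/*
 * Usage sketch (illustrative only): applications never call the burst
 * handlers directly; they are installed as ethdev callbacks and reached
 * through the generic transmit API, roughly:
 *
 *     struct rte_mbuf *pkts[AVP_MAX_TX_BURST];
 *     uint16_t nb = ...;    // mbufs prepared by the application
 *     uint16_t sent;
 *
 *     sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 *     // mbufs accepted here are copied and freed by this PMD (see
 *     // rte_pktmbuf_free() above); the application retries or drops
 *     // the remaining (nb - sent) packets itself.
 */
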
static uint16_t
avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct avp_queue *txq = (struct avp_queue *)tx_queue;
        struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
        struct avp_dev *avp = txq->avp;
        struct rte_avp_desc *pkt_buf;
        struct rte_avp_fifo *alloc_q;
        struct rte_avp_fifo *tx_q;
        unsigned int count, avail, n;
        struct rte_mbuf *m;
        unsigned int pkt_len;
        unsigned int tx_bytes;
        char *pkt_data;
        unsigned int i;

        tx_q = avp->tx_q[txq->queue_id];
        alloc_q = avp->alloc_q[txq->queue_id];

        /* limit the number of transmitted packets to the max burst size */
        if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
                nb_pkts = AVP_MAX_TX_BURST;

        /* determine how many buffers are available to copy into */
        avail = avp_fifo_count(alloc_q);

        /* determine how many slots are available in the transmit queue */
        count = avp_fifo_free_count(tx_q);

        /* determine how many packets can be sent */
        count = RTE_MIN(count, avail);
        count = RTE_MIN(count, nb_pkts);

        if (unlikely(count == 0)) {
                /* no available buffers, or no space on the tx queue */
                txq->errors += nb_pkts;
                return 0;
        }

        PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
                   count, tx_q);

        /* retrieve sufficient send buffers */
        n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
        if (unlikely(n != count)) {
                txq->errors++;
                return 0;
        }

        tx_bytes = 0;
        for (i = 0; i < count; i++) {
                /* prefetch next entry while processing the current one */
                if (i < count - 1) {
                        pkt_buf = avp_dev_translate_buffer(avp,
                                                           avp_bufs[i + 1]);
                        rte_prefetch0(pkt_buf);
                }

                /* process each packet to be transmitted */
                m = tx_pkts[i];

                /* Adjust pointers for guest addressing */
                pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
                pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
                pkt_len = rte_pktmbuf_pkt_len(m);

                if (unlikely((pkt_len > avp->guest_mbuf_size) ||
                             (pkt_len > avp->host_mbuf_size))) {
                        /*
                         * The application should be using the scattered
                         * transmit function; send the packet truncated to
                         * avoid the performance hit of having to return the
                         * already allocated buffer to the free list.  This
                         * should not happen since the application should have
                         * set max_rx_pkt_len based on its MTU and should be
                         * policing its own packet sizes.
                         */
                        txq->errors++;
                        pkt_len = RTE_MIN(avp->guest_mbuf_size,
                                          avp->host_mbuf_size);
                }

                /* copy data out of our mbuf and into the AVP buffer */
                rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
                pkt_buf->pkt_len = pkt_len;
                pkt_buf->data_len = pkt_len;
                pkt_buf->nb_segs = 1;
                pkt_buf->next = NULL;

                if (m->ol_flags & PKT_TX_VLAN_PKT) {
                        pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
                        pkt_buf->vlan_tci = m->vlan_tci;
                }

                tx_bytes += pkt_len;

                /* free the original mbuf */
                rte_pktmbuf_free(m);
        }

        txq->packets += count;
        txq->bytes += tx_bytes;

        /* send the packets */
        n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);

        return n;
}

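/*
 * Illustrative note: avp_xmit_pkts() is the linear (single-segment) path;
 * because each packet must fit in one host buffer it can only truncate
 * anything larger.  The oversize test above is equivalent to:
 *
 *     if (pkt_len > RTE_MIN(avp->guest_mbuf_size, avp->host_mbuf_size))
 *             // packet needs the scattered path instead
 *
 * so applications expecting multi-segment packets should be running with
 * avp_xmit_scattered_pkts() installed as the transmit handler.
 */
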
static void
avp_dev_rx_queue_release(void *rx_queue)
{
        struct avp_queue *rxq = (struct avp_queue *)rx_queue;
        struct avp_dev *avp = rxq->avp;
        struct rte_eth_dev_data *data = avp->dev_data;
        unsigned int i;

        for (i = 0; i < avp->num_rx_queues; i++) {
                if (data->rx_queues[i] == rxq)
                        data->rx_queues[i] = NULL;
        }
}

static void
avp_dev_tx_queue_release(void *tx_queue)
{
        struct avp_queue *txq = (struct avp_queue *)tx_queue;
        struct avp_dev *avp = txq->avp;
        struct rte_eth_dev_data *data = avp->dev_data;
        unsigned int i;

        for (i = 0; i < avp->num_tx_queues; i++) {
                if (data->tx_queues[i] == txq)
                        data->tx_queues[i] = NULL;
        }
}

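/*
 * Illustrative note: both release handlers clear every slot in the
 * rte_eth_dev_data queue arrays that points at the queue being released,
 * presumably so a later teardown pass does not dereference a stale
 * pointer; e.g. after releasing queue 1:
 *
 *     data->rx_queues[] == { rxq0, NULL, rxq2, ... }
 */
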
static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_avp_device_info *host_info;
        struct rte_avp_device_config config;
        int mask = 0;
        void *addr;
        int ret;

        addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
        host_info = (struct rte_avp_device_info *)addr;

        /* Setup required number of queues */
        _avp_set_queue_counts(eth_dev);

        mask = (ETH_VLAN_STRIP_MASK |
                ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK);
        avp_vlan_offload_set(eth_dev, mask);

        /* update device config */
        memset(&config, 0, sizeof(config));
        config.device_id = host_info->device_id;
        config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
        config.driver_version = AVP_DPDK_DRIVER_VERSION;
        config.features = avp->features;
        config.num_tx_queues = avp->num_tx_queues;
        config.num_rx_queues = avp->num_rx_queues;

        ret = avp_dev_ctrl_set_config(eth_dev, &config);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
                            ret);
                goto out;
        }

        avp->flags |= AVP_F_CONFIGURED;
        ret = 0;

out:
        return ret;
}


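/*
 * Usage sketch (illustrative, not driver code): avp_dev_configure() runs
 * in response to the generic configure call, roughly:
 *
 *     struct rte_eth_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.rxmode.hw_vlan_strip = 1;    // request VLAN strip offload
 *     if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) != 0)
 *             // handle failure (e.g. the host rejected the config request)
 */
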
static int
avp_dev_link_update(struct rte_eth_dev *eth_dev,
                    __rte_unused int wait_to_complete)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct rte_eth_link *link = &eth_dev->data->dev_link;

        link->link_speed = ETH_SPEED_NUM_10G;
        link->link_duplex = ETH_LINK_FULL_DUPLEX;
        link->link_status = !!(avp->flags & AVP_F_LINKUP);

        return -1;
}

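/*
 * Usage sketch (illustrative only): the state reported above is read back
 * through the generic link API, e.g.:
 *
 *     struct rte_eth_link link;
 *
 *     rte_eth_link_get_nowait(port_id, &link);
 *     if (link.link_status)
 *             printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */
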
static void
avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        if ((avp->flags & AVP_F_PROMISC) == 0) {
                avp->flags |= AVP_F_PROMISC;
                PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
                            eth_dev->data->port_id);
        }
}

static void
avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        if ((avp->flags & AVP_F_PROMISC) != 0) {
                avp->flags &= ~AVP_F_PROMISC;
                PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
                            eth_dev->data->port_id);
        }
}

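/*
 * Usage sketch (illustrative only):
 *
 *     rte_eth_promiscuous_enable(port_id);
 *     ...
 *     rte_eth_promiscuous_disable(port_id);
 *
 * For this device the flag is purely local state consulted by the
 * receive-side MAC filtering; no control request to the host is needed.
 */
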
static void
avp_dev_info_get(struct rte_eth_dev *eth_dev,
                 struct rte_eth_dev_info *dev_info)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        dev_info->driver_name = "rte_avp_pmd";
        dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        dev_info->max_rx_queues = avp->max_rx_queues;
        dev_info->max_tx_queues = avp->max_tx_queues;
        dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
        dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
        dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
        if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
                dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
                dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
        }
}

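/*
 * Usage sketch (illustrative only):
 *
 *     struct rte_eth_dev_info info;
 *
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT)
 *             // host supports VLAN insertion on transmit
 */
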
static void
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        if (mask & ETH_VLAN_STRIP_MASK) {
                if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
                        if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
                                avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
                        else
                                avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
                } else {
                        PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
                }
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
                        PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
                        PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
        }
}

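/*
 * Usage sketch (illustrative only): beyond the configure-time call in
 * avp_dev_configure(), the strip offload can be toggled at runtime,
 * roughly:
 *
 *     int conf = rte_eth_dev_get_vlan_offload(port_id);
 *
 *     conf |= ETH_VLAN_STRIP_OFFLOAD;
 *     rte_eth_dev_set_vlan_offload(port_id, conf);
 *
 * The ethdev layer updates dev_conf.rxmode and then invokes
 * avp_vlan_offload_set() with the mask of changed bits.
 */
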
static void
avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        unsigned int i;

        for (i = 0; i < avp->num_rx_queues; i++) {
                struct avp_queue *rxq = avp->dev_data->rx_queues[i];

                if (rxq) {
                        stats->ipackets += rxq->packets;
                        stats->ibytes += rxq->bytes;
                        stats->ierrors += rxq->errors;

                        stats->q_ipackets[i] += rxq->packets;
                        stats->q_ibytes[i] += rxq->bytes;
                        stats->q_errors[i] += rxq->errors;
                }
        }

        for (i = 0; i < avp->num_tx_queues; i++) {
                struct avp_queue *txq = avp->dev_data->tx_queues[i];

                if (txq) {
                        stats->opackets += txq->packets;
                        stats->obytes += txq->bytes;
                        stats->oerrors += txq->errors;

                        stats->q_opackets[i] += txq->packets;
                        stats->q_obytes[i] += txq->bytes;
                        stats->q_errors[i] += txq->errors;
                }
        }
}

static void
avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        unsigned int i;

        for (i = 0; i < avp->num_rx_queues; i++) {
                struct avp_queue *rxq = avp->dev_data->rx_queues[i];

                if (rxq) {
                        rxq->bytes = 0;
                        rxq->packets = 0;
                        rxq->errors = 0;
                }
        }

        for (i = 0; i < avp->num_tx_queues; i++) {
                struct avp_queue *txq = avp->dev_data->tx_queues[i];

                if (txq) {
                        txq->bytes = 0;
                        txq->packets = 0;
                        txq->errors = 0;
                }
        }
}

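/*
 * Usage sketch (illustrative only): the per-queue counters kept above are
 * reported and cleared through the generic stats API, e.g.:
 *
 *     struct rte_eth_stats stats;
 *
 *     if (rte_eth_stats_get(port_id, &stats) == 0)
 *             printf("rx %" PRIu64 " pkts, tx %" PRIu64 " pkts\n",
 *                    stats.ipackets, stats.opackets);
 *     rte_eth_stats_reset(port_id);
 */
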
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);