/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

#define FM10K_RX_BUFF_ALIGN 512
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
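/*
 * Note: CHARS_PER_UINT32 evaluates to 4, so BIT_MASK_PER_UINT32 is 0xF:
 * one mask bit per byte lane of a 32-bit RETA register (see
 * fm10k_reta_update() below).
 */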

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

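/*
 * Busy-wait for the mailbox spinlock, sleeping FM10K_MBXLOCK_DELAY_US
 * between attempts. The lock serializes mailbox access between the
 * application context and the interrupt handler (see eth_fm10k_dev_init()).
 */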
static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
	uint64_t dma_addr;
	int i, diag;
	PMD_INIT_FUNC_TRACE();

	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
	if (diag != 0)
		return -ENOMEM;

	for (i = 0; i < q->nb_desc; ++i) {
		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
						q->nb_desc);
			return -EINVAL;
		}
		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
		q->hw_ring[i].q.pkt_addr = dma_addr;
		q->hw_ring[i].q.hdr_addr = dma_addr;
	}

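	/* every descriptor now carries a fresh mbuf; reset the software
	 * indices and publish the whole ring to HW (tail = nb_desc - 1) */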
	q->next_dd = 0;
	q->next_alloc = 0;
	q->next_trigger = q->alloc_thresh - 1;
	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
	return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
	uint32_t i;
	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}

/*
 * free all queue memory used when releasing the queue (i.e. when the queue
 * is released or re-configured)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
		rx_queue_clean(q);
		if (q->sw_ring)
			rte_free(q->sw_ring);
		rte_free(q);
	}
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
			reg & ~FM10K_RXQCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
		if (!(reg & FM10K_RXQCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	q->last_free = 0;
	q->next_free = 0;
	q->nb_used = 0;
	q->nb_free = q->nb_desc - 1;
	q->free_trigger = q->nb_free - q->free_thresh;
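	/* the RS bit tracker holds one descriptor index per rs_thresh
	 * descriptors, hence the (nb_desc + 1) / rs_thresh sizing */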
	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
	uint32_t i;
	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}

/*
 * free all queue memory used when releasing the queue (i.e. when the queue
 * is released or re-configured)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
		tx_queue_clean(q);
		if (q->rs_tracker.list)
			rte_free(q->rs_tracker.list);
		if (q->sw_ring)
			rte_free(q->sw_ring);
		rte_free(q);
	}
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
			reg & ~FM10K_TXDCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
		if (!(reg & FM10K_TXDCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
		PMD_INIT_LOG(WARNING, "fm10k always strips CRC");

	return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = -1;
	uint32_t reg;
	struct fm10k_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];
		err = rx_queue_reset(rxq);
		if (err == -ENOMEM) {
			PMD_INIT_LOG(ERR, "Failed to alloc memory: %d", err);
			return err;
		} else if (err == -EINVAL) {
			PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"
				" %d", err);
			return err;
		}

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled on real
		 * hardware, but BEFORE the queue is enabled when using the
		 * emulation platform. Do it in both places for now and remove
		 * this comment and the following two register writes when the
		 * emulation platform is no longer being used.
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

		/* Set PF ownership flag for PF devices */
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
		if (hw->mac.type == fm10k_mac_pf)
			reg |= FM10K_RXQCTL_PF;
		reg |= FM10K_RXQCTL_ENABLE;
		/* enable RX queue */
		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
		FM10K_WRITE_FLUSH(hw);

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
	}

	return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		/* Disable RX queue */
		rx_queue_disable(hw, rx_queue_id);

		/* Free mbuf and clean HW ring */
		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
	}

	return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_reset(dev->data->tx_queues[tx_queue_id]);

		/* reset head and tail pointers */
		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

		/* enable TX queue */
		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
					FM10K_TXDCTL_ENABLE | txdctl);
		FM10K_WRITE_FLUSH(hw);
	} else
		err = -1;

	return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_disable(hw, tx_queue_id);
		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
	}

	return 0;
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();

	/* The host-interface link is always up. The speed is ~50Gbps per
	 * Gen3 x8 PCIe interface. For now, we leave the speed undefined
	 * since there is no 50Gbps Ethernet. */
	dev->data->dev_link.link_speed  = 0;
	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	dev->data->dev_link.link_status = 1;

	return 0;
}

static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint64_t ipackets, opackets, ibytes, obytes;
	struct fm10k_hw *hw =
		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	fm10k_update_hw_stats(hw, hw_stats);

	ipackets = opackets = ibytes = obytes = 0;
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < FM10K_MAX_QUEUES_PF); ++i) {
		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
		stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
		stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
		ipackets += stats->q_ipackets[i];
		opackets += stats->q_opackets[i];
		ibytes   += stats->q_ibytes[i];
		obytes   += stats->q_obytes[i];
	}
	stats->ipackets = ipackets;
	stats->opackets = opackets;
	stats->ibytes = ibytes;
	stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	memset(hw_stats, 0, sizeof(*hw_stats));
	fm10k_rebind_hw_stats(hw, hw_stats);
}

static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
	dev_info->max_rx_queues      = hw->mac.max_queues;
	dev_info->max_tx_queues      = hw->mac.max_queues;
	dev_info->max_mac_addrs      = 1;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs            = FM10K_MAX_VF_NUM;
	dev_info->max_vmdq_pools     = ETH_64_POOLS;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa    = 0;
	dev_info->reta_size = FM10K_MAX_RSS_INDICES;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = FM10K_DEFAULT_RX_PTHRESH,
			.hthresh = FM10K_DEFAULT_RX_HTHRESH,
			.wthresh = FM10K_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = FM10K_DEFAULT_TX_PTHRESH,
			.hthresh = FM10K_DEFAULT_TX_HTHRESH,
			.wthresh = FM10K_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}

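/*
 * Sanity check on a requested descriptor count: it must lie within
 * [min, max] and be a multiple of mult. Returns 0 if valid, -1 otherwise.
 */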
static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
	if ((request < min) || (request > max) || ((request % mult) != 0))
		return -1;
	else
		return 0;
}

/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
	uint8_t port_id, uint16_t queue_id, int socket_id,
	uint32_t size, uint32_t align)
{
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
		 driver_name, ring_name, port_id, queue_id, socket_id);

	/* return the memzone if it already exists */
	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

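	/* under Xen Dom0 the ring must fit within a single 2M chunk, as
	 * physical contiguity is only guaranteed per chunk there; hence
	 * the bounded reservation below */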
#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
					   RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}

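/*
 * Sanity check on a requested threshold: it must lie within [min, max] and
 * evenly divide div (note the div % request test). Returns 0 if valid,
 * -1 otherwise.
 */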
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
	if ((request < min) || (request > max) || ((div % request) != 0))
		return -1;
	else
		return 0;
}

static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
	uint16_t rx_free_thresh;

	if (conf->rx_free_thresh == 0)
		rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
	else
		rx_free_thresh = conf->rx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_DIV(q),
			rx_free_thresh)) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->alloc_thresh = rx_free_thresh;
	q->drop_en = conf->rx_drop_en;
	q->rx_deferred_start = conf->rx_deferred_start;

	return 0;
}

/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B. The mempool element size is checked here
 * to make sure a maximally sized Ethernet frame can still be wholly
 * contained within the buffer after 512B alignment.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
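/*
 * Worked example with illustrative sizes: for a mempool created with
 * elt_size = sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + 2048, the
 * computed min_size is 2048 - 512 = 1536B, which passes the
 * ETHER_MAX_VLAN_FRAME_LEN (1522B) check; a 1664B data buffer would
 * leave only 1152B and be rejected.
 */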
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
	uint32_t min_size;

	/* elt_size includes mbuf header and headroom */
	min_size = mp->elt_size - sizeof(struct rte_mbuf) -
			RTE_PKTMBUF_HEADROOM;

	/* account for up to 512B of alignment */
	min_size -= FM10K_RX_BUFF_ALIGN;

	/* sanity check for overflow */
	if (min_size > mp->elt_size)
		return 0;

	if (min_size < ETHER_MAX_VLAN_FRAME_LEN)
		return 0;

	/* size is valid */
	return 1;
}

static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_rx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure the mempool element size can account for alignment. */
	if (!mempool_element_size_valid(mp)) {
		PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
		return (-EINVAL);
	}

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
				FM10K_MULT_RX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
			FM10K_MULT_RX_DESC);
		return (-EINVAL);
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * a different socket than was previously used.
	 */
	if (dev->data->rx_queues[queue_id] != NULL) {
		rx_queue_free(dev->data->rx_queues[queue_id]);
		dev->data->rx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return (-ENOMEM);
	}

	/* setup queue */
	q->mp = mp;
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
	if (handle_rxconf(q, conf)) {
		/* free the queue structure to avoid leaking it on failure */
		rte_free(q);
		return (-EINVAL);
	}

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return (-ENOMEM);
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return (-ENOMEM);
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	dev->data->rx_queues[queue_id] = q;
	return 0;
}

static void
fm10k_rx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	rx_queue_free(queue);
}

static inline int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
	uint16_t tx_free_thresh;
	uint16_t tx_rs_thresh;

	/* the constraint macros require that tx_free_thresh is configured
	 * before tx_rs_thresh */
	if (conf->tx_free_thresh == 0)
		tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
	else
		tx_free_thresh = conf->tx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_DIV(q),
			tx_free_thresh)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
			FM10K_TX_FREE_THRESH_MIN(q),
			FM10K_TX_FREE_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->free_thresh = tx_free_thresh;

	if (conf->tx_rs_thresh == 0)
		tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
	else
		tx_rs_thresh = conf->tx_rs_thresh;

	q->tx_deferred_start = conf->tx_deferred_start;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_DIV(q),
			tx_rs_thresh)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
			FM10K_TX_RS_THRESH_MIN(q),
			FM10K_TX_RS_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->rs_thresh = tx_rs_thresh;

	return 0;
}

static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_txconf *conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_tx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
				FM10K_MULT_TX_DESC, nb_desc)) {
		PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
			"less than or equal to %"PRIu32", "
			"greater than or equal to %u, "
			"and a multiple of %u",
			nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
			FM10K_MULT_TX_DESC);
		return (-EINVAL);
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * a different socket than was previously used.
	 */
	if (dev->data->tx_queues[queue_id] != NULL) {
		tx_queue_free(dev->data->tx_queues[queue_id]);
		dev->data->tx_queues[queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
				socket_id);
	if (q == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return (-ENOMEM);
	}

	/* setup queue */
	q->nb_desc = nb_desc;
	q->port_id = dev->data->port_id;
	q->queue_id = queue_id;
	q->tail_ptr = (volatile uint32_t *)
		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
	if (handle_txconf(q, conf)) {
		/* free the queue structure to avoid leaking it on failure */
		rte_free(q);
		return (-EINVAL);
	}

	/* allocate memory for the software ring */
	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
					nb_desc * sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (q->sw_ring == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate software ring");
		rte_free(q);
		return (-ENOMEM);
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
				dev->data->port_id, queue_id, socket_id,
				FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
		rte_free(q->sw_ring);
		rte_free(q);
		return (-ENOMEM);
	}
	q->hw_ring = mz->addr;
	q->hw_ring_phys_addr = mz->phys_addr;

	/*
	 * allocate memory for the RS bit tracker. Enough slots to hold the
	 * descriptor index for each RS bit needing to be set are required.
	 */
	q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
				((nb_desc + 1) / q->rs_thresh) *
				sizeof(uint16_t),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (q->rs_tracker.list == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
		rte_free(q->sw_ring);
		rte_free(q);
		return (-ENOMEM);
	}

	dev->data->tx_queues[queue_id] = q;
	return 0;
}

static void
fm10k_tx_queue_release(void *queue)
{
	PMD_INIT_FUNC_TRACE();

	tx_queue_free(queue);
}

static int
fm10k_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size > FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of the hash lookup table "
			"configured (%d) doesn't match what the hardware "
			"can support (%d)", reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Update Redirection Table RETA[n], n=0..31. The redirection table
	 * has 128 entries spread across 32 registers.
	 */
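	/*
	 * Each 32-bit RETA register packs four 8-bit queue indices, one per
	 * byte lane; the 4-bit mask built below selects which lanes to
	 * update, and a read-modify-write is used when only some are set.
	 */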
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		reta = 0;
		if (mask != BIT_MASK_PER_UINT32)
			reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));

		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j)) {
				if (mask != 0xF)
					reta &= ~(UINT8_MAX << CHAR_BIT * j);
				reta |= reta_conf[idx].reta[shift + j] <<
						(CHAR_BIT * j);
			}
		}
		FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
	}

	return 0;
}

static int
fm10k_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, j, idx, shift;
	uint8_t mask;
	uint32_t reta;

	PMD_INIT_FUNC_TRACE();

	if (reta_size < FM10K_MAX_RSS_INDICES) {
		PMD_INIT_LOG(ERR, "The size of the hash lookup table "
			"configured (%d) doesn't match what the hardware "
			"can support (%d)", reta_size, FM10K_MAX_RSS_INDICES);
		return -EINVAL;
	}

	/*
	 * Read Redirection Table RETA[n], n=0..31. The redirection table
	 * has 128 entries spread across 32 registers.
	 */
	for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				BIT_MASK_PER_UINT32);
		if (mask == 0)
			continue;

		reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
		for (j = 0; j < CHARS_PER_UINT32; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] = ((reta >>
					CHAR_BIT * j) & UINT8_MAX);
		}
	}

	return 0;
}

/* Mailbox message handlers in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

/* Mailbox message handlers in PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace the default message handlers with new ones */
	if (hw->mac.type == fm10k_mac_pf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
	else
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
				err);
		return err;
	}
	/* Connect to SM for PF device or to PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}

static struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure		= fm10k_dev_configure,
	.stats_get		= fm10k_stats_get,
	.stats_reset		= fm10k_stats_reset,
	.link_update		= fm10k_link_update,
	.dev_infos_get		= fm10k_dev_infos_get,
	.rx_queue_start		= fm10k_dev_rx_queue_start,
	.rx_queue_stop		= fm10k_dev_rx_queue_stop,
	.tx_queue_start		= fm10k_dev_tx_queue_start,
	.tx_queue_stop		= fm10k_dev_tx_queue_stop,
	.rx_queue_setup		= fm10k_rx_queue_setup,
	.rx_queue_release	= fm10k_rx_queue_release,
	.tx_queue_setup		= fm10k_tx_queue_setup,
	.tx_queue_release	= fm10k_tx_queue_release,
	.reta_update		= fm10k_reta_update,
	.reta_query		= fm10k_reta_query,
};

static int
eth_fm10k_dev_init(__rte_unused struct eth_driver *eth_drv,
	struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;

	/* only initialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = dev->pci_dev->id.device_id;
	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/*
	 * Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4. Until
	 * this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);
	if (diag != FM10K_SUCCESS) {
		/*
		 * TODO: remove special handling on VF. Need shared code to
		 * fix first.
		 */
		if (hw->mac.type == fm10k_mac_pf) {
			PMD_INIT_LOG(ERR, "Read MAC addr failed: %d", diag);
			return -EIO;
		} else {
			/* Generate a random addr */
			eth_random_addr(hw->mac.addr);
			memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		}
	}

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return -EIO;
	}

	/*
	 * The calls below trigger operations on the mailbox, so acquire the
	 * lock to avoid racing with the interrupt handler: operations on the
	 * mailbox FIFO raise an interrupt to the PF/SM, whose handler in turn
	 * generates an interrupt back to our side, where the interrupt
	 * handler touches the mailbox FIFO as well.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, 0, 0, 1);

	/* Update default vlan */
	hw->mac.ops.update_vlan(hw, hw->mac.default_vid, 0, true);

	/*
	 * Add default mac/vlan filter. glort is assigned by SM for PF, while
	 * it is unused for VF. PF will assign the correct glort for VF.
	 */
	hw->mac.ops.update_uc_addr(hw, hw->mac.dglort_map, hw->mac.addr,
			      hw->mac.default_vid, 1, 0);

	/* Set multicast mode by default. App can change to another mode in
	 * other API funcs.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
					FM10K_XCAST_MODE_MULTI);

	fm10k_mbx_unlock(hw);

	return 0;
}

/*
 * The set of PCI devices this driver supports. This driver will enable both
 * PF and SRIOV-VF devices.
 */
static struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};

static struct eth_driver rte_pmd_fm10k = {
	{
		.name = "rte_pmd_fm10k",
		.id_table = pci_id_fm10k_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_fm10k_dev_init,
	.dev_private_size = sizeof(struct fm10k_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_pmd_fm10k);
	return 0;
}

static struct rte_driver rte_fm10k_driver = {
	.type = PMD_PDEV,
	.init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);