fm10k: fix error when adding default vlan
drivers/net/fm10k/fm10k_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
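/* Mask used to split a 64-bit DMA ring base address across the 32-bit
 * low/high register pairs (TDBAL/TDBAH, RDBAL/RDBAH).
 */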
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

/* Max try times to acquire switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US    100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
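/* CHARS_PER_UINT32 is 4, so this evaluates to 0xF: a mask covering one
 * group of four 8-bit entries in a 32-bit RETA register.
 */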
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void
fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add);
static void
fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev);

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

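/* Busy-wait for the mailbox lock, backing off FM10K_MBXLOCK_DELAY_US
 * between trylock attempts.
 */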
static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
	uint64_t dma_addr;
	int i, diag;
	PMD_INIT_FUNC_TRACE();

	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
	if (diag != 0)
		return -ENOMEM;

	for (i = 0; i < q->nb_desc; ++i) {
		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
						q->nb_desc);
			return -EINVAL;
		}
		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
		q->hw_ring[i].q.pkt_addr = dma_addr;
		q->hw_ring[i].q.hdr_addr = dma_addr;
	}

	q->next_dd = 0;
	q->next_alloc = 0;
	q->next_trigger = q->alloc_thresh - 1;
	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
	return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
	uint32_t i;
	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
		rx_queue_clean(q);
		if (q->sw_ring) {
			rte_free(q->sw_ring);
			q->sw_ring = NULL;
		}
		rte_free(q);
		q = NULL;
	}
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
			reg & ~FM10K_RXQCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
		if (!(reg & FM10K_RXQCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	q->last_free = 0;
	q->next_free = 0;
	q->nb_used = 0;
	q->nb_free = q->nb_desc - 1;
	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
	uint32_t i;
	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
		tx_queue_clean(q);
		if (q->rs_tracker.list) {
			rte_free(q->rs_tracker.list);
			q->rs_tracker.list = NULL;
		}
		if (q->sw_ring) {
			rte_free(q->sw_ring);
			q->sw_ring = NULL;
		}
		rte_free(q);
		q = NULL;
	}
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
			reg & ~FM10K_TXDCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
		if (!(reg & FM10K_TXDCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
		PMD_INIT_LOG(WARNING, "fm10k always strips CRC");

	return 0;
}

static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint32_t mrqc, *key, i, reta, j;
	uint64_t hf;

#define RSS_KEY_SIZE 40
	static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
		0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
		0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
	};

	if (dev->data->nb_rx_queues == 1 ||
	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
	    dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
		return;

	/* random key is rss_intel_key (default) or user provided (rss_key) */
	if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
		key = (uint32_t *)rss_intel_key;
	else
		key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

	/* Now fill our hash function seeds, 4 bytes at a time */
	for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
		FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

	/*
	 * Fill in redirection table
	 * The byte-swap is needed because NIC registers are in
	 * little-endian order.
	 */
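	/* Each 32-bit RETA register packs four 8-bit queue indices; shift a
	 * queue index in one byte at a time and write the register out after
	 * every fourth entry.
	 */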
	reta = 0;
	for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
		if (j == dev->data->nb_rx_queues)
			j = 0;
		reta = (reta << CHAR_BIT) | j;
		if ((i & 3) == 3)
			FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
					rte_bswap32(reta));
	}

	/*
	 * Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
	mrqc = 0;
	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

	if (mrqc == 0) {
		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
			"supported", hf);
		return;
	}

	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, ret;
	struct fm10k_tx_queue *txq;
	uint64_t base_addr;
	uint32_t size;

	/* Disable TXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_TXINT(i),
				3 << FM10K_TXINT_TIMER_SHIFT);

	/* Setup TX queue */
	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
		txq = dev->data->tx_queues[i];
		base_addr = txq->hw_ring_phys_addr;
		size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

		/* disable queue to avoid issues while updating state */
		ret = tx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* set location and size for descriptor ring */
		FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
	}
	return 0;
}

static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, ret;
	struct fm10k_rx_queue *rxq;
	uint64_t base_addr;
	uint32_t size;
	uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	uint16_t buf_size;

	/* Disable RXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_RXINT(i),
				3 << FM10K_RXINT_TIMER_SHIFT);

	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		rxq = dev->data->rx_queues[i];
		base_addr = rxq->hw_ring_phys_addr;
		size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

		/* disable queue to avoid issues while updating state */
		ret = rx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* Setup the Base and Length of the Rx Descriptor Ring */
		FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

		/* Configure the Rx buffer size for one buff without split */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
			RTE_PKTMBUF_HEADROOM);
		/* As RX buffer is aligned to 512B within mbuf, some bytes are
		 * reserved for this purpose, and the worst case could be 511B.
		 * But SRR reg assumes all buffers have the same size. In order
		 * to fill the gap, we'll have to consider the worst case and
		 * assume 512B is reserved. If we don't do so, it's possible
		 * for HW to overwrite data to next mbuf.
		 */
		buf_size -= FM10K_RX_DATABUF_ALIGN;

		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
				buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

		/* Add dual VLAN tag length to the frame size when checking
		 * whether scattered Rx must be enabled.
		 */
		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
			dev->data->dev_conf.rxmode.enable_scatter) {
			uint32_t reg;
			dev->data->scattered_rx = 1;
			dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
			reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
			FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
		}

		/* Enable drop on empty, it's RO for VF */
		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
		FM10K_WRITE_FLUSH(hw);
	}

	/* Configure RSS if applicable */
	fm10k_dev_mq_rx_configure(dev);
	return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = -1;
	uint32_t reg;
	struct fm10k_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];
		err = rx_queue_reset(rxq);
		if (err == -ENOMEM) {
			PMD_INIT_LOG(ERR, "Failed to alloc memory: %d", err);
			return err;
		} else if (err == -EINVAL) {
			PMD_INIT_LOG(ERR, "Invalid buffer address alignment: "
				"%d", err);
			return err;
		}

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled on real
		 * hardware, but BEFORE the queue is enabled when using the
		 * emulation platform. Do it in both places for now and remove
		 * this comment and the following two register writes when the
		 * emulation platform is no longer being used.
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

		/* Set PF ownership flag for PF devices */
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
		if (hw->mac.type == fm10k_mac_pf)
			reg |= FM10K_RXQCTL_PF;
		reg |= FM10K_RXQCTL_ENABLE;
		/* enable RX queue */
		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
		FM10K_WRITE_FLUSH(hw);

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
	}

	return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		/* Disable RX queue */
		rx_queue_disable(hw, rx_queue_id);

		/* Free mbuf and clean HW ring */
		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
	}

	return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_reset(dev->data->tx_queues[tx_queue_id]);

		/* reset head and tail pointers */
		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

		/* enable TX queue */
		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
					FM10K_TXDCTL_ENABLE | txdctl);
		FM10K_WRITE_FLUSH(hw);
	} else
		err = -1;

	return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_disable(hw, tx_queue_id);
		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
	}

	return 0;
}

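/* A glort range is valid once the switch manager has moved the dglort map
 * off its FM10K_DGLORTMAP_NONE reset value.
 */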
static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
	return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
		!= FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if it didn't acquire valid glort range */
	if (!fm10k_glort_valid(hw))
		return;

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_PROMISC);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t mode;
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if it didn't acquire valid glort range */
	if (!fm10k_glort_valid(hw))
		return;

	if (dev->data->all_multicast == 1)
		mode = FM10K_XCAST_MODE_ALLMULTI;
	else
		mode = FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				mode);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if it didn't acquire valid glort range */
	if (!fm10k_glort_valid(hw))
		return;

	/* If promiscuous mode is enabled, it doesn't make sense to enable
	 * allmulticast and disable promiscuous since fm10k only can select
	 * one of the modes.
	 */
	if (dev->data->promiscuous) {
		PMD_INIT_LOG(INFO, "Promiscuous mode is enabled; "
			"no need to enable allmulticast");
		return;
	}

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_ALLMULTI);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if it didn't acquire valid glort range */
	if (!fm10k_glort_valid(hw))
		return;

	if (dev->data->promiscuous) {
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
			"since promisc mode is enabled");
		return;
	}

	fm10k_mbx_lock(hw);
	/* Change mode to unicast mode */
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_NONE);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
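/* Round-up mask matching the granularity of the SRRCTL buffer-size field */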
#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, diag;

	PMD_INIT_FUNC_TRACE();

	/* stop, init, then start the hw */
	diag = fm10k_stop_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_start_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_dev_tx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
		return diag;
	}

	diag = fm10k_dev_rx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
		return diag;
	}

	if (hw->mac.type == fm10k_mac_pf) {
		/* Establish only VSI 0 as valid */
		FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);

		/* Configure RSS bits used in RETA table */
		FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
				fls(dev->data->nb_rx_queues - 1) <<
				FM10K_DGLORTDEC_RSSLENGTH_SHIFT);

		/* Invalidate all other GLORT entries */
		for (i = 1; i < FM10K_DGLORT_COUNT; i++)
			FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
					FM10K_DGLORTMAP_NONE);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq;
		rxq = dev->data->rx_queues[i];

		if (rxq->rx_deferred_start)
			continue;
		diag = fm10k_dev_rx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < i; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct fm10k_tx_queue *txq;
		txq = dev->data->tx_queues[i];

		if (txq->tx_deferred_start)
			continue;
		diag = fm10k_dev_tx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < dev->data->nb_rx_queues; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

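	/* hw->mac.default_vid may still be zero if the switch has not
	 * assigned a default VID, and fm10k_vlan_filter_set() rejects IDs
	 * above ETHER_MAX_VLAN_ID, so only add the filter when the VID is
	 * valid.
	 */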
	/* Update default vlan */
	if (hw->mac.default_vid && hw->mac.default_vid <= ETHER_MAX_VLAN_ID)
		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

	return 0;
}

static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		fm10k_dev_tx_queue_stop(dev, i);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		fm10k_dev_rx_queue_stop(dev, i);
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	fm10k_MACVLAN_remove_all(dev);

	/* Stop mailbox service first */
	fm10k_close_mbx_service(hw);
	fm10k_dev_stop(dev);
	fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();

	/* The host-interface link is always up.  The speed is ~50Gbps per Gen3
	 * x8 PCIe interface. For now, we leave the speed undefined since there
	 * is no 50Gbps Ethernet. */
	dev->data->dev_link.link_speed  = 0;
	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	dev->data->dev_link.link_status = 1;

	return 0;
}

static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint64_t ipackets, opackets, ibytes, obytes;
	struct fm10k_hw *hw =
		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	fm10k_update_hw_stats(hw, hw_stats);

	ipackets = opackets = ibytes = obytes = 0;
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < hw->mac.max_queues); ++i) {
		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
		stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
		stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
		ipackets += stats->q_ipackets[i];
		opackets += stats->q_opackets[i];
		ibytes   += stats->q_ibytes[i];
		obytes   += stats->q_obytes[i];
	}
	stats->ipackets = ipackets;
	stats->opackets = opackets;
	stats->ibytes = ibytes;
	stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	memset(hw_stats, 0, sizeof(*hw_stats));
	fm10k_rebind_hw_stats(hw, hw_stats);
}

static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
	dev_info->max_rx_queues      = hw->mac.max_queues;
	dev_info->max_tx_queues      = hw->mac.max_queues;
	dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs            = dev->pci_dev->max_vfs;
	dev_info->max_vmdq_pools     = ETH_64_POOLS;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT;
	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
	dev_info->reta_size = FM10K_MAX_RSS_INDICES;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = FM10K_DEFAULT_RX_PTHRESH,
			.hthresh = FM10K_DEFAULT_RX_HTHRESH,
			.wthresh = FM10K_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = FM10K_DEFAULT_TX_PTHRESH,
			.hthresh = FM10K_DEFAULT_TX_HTHRESH,
			.wthresh = FM10K_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}

static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	s32 result;
	uint16_t mac_num = 0;
	uint32_t vid_idx, vid_bit, mac_index;
	struct fm10k_hw *hw;
	struct fm10k_macvlan_filter_info *macvlan;
	struct rte_eth_dev_data *data = dev->data;

	hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

	if (vlan_id > ETH_VLAN_ID_MAX) {
		PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
		return (-EINVAL);
	}

	vid_idx = FM10K_VFTA_IDX(vlan_id);
	vid_bit = FM10K_VFTA_BIT(vlan_id);
	/* this VLAN ID is already in the VLAN filter table, return SUCCESS */
	if (on && (macvlan->vfta[vid_idx] & vid_bit))
		return 0;
	/* this VLAN ID is NOT in the VLAN filter table, cannot remove */
	if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
		PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
			"in the VLAN filter table");
		return (-EINVAL);
	}

	fm10k_mbx_lock(hw);
	result = fm10k_update_vlan(hw, vlan_id, 0, on);
	fm10k_mbx_unlock(hw);
	if (result != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
		return (-EIO);
	}

	for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
			(result == FM10K_SUCCESS); mac_index++) {
		if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
			continue;
		if (mac_num > macvlan->mac_num - 1) {
			PMD_INIT_LOG(ERR, "MAC address count does not match");
			break;
		}
		fm10k_mbx_lock(hw);
		result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
			data->mac_addrs[mac_index].addr_bytes,
			vlan_id, on, 0);
		fm10k_mbx_unlock(hw);
		mac_num++;
	}
	if (result != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
		return (-EIO);
	}

	if (on) {
		macvlan->vlan_num++;
		macvlan->vfta[vid_idx] |= vid_bit;
	} else {
		macvlan->vlan_num--;
		macvlan->vfta[vid_idx] &= ~vid_bit;
	}
	return 0;
}

static void
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
			PMD_INIT_LOG(ERR, "VLAN stripping is "
					"always on in fm10k");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			PMD_INIT_LOG(ERR, "VLAN QinQ is not "
					"supported in fm10k");
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
			PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
	}
}

/* Add/Remove a MAC address, and update filters */
static void
fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add)
{
	uint32_t i, j, k;
	struct fm10k_hw *hw;
	struct fm10k_macvlan_filter_info *macvlan;

	hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

	i = 0;
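	/* Walk the VFTA bitmap and apply the MAC update on every VLAN whose
	 * filter bit is currently set.
	 */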
	for (j = 0; j < FM10K_VFTA_SIZE; j++) {
		if (macvlan->vfta[j]) {
			for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
				if (macvlan->vfta[j] & (1 << k)) {
					if (i + 1 > macvlan->vlan_num) {
						PMD_INIT_LOG(ERR, "VLAN count "
								"does not match");
						return;
					}
					fm10k_mbx_lock(hw);
					fm10k_update_uc_addr(hw,
						hw->mac.dglort_map, mac,
						j * FM10K_UINT32_BIT_SIZE + k,
						add, 0);
					fm10k_mbx_unlock(hw);
					i++;
				}
			}
		}
	}

	if (add)
		macvlan->mac_num++;
	else
		macvlan->mac_num--;
}

/* Add a MAC address, and update filters */
static void
fm10k_macaddr_add(struct rte_eth_dev *dev,
		 struct ether_addr *mac_addr,
		 __rte_unused uint32_t index,
		 __rte_unused uint32_t pool)
{
	fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE);
}

/* Remove a MAC address, and update filters */
static void
fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct rte_eth_dev_data *data = dev->data;

	if (index < FM10K_MAX_MACADDR_NUM)
		fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
				FALSE);
}

/* Remove all VLAN and MAC address table entries */
static void
fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev)
{
	uint32_t j, k;
	struct fm10k_macvlan_filter_info *macvlan;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	for (j = 0; j < FM10K_VFTA_SIZE; j++) {
		if (macvlan->vfta[j]) {
			for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
				if (macvlan->vfta[j] & (1 << k))
					fm10k_vlan_filter_set(dev,
						j * FM10K_UINT32_BIT_SIZE + k, false);
			}
		}
	}
}

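/* A descriptor count is valid when it lies in [min, max] and is a multiple
 * of mult.
 */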
static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
	if ((request < min) || (request > max) || ((request % mult) != 0))
		return -1;
	else
		return 0;
}

/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
	uint8_t port_id, uint16_t queue_id, int socket_id,
	uint32_t size, uint32_t align)
{
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
		 driver_name, ring_name, port_id, queue_id, socket_id);

	/* return the memzone if it already exists */
	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
					   RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}

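/* A threshold is valid when it lies in [min, max] and evenly divides div
 * (note the div % request test).
 */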
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
	if ((request < min) || (request > max) || ((div % request) != 0))
		return -1;
	else
		return 0;
}

static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
	uint16_t rx_free_thresh;

	if (conf->rx_free_thresh == 0)
		rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
	else
		rx_free_thresh = conf->rx_free_thresh;

	/* make sure the requested threshold satisfies the constraints */
	if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_DIV(q),
			rx_free_thresh)) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
			"less than or equal to %u, "
			"greater than or equal to %u, "
			"and a divisor of %u",
			rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
			FM10K_RX_FREE_THRESH_MIN(q),
			FM10K_RX_FREE_THRESH_DIV(q));
		return (-EINVAL);
	}

	q->alloc_thresh = rx_free_thresh;
	q->drop_en = conf->rx_drop_en;
	q->rx_deferred_start = conf->rx_deferred_start;

	return 0;
}

/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
	uint32_t min_size;

	/* elt_size includes mbuf header and headroom */
	min_size = mp->elt_size - sizeof(struct rte_mbuf) -
			RTE_PKTMBUF_HEADROOM;

	/* account for up to 512B of alignment */
	min_size -= FM10K_RX_DATABUF_ALIGN;

	/* sanity check for overflow */
	if (min_size > mp->elt_size)
		return 0;

	/* size is valid */
	return 1;
}

static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
	uint16_t nb_desc, unsigned int socket_id,
	const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_rx_queue *q;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure the mempool element size can account for alignment. */
	if (!mempool_element_size_valid(mp)) {
		PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
1233                 return (-EINVAL);
1234         }
1235
1236         /* make sure a valid number of descriptors have been requested */
1237         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1238                                 FM10K_MULT_RX_DESC, nb_desc)) {
1239                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1240                         "less than or equal to %"PRIu32", "
1241                         "greater than or equal to %u, "
1242                         "and a multiple of %u",
1243                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1244                         FM10K_MULT_RX_DESC);
1245                 return (-EINVAL);
1246         }
1247
1248         /*
1249          * if this queue existed already, free the associated memory. The
1250          * queue cannot be reused in case we need to allocate memory on
1251          * different socket than was previously used.
1252          */
1253         if (dev->data->rx_queues[queue_id] != NULL) {
1254                 rx_queue_free(dev->data->rx_queues[queue_id]);
1255                 dev->data->rx_queues[queue_id] = NULL;
1256         }
1257
1258         /* allocate memory for the queue structure */
1259         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1260                                 socket_id);
1261         if (q == NULL) {
1262                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1263                 return (-ENOMEM);
1264         }
1265
1266         /* setup queue */
1267         q->mp = mp;
1268         q->nb_desc = nb_desc;
1269         q->port_id = dev->data->port_id;
1270         q->queue_id = queue_id;
1271         q->tail_ptr = (volatile uint32_t *)
1272                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1273         if (handle_rxconf(q, conf))
1274                 return (-EINVAL);
1275
1276         /* allocate memory for the software ring */
1277         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1278                                         nb_desc * sizeof(struct rte_mbuf *),
1279                                         RTE_CACHE_LINE_SIZE, socket_id);
1280         if (q->sw_ring == NULL) {
1281                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1282                 rte_free(q);
1283                 return (-ENOMEM);
1284         }
1285
1286         /*
1287          * allocate memory for the hardware descriptor ring. A memzone large
1288          * enough to hold the maximum ring size is requested to allow for
1289          * resizing in later calls to the queue setup function.
1290          */
1291         mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
1292                                 dev->data->port_id, queue_id, socket_id,
1293                                 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
1294         if (mz == NULL) {
1295                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1296                 rte_free(q->sw_ring);
1297                 rte_free(q);
1298                 return (-ENOMEM);
1299         }
1300         q->hw_ring = mz->addr;
1301 #ifdef RTE_LIBRTE_XEN_DOM0
1302         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1303 #else
1304         q->hw_ring_phys_addr = mz->phys_addr;
1305 #endif
1306
1307         dev->data->rx_queues[queue_id] = q;
1308         return 0;
1309 }
1310
1311 static void
1312 fm10k_rx_queue_release(void *queue)
1313 {
1314         PMD_INIT_FUNC_TRACE();
1315
1316         rx_queue_free(queue);
1317 }
1318
1319 static inline int
1320 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1321 {
1322         uint16_t tx_free_thresh;
1323         uint16_t tx_rs_thresh;
1324
1325         /* constraint MACROs require that tx_free_thresh is configured
1326          * before tx_rs_thresh */
1327         if (conf->tx_free_thresh == 0)
1328                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1329         else
1330                 tx_free_thresh = conf->tx_free_thresh;
1331
1332         /* make sure the requested threshold satisfies the constraints */
1333         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1334                         FM10K_TX_FREE_THRESH_MAX(q),
1335                         FM10K_TX_FREE_THRESH_DIV(q),
1336                         tx_free_thresh)) {
1337                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1338                         "less than or equal to %u, "
1339                         "greater than or equal to %u, "
1340                         "and a divisor of %u",
1341                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1342                         FM10K_TX_FREE_THRESH_MIN(q),
1343                         FM10K_TX_FREE_THRESH_DIV(q));
1344                 return (-EINVAL);
1345         }
1346
1347         q->free_thresh = tx_free_thresh;
1348
1349         if (conf->tx_rs_thresh == 0)
1350                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1351         else
1352                 tx_rs_thresh = conf->tx_rs_thresh;
1353
1354         q->tx_deferred_start = conf->tx_deferred_start;
1355
1356         /* make sure the requested threshold satisfies the constraints */
1357         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1358                         FM10K_TX_RS_THRESH_MAX(q),
1359                         FM10K_TX_RS_THRESH_DIV(q),
1360                         tx_rs_thresh)) {
1361                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1362                         "less than or equal to %u, "
1363                         "greater than or equal to %u, "
1364                         "and a divisor of %u",
1365                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1366                         FM10K_TX_RS_THRESH_MIN(q),
1367                         FM10K_TX_RS_THRESH_DIV(q));
1368                 return (-EINVAL);
1369         }
1370
1371         q->rs_thresh = tx_rs_thresh;
1372
1373         return 0;
1374 }
1375
1376 static int
1377 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1378         uint16_t nb_desc, unsigned int socket_id,
1379         const struct rte_eth_txconf *conf)
1380 {
1381         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1382         struct fm10k_tx_queue *q;
1383         const struct rte_memzone *mz;
1384
1385         PMD_INIT_FUNC_TRACE();
1386
1387         /* make sure a valid number of descriptors have been requested */
1388         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1389                                 FM10K_MULT_TX_DESC, nb_desc)) {
1390                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1391                         "less than or equal to %"PRIu32", "
1392                         "greater than or equal to %u, "
1393                         "and a multiple of %u",
1394                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1395                         FM10K_MULT_TX_DESC);
1396                 return (-EINVAL);
1397         }
1398
1399         /*
1400          * if this queue existed already, free the associated memory. The
1401          * queue cannot be reused in case we need to allocate memory on
1402          * different socket than was previously used.
1403          */
1404         if (dev->data->tx_queues[queue_id] != NULL) {
1405                 tx_queue_free(dev->data->tx_queues[queue_id]);
1406                 dev->data->tx_queues[queue_id] = NULL;
1407         }
1408
1409         /* allocate memory for the queue structure */
1410         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1411                                 socket_id);
1412         if (q == NULL) {
1413                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1414                 return (-ENOMEM);
1415         }
1416
1417         /* setup queue */
1418         q->nb_desc = nb_desc;
1419         q->port_id = dev->data->port_id;
1420         q->queue_id = queue_id;
1421         q->tail_ptr = (volatile uint32_t *)
1422                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1423         if (handle_txconf(q, conf))
1424                 return (-EINVAL);
1425
        /* allocate memory for the software ring */
        q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
                                        nb_desc * sizeof(struct rte_mbuf *),
                                        RTE_CACHE_LINE_SIZE, socket_id);
        if (q->sw_ring == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate software ring");
                rte_free(q);
                return (-ENOMEM);
        }

        /*
         * allocate memory for the hardware descriptor ring. A memzone large
         * enough to hold the maximum ring size is requested to allow for
         * resizing in later calls to the queue setup function.
         */
        mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
                                dev->data->port_id, queue_id, socket_id,
                                FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
                rte_free(q->sw_ring);
                rte_free(q);
                return (-ENOMEM);
        }
        q->hw_ring = mz->addr;
#ifdef RTE_LIBRTE_XEN_DOM0
        q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
#else
        q->hw_ring_phys_addr = mz->phys_addr;
#endif

        /*
         * allocate memory for the RS bit tracker. Enough slots to hold the
         * descriptor index for each RS bit needing to be set are required.
         */
        q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
                                ((nb_desc + 1) / q->rs_thresh) *
                                sizeof(uint16_t),
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (q->rs_tracker.list == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
                rte_free(q->sw_ring);
                rte_free(q);
                return (-ENOMEM);
        }

        dev->data->tx_queues[queue_id] = q;
        return 0;
}
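/*
 * Usage sketch (illustrative, with a hypothetical port_id): applications do
 * not call fm10k_tx_queue_setup() directly but reach it through the generic
 * ethdev API, e.g.
 *
 *	struct rte_eth_dev_info dev_info;
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id),
 *			&dev_info.default_txconf);
 *
 * where the descriptor count (512 here) must satisfy the
 * FM10K_MIN/MAX/MULT_TX_DESC constraints checked above.
 */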

static void
fm10k_tx_queue_release(void *queue)
{
        PMD_INIT_FUNC_TRACE();

        tx_queue_free(queue);
}

static int
fm10k_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t i, j, idx, shift;
        uint8_t mask;
        uint32_t reta;

        PMD_INIT_FUNC_TRACE();

        if (reta_size > FM10K_MAX_RSS_INDICES) {
                PMD_INIT_LOG(ERR, "The size of the configured hash lookup "
                        "table (%d) doesn't match the number the hardware "
                        "can support (%d)", reta_size, FM10K_MAX_RSS_INDICES);
                return -EINVAL;
        }

        /*
         * Update Redirection Table RETA[n], n=0..31. The redirection table
         * holds 128 entries in 32 registers
         */
        for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) &
                                BIT_MASK_PER_UINT32);
                if (mask == 0)
                        continue;

                reta = 0;
                if (mask != BIT_MASK_PER_UINT32)
                        reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));

                for (j = 0; j < CHARS_PER_UINT32; j++) {
                        if (mask & (0x1 << j)) {
                                if (mask != BIT_MASK_PER_UINT32)
                                        reta &= ~(UINT8_MAX << CHAR_BIT * j);
                                reta |= reta_conf[idx].reta[shift + j] <<
                                                (CHAR_BIT * j);
                        }
                }
                FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
        }

        return 0;
}
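/*
 * Worked example (illustrative): the 128-entry redirection table is packed
 * four 8-bit entries per 32-bit RETA register. Updating only entry 6 to
 * point at queue 3 touches register i >> 2 = 1 (entries 4..7) with j = 2:
 * the register is read back, byte 2 is cleared via ~(UINT8_MAX << 16), and
 * 3 << 16 is ORed in before the register is rewritten.
 */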

static int
fm10k_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t i, j, idx, shift;
        uint8_t mask;
        uint32_t reta;

        PMD_INIT_FUNC_TRACE();

        if (reta_size < FM10K_MAX_RSS_INDICES) {
                PMD_INIT_LOG(ERR, "The size of the configured hash lookup "
                        "table (%d) doesn't match the number the hardware "
                        "can support (%d)", reta_size, FM10K_MAX_RSS_INDICES);
                return -EINVAL;
        }

        /*
         * Read Redirection Table RETA[n], n=0..31. The redirection table
         * holds 128 entries in 32 registers
         */
        for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) &
                                BIT_MASK_PER_UINT32);
                if (mask == 0)
                        continue;

                reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
                for (j = 0; j < CHARS_PER_UINT32; j++) {
                        if (mask & (0x1 << j))
                                reta_conf[idx].reta[shift + j] = ((reta >>
                                        CHAR_BIT * j) & UINT8_MAX);
                }
        }

        return 0;
}
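/*
 * Usage sketch (illustrative, with a hypothetical port_id): reading back the
 * full table through the generic API requires the caller to populate the
 * group masks first, e.g.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	reta_conf[0].mask = UINT64_MAX;
 *	reta_conf[1].mask = UINT64_MAX;
 *	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf,
 *			FM10K_MAX_RSS_INDICES);
 *
 * Two 64-entry groups cover the 128 hardware entries.
 */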

static int
fm10k_rss_hash_update(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint32_t mrqc;
        uint64_t hf = rss_conf->rss_hf;
        int i;

        PMD_INIT_FUNC_TRACE();

        if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
                FM10K_RSSRK_ENTRIES_PER_REG)
                return -EINVAL;

        if (hf == 0)
                return -EINVAL;

        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        /* If the mapping doesn't fit any supported hash function, return */
        if (mrqc == 0)
                return -EINVAL;

        if (key != NULL)
                for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
                        FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);

        return 0;
}
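/*
 * Usage sketch (illustrative, with a hypothetical port_id): the hash
 * functions are selected through the generic API, e.g.
 *
 *	struct rte_eth_rss_conf rss_conf;
 *	rss_conf.rss_key = NULL;
 *	rss_conf.rss_key_len = FM10K_RSSRK_SIZE *
 *			FM10K_RSSRK_ENTRIES_PER_REG;
 *	rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP;
 *	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 *
 * A NULL rss_key leaves the 40-byte RSS key untouched; rss_key_len must
 * still advertise at least the full key size to pass the check above.
 */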

static int
fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint32_t mrqc;
        uint64_t hf;
        int i;

        PMD_INIT_FUNC_TRACE();

        if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
                                FM10K_RSSRK_ENTRIES_PER_REG)
                return -EINVAL;

        if (key != NULL)
                for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
                        key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));

        mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
        hf = 0;
        hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
        hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
        hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
        hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
        hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
        hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
        hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
        hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
        hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;

        rss_conf->rss_hf = hf;

        return 0;
}

static void
fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

        /* Bind all local non-queue interrupts to vector 0 (the OR with
         * vector number 0 below is a no-op).
         */
        int_map |= 0;

        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
        FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);

        /* Enable misc causes */
        FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
                                FM10K_EIMR_ENABLE(THI_FAULT) |
                                FM10K_EIMR_ENABLE(FUM_FAULT) |
                                FM10K_EIMR_ENABLE(MAILBOX) |
                                FM10K_EIMR_ENABLE(SWITCHREADY) |
                                FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
                                FM10K_EIMR_ENABLE(SRAMERROR) |
                                FM10K_EIMR_ENABLE(VFLR));

        /* Enable ITR 0 */
        FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
        FM10K_WRITE_FLUSH(hw);
}

static void
fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

        /* Bind all local non-queue interrupts to vector 0 */
        int_map |= 0;

        /* Only INT 0 is available; the other 15 are reserved. */
        FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);

        /* Enable ITR 0 */
        FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
        FM10K_WRITE_FLUSH(hw);
}
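/*
 * Re-arming note (an assumption inferred from the handlers below): with
 * FM10K_ITR_AUTOMASK set, the vector masks itself as soon as the interrupt
 * fires, so no further interrupts arrive until FM10K_ITR_MASK_CLEAR is
 * written again. This is why both interrupt handlers below finish by
 * rewriting ITR(0)/VFITR(0) with AUTOMASK | MASK_CLEAR.
 */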

static int
fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
{
        struct fm10k_fault fault;
        int err;
        const char *estr = "Unknown error";

        /* Process PCA fault */
        if (eicr & FM10K_EIMR_PCA_FAULT) {
                err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
                if (err)
                        goto error;
                switch (fault.type) {
                case PCA_NO_FAULT:
                        estr = "PCA_NO_FAULT"; break;
                case PCA_UNMAPPED_ADDR:
                        estr = "PCA_UNMAPPED_ADDR"; break;
                case PCA_BAD_QACCESS_PF:
                        estr = "PCA_BAD_QACCESS_PF"; break;
                case PCA_BAD_QACCESS_VF:
                        estr = "PCA_BAD_QACCESS_VF"; break;
                case PCA_MALICIOUS_REQ:
                        estr = "PCA_MALICIOUS_REQ"; break;
                case PCA_POISONED_TLP:
                        estr = "PCA_POISONED_TLP"; break;
                case PCA_TLP_ABORT:
                        estr = "PCA_TLP_ABORT"; break;
                default:
                        goto error;
                }
                PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
                        estr, fault.func ? "VF" : "PF", fault.func,
                        fault.address, fault.specinfo);
        }

        /* Process THI fault */
        if (eicr & FM10K_EIMR_THI_FAULT) {
                err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
                if (err)
                        goto error;
                switch (fault.type) {
                case THI_NO_FAULT:
                        estr = "THI_NO_FAULT"; break;
                case THI_MAL_DIS_Q_FAULT:
                        estr = "THI_MAL_DIS_Q_FAULT"; break;
                default:
                        goto error;
                }
                PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
                        estr, fault.func ? "VF" : "PF", fault.func,
                        fault.address, fault.specinfo);
        }

        /* Process FUM fault */
        if (eicr & FM10K_EIMR_FUM_FAULT) {
                err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
                if (err)
                        goto error;
                switch (fault.type) {
                case FUM_NO_FAULT:
                        estr = "FUM_NO_FAULT"; break;
                case FUM_UNMAPPED_ADDR:
                        estr = "FUM_UNMAPPED_ADDR"; break;
                case FUM_POISONED_TLP:
                        estr = "FUM_POISONED_TLP"; break;
                case FUM_BAD_VF_QACCESS:
                        estr = "FUM_BAD_VF_QACCESS"; break;
                case FUM_ADD_DECODE_ERR:
                        estr = "FUM_ADD_DECODE_ERR"; break;
                case FUM_RO_ERROR:
                        estr = "FUM_RO_ERROR"; break;
                case FUM_QPRC_CRC_ERROR:
                        estr = "FUM_QPRC_CRC_ERROR"; break;
                case FUM_CSR_TIMEOUT:
                        estr = "FUM_CSR_TIMEOUT"; break;
                case FUM_INVALID_TYPE:
                        estr = "FUM_INVALID_TYPE"; break;
                case FUM_INVALID_LENGTH:
                        estr = "FUM_INVALID_LENGTH"; break;
                case FUM_INVALID_BE:
                        estr = "FUM_INVALID_BE"; break;
                case FUM_INVALID_ALIGN:
                        estr = "FUM_INVALID_ALIGN"; break;
                default:
                        goto error;
                }
                PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
                        estr, fault.func ? "VF" : "PF", fault.func,
                        fault.address, fault.specinfo);
        }

        return 0;
error:
        PMD_INIT_LOG(ERR, "Failed to handle fault event.");
        return err;
}

/**
 * PF interrupt handler triggered by the NIC for handling specific interrupts.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_pf(
                        __rte_unused struct rte_intr_handle *handle,
                        void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t cause, status;

        if (hw->mac.type != fm10k_mac_pf)
                return;

        cause = FM10K_READ_REG(hw, FM10K_EICR);

        /* Handle PCI fault cases */
        if (cause & FM10K_EICR_FAULT_MASK) {
                PMD_INIT_LOG(ERR, "INT: fault detected");
                fm10k_dev_handle_fault(hw, cause);
        }

        /* Handle switch up/down */
        if (cause & FM10K_EICR_SWITCHNOTREADY)
                PMD_INIT_LOG(ERR, "INT: Switch is not ready");

        if (cause & FM10K_EICR_SWITCHREADY)
                PMD_INIT_LOG(INFO, "INT: Switch is ready");

        /* Handle mailbox message */
        fm10k_mbx_lock(hw);
        hw->mbx.ops.process(hw, &hw->mbx);
        fm10k_mbx_unlock(hw);

        /* Handle SRAM error */
        if (cause & FM10K_EICR_SRAMERROR) {
                PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");

                status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
                /* Write to clear pending bits */
                FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);

                /* Todo: print out error message after shared code updates */
        }

        /* Clear these 3 events if any are set */
        cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
                 FM10K_EICR_SWITCHREADY;
        if (cause)
                FM10K_WRITE_REG(hw, FM10K_EICR, cause);

        /* Re-enable interrupt from device side */
        FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
        /* Re-enable interrupt from host side */
        rte_intr_enable(&(dev->pci_dev->intr_handle));
}

/**
 * VF interrupt handler triggered by the NIC for handling specific interrupts.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_vf(
                        __rte_unused struct rte_intr_handle *handle,
                        void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (hw->mac.type != fm10k_mac_vf)
                return;

        /* Handle mailbox message once the lock is acquired */
        fm10k_mbx_lock(hw);
        hw->mbx.ops.process(hw, &hw->mbx);
        fm10k_mbx_unlock(hw);

        /* Re-enable interrupt from device side */
        FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
        /* Re-enable interrupt from host side */
        rte_intr_enable(&(dev->pci_dev->intr_handle));
}

/* Mailbox message handlers for the VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
        FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
        FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
        FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
        FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

/* Mailbox message handlers for the PF */
static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
        FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
        FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
        FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
        FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
        FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
        FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
        FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
        int err;

        /* Initialize mailbox lock */
        fm10k_mbx_initlock(hw);

        /* Replace the default message handlers with new ones */
        if (hw->mac.type == fm10k_mac_pf)
                err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
        else
                err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

        if (err) {
                PMD_INIT_LOG(ERR, "Failed to register mailbox handlers, err: %d",
                                err);
                return err;
        }
        /* Connect to the SM for a PF device or to the PF for a VF device */
        return hw->mbx.ops.connect(hw, &hw->mbx);
}

static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
        /* Disconnect from the SM for a PF device or from the PF for a VF device */
        hw->mbx.ops.disconnect(hw, &hw->mbx);
}

static const struct eth_dev_ops fm10k_eth_dev_ops = {
        .dev_configure          = fm10k_dev_configure,
        .dev_start              = fm10k_dev_start,
        .dev_stop               = fm10k_dev_stop,
        .dev_close              = fm10k_dev_close,
        .promiscuous_enable     = fm10k_dev_promiscuous_enable,
        .promiscuous_disable    = fm10k_dev_promiscuous_disable,
        .allmulticast_enable    = fm10k_dev_allmulticast_enable,
        .allmulticast_disable   = fm10k_dev_allmulticast_disable,
        .stats_get              = fm10k_stats_get,
        .stats_reset            = fm10k_stats_reset,
        .link_update            = fm10k_link_update,
        .dev_infos_get          = fm10k_dev_infos_get,
        .vlan_filter_set        = fm10k_vlan_filter_set,
        .vlan_offload_set       = fm10k_vlan_offload_set,
        .mac_addr_add           = fm10k_macaddr_add,
        .mac_addr_remove        = fm10k_macaddr_remove,
        .rx_queue_start         = fm10k_dev_rx_queue_start,
        .rx_queue_stop          = fm10k_dev_rx_queue_stop,
        .tx_queue_start         = fm10k_dev_tx_queue_start,
        .tx_queue_stop          = fm10k_dev_tx_queue_stop,
        .rx_queue_setup         = fm10k_rx_queue_setup,
        .rx_queue_release       = fm10k_rx_queue_release,
        .tx_queue_setup         = fm10k_tx_queue_setup,
        .tx_queue_release       = fm10k_tx_queue_release,
        .reta_update            = fm10k_reta_update,
        .reta_query             = fm10k_reta_query,
        .rss_hash_update        = fm10k_rss_hash_update,
        .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
};
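/*
 * Dispatch note (illustrative): this table wires the driver into the generic
 * ethdev API. For example, a call such as
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *
 * is dispatched through dev->dev_ops to fm10k_dev_configure(), where
 * port_conf is a hypothetical application-supplied struct rte_eth_conf.
 */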

static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int diag;
        struct fm10k_macvlan_filter_info *macvlan;

        PMD_INIT_FUNC_TRACE();

        dev->dev_ops = &fm10k_eth_dev_ops;
        dev->rx_pkt_burst = &fm10k_recv_pkts;
        dev->tx_pkt_burst = &fm10k_xmit_pkts;

        if (dev->data->scattered_rx)
                dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;

        /* only initialize in the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        memset(macvlan, 0, sizeof(*macvlan));
        /* Vendor and Device ID need to be set before init of shared code */
        memset(hw, 0, sizeof(*hw));
        hw->device_id = dev->pci_dev->id.device_id;
        hw->vendor_id = dev->pci_dev->id.vendor_id;
        hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
        hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
        hw->revision_id = 0;
        hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
        if (hw->hw_addr == NULL) {
                PMD_INIT_LOG(ERR, "Bad mem resource."
                        " Try to blacklist unused devices.");
                return -EIO;
        }

        /* Store fm10k_adapter pointer */
        hw->back = dev->data->dev_private;

        /* Initialize the shared code */
        diag = fm10k_init_shared_code(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
                return -EIO;
        }

        /*
         * Initialize bus info. Normally we would call fm10k_get_bus_info(),
         * but there is no way to get link status without reading BAR4. Until
         * this works, assume we have maximum bandwidth.
         * @todo - fix bus info
         */
        hw->bus_caps.speed = fm10k_bus_speed_8000;
        hw->bus_caps.width = fm10k_bus_width_pcie_x8;
        hw->bus_caps.payload = fm10k_bus_payload_512;
        hw->bus.speed = fm10k_bus_speed_8000;
        hw->bus.width = fm10k_bus_width_pcie_x8;
        hw->bus.payload = fm10k_bus_payload_256;

        /* Initialize the hw */
        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        /* Initialize MAC address(es) */
        dev->data->mac_addrs = rte_zmalloc("fm10k",
                        ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
        if (dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
                return -ENOMEM;
        }

        diag = fm10k_read_mac_addr(hw);

        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                        &dev->data->mac_addrs[0]);

        if (diag != FM10K_SUCCESS ||
                !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {

                /* Generate a random addr */
                eth_random_addr(hw->mac.addr);
                memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
                ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                                &dev->data->mac_addrs[0]);
        }

        /* Reset the hw statistics */
        fm10k_stats_reset(dev);

        /* Reset the hw */
        diag = fm10k_reset_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
                return -EIO;
        }

        /* Setup mailbox service */
        diag = fm10k_setup_mbx_service(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
                return -EIO;
        }

        /* PF and VF have different interrupt handling mechanisms */
        if (hw->mac.type == fm10k_mac_pf) {
                /* register callback func to eal lib */
                rte_intr_callback_register(&(dev->pci_dev->intr_handle),
                        fm10k_dev_interrupt_handler_pf, (void *)dev);

                /* enable MISC interrupt */
                fm10k_dev_enable_intr_pf(dev);
        } else { /* VF */
                rte_intr_callback_register(&(dev->pci_dev->intr_handle),
                        fm10k_dev_interrupt_handler_vf, (void *)dev);

                fm10k_dev_enable_intr_vf(dev);
        }

        /* Enable uio intr after callback registered */
        rte_intr_enable(&(dev->pci_dev->intr_handle));

        hw->mac.ops.update_int_moderator(hw);

        /* Make sure Switch Manager is ready before going forward. */
        if (hw->mac.type == fm10k_mac_pf) {
                int switch_ready = 0;
                int i;

                for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
                        fm10k_mbx_lock(hw);
                        hw->mac.ops.get_host_state(hw, &switch_ready);
                        fm10k_mbx_unlock(hw);
                        if (switch_ready)
                                break;
                        /* Delay some time to acquire async LPORT_MAP info. */
                        rte_delay_us(WAIT_SWITCH_MSG_US);
                }

                if (switch_ready == 0) {
                        PMD_INIT_LOG(ERR, "switch is not ready");
                        return -1;
                }
        }

        /*
         * The calls below trigger operations on the mailbox; acquire the
         * lock to avoid racing with the interrupt handler. Operations on the
         * mailbox FIFO raise an interrupt to the PF/SM, whose handler in
         * turn generates an interrupt back to our side, where the mailbox
         * FIFO is touched again.
         */
        fm10k_mbx_lock(hw);
        /* Enable port first */
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);

        /* Set unicast mode by default. App can change to other modes in
         * other API functions.
         */
        hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                        FM10K_XCAST_MODE_NONE);

        fm10k_mbx_unlock(hw);

        /* Add default mac address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true);

        return 0;
}
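/*
 * Init-order note (illustrative summary): eth_fm10k_dev_init() runs once per
 * matched PCI device during rte_eal_init()/driver probe, in this order:
 * shared-code init, hw init, MAC address read, hw reset, mailbox connect,
 * interrupt setup, wait for the switch manager, enable the logical port with
 * FM10K_XCAST_MODE_NONE, then install the default MAC filter. The
 * application subsequently discovers the port via rte_eth_dev_count() and
 * configures it through the ops table above.
 */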

/*
 * The set of PCI devices this driver supports. This driver will enable both PF
 * and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
#include "rte_pci_dev_ids.h"
        { .vendor_id = 0, /* sentinel */ },
};

static struct eth_driver rte_pmd_fm10k = {
        .pci_drv = {
                .name = "rte_pmd_fm10k",
                .id_table = pci_id_fm10k_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_fm10k_dev_init,
        .dev_private_size = sizeof(struct fm10k_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
 */
static int
rte_pmd_fm10k_init(__rte_unused const char *name,
        __rte_unused const char *params)
{
        PMD_INIT_FUNC_TRACE();
        rte_eth_driver_register(&rte_pmd_fm10k);
        return 0;
}

static struct rte_driver rte_fm10k_driver = {
        .type = PMD_PDEV,
        .init = rte_pmd_fm10k_init,
};

PMD_REGISTER_DRIVER(rte_fm10k_driver);