ethdev: fix check of threshold for Tx freeing
dpdk.git: drivers/net/fm10k/fm10k_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

/* Maximum number of attempts to query switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US    100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
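/*
 * Note: CHARS_PER_UINT32 evaluates to 4 (bytes per 32-bit register) and
 * BIT_MASK_PER_UINT32 to 0xF, i.e. one mask bit per byte lane of a RETA
 * register; see fm10k_reta_update() below.
 */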

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void
fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add);
static void
fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev);

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        uint64_t dma_addr;
        int i, diag;
        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                                q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        q->next_dd = 0;
        q->next_alloc = 0;
        q->next_trigger = q->alloc_thresh - 1;
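        /* Hand all but one descriptor to hardware: keeping a single slot
         * unused means head == tail can only mean an empty ring, never a
         * full one (a common ring convention, assumed here).
         */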
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}
/*
 * disable RX queue, wait until HW finishes necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                /* poll the queue being disabled, not the loop counter */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

/*
 * reset queue to initial state; used when starting device. Unlike the Rx
 * path, no software buffers need to be allocated here.
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
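        /* One RS (report status) bit is requested every rs_thresh
         * descriptors, so the tracker FIFO needs at most
         * (nb_desc + 1) / rs_thresh slots, matching its allocation in
         * fm10k_tx_queue_setup().
         */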
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}
/*
 * disable TX queue, wait until HW finishes necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                /* poll the queue being disabled, not the loop counter */
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");

        return 0;
}

static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev->data->nb_rx_queues == 1 ||
            dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
            dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
                return;

        /* hash key is rss_intel_key (default) or user provided (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
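        /* Example: with 4 Rx queues the accumulator below holds 0x00010203
         * after four iterations; rte_bswap32() stores it as 0x03020100, so
         * queue 0 lands in the least significant byte of the register.
         */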
        reta = 0;
        for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }

        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
                        "supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
        }
        return 0;
}

static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint16_t buf_size;

        /* Disable RXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                                3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

                /* Configure the Rx buffer size for one buffer without split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As RX buffer is aligned to 512B within mbuf, some bytes are
                 * reserved for this purpose, and the worst case could be 511B.
                 * But SRR reg assumes all buffers have the same size. In order
                 * to fill the gap, we'll have to consider the worst case and
                 * assume 512B is reserved. If we don't do so, it's possible
                 * for HW to overwrite data to next mbuf.
                 */
                buf_size -= FM10K_RX_DATABUF_ALIGN;
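                /* Example: a mempool with a 2 KB data room and 128 B of
                 * headroom yields buf_size = 1920 B, reduced to 1408 B once
                 * the worst-case 512 B alignment reservation is subtracted.
                 */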

                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

                /* Account for two VLAN tags (QinQ) when checking whether the
                 * maximum packet length still fits in one buffer */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }

        /* Configure RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);
        return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory: %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
        }

        return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
        }

        return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_reset(dev->data->tx_queues[tx_queue_id]);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                        FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
        } else
                err = -1;

        return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
        }

        return 0;
}

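/* A GLORT (Global Resource Tag) range is handed out by the switch manager;
 * a dglort_map still equal to FM10K_DGLORTMAP_NONE means no valid range has
 * been acquired yet, so filter and xcast updates would be meaningless.
 */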
static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if (!fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if (!fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, it doesn't make sense to enable
         * allmulticast and disable promiscuous since fm10k only can select
         * one of the modes.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
                        "no need to enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        /* stop, init, then start the hw */
        diag = fm10k_stop_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_start_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_dev_tx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
                return diag;
        }

        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
                return diag;
        }

        if (hw->mac.type == fm10k_mac_pf) {
                /* Establish only VSI 0 as valid */
                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);

                /* Configure RSS bits used in RETA table */
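                /* fls(n - 1) is the number of bits needed to index n queues,
                 * e.g. 4 Rx queues -> fls(3) = 2 RSS table bits.
                 */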
                FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
                                fls(dev->data->nb_rx_queues - 1) <<
                                FM10K_DGLORTDEC_RSSLENGTH_SHIFT);

                /* Invalidate all other GLORT entries */
                for (i = 1; i < FM10K_DGLORT_COUNT; i++)
                        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
                                        FM10K_DGLORTMAP_NONE);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
                rxq = dev->data->rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
                diag = fm10k_dev_rx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
                txq = dev->data->tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        /* clean up the Tx queues started so far, then the Rx
                         * queues, which have all been started by this point */
                        for (j = 0; j < i; ++j)
                                tx_queue_clean(dev->data->tx_queues[j]);
                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        if (hw->mac.default_vid && hw->mac.default_vid <= ETHER_MAX_VLAN_ID) {
                /* Update default vlan */
                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

                /* Add default mac/vlan filter to PF/Switch manager */
                fm10k_MAC_filter_set(dev, hw->mac.addr, true);
        }

        return 0;
}

static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_tx_queues; i++)
                fm10k_dev_tx_queue_stop(dev, i);

        for (i = 0; i < dev->data->nb_rx_queues; i++)
                fm10k_dev_rx_queue_stop(dev, i);
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        fm10k_MACVLAN_remove_all(dev);

        /* Stop mailbox service first */
        fm10k_close_mbx_service(hw);
        fm10k_dev_stop(dev);
        fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
{
        PMD_INIT_FUNC_TRACE();

        /* The host-interface link is always up.  The speed is ~50Gbps per Gen3
         * x8 PCIe interface. For now, we leave the speed undefined since there
         * is no 50Gbps Ethernet. */
        dev->data->dev_link.link_speed  = 0;
        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        dev->data->dev_link.link_status = 1;

        return 0;
}

static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        uint64_t ipackets, opackets, ibytes, obytes;
        struct fm10k_hw *hw =
                FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int i;

        PMD_INIT_FUNC_TRACE();

        fm10k_update_hw_stats(hw, hw_stats);

        ipackets = opackets = ibytes = obytes = 0;
        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
                (i < hw->mac.max_queues); ++i) {
                stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
                stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
                stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
                stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
                ipackets += stats->q_ipackets[i];
                opackets += stats->q_opackets[i];
                ibytes   += stats->q_ibytes[i];
                obytes   += stats->q_obytes[i];
        }
        stats->ipackets = ipackets;
        stats->opackets = opackets;
        stats->ibytes = ibytes;
        stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        memset(hw_stats, 0, sizeof(*hw_stats));
        fm10k_rebind_hw_stats(hw, hw_stats);
}

static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
        struct rte_eth_dev_info *dev_info)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
        dev_info->max_rx_queues      = hw->mac.max_queues;
        dev_info->max_tx_queues      = hw->mac.max_queues;
        dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs            = dev->pci_dev->max_vfs;
        dev_info->max_vmdq_pools     = ETH_64_POOLS;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT;
        dev_info->reta_size = FM10K_MAX_RSS_INDICES;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = FM10K_DEFAULT_RX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_RX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = FM10K_DEFAULT_TX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_TX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
                .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
}

static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        s32 result;
        uint16_t mac_num = 0;
        uint32_t vid_idx, vid_bit, mac_index;
        struct fm10k_hw *hw;
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_eth_dev_data *data = dev->data;

        hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (vlan_id > ETH_VLAN_ID_MAX) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
                return (-EINVAL);
        }

        vid_idx = FM10K_VFTA_IDX(vlan_id);
        vid_bit = FM10K_VFTA_BIT(vlan_id);
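        /* vfta[] is a bitmap with one bit per possible VLAN ID: vid_idx
         * selects the 32-bit word and vid_bit the bit within that word.
         */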
        /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
        if (on && (macvlan->vfta[vid_idx] & vid_bit))
                return 0;
        /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
        if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
                        "in the VLAN filter table");
                return (-EINVAL);
        }

        fm10k_mbx_lock(hw);
        result = fm10k_update_vlan(hw, vlan_id, 0, on);
        fm10k_mbx_unlock(hw);
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
                return (-EIO);
        }

        for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
                        (result == FM10K_SUCCESS); mac_index++) {
                if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
                        continue;
                if (mac_num > macvlan->mac_num - 1) {
                        PMD_INIT_LOG(ERR, "MAC address number "
                                        "does not match");
                        break;
                }
                fm10k_mbx_lock(hw);
                result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
                        data->mac_addrs[mac_index].addr_bytes,
                        vlan_id, on, 0);
                fm10k_mbx_unlock(hw);
                mac_num++;
        }
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
                return (-EIO);
        }

        if (on) {
                macvlan->vlan_num++;
                macvlan->vfta[vid_idx] |= vid_bit;
        } else {
                macvlan->vlan_num--;
                macvlan->vfta[vid_idx] &= ~vid_bit;
        }
        return 0;
}

static void
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        if (mask & ETH_VLAN_STRIP_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
                        PMD_INIT_LOG(ERR, "VLAN stripping is "
                                        "always on in fm10k");
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        PMD_INIT_LOG(ERR, "VLAN QinQ is not "
                                        "supported in fm10k");
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
                        PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
        }
}

/* Add/Remove a MAC address, and update filters for every active VLAN */
static void
fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add)
{
        uint32_t i, j, k;
        struct fm10k_hw *hw;
        struct fm10k_macvlan_filter_info *macvlan;

        hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        i = 0;
        for (j = 0; j < FM10K_VFTA_SIZE; j++) {
                if (macvlan->vfta[j]) {
                        for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
                                if (macvlan->vfta[j] & (1 << k)) {
                                        if (i + 1 > macvlan->vlan_num) {
                                                PMD_INIT_LOG(ERR, "VLAN number "
                                                                "does not match");
                                                return;
                                        }
                                        fm10k_mbx_lock(hw);
                                        fm10k_update_uc_addr(hw,
                                                hw->mac.dglort_map, mac,
                                                j * FM10K_UINT32_BIT_SIZE + k,
                                                add, 0);
                                        fm10k_mbx_unlock(hw);
                                        i++;
                                }
                        }
                }
        }

        if (add)
                macvlan->mac_num++;
        else
                macvlan->mac_num--;
}

/* Add a MAC address, and update filters */
static void
fm10k_macaddr_add(struct rte_eth_dev *dev,
                 struct ether_addr *mac_addr,
                 __rte_unused uint32_t index,
                 __rte_unused uint32_t pool)
{
        fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE);
}

/* Remove a MAC address, and update filters */
static void
fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct rte_eth_dev_data *data = dev->data;

        if (index < FM10K_MAX_MACADDR_NUM)
                fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
                                FALSE);
}

/* Remove all VLAN and MAC address table entries */
static void
fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev)
{
        uint32_t j, k;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        for (j = 0; j < FM10K_VFTA_SIZE; j++) {
                if (macvlan->vfta[j]) {
                        for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
                                if (macvlan->vfta[j] & (1 << k))
                                        fm10k_vlan_filter_set(dev,
                                                j * FM10K_UINT32_BIT_SIZE + k, false);
                        }
                }
        }
}

static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
        if ((request < min) || (request > max) || ((request % mult) != 0))
                return -1;
        else
                return 0;
}

/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
        uint8_t port_id, uint16_t queue_id, int socket_id,
        uint32_t size, uint32_t align)
{
        char name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

        snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
                 driver_name, ring_name, port_id, queue_id, socket_id);

        /* return the memzone if it already exists */
        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
        return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
                                           RTE_PGSIZE_2M);
#else
        return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}

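/*
 * Note the asymmetry with check_nb_desc(): here the request must evenly
 * divide "div" (typically the ring size), so that threshold events land on
 * the same descriptor offsets on every pass around the ring.
 */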
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
        if ((request < min) || (request > max) || ((div % request) != 0))
                return -1;
        else
                return 0;
}

static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
        uint16_t rx_free_thresh;

        if (conf->rx_free_thresh == 0)
                rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
        else
                rx_free_thresh = conf->rx_free_thresh;

        /* make sure the requested threshold satisfies the constraints */
        if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
                        FM10K_RX_FREE_THRESH_MAX(q),
                        FM10K_RX_FREE_THRESH_DIV(q),
                        rx_free_thresh)) {
                PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
                        "less than or equal to %u, "
                        "greater than or equal to %u, "
                        "and a divisor of %u",
                        rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
                        FM10K_RX_FREE_THRESH_MIN(q),
                        FM10K_RX_FREE_THRESH_DIV(q));
                return (-EINVAL);
        }

        q->alloc_thresh = rx_free_thresh;
        q->drop_en = conf->rx_drop_en;
        q->rx_deferred_start = conf->rx_deferred_start;

        return 0;
}

/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
        uint32_t min_size;

        /* elt_size includes mbuf header and headroom */
        min_size = mp->elt_size - sizeof(struct rte_mbuf) -
                        RTE_PKTMBUF_HEADROOM;

        /* account for up to 512B of alignment */
        min_size -= FM10K_RX_DATABUF_ALIGN;

        /* sanity check for overflow: if the element is too small, the
         * unsigned subtractions above wrap and min_size exceeds elt_size */
        if (min_size > mp->elt_size)
                return 0;

        /* size is valid */
        return 1;
}

static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
        uint16_t nb_desc, unsigned int socket_id,
        const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_rx_queue *q;
        const struct rte_memzone *mz;

        PMD_INIT_FUNC_TRACE();

        /* make sure the mempool element size can account for alignment. */
        if (!mempool_element_size_valid(mp)) {
                PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
                return (-EINVAL);
        }

        /* make sure a valid number of descriptors have been requested */
        if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
                                FM10K_MULT_RX_DESC, nb_desc)) {
                PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
                        "less than or equal to %"PRIu32", "
                        "greater than or equal to %u, "
                        "and a multiple of %u",
                        nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
                        FM10K_MULT_RX_DESC);
                return (-EINVAL);
        }

        /*
         * if this queue existed already, free the associated memory. The
         * queue cannot be reused in case we need to allocate memory on
         * different socket than was previously used.
         */
        if (dev->data->rx_queues[queue_id] != NULL) {
                rx_queue_free(dev->data->rx_queues[queue_id]);
                dev->data->rx_queues[queue_id] = NULL;
        }

        /* allocate memory for the queue structure */
        q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
                                socket_id);
        if (q == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
                return (-ENOMEM);
        }

        /* setup queue */
        q->mp = mp;
        q->nb_desc = nb_desc;
        q->port_id = dev->data->port_id;
        q->queue_id = queue_id;
        q->tail_ptr = (volatile uint32_t *)
                &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
        if (handle_rxconf(q, conf)) {
                /* free the queue structure so an invalid rxconf does not
                 * leak the allocation made above */
                rte_free(q);
                return (-EINVAL);
        }

        /* allocate memory for the software ring */
        q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
                                        nb_desc * sizeof(struct rte_mbuf *),
                                        RTE_CACHE_LINE_SIZE, socket_id);
        if (q->sw_ring == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate software ring");
                rte_free(q);
                return (-ENOMEM);
        }

        /*
         * allocate memory for the hardware descriptor ring. A memzone large
         * enough to hold the maximum ring size is requested to allow for
         * resizing in later calls to the queue setup function.
         */
        mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
                                dev->data->port_id, queue_id, socket_id,
                                FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
                rte_free(q->sw_ring);
                rte_free(q);
                return (-ENOMEM);
        }
        q->hw_ring = mz->addr;
#ifdef RTE_LIBRTE_XEN_DOM0
        q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
#else
        q->hw_ring_phys_addr = mz->phys_addr;
#endif

        dev->data->rx_queues[queue_id] = q;
        return 0;
}

static void
fm10k_rx_queue_release(void *queue)
{
        PMD_INIT_FUNC_TRACE();

        rx_queue_free(queue);
}

static inline int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
        uint16_t tx_free_thresh;
        uint16_t tx_rs_thresh;

        /* constraint macros require that tx_free_thresh is configured
         * before tx_rs_thresh */
        if (conf->tx_free_thresh == 0)
                tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
        else
                tx_free_thresh = conf->tx_free_thresh;

        /* make sure the requested threshold satisfies the constraints */
        if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
                        FM10K_TX_FREE_THRESH_MAX(q),
                        FM10K_TX_FREE_THRESH_DIV(q),
                        tx_free_thresh)) {
                PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
                        "less than or equal to %u, "
                        "greater than or equal to %u, "
                        "and a divisor of %u",
                        tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
                        FM10K_TX_FREE_THRESH_MIN(q),
                        FM10K_TX_FREE_THRESH_DIV(q));
                return (-EINVAL);
        }

        q->free_thresh = tx_free_thresh;

        if (conf->tx_rs_thresh == 0)
                tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
        else
                tx_rs_thresh = conf->tx_rs_thresh;

        q->tx_deferred_start = conf->tx_deferred_start;

        /* make sure the requested threshold satisfies the constraints */
        if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
                        FM10K_TX_RS_THRESH_MAX(q),
                        FM10K_TX_RS_THRESH_DIV(q),
                        tx_rs_thresh)) {
                PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
                        "less than or equal to %u, "
                        "greater than or equal to %u, "
                        "and a divisor of %u",
                        tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
                        FM10K_TX_RS_THRESH_MIN(q),
                        FM10K_TX_RS_THRESH_DIV(q));
                return (-EINVAL);
        }

        q->rs_thresh = tx_rs_thresh;

        return 0;
}
1378
1379 static int
1380 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1381         uint16_t nb_desc, unsigned int socket_id,
1382         const struct rte_eth_txconf *conf)
1383 {
1384         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1385         struct fm10k_tx_queue *q;
1386         const struct rte_memzone *mz;
1387
1388         PMD_INIT_FUNC_TRACE();
1389
1390         /* make sure a valid number of descriptors have been requested */
1391         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1392                                 FM10K_MULT_TX_DESC, nb_desc)) {
1393                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1394                         "less than or equal to %"PRIu32", "
1395                         "greater than or equal to %u, "
1396                         "and a multiple of %u",
1397                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1398                         FM10K_MULT_TX_DESC);
1399                 return (-EINVAL);
1400         }
1401
1402         /*
1403          * if this queue existed already, free the associated memory. The
1404          * queue cannot be reused in case we need to allocate memory on
1405          * different socket than was previously used.
1406          */
1407         if (dev->data->tx_queues[queue_id] != NULL) {
1408                 tx_queue_free(dev->data->tx_queues[queue_id]);
1409                 dev->data->tx_queues[queue_id] = NULL;
1410         }
1411
1412         /* allocate memory for the queue structure */
1413         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1414                                 socket_id);
1415         if (q == NULL) {
1416                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1417                 return (-ENOMEM);
1418         }
1419
1420         /* setup queue */
1421         q->nb_desc = nb_desc;
1422         q->port_id = dev->data->port_id;
1423         q->queue_id = queue_id;
1424         q->tail_ptr = (volatile uint32_t *)
1425                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1426         if (handle_txconf(q, conf))
1427                 return (-EINVAL);
1428
1429         /* allocate memory for the software ring */
1430         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1431                                         nb_desc * sizeof(struct rte_mbuf *),
1432                                         RTE_CACHE_LINE_SIZE, socket_id);
1433         if (q->sw_ring == NULL) {
1434                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1435                 rte_free(q);
1436                 return (-ENOMEM);
1437         }
1438
1439         /*
1440          * allocate memory for the hardware descriptor ring. A memzone large
1441          * enough to hold the maximum ring size is requested to allow for
1442          * resizing in later calls to the queue setup function.
1443          */
1444         mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1445                                 dev->data->port_id, queue_id, socket_id,
1446                                 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
1447         if (mz == NULL) {
1448                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1449                 rte_free(q->sw_ring);
1450                 rte_free(q);
1451                 return (-ENOMEM);
1452         }
1453         q->hw_ring = mz->addr;
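             /* Under Xen Dom0, the guest-physical address must be translated
              * to a machine address before it can be handed to the hardware.
              */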
1454 #ifdef RTE_LIBRTE_XEN_DOM0
1455         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1456 #else
1457         q->hw_ring_phys_addr = mz->phys_addr;
1458 #endif
1459
1460         /*
1461          * allocate memory for the RS bit tracker. We need enough slots to
1462          * hold the descriptor index for each RS bit that must be set.
1463          */
1464         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1465                                 ((nb_desc + 1) / q->rs_thresh) *
1466                                 sizeof(uint16_t),
1467                                 RTE_CACHE_LINE_SIZE, socket_id);
1468         if (q->rs_tracker.list == NULL) {
1469                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1470                 rte_free(q->sw_ring);
1471                 rte_free(q);
1472                 return (-ENOMEM);
1473         }
1474
1475         dev->data->tx_queues[queue_id] = q;
1476         return 0;
1477 }
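
/*
 * Usage sketch (application side; port_id, queue index, ring size and
 * threshold are illustrative, and unset rte_eth_txconf fields fall back
 * to driver defaults):
 *
 *	struct rte_eth_txconf txconf = { .tx_rs_thresh = 32 };
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			&txconf) != 0)
 *		rte_exit(EXIT_FAILURE, "Tx queue setup failed\n");
 */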
1478
1479 static void
1480 fm10k_tx_queue_release(void *queue)
1481 {
1482         PMD_INIT_FUNC_TRACE();
1483
1484         tx_queue_free(queue);
1485 }
1486
1487 static int
1488 fm10k_reta_update(struct rte_eth_dev *dev,
1489                         struct rte_eth_rss_reta_entry64 *reta_conf,
1490                         uint16_t reta_size)
1491 {
1492         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1493         uint16_t i, j, idx, shift;
1494         uint8_t mask;
1495         uint32_t reta;
1496
1497         PMD_INIT_FUNC_TRACE();
1498
1499         if (reta_size > FM10K_MAX_RSS_INDICES) {
1500                 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1501                         "(%d) doesn't match the number hardware can support "
1502                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1503                 return -EINVAL;
1504         }
1505
1506         /*
1507          * Update Redirection Table RETA[n], n=0..31. The redirection table has
1508          * 128 entries in 32 registers
1509          */
1510         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1511                 idx = i / RTE_RETA_GROUP_SIZE;
1512                 shift = i % RTE_RETA_GROUP_SIZE;
1513                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1514                                 BIT_MASK_PER_UINT32);
1515                 if (mask == 0)
1516                         continue;
1517
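                /*
                 * Four 8-bit table entries are packed into each 32-bit RETA
                 * register. If the mask selects only part of a register,
                 * read it back first so that unselected entries survive the
                 * read-modify-write below.
                 */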
1518                 reta = 0;
1519                 if (mask != BIT_MASK_PER_UINT32)
1520                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1521
1522                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1523                         if (mask & (0x1 << j)) {
1524                                 if (mask != BIT_MASK_PER_UINT32)
1525                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
1526                                 reta |= reta_conf[idx].reta[shift + j] <<
1527                                                 (CHAR_BIT * j);
1528                         }
1529                 }
1530                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
1531         }
1532
1533         return 0;
1534 }
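
/*
 * Usage sketch (application side, illustrative): spread the 128 RETA
 * entries evenly across 4 Rx queues.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *	int i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < 128; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *				1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *				i % 4;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */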
1535
1536 static int
1537 fm10k_reta_query(struct rte_eth_dev *dev,
1538                         struct rte_eth_rss_reta_entry64 *reta_conf,
1539                         uint16_t reta_size)
1540 {
1541         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1542         uint16_t i, j, idx, shift;
1543         uint8_t mask;
1544         uint32_t reta;
1545
1546         PMD_INIT_FUNC_TRACE();
1547
1548         if (reta_size < FM10K_MAX_RSS_INDICES) {
1549                 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
1550                         "(%d) doesn't match the number hardware can support "
1551                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1552                 return -EINVAL;
1553         }
1554
1555         /*
1556          * Read Redirection Table RETA[n], n=0..31. The redirection table has
1557          * 128 entries in 32 registers
1558          */
1559         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1560                 idx = i / RTE_RETA_GROUP_SIZE;
1561                 shift = i % RTE_RETA_GROUP_SIZE;
1562                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1563                                 BIT_MASK_PER_UINT32);
1564                 if (mask == 0)
1565                         continue;
1566
1567                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1568                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1569                         if (mask & (0x1 << j))
1570                                 reta_conf[idx].reta[shift + j] = ((reta >>
1571                                         CHAR_BIT * j) & UINT8_MAX);
1572                 }
1573         }
1574
1575         return 0;
1576 }
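
/*
 * Usage sketch (application side, illustrative): read the whole table
 * back; every entry to be fetched must be selected in the mask.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	reta_conf[0].mask = UINT64_MAX;
 *	reta_conf[1].mask = UINT64_MAX;
 *	rte_eth_dev_rss_reta_query(port_id, reta_conf, 128);
 */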
1577
1578 static int
1579 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1580         struct rte_eth_rss_conf *rss_conf)
1581 {
1582         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1583         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1584         uint32_t mrqc;
1585         uint64_t hf = rss_conf->rss_hf;
1586         int i;
1587
1588         PMD_INIT_FUNC_TRACE();
1589
1590         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1591                 FM10K_RSSRK_ENTRIES_PER_REG)
1592                 return -EINVAL;
1593
1594         if (hf == 0)
1595                 return -EINVAL;
1596
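        /*
         * Translate the generic rss_hf flags into MRQC hash-type bits.
         * Several generic flags deliberately map onto the same hardware
         * bit (e.g. the plain and _EX IPv6 variants).
         */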
1597         mrqc = 0;
1598         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
1599         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
1600         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
1601         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
1602         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
1603         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
1604         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
1605         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
1606         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
1607
1608         /* If none of the requested hash types is supported, return an error */
1609         if (mrqc == 0)
1610                 return -EINVAL;
1611
1612         if (key != NULL)
1613                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1614                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1615
1616         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
1617
1618         return 0;
1619 }
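
/*
 * Usage sketch (application side; the key content is a placeholder): the
 * key must cover all FM10K_RSSRK_SIZE key registers, i.e. 40 bytes here.
 *
 *	uint8_t rss_key[40] = { 0 };	(fill with real key material)
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = rss_key,
 *		.rss_key_len = sizeof(rss_key),
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */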
1620
1621 static int
1622 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1623         struct rte_eth_rss_conf *rss_conf)
1624 {
1625         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1626         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1627         uint32_t mrqc;
1628         uint64_t hf;
1629         int i;
1630
1631         PMD_INIT_FUNC_TRACE();
1632
1633         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1634                                 FM10K_RSSRK_ENTRIES_PER_REG)
1635                 return -EINVAL;
1636
1637         if (key != NULL)
1638                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1639                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1640
1641         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
1642         hf = 0;
1643         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
1644         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
1645         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
1646         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
1647         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
1648         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
1649         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
1650         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
1651         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
1652
1653         rss_conf->rss_hf = hf;
1654
1655         return 0;
1656 }
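
/*
 * Usage sketch (application side, illustrative): retrieve the active key
 * and hash-function set; the supplied buffer must be large enough.
 *
 *	uint8_t key[40];
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *	};
 *
 *	rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
 */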
1657
1658 static void
1659 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
1660 {
1661         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1662         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1663
1664         /* Bind all local non-queue interrupts to vector 0 */
1665         int_map |= 0;
1666
1667         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1668         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1669         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1670         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1671         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1672         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1673
1674         /* Enable misc causes */
1675         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
1676                                 FM10K_EIMR_ENABLE(THI_FAULT) |
1677                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
1678                                 FM10K_EIMR_ENABLE(MAILBOX) |
1679                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
1680                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
1681                                 FM10K_EIMR_ENABLE(SRAMERROR) |
1682                                 FM10K_EIMR_ENABLE(VFLR));
1683
1684         /* Enable ITR 0 */
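        /* (To the best of our understanding, AUTOMASK re-masks the vector
         * each time it fires and MASK_CLEAR unmasks it now; the interrupt
         * handler then re-arms the vector, as done at the end of
         * fm10k_dev_interrupt_handler_pf().)
         */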
1685         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1686                                         FM10K_ITR_MASK_CLEAR);
1687         FM10K_WRITE_FLUSH(hw);
1688 }
1689
1690 static void
1691 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
1692 {
1693         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1694         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1695
1696         /* Bind all local non-queue interrupts to vector 0 */
1697         int_map |= 0;
1698
1699         /* Only INT 0 is available; the other 15 are reserved. */
1700         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
1701
1702         /* Enable ITR 0 */
1703         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1704                                         FM10K_ITR_MASK_CLEAR);
1705         FM10K_WRITE_FLUSH(hw);
1706 }
1707
1708 static int
1709 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
1710 {
1711         struct fm10k_fault fault;
1712         int err;
1713         const char *estr = "Unknown error";
1714
1715         /* Process PCA fault */
1716         if (eicr & FM10K_EIMR_PCA_FAULT) {
1717                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
1718                 if (err)
1719                         goto error;
1720                 switch (fault.type) {
1721                 case PCA_NO_FAULT:
1722                         estr = "PCA_NO_FAULT"; break;
1723                 case PCA_UNMAPPED_ADDR:
1724                         estr = "PCA_UNMAPPED_ADDR"; break;
1725                 case PCA_BAD_QACCESS_PF:
1726                         estr = "PCA_BAD_QACCESS_PF"; break;
1727                 case PCA_BAD_QACCESS_VF:
1728                         estr = "PCA_BAD_QACCESS_VF"; break;
1729                 case PCA_MALICIOUS_REQ:
1730                         estr = "PCA_MALICIOUS_REQ"; break;
1731                 case PCA_POISONED_TLP:
1732                         estr = "PCA_POISONED_TLP"; break;
1733                 case PCA_TLP_ABORT:
1734                         estr = "PCA_TLP_ABORT"; break;
1735                 default:
1736                         goto error;
1737                 }
1738                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1739                         estr, fault.func ? "VF" : "PF", fault.func,
1740                         fault.address, fault.specinfo);
1741         }
1742
1743         /* Process THI fault */
1744         if (eicr & FM10K_EIMR_THI_FAULT) {
1745                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
1746                 if (err)
1747                         goto error;
1748                 switch (fault.type) {
1749                 case THI_NO_FAULT:
1750                         estr = "THI_NO_FAULT"; break;
1751                 case THI_MAL_DIS_Q_FAULT:
1752                         estr = "THI_MAL_DIS_Q_FAULT"; break;
1753                 default:
1754                         goto error;
1755                 }
1756                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1757                         estr, fault.func ? "VF" : "PF", fault.func,
1758                         fault.address, fault.specinfo);
1759         }
1760
1761         /* Process FUM fault */
1762         if (eicr & FM10K_EIMR_FUM_FAULT) {
1763                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
1764                 if (err)
1765                         goto error;
1766                 switch (fault.type) {
1767                 case FUM_NO_FAULT:
1768                         estr = "FUM_NO_FAULT"; break;
1769                 case FUM_UNMAPPED_ADDR:
1770                         estr = "FUM_UNMAPPED_ADDR"; break;
1771                 case FUM_POISONED_TLP:
1772                         estr = "FUM_POISONED_TLP"; break;
1773                 case FUM_BAD_VF_QACCESS:
1774                         estr = "FUM_BAD_VF_QACCESS"; break;
1775                 case FUM_ADD_DECODE_ERR:
1776                         estr = "FUM_ADD_DECODE_ERR"; break;
1777                 case FUM_RO_ERROR:
1778                         estr = "FUM_RO_ERROR"; break;
1779                 case FUM_QPRC_CRC_ERROR:
1780                         estr = "FUM_QPRC_CRC_ERROR"; break;
1781                 case FUM_CSR_TIMEOUT:
1782                         estr = "FUM_CSR_TIMEOUT"; break;
1783                 case FUM_INVALID_TYPE:
1784                         estr = "FUM_INVALID_TYPE"; break;
1785                 case FUM_INVALID_LENGTH:
1786                         estr = "FUM_INVALID_LENGTH"; break;
1787                 case FUM_INVALID_BE:
1788                         estr = "FUM_INVALID_BE"; break;
1789                 case FUM_INVALID_ALIGN:
1790                         estr = "FUM_INVALID_ALIGN"; break;
1791                 default:
1792                         goto error;
1793                 }
1794                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1795                         estr, fault.func ? "VF" : "PF", fault.func,
1796                         fault.address, fault.specinfo);
1797         }
1798
1799         return 0;
1802 error:
1803         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
1804         return err;
1805 }
1806
1807 /**
1808  * PF interrupt handler triggered by NIC for handling specific interrupt.
1809  *
1810  * @param handle
1811  *  Pointer to interrupt handle.
1812  * @param param
1813  *  The address of parameter (struct rte_eth_dev *) registered before.
1814  *
1815  * @return
1816  *  void
1817  */
1818 static void
1819 fm10k_dev_interrupt_handler_pf(
1820                         __rte_unused struct rte_intr_handle *handle,
1821                         void *param)
1822 {
1823         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1824         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1825         uint32_t cause, status;
1826
1827         if (hw->mac.type != fm10k_mac_pf)
1828                 return;
1829
1830         cause = FM10K_READ_REG(hw, FM10K_EICR);
1831
1832         /* Handle PCI fault cases */
1833         if (cause & FM10K_EICR_FAULT_MASK) {
1834                 PMD_INIT_LOG(ERR, "INT: fault detected!");
1835                 fm10k_dev_handle_fault(hw, cause);
1836         }
1837
1838         /* Handle switch up/down */
1839         if (cause & FM10K_EICR_SWITCHNOTREADY)
1840                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
1841
1842         if (cause & FM10K_EICR_SWITCHREADY)
1843                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
1844
1845         /* Handle mailbox message */
1846         fm10k_mbx_lock(hw);
1847         hw->mbx.ops.process(hw, &hw->mbx);
1848         fm10k_mbx_unlock(hw);
1849
1850         /* Handle SRAM error */
1851         if (cause & FM10K_EICR_SRAMERROR) {
1852                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
1853
1854                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
1855                 /* Write to clear pending bits */
1856                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
1857
1858                 /* TODO: print out error message after shared code updates */
1859         }
1860
1861         /* Clear these 3 events, if any are pending */
1862         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
1863                  FM10K_EICR_SWITCHREADY;
1864         if (cause)
1865                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
1866
1867         /* Re-enable interrupt from device side */
1868         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1869                                         FM10K_ITR_MASK_CLEAR);
1870         /* Re-enable interrupt from host side */
1871         rte_intr_enable(&(dev->pci_dev->intr_handle));
1872 }
1873
1874 /**
1875  * VF interrupt handler triggered by NIC for handling specific interrupt.
1876  *
1877  * @param handle
1878  *  Pointer to interrupt handle.
1879  * @param param
1880  *  The address of parameter (struct rte_eth_dev *) registered before.
1881  *
1882  * @return
1883  *  void
1884  */
1885 static void
1886 fm10k_dev_interrupt_handler_vf(
1887                         __rte_unused struct rte_intr_handle *handle,
1888                         void *param)
1889 {
1890         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1891         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1892
1893         if (hw->mac.type != fm10k_mac_vf)
1894                 return;
1895
1896         /* Handle mailbox message while holding the mailbox lock */
1897         fm10k_mbx_lock(hw);
1898         hw->mbx.ops.process(hw, &hw->mbx);
1899         fm10k_mbx_unlock(hw);
1900
1901         /* Re-enable interrupt from device side */
1902         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1903                                         FM10K_ITR_MASK_CLEAR);
1904         /* Re-enable interrupt from host side */
1905         rte_intr_enable(&(dev->pci_dev->intr_handle));
1906 }
1907
1908 /* Mailbox message handler in VF */
1909 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
1910         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1911         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
1912         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
1913         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1914 };
1915
1916 /* Mailbox message handler in PF */
1917 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
1918         FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1919         FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1920         FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1921         FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1922         FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1923         FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1924         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1925 };
1926
1927 static int
1928 fm10k_setup_mbx_service(struct fm10k_hw *hw)
1929 {
1930         int err;
1931
1932         /* Initialize mailbox lock */
1933         fm10k_mbx_initlock(hw);
1934
1935         /* Replace default message handler with new ones */
1936         if (hw->mac.type == fm10k_mac_pf)
1937                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
1938         else
1939                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
1940
1941         if (err) {
1942                 PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
1943                                 err);
1944                 return err;
1945         }
1946         /* Connect to SM for PF device or PF for VF device */
1947         return hw->mbx.ops.connect(hw, &hw->mbx);
1948 }
1949
1950 static void
1951 fm10k_close_mbx_service(struct fm10k_hw *hw)
1952 {
1953         /* Disconnect from SM for PF device or PF for VF device */
1954         hw->mbx.ops.disconnect(hw, &hw->mbx);
1955 }
1956
1957 static const struct eth_dev_ops fm10k_eth_dev_ops = {
1958         .dev_configure          = fm10k_dev_configure,
1959         .dev_start              = fm10k_dev_start,
1960         .dev_stop               = fm10k_dev_stop,
1961         .dev_close              = fm10k_dev_close,
1962         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
1963         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
1964         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
1965         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
1966         .stats_get              = fm10k_stats_get,
1967         .stats_reset            = fm10k_stats_reset,
1968         .link_update            = fm10k_link_update,
1969         .dev_infos_get          = fm10k_dev_infos_get,
1970         .vlan_filter_set        = fm10k_vlan_filter_set,
1971         .vlan_offload_set       = fm10k_vlan_offload_set,
1972         .mac_addr_add           = fm10k_macaddr_add,
1973         .mac_addr_remove        = fm10k_macaddr_remove,
1974         .rx_queue_start         = fm10k_dev_rx_queue_start,
1975         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
1976         .tx_queue_start         = fm10k_dev_tx_queue_start,
1977         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
1978         .rx_queue_setup         = fm10k_rx_queue_setup,
1979         .rx_queue_release       = fm10k_rx_queue_release,
1980         .tx_queue_setup         = fm10k_tx_queue_setup,
1981         .tx_queue_release       = fm10k_tx_queue_release,
1982         .reta_update            = fm10k_reta_update,
1983         .reta_query             = fm10k_reta_query,
1984         .rss_hash_update        = fm10k_rss_hash_update,
1985         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
1986 };
1987
1988 static int
1989 eth_fm10k_dev_init(struct rte_eth_dev *dev)
1990 {
1991         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1992         int diag;
1993         struct fm10k_macvlan_filter_info *macvlan;
1994
1995         PMD_INIT_FUNC_TRACE();
1996
1997         dev->dev_ops = &fm10k_eth_dev_ops;
1998         dev->rx_pkt_burst = &fm10k_recv_pkts;
1999         dev->tx_pkt_burst = &fm10k_xmit_pkts;
2000
2001         if (dev->data->scattered_rx)
2002                 dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
2003
2004         /* only initialize in the primary process */
2005         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2006                 return 0;
2007
2008         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2009         memset(macvlan, 0, sizeof(*macvlan));
2010         /* Vendor and Device ID need to be set before init of shared code */
2011         memset(hw, 0, sizeof(*hw));
2012         hw->device_id = dev->pci_dev->id.device_id;
2013         hw->vendor_id = dev->pci_dev->id.vendor_id;
2014         hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2015         hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2016         hw->revision_id = 0;
2017         hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2018         if (hw->hw_addr == NULL) {
2019                 PMD_INIT_LOG(ERR, "Bad mem resource."
2020                         " Try to blacklist unused devices.");
2021                 return -EIO;
2022         }
2023
2024         /* Store fm10k_adapter pointer */
2025         hw->back = dev->data->dev_private;
2026
2027         /* Initialize the shared code */
2028         diag = fm10k_init_shared_code(hw);
2029         if (diag != FM10K_SUCCESS) {
2030                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2031                 return -EIO;
2032         }
2033
2034         /*
2035          * Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2036          * there is no way to get link status without reading BAR4. Until this
2037          * works, assume we have maximum bandwidth.
2038          * @todo - fix bus info
2039          */
2040         hw->bus_caps.speed = fm10k_bus_speed_8000;
2041         hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2042         hw->bus_caps.payload = fm10k_bus_payload_512;
2043         hw->bus.speed = fm10k_bus_speed_8000;
2044         hw->bus.width = fm10k_bus_width_pcie_x8;
2045         hw->bus.payload = fm10k_bus_payload_256;
2046
2047         /* Initialize the hw */
2048         diag = fm10k_init_hw(hw);
2049         if (diag != FM10K_SUCCESS) {
2050                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2051                 return -EIO;
2052         }
2053
2054         /* Initialize MAC address(es) */
2055         dev->data->mac_addrs = rte_zmalloc("fm10k",
2056                         ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2057         if (dev->data->mac_addrs == NULL) {
2058                 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2059                 return -ENOMEM;
2060         }
2061
2062         diag = fm10k_read_mac_addr(hw);
2063
2064         ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2065                         &dev->data->mac_addrs[0]);
2066
2067         if (diag != FM10K_SUCCESS ||
2068                 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2069
2070                 /* Generate a random addr */
2071                 eth_random_addr(hw->mac.addr);
2072                 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
2073                 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2074                         &dev->data->mac_addrs[0]);
2075         }
2076
2077         /* Reset the hw statistics */
2078         fm10k_stats_reset(dev);
2079
2080         /* Reset the hw */
2081         diag = fm10k_reset_hw(hw);
2082         if (diag != FM10K_SUCCESS) {
2083                 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2084                 return -EIO;
2085         }
2086
2087         /* Setup mailbox service */
2088         diag = fm10k_setup_mbx_service(hw);
2089         if (diag != FM10K_SUCCESS) {
2090                 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2091                 return -EIO;
2092         }
2093
2094         /* PF and VF have different interrupt handling mechanisms */
2095         if (hw->mac.type == fm10k_mac_pf) {
2096                 /* register callback func to eal lib */
2097                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2098                         fm10k_dev_interrupt_handler_pf, (void *)dev);
2099
2100                 /* enable MISC interrupt */
2101                 fm10k_dev_enable_intr_pf(dev);
2102         } else { /* VF */
2103                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2104                         fm10k_dev_interrupt_handler_vf, (void *)dev);
2105
2106                 fm10k_dev_enable_intr_vf(dev);
2107         }
2108
2109         /* Enable uio intr after the callback is registered */
2110         rte_intr_enable(&(dev->pci_dev->intr_handle));
2111
2112         hw->mac.ops.update_int_moderator(hw);
2113
2114         /* Make sure Switch Manager is ready before going forward. */
2115         if (hw->mac.type == fm10k_mac_pf) {
2116                 int switch_ready = 0;
2117                 int i;
2118
2119                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2120                         fm10k_mbx_lock(hw);
2121                         hw->mac.ops.get_host_state(hw, &switch_ready);
2122                         fm10k_mbx_unlock(hw);
2123                         if (switch_ready)
2124                                 break;
2125                         /* Delay some time to acquire async LPORT_MAP info. */
2126                         rte_delay_us(WAIT_SWITCH_MSG_US);
2127                 }
2128
2129                 if (switch_ready == 0) {
2130                         PMD_INIT_LOG(ERR, "switch is not ready");
2131                         return -1;
2132                 }
2133         }
2134
2135         /*
2136          * The calls below trigger mailbox operations, so acquire the lock
2137          * to avoid racing with the interrupt handler. Operations on the
2138          * mailbox FIFO raise an interrupt to the PF/SM, whose handler
2139          * responds with an interrupt back to our side, at which point the
2140          * mailbox FIFO is touched again.
2141          */
2142         fm10k_mbx_lock(hw);
2143         /* Enable port first */
2144         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2145
2146         /* Set unicast mode by default. The application can switch to
2147          * another mode later through the relevant API.
2148          */
2149         hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2150                                         FM10K_XCAST_MODE_NONE);
2151
2152         fm10k_mbx_unlock(hw);
2153
2155         return 0;
2156 }
2157
2158 /*
2159  * The set of PCI devices this driver supports. This driver will enable both PF
2160  * and SRIOV-VF devices.
2161  */
2162 static const struct rte_pci_id pci_id_fm10k_map[] = {
2163 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2164 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2165 #include "rte_pci_dev_ids.h"
2166         { .vendor_id = 0, /* sentinel */ },
2167 };
2168
2169 static struct eth_driver rte_pmd_fm10k = {
2170         .pci_drv = {
2171                 .name = "rte_pmd_fm10k",
2172                 .id_table = pci_id_fm10k_map,
2173                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
2174         },
2175         .eth_dev_init = eth_fm10k_dev_init,
2176         .dev_private_size = sizeof(struct fm10k_adapter),
2177 };
2178
2179 /*
2180  * Driver initialization routine.
2181  * Invoked once at EAL init time.
2182  * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
2183  */
2184 static int
2185 rte_pmd_fm10k_init(__rte_unused const char *name,
2186         __rte_unused const char *params)
2187 {
2188         PMD_INIT_FUNC_TRACE();
2189         rte_eth_driver_register(&rte_pmd_fm10k);
2190         return 0;
2191 }
2192
2193 static struct rte_driver rte_fm10k_driver = {
2194         .type = PMD_PDEV,
2195         .init = rte_pmd_fm10k_init,
2196 };
2197
2198 PMD_REGISTER_DRIVER(rte_fm10k_driver);