ethdev: add the hash key size per device
[dpdk.git] / drivers / net / fm10k / fm10k_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

/* Maximum number of attempts to query switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval between switch status queries */
#define WAIT_SWITCH_MSG_US    100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
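/* The two constants above describe how the 128-entry RSS redirection
 * table (RETA) is packed: each 32-bit register holds CHARS_PER_UINT32
 * (i.e. 4) one-byte entries, and BIT_MASK_PER_UINT32 (0xF) covers the
 * four per-entry mask bits belonging to one register; see
 * fm10k_reta_update() below.
 */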

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void
fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add);
static void
fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev);

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

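/* Acquire the mailbox lock; back off for FM10K_MBXLOCK_DELAY_US between
 * trylock attempts rather than spinning on the lock word.
 */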
static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        uint64_t dma_addr;
        int i, diag;
        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                                q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        q->next_dd = 0;
        q->next_alloc = 0;
        q->next_trigger = q->alloc_thresh - 1;
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory; called when releasing the queue (i.e. on
 * queue reconfigure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most, polling the control register of the queue
         * being disabled (not the loop counter's queue).
         */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory; called when releasing the queue (i.e. on
 * queue reconfigure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most, polling the control register of the queue
         * being disabled (not the loop counter's queue).
         */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");

        return 0;
}

static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
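        /* Default 40-byte Toeplitz hash key. This appears to be the same
         * well-known default key used by other Intel PMDs; it is only
         * used when the application does not supply its own key via
         * rss_conf.rss_key.
         */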
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev->data->nb_rx_queues == 1 ||
            dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
            dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
                return;

        /* hash key is rss_intel_key (default) or user-provided (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
        reta = 0;
        for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }
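        /* Example (assuming 4 Rx queues): entries 0..3 accumulate into
         * reta == 0x00010203, and rte_bswap32() stores 0x03020100, so
         * RETA entry 0 ends up in bits 7:0 of the register.
         */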

        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
                        "supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
        }
        return 0;
}

static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint16_t buf_size;

        /* Disable RXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                                3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

                /* Configure the Rx buffer size for one buff without split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As RX buffer is aligned to 512B within mbuf, some bytes are
                 * reserved for this purpose, and the worst case could be 511B.
                 * But SRR reg assumes all buffers have the same size. In order
                 * to fill the gap, we'll have to consider the worst case and
                 * assume 512B is reserved. If we don't do so, it's possible
                 * for HW to overwrite data to next mbuf.
                 */
                buf_size -= FM10K_RX_DATABUF_ALIGN;

                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

                /* Add dual VLAN tag length to support double VLAN tagging */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }

        /* Configure RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);
        return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
        }

        return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
        }

        return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_reset(dev->data->tx_queues[tx_queue_id]);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                        FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
        } else
                err = -1;

        return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
        }

        return 0;
}

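/* The switch manager assigns a glort (global resource tag) range over the
 * mailbox; hw->mac.dglort_map keeps the FM10K_DGLORTMAP_NONE marker until
 * that assignment has completed.
 */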
static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if a valid glort range has not been acquired yet */
        if (!fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if a valid glort range has not been acquired yet */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if a valid glort range has not been acquired yet */
        if (!fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, it doesn't make sense to also
         * enable allmulticast, since fm10k can only select one of the
         * xcast modes at a time.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
                        "no need to enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if a valid glort range has not been acquired yet */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
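/* fls() examples: fls(0) == 0, fls(1) == 1, fls(3) == 2, fls(8) == 4.
 * fm10k_dev_start() below uses fls(nb_rx_queues - 1) to size the RETA
 * index field, e.g. 4 Rx queues need fls(3) == 2 RSS bits.
 */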
#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        /* stop, init, then start the hw */
        diag = fm10k_stop_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_start_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_dev_tx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
                return diag;
        }

        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
                return diag;
        }

        if (hw->mac.type == fm10k_mac_pf) {
                /* Establish only VSI 0 as valid */
                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);

                /* Configure RSS bits used in RETA table */
                FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
                                fls(dev->data->nb_rx_queues - 1) <<
                                FM10K_DGLORTDEC_RSSLENGTH_SHIFT);

                /* Invalidate all other GLORT entries */
                for (i = 1; i < FM10K_DGLORT_COUNT; i++)
                        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
                                        FM10K_DGLORTMAP_NONE);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
                rxq = dev->data->rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
                diag = fm10k_dev_rx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
                txq = dev->data->tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        if (hw->mac.default_vid && hw->mac.default_vid <= ETHER_MAX_VLAN_ID) {
                /* Update default vlan */
                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

                /* Add default mac/vlan filter to PF/Switch manager */
                fm10k_MAC_filter_set(dev, hw->mac.addr, true);
        }

        return 0;
}

static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_tx_queues; i++)
                fm10k_dev_tx_queue_stop(dev, i);

        for (i = 0; i < dev->data->nb_rx_queues; i++)
                fm10k_dev_rx_queue_stop(dev, i);
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        fm10k_MACVLAN_remove_all(dev);

        /* Stop mailbox service first */
        fm10k_close_mbx_service(hw);
        fm10k_dev_stop(dev);
        fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
{
        PMD_INIT_FUNC_TRACE();

        /* The host-interface link is always up.  The speed is ~50Gbps per Gen3
         * x8 PCIe interface. For now, we leave the speed undefined since there
         * is no 50Gbps Ethernet. */
        dev->data->dev_link.link_speed  = 0;
        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        dev->data->dev_link.link_status = 1;

        return 0;
}

static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        uint64_t ipackets, opackets, ibytes, obytes;
        struct fm10k_hw *hw =
                FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int i;

        PMD_INIT_FUNC_TRACE();

        fm10k_update_hw_stats(hw, hw_stats);

        ipackets = opackets = ibytes = obytes = 0;
        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
                (i < hw->mac.max_queues); ++i) {
                stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
                stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
                stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
                stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
                ipackets += stats->q_ipackets[i];
                opackets += stats->q_opackets[i];
                ibytes   += stats->q_ibytes[i];
                obytes   += stats->q_obytes[i];
        }
        stats->ipackets = ipackets;
        stats->opackets = opackets;
        stats->ibytes = ibytes;
        stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        memset(hw_stats, 0, sizeof(*hw_stats));
        fm10k_rebind_hw_stats(hw, hw_stats);
}

static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
        struct rte_eth_dev_info *dev_info)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
        dev_info->max_rx_queues      = hw->mac.max_queues;
        dev_info->max_tx_queues      = hw->mac.max_queues;
        dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs            = dev->pci_dev->max_vfs;
        dev_info->max_vmdq_pools     = ETH_64_POOLS;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT;
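        /* hash_key_size is reported in bytes: the RSS key spans
         * FM10K_RSSRK_SIZE 32-bit RSSRK registers, which should match the
         * 40-byte RSS_KEY_SIZE programmed in fm10k_dev_mq_rx_configure().
         */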
        dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
        dev_info->reta_size = FM10K_MAX_RSS_INDICES;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = FM10K_DEFAULT_RX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_RX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = FM10K_DEFAULT_TX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_TX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
                .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
}

static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        s32 result;
        uint16_t mac_num = 0;
        uint32_t vid_idx, vid_bit, mac_index;
        struct fm10k_hw *hw;
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_eth_dev_data *data = dev->data;

        hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (vlan_id > ETH_VLAN_ID_MAX) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
                return (-EINVAL);
        }

        vid_idx = FM10K_VFTA_IDX(vlan_id);
        vid_bit = FM10K_VFTA_BIT(vlan_id);
        /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
        if (on && (macvlan->vfta[vid_idx] & vid_bit))
                return 0;
        /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
        if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: does not exist "
                        "in the VLAN filter table");
                return (-EINVAL);
        }

        fm10k_mbx_lock(hw);
        result = fm10k_update_vlan(hw, vlan_id, 0, on);
        fm10k_mbx_unlock(hw);
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
                return (-EIO);
        }

        for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
                        (result == FM10K_SUCCESS); mac_index++) {
                if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
                        continue;
                if (mac_num > macvlan->mac_num - 1) {
                        PMD_INIT_LOG(ERR, "number of MAC addresses "
                                        "does not match");
                        break;
                }
                fm10k_mbx_lock(hw);
                result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
                        data->mac_addrs[mac_index].addr_bytes,
                        vlan_id, on, 0);
                fm10k_mbx_unlock(hw);
                mac_num++;
        }
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
                return (-EIO);
        }

        if (on) {
                macvlan->vlan_num++;
                macvlan->vfta[vid_idx] |= vid_bit;
        } else {
                macvlan->vlan_num--;
                macvlan->vfta[vid_idx] &= ~vid_bit;
        }
        return 0;
}

static void
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        if (mask & ETH_VLAN_STRIP_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
                        PMD_INIT_LOG(ERR, "VLAN stripping is "
                                        "always on in fm10k");
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        PMD_INIT_LOG(ERR, "VLAN QinQ is not "
                                        "supported in fm10k");
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
                        PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
        }
}

/* Add/Remove a MAC address, and update filters */
static void
fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add)
{
        uint32_t i, j, k;
        struct fm10k_hw *hw;
        struct fm10k_macvlan_filter_info *macvlan;

        hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        i = 0;
        for (j = 0; j < FM10K_VFTA_SIZE; j++) {
                if (macvlan->vfta[j]) {
                        for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
                                if (macvlan->vfta[j] & (1 << k)) {
                                        if (i + 1 > macvlan->vlan_num) {
                                                PMD_INIT_LOG(ERR, "VLAN number "
                                                                "does not match");
                                                return;
                                        }
                                        fm10k_mbx_lock(hw);
                                        fm10k_update_uc_addr(hw,
                                                hw->mac.dglort_map, mac,
                                                j * FM10K_UINT32_BIT_SIZE + k,
                                                add, 0);
                                        fm10k_mbx_unlock(hw);
                                        i++;
                                }
                        }
                }
        }

        if (add)
                macvlan->mac_num++;
        else
                macvlan->mac_num--;
}

/* Add a MAC address, and update filters */
static void
fm10k_macaddr_add(struct rte_eth_dev *dev,
                 struct ether_addr *mac_addr,
                 __rte_unused uint32_t index,
                 __rte_unused uint32_t pool)
{
        fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE);
}

/* Remove a MAC address, and update filters */
static void
fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct rte_eth_dev_data *data = dev->data;

        if (index < FM10K_MAX_MACADDR_NUM)
                fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
                                FALSE);
}

/* Remove all VLAN and MAC address table entries */
static void
fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev)
{
        uint32_t j, k;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        for (j = 0; j < FM10K_VFTA_SIZE; j++) {
                if (macvlan->vfta[j]) {
                        for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
                                if (macvlan->vfta[j] & (1 << k))
                                        fm10k_vlan_filter_set(dev,
                                                j * FM10K_UINT32_BIT_SIZE + k, false);
                        }
                }
        }
}

static inline int
check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
{
        if ((request < min) || (request > max) || ((request % mult) != 0))
                return -1;
        else
                return 0;
}

/*
 * Create a memzone for hardware descriptor rings. Malloc cannot be used since
 * the physical address is required. If the memzone is already created, then
 * this function returns a pointer to the existing memzone.
 */
static inline const struct rte_memzone *
allocate_hw_ring(const char *driver_name, const char *ring_name,
        uint8_t port_id, uint16_t queue_id, int socket_id,
        uint32_t size, uint32_t align)
{
        char name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

        snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
                 driver_name, ring_name, port_id, queue_id, socket_id);

        /* return the memzone if it already exists */
        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

#ifdef RTE_LIBRTE_XEN_DOM0
        return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
                                           RTE_PGSIZE_2M);
#else
        return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
#endif
}

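/* A requested threshold is valid when it lies in [min, max] and evenly
 * divides 'div' (a ring-size-derived divisor), so thresholds always land
 * on ring wrap boundaries.
 */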
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
{
        if ((request < min) || (request > max) || ((div % request) != 0))
                return -1;
        else
                return 0;
}

static inline int
handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
{
        uint16_t rx_free_thresh;

        if (conf->rx_free_thresh == 0)
                rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
        else
                rx_free_thresh = conf->rx_free_thresh;

        /* make sure the requested threshold satisfies the constraints */
        if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
                        FM10K_RX_FREE_THRESH_MAX(q),
                        FM10K_RX_FREE_THRESH_DIV(q),
                        rx_free_thresh)) {
                PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
                        "less than or equal to %u, "
                        "greater than or equal to %u, "
                        "and a divisor of %u",
                        rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
                        FM10K_RX_FREE_THRESH_MIN(q),
                        FM10K_RX_FREE_THRESH_DIV(q));
                return (-EINVAL);
        }

        q->alloc_thresh = rx_free_thresh;
        q->drop_en = conf->rx_drop_en;
        q->rx_deferred_start = conf->rx_deferred_start;

        return 0;
}

/*
 * Hardware requires specific alignment for Rx packet buffers. At
 * least one of the following two conditions must be satisfied.
 *  1. Address is 512B aligned
 *  2. Address is 8B aligned and buffer does not cross 4K boundary.
 *
 * As such, the driver may need to adjust the DMA address within the
 * buffer by up to 512B.
 *
 * return 1 if the element size is valid, otherwise return 0.
 */
static int
mempool_element_size_valid(struct rte_mempool *mp)
{
        uint32_t min_size;

        /* elt_size includes mbuf header and headroom */
        min_size = mp->elt_size - sizeof(struct rte_mbuf) -
                        RTE_PKTMBUF_HEADROOM;

        /* account for up to 512B of alignment */
        min_size -= FM10K_RX_DATABUF_ALIGN;

        /* sanity check for overflow */
        if (min_size > mp->elt_size)
                return 0;

        /* size is valid */
        return 1;
}
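
/* Illustrative numbers for mempool_element_size_valid(): with a common
 * 2176-byte element, subtracting sizeof(struct rte_mbuf), the
 * RTE_PKTMBUF_HEADROOM (128 by default) and the 512-byte
 * FM10K_RX_DATABUF_ALIGN reserve must still leave room for packet data;
 * if the unsigned subtraction wraps, min_size > elt_size catches it and
 * the mempool is rejected.
 */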

static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
        uint16_t nb_desc, unsigned int socket_id,
        const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_rx_queue *q;
        const struct rte_memzone *mz;

        PMD_INIT_FUNC_TRACE();

        /* make sure the mempool element size can account for alignment. */
        if (!mempool_element_size_valid(mp)) {
                PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
                return (-EINVAL);
        }

        /* make sure a valid number of descriptors have been requested */
        if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
                                FM10K_MULT_RX_DESC, nb_desc)) {
                PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
                        "less than or equal to %"PRIu32", "
                        "greater than or equal to %u, "
                        "and a multiple of %u",
                        nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
                        FM10K_MULT_RX_DESC);
                return (-EINVAL);
        }

        /*
         * if this queue existed already, free the associated memory. The
         * queue cannot be reused in case we need to allocate memory on
         * different socket than was previously used.
         */
        if (dev->data->rx_queues[queue_id] != NULL) {
                rx_queue_free(dev->data->rx_queues[queue_id]);
                dev->data->rx_queues[queue_id] = NULL;
        }

        /* allocate memory for the queue structure */
        q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
                                socket_id);
        if (q == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
                return (-ENOMEM);
        }

        /* setup queue */
        q->mp = mp;
        q->nb_desc = nb_desc;
        q->port_id = dev->data->port_id;
        q->queue_id = queue_id;
        q->tail_ptr = (volatile uint32_t *)
                &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
        if (handle_rxconf(q, conf)) {
                rte_free(q);
                return (-EINVAL);
        }

        /* allocate memory for the software ring */
        q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
                                        nb_desc * sizeof(struct rte_mbuf *),
                                        RTE_CACHE_LINE_SIZE, socket_id);
        if (q->sw_ring == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate software ring");
                rte_free(q);
                return (-ENOMEM);
        }

        /*
         * allocate memory for the hardware descriptor ring. A memzone large
         * enough to hold the maximum ring size is requested to allow for
         * resizing in later calls to the queue setup function.
         */
        mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
                                dev->data->port_id, queue_id, socket_id,
                                FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
                rte_free(q->sw_ring);
                rte_free(q);
                return (-ENOMEM);
        }
        q->hw_ring = mz->addr;
#ifdef RTE_LIBRTE_XEN_DOM0
        q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
#else
        q->hw_ring_phys_addr = mz->phys_addr;
#endif

        dev->data->rx_queues[queue_id] = q;
        return 0;
}

static void
fm10k_rx_queue_release(void *queue)
{
        PMD_INIT_FUNC_TRACE();

        rx_queue_free(queue);
}

static inline int
handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
{
        uint16_t tx_free_thresh;
        uint16_t tx_rs_thresh;

        /* the constraint macros require that tx_free_thresh is configured
         * before tx_rs_thresh */
        if (conf->tx_free_thresh == 0)
                tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
        else
                tx_free_thresh = conf->tx_free_thresh;

        /* make sure the requested threshold satisfies the constraints */
        if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
                        FM10K_TX_FREE_THRESH_MAX(q),
                        FM10K_TX_FREE_THRESH_DIV(q),
                        tx_free_thresh)) {
                PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
                        "less than or equal to %u, "
                        "greater than or equal to %u, "
                        "and a divisor of %u",
                        tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
                        FM10K_TX_FREE_THRESH_MIN(q),
                        FM10K_TX_FREE_THRESH_DIV(q));
                return (-EINVAL);
        }

        q->free_thresh = tx_free_thresh;

        if (conf->tx_rs_thresh == 0)
                tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
        else
                tx_rs_thresh = conf->tx_rs_thresh;

        q->tx_deferred_start = conf->tx_deferred_start;

        /* make sure the requested threshold satisfies the constraints */
        if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
                        FM10K_TX_RS_THRESH_MAX(q),
                        FM10K_TX_RS_THRESH_DIV(q),
                        tx_rs_thresh)) {
                PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
                        "less than or equal to %u, "
                        "greater than or equal to %u, "
                        "and a divisor of %u",
                        tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
                        FM10K_TX_RS_THRESH_MIN(q),
                        FM10K_TX_RS_THRESH_DIV(q));
                return (-EINVAL);
        }

        q->rs_thresh = tx_rs_thresh;

        return 0;
}

static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
        uint16_t nb_desc, unsigned int socket_id,
        const struct rte_eth_txconf *conf)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_tx_queue *q;
        const struct rte_memzone *mz;

        PMD_INIT_FUNC_TRACE();

        /* make sure a valid number of descriptors have been requested */
        if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
                                FM10K_MULT_TX_DESC, nb_desc)) {
                PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
                        "less than or equal to %"PRIu32", "
                        "greater than or equal to %u, "
                        "and a multiple of %u",
                        nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
                        FM10K_MULT_TX_DESC);
                return (-EINVAL);
        }

        /*
         * if this queue existed already, free the associated memory. The
         * queue cannot be reused in case we need to allocate memory on
         * different socket than was previously used.
         */
        if (dev->data->tx_queues[queue_id] != NULL) {
                tx_queue_free(dev->data->tx_queues[queue_id]);
                dev->data->tx_queues[queue_id] = NULL;
        }

        /* allocate memory for the queue structure */
        q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
                                socket_id);
        if (q == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
                return (-ENOMEM);
        }

        /* setup queue */
        q->nb_desc = nb_desc;
        q->port_id = dev->data->port_id;
        q->queue_id = queue_id;
        q->tail_ptr = (volatile uint32_t *)
                &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
        if (handle_txconf(q, conf)) {
                rte_free(q);
                return (-EINVAL);
        }
1429
1430         /* allocate memory for the software ring */
1431         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1432                                         nb_desc * sizeof(struct rte_mbuf *),
1433                                         RTE_CACHE_LINE_SIZE, socket_id);
1434         if (q->sw_ring == NULL) {
1435                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1436                 rte_free(q);
1437                 return (-ENOMEM);
1438         }
1439
1440         /*
1441          * allocate memory for the hardware descriptor ring. A memzone large
1442          * enough to hold the maximum ring size is requested to allow for
1443          * resizing in later calls to the queue setup function.
1444          */
1445         mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1446                                 dev->data->port_id, queue_id, socket_id,
1447                                 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
1448         if (mz == NULL) {
1449                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1450                 rte_free(q->sw_ring);
1451                 rte_free(q);
                return -ENOMEM;
1453         }
1454         q->hw_ring = mz->addr;
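        /*
         * When running as a Xen Dom0 application, the guest-physical
         * address of the memzone must be translated to a machine address
         * before it can be programmed into the device.
         */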
1455 #ifdef RTE_LIBRTE_XEN_DOM0
1456         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1457 #else
1458         q->hw_ring_phys_addr = mz->phys_addr;
1459 #endif
1460
        /*
         * allocate memory for the RS bit tracker. We need enough slots to
         * hold the descriptor index for each RS bit that must be set.
         */
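        /* e.g. nb_desc = 512 with rs_thresh = 32 gives (512 + 1) / 32 = 16
         * slots, one per RS descriptor in the ring
         */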
1465         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1466                                 ((nb_desc + 1) / q->rs_thresh) *
1467                                 sizeof(uint16_t),
1468                                 RTE_CACHE_LINE_SIZE, socket_id);
1469         if (q->rs_tracker.list == NULL) {
1470                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1471                 rte_free(q->sw_ring);
1472                 rte_free(q);
                return -ENOMEM;
1474         }
1475
1476         dev->data->tx_queues[queue_id] = q;
1477         return 0;
1478 }
1479
1480 static void
1481 fm10k_tx_queue_release(void *queue)
1482 {
1483         PMD_INIT_FUNC_TRACE();
1484
1485         tx_queue_free(queue);
1486 }
1487
1488 static int
1489 fm10k_reta_update(struct rte_eth_dev *dev,
1490                         struct rte_eth_rss_reta_entry64 *reta_conf,
1491                         uint16_t reta_size)
1492 {
1493         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1494         uint16_t i, j, idx, shift;
1495         uint8_t mask;
1496         uint32_t reta;
1497
1498         PMD_INIT_FUNC_TRACE();
1499
1500         if (reta_size > FM10K_MAX_RSS_INDICES) {
                PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) exceeds the maximum supported by hardware "
                        "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1504                 return -EINVAL;
1505         }
1506
        /*
         * Update Redirection Table RETA[n], n=0..31. The redirection table
         * has 128 entries in 32 registers.
         */
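        /*
         * Each 32-bit RETA register packs four 8-bit queue indices, so one
         * register write updates four table entries at once; e.g. writing
         * 0x03020100 maps four consecutive hash buckets to queues 0-3.
         */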
1511         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1512                 idx = i / RTE_RETA_GROUP_SIZE;
1513                 shift = i % RTE_RETA_GROUP_SIZE;
1514                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1515                                 BIT_MASK_PER_UINT32);
1516                 if (mask == 0)
1517                         continue;
1518
1519                 reta = 0;
1520                 if (mask != BIT_MASK_PER_UINT32)
1521                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1522
1523                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1524                         if (mask & (0x1 << j)) {
                                if (mask != BIT_MASK_PER_UINT32)
1526                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
1527                                 reta |= reta_conf[idx].reta[shift + j] <<
1528                                                 (CHAR_BIT * j);
1529                         }
1530                 }
1531                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
1532         }
1533
1534         return 0;
1535 }
1536
1537 static int
1538 fm10k_reta_query(struct rte_eth_dev *dev,
1539                         struct rte_eth_rss_reta_entry64 *reta_conf,
1540                         uint16_t reta_size)
1541 {
1542         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1543         uint16_t i, j, idx, shift;
1544         uint8_t mask;
1545         uint32_t reta;
1546
1547         PMD_INIT_FUNC_TRACE();
1548
1549         if (reta_size < FM10K_MAX_RSS_INDICES) {
                PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) is smaller than the hardware table size "
                        "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1553                 return -EINVAL;
1554         }
1555
        /*
         * Read Redirection Table RETA[n], n=0..31. The redirection table
         * has 128 entries in 32 registers.
         */
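        /*
         * The read path mirrors the update path: each 32-bit register is
         * unpacked into four 8-bit queue indices.
         */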
1560         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1561                 idx = i / RTE_RETA_GROUP_SIZE;
1562                 shift = i % RTE_RETA_GROUP_SIZE;
1563                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1564                                 BIT_MASK_PER_UINT32);
1565                 if (mask == 0)
1566                         continue;
1567
1568                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1569                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1570                         if (mask & (0x1 << j))
1571                                 reta_conf[idx].reta[shift + j] = ((reta >>
1572                                         CHAR_BIT * j) & UINT8_MAX);
1573                 }
1574         }
1575
1576         return 0;
1577 }
1578
1579 static int
1580 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1581         struct rte_eth_rss_conf *rss_conf)
1582 {
1583         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1584         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1585         uint32_t mrqc;
1586         uint64_t hf = rss_conf->rss_hf;
1587         int i;
1588
1589         PMD_INIT_FUNC_TRACE();
1590
        /* only validate the key length when a new key is supplied */
        if (key != NULL && rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
                FM10K_RSSRK_ENTRIES_PER_REG)
                return -EINVAL;
1594
1595         if (hf == 0)
1596                 return -EINVAL;
1597
1598         mrqc = 0;
1599         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
1600         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
1601         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
1602         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
1603         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
1604         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
1605         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
1606         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
1607         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
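        /*
         * Note that several ETH_RSS_* flags share a single MRQC bit (e.g.
         * ETH_RSS_IPV6 and ETH_RSS_IPV6_EX), so those hash types cannot be
         * enabled independently of one another.
         */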
1608
        /* If none of the requested hash types is supported, return */
1610         if (mrqc == 0)
1611                 return -EINVAL;
1612
1613         if (key != NULL)
1614                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1615                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1616
1617         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
1618
1619         return 0;
1620 }
1621
1622 static int
1623 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1624         struct rte_eth_rss_conf *rss_conf)
1625 {
1626         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1627         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1628         uint32_t mrqc;
1629         uint64_t hf;
1630         int i;
1631
1632         PMD_INIT_FUNC_TRACE();
1633
        if (key != NULL && rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
                                FM10K_RSSRK_ENTRIES_PER_REG)
                return -EINVAL;
1637
1638         if (key != NULL)
1639                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1640                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1641
1642         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
1643         hf = 0;
1644         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
1645         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
1646         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
1647         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
1648         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
1649         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
1650         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
1651         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
1652         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
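        /*
         * Because those MRQC bits are shared, both the base and _EX flags
         * are reported whenever the corresponding bit is set.
         */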
1653
1654         rss_conf->rss_hf = hf;
1655
1656         return 0;
1657 }
1658
1659 static void
1660 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
1661 {
1662         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1663         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1664
        /* Bind all local non-queue interrupts to vector 0; ORing in the
         * vector number is a no-op here since the vector is 0
         */
        int_map |= 0;
1667
1668         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
1669         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
1670         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
1671         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
1672         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
1673         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
1674
1675         /* Enable misc causes */
1676         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
1677                                 FM10K_EIMR_ENABLE(THI_FAULT) |
1678                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
1679                                 FM10K_EIMR_ENABLE(MAILBOX) |
1680                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
1681                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
1682                                 FM10K_EIMR_ENABLE(SRAMERROR) |
1683                                 FM10K_EIMR_ENABLE(VFLR));
1684
1685         /* Enable ITR 0 */
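        /* AUTOMASK re-masks the vector when an interrupt fires;
         * MASK_CLEAR unmasks it now so the next interrupt can be delivered
         */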
1686         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1687                                         FM10K_ITR_MASK_CLEAR);
1688         FM10K_WRITE_FLUSH(hw);
1689 }
1690
1691 static void
1692 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
1693 {
1694         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1695         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
1696
        /* Bind all local non-queue interrupts to vector 0; ORing in the
         * vector number is a no-op here since the vector is 0
         */
        int_map |= 0;
1699
        /* Only INT 0 is available; the other 15 are reserved. */
1701         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
1702
1703         /* Enable ITR 0 */
1704         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1705                                         FM10K_ITR_MASK_CLEAR);
1706         FM10K_WRITE_FLUSH(hw);
1707 }
1708
1709 static int
1710 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
1711 {
1712         struct fm10k_fault fault;
1713         int err;
1714         const char *estr = "Unknown error";
1715
1716         /* Process PCA fault */
1717         if (eicr & FM10K_EIMR_PCA_FAULT) {
1718                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
1719                 if (err)
1720                         goto error;
1721                 switch (fault.type) {
1722                 case PCA_NO_FAULT:
1723                         estr = "PCA_NO_FAULT"; break;
1724                 case PCA_UNMAPPED_ADDR:
1725                         estr = "PCA_UNMAPPED_ADDR"; break;
1726                 case PCA_BAD_QACCESS_PF:
1727                         estr = "PCA_BAD_QACCESS_PF"; break;
1728                 case PCA_BAD_QACCESS_VF:
1729                         estr = "PCA_BAD_QACCESS_VF"; break;
1730                 case PCA_MALICIOUS_REQ:
1731                         estr = "PCA_MALICIOUS_REQ"; break;
1732                 case PCA_POISONED_TLP:
1733                         estr = "PCA_POISONED_TLP"; break;
1734                 case PCA_TLP_ABORT:
1735                         estr = "PCA_TLP_ABORT"; break;
1736                 default:
1737                         goto error;
1738                 }
1739                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1740                         estr, fault.func ? "VF" : "PF", fault.func,
1741                         fault.address, fault.specinfo);
1742         }
1743
1744         /* Process THI fault */
1745         if (eicr & FM10K_EIMR_THI_FAULT) {
1746                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
1747                 if (err)
1748                         goto error;
1749                 switch (fault.type) {
1750                 case THI_NO_FAULT:
1751                         estr = "THI_NO_FAULT"; break;
1752                 case THI_MAL_DIS_Q_FAULT:
1753                         estr = "THI_MAL_DIS_Q_FAULT"; break;
1754                 default:
1755                         goto error;
1756                 }
1757                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1758                         estr, fault.func ? "VF" : "PF", fault.func,
1759                         fault.address, fault.specinfo);
1760         }
1761
1762         /* Process FUM fault */
1763         if (eicr & FM10K_EIMR_FUM_FAULT) {
1764                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
1765                 if (err)
1766                         goto error;
1767                 switch (fault.type) {
1768                 case FUM_NO_FAULT:
1769                         estr = "FUM_NO_FAULT"; break;
1770                 case FUM_UNMAPPED_ADDR:
1771                         estr = "FUM_UNMAPPED_ADDR"; break;
1772                 case FUM_POISONED_TLP:
1773                         estr = "FUM_POISONED_TLP"; break;
1774                 case FUM_BAD_VF_QACCESS:
1775                         estr = "FUM_BAD_VF_QACCESS"; break;
1776                 case FUM_ADD_DECODE_ERR:
1777                         estr = "FUM_ADD_DECODE_ERR"; break;
1778                 case FUM_RO_ERROR:
1779                         estr = "FUM_RO_ERROR"; break;
1780                 case FUM_QPRC_CRC_ERROR:
1781                         estr = "FUM_QPRC_CRC_ERROR"; break;
1782                 case FUM_CSR_TIMEOUT:
1783                         estr = "FUM_CSR_TIMEOUT"; break;
1784                 case FUM_INVALID_TYPE:
1785                         estr = "FUM_INVALID_TYPE"; break;
1786                 case FUM_INVALID_LENGTH:
1787                         estr = "FUM_INVALID_LENGTH"; break;
1788                 case FUM_INVALID_BE:
1789                         estr = "FUM_INVALID_BE"; break;
1790                 case FUM_INVALID_ALIGN:
1791                         estr = "FUM_INVALID_ALIGN"; break;
1792                 default:
1793                         goto error;
1794                 }
1795                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
1796                         estr, fault.func ? "VF" : "PF", fault.func,
1797                         fault.address, fault.specinfo);
1798         }
1799
        return 0;
1803 error:
1804         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
1805         return err;
1806 }
1807
1808 /**
 * PF interrupt handler triggered by the NIC for handling specific interrupts.
1810  *
1811  * @param handle
1812  *  Pointer to interrupt handle.
1813  * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
1815  *
1816  * @return
1817  *  void
1818  */
1819 static void
1820 fm10k_dev_interrupt_handler_pf(
1821                         __rte_unused struct rte_intr_handle *handle,
1822                         void *param)
1823 {
1824         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1825         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1826         uint32_t cause, status;
1827
1828         if (hw->mac.type != fm10k_mac_pf)
1829                 return;
1830
1831         cause = FM10K_READ_REG(hw, FM10K_EICR);
1832
1833         /* Handle PCI fault cases */
1834         if (cause & FM10K_EICR_FAULT_MASK) {
                PMD_INIT_LOG(ERR, "INT: fault detected!");
1836                 fm10k_dev_handle_fault(hw, cause);
1837         }
1838
1839         /* Handle switch up/down */
1840         if (cause & FM10K_EICR_SWITCHNOTREADY)
1841                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
1842
1843         if (cause & FM10K_EICR_SWITCHREADY)
1844                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
1845
1846         /* Handle mailbox message */
1847         fm10k_mbx_lock(hw);
1848         hw->mbx.ops.process(hw, &hw->mbx);
1849         fm10k_mbx_unlock(hw);
1850
1851         /* Handle SRAM error */
1852         if (cause & FM10K_EICR_SRAMERROR) {
1853                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
1854
1855                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
1856                 /* Write to clear pending bits */
1857                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
1858
                /* TODO: print out error message after shared code updates */
1860         }
1861
        /* Clear these 3 events if any are pending */
1863         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
1864                  FM10K_EICR_SWITCHREADY;
1865         if (cause)
1866                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
1867
1868         /* Re-enable interrupt from device side */
1869         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
1870                                         FM10K_ITR_MASK_CLEAR);
1871         /* Re-enable interrupt from host side */
1872         rte_intr_enable(&(dev->pci_dev->intr_handle));
1873 }
1874
1875 /**
 * VF interrupt handler triggered by the NIC for handling specific interrupts.
1877  *
1878  * @param handle
1879  *  Pointer to interrupt handle.
1880  * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
1882  *
1883  * @return
1884  *  void
1885  */
1886 static void
1887 fm10k_dev_interrupt_handler_vf(
1888                         __rte_unused struct rte_intr_handle *handle,
1889                         void *param)
1890 {
1891         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1892         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1893
1894         if (hw->mac.type != fm10k_mac_vf)
1895                 return;
1896
        /* Handle mailbox messages while holding the lock */
1898         fm10k_mbx_lock(hw);
1899         hw->mbx.ops.process(hw, &hw->mbx);
1900         fm10k_mbx_unlock(hw);
1901
1902         /* Re-enable interrupt from device side */
1903         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
1904                                         FM10K_ITR_MASK_CLEAR);
1905         /* Re-enable interrupt from host side */
1906         rte_intr_enable(&(dev->pci_dev->intr_handle));
1907 }
1908
1909 /* Mailbox message handler in VF */
1910 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
1911         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1912         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
1913         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
1914         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1915 };
1916
1917 /* Mailbox message handler in PF */
1918 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
1919         FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1920         FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1921         FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1922         FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1923         FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1924         FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1925         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1926 };
1927
1928 static int
1929 fm10k_setup_mbx_service(struct fm10k_hw *hw)
1930 {
1931         int err;
1932
1933         /* Initialize mailbox lock */
1934         fm10k_mbx_initlock(hw);
1935
1936         /* Replace default message handler with new ones */
1937         if (hw->mac.type == fm10k_mac_pf)
1938                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
1939         else
1940                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
1941
1942         if (err) {
                PMD_INIT_LOG(ERR, "Failed to register mailbox handler. err: %d",
                                err);
1945                 return err;
1946         }
1947         /* Connect to SM for PF device or PF for VF device */
1948         return hw->mbx.ops.connect(hw, &hw->mbx);
1949 }
1950
1951 static void
1952 fm10k_close_mbx_service(struct fm10k_hw *hw)
1953 {
1954         /* Disconnect from SM for PF device or PF for VF device */
1955         hw->mbx.ops.disconnect(hw, &hw->mbx);
1956 }
1957
1958 static const struct eth_dev_ops fm10k_eth_dev_ops = {
1959         .dev_configure          = fm10k_dev_configure,
1960         .dev_start              = fm10k_dev_start,
1961         .dev_stop               = fm10k_dev_stop,
1962         .dev_close              = fm10k_dev_close,
1963         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
1964         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
1965         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
1966         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
1967         .stats_get              = fm10k_stats_get,
1968         .stats_reset            = fm10k_stats_reset,
1969         .link_update            = fm10k_link_update,
1970         .dev_infos_get          = fm10k_dev_infos_get,
1971         .vlan_filter_set        = fm10k_vlan_filter_set,
1972         .vlan_offload_set       = fm10k_vlan_offload_set,
1973         .mac_addr_add           = fm10k_macaddr_add,
1974         .mac_addr_remove        = fm10k_macaddr_remove,
1975         .rx_queue_start         = fm10k_dev_rx_queue_start,
1976         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
1977         .tx_queue_start         = fm10k_dev_tx_queue_start,
1978         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
1979         .rx_queue_setup         = fm10k_rx_queue_setup,
1980         .rx_queue_release       = fm10k_rx_queue_release,
1981         .tx_queue_setup         = fm10k_tx_queue_setup,
1982         .tx_queue_release       = fm10k_tx_queue_release,
1983         .reta_update            = fm10k_reta_update,
1984         .reta_query             = fm10k_reta_query,
1985         .rss_hash_update        = fm10k_rss_hash_update,
1986         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
1987 };
1988
1989 static int
1990 eth_fm10k_dev_init(struct rte_eth_dev *dev)
1991 {
1992         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1993         int diag;
1994         struct fm10k_macvlan_filter_info *macvlan;
1995
1996         PMD_INIT_FUNC_TRACE();
1997
1998         dev->dev_ops = &fm10k_eth_dev_ops;
1999         dev->rx_pkt_burst = &fm10k_recv_pkts;
2000         dev->tx_pkt_burst = &fm10k_xmit_pkts;
2001
2002         if (dev->data->scattered_rx)
2003                 dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
2004
2005         /* only initialize in the primary process */
2006         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2007                 return 0;
2008
2009         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2010         memset(macvlan, 0, sizeof(*macvlan));
2011         /* Vendor and Device ID need to be set before init of shared code */
2012         memset(hw, 0, sizeof(*hw));
2013         hw->device_id = dev->pci_dev->id.device_id;
2014         hw->vendor_id = dev->pci_dev->id.vendor_id;
2015         hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2016         hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2017         hw->revision_id = 0;
2018         hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2019         if (hw->hw_addr == NULL) {
2020                 PMD_INIT_LOG(ERR, "Bad mem resource."
2021                         " Try to blacklist unused devices.");
2022                 return -EIO;
2023         }
2024
2025         /* Store fm10k_adapter pointer */
2026         hw->back = dev->data->dev_private;
2027
2028         /* Initialize the shared code */
2029         diag = fm10k_init_shared_code(hw);
2030         if (diag != FM10K_SUCCESS) {
2031                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2032                 return -EIO;
2033         }
2034
        /*
         * Initialize bus info. Normally we would call fm10k_get_bus_info(),
         * but there is no way to get link status without reading BAR4. Until
         * this works, assume we have maximum bandwidth.
         * @todo - fix bus info
         */
2041         hw->bus_caps.speed = fm10k_bus_speed_8000;
2042         hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2043         hw->bus_caps.payload = fm10k_bus_payload_512;
2044         hw->bus.speed = fm10k_bus_speed_8000;
2045         hw->bus.width = fm10k_bus_width_pcie_x8;
2046         hw->bus.payload = fm10k_bus_payload_256;
2047
2048         /* Initialize the hw */
2049         diag = fm10k_init_hw(hw);
2050         if (diag != FM10K_SUCCESS) {
2051                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2052                 return -EIO;
2053         }
2054
2055         /* Initialize MAC address(es) */
2056         dev->data->mac_addrs = rte_zmalloc("fm10k",
2057                         ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2058         if (dev->data->mac_addrs == NULL) {
2059                 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2060                 return -ENOMEM;
2061         }
2062
2063         diag = fm10k_read_mac_addr(hw);
2064
2065         ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2066                         &dev->data->mac_addrs[0]);
2067
2068         if (diag != FM10K_SUCCESS ||
2069                 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2070
2071                 /* Generate a random addr */
2072                 eth_random_addr(hw->mac.addr);
2073                 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
                ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                                &dev->data->mac_addrs[0]);
2076         }
2077
2078         /* Reset the hw statistics */
2079         fm10k_stats_reset(dev);
2080
2081         /* Reset the hw */
2082         diag = fm10k_reset_hw(hw);
2083         if (diag != FM10K_SUCCESS) {
2084                 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2085                 return -EIO;
2086         }
2087
2088         /* Setup mailbox service */
2089         diag = fm10k_setup_mbx_service(hw);
2090         if (diag != FM10K_SUCCESS) {
2091                 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2092                 return -EIO;
2093         }
2094
        /* PF and VF have different interrupt handling mechanisms */
2096         if (hw->mac.type == fm10k_mac_pf) {
2097                 /* register callback func to eal lib */
2098                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2099                         fm10k_dev_interrupt_handler_pf, (void *)dev);
2100
2101                 /* enable MISC interrupt */
2102                 fm10k_dev_enable_intr_pf(dev);
2103         } else { /* VF */
2104                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2105                         fm10k_dev_interrupt_handler_vf, (void *)dev);
2106
2107                 fm10k_dev_enable_intr_vf(dev);
2108         }
2109
2110         /* Enable uio intr after callback registered */
2111         rte_intr_enable(&(dev->pci_dev->intr_handle));
2112
2113         hw->mac.ops.update_int_moderator(hw);
2114
2115         /* Make sure Switch Manager is ready before going forward. */
2116         if (hw->mac.type == fm10k_mac_pf) {
2117                 int switch_ready = 0;
2118                 int i;
2119
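                /*
                 * Poll for at most MAX_QUERY_SWITCH_STATE_TIMES *
                 * WAIT_SWITCH_MSG_US (10 * 100 ms, i.e. about one second)
                 * before giving up on the switch manager.
                 */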
2120                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2121                         fm10k_mbx_lock(hw);
2122                         hw->mac.ops.get_host_state(hw, &switch_ready);
2123                         fm10k_mbx_unlock(hw);
2124                         if (switch_ready)
2125                                 break;
2126                         /* Delay some time to acquire async LPORT_MAP info. */
2127                         rte_delay_us(WAIT_SWITCH_MSG_US);
2128                 }
2129
2130                 if (switch_ready == 0) {
2131                         PMD_INIT_LOG(ERR, "switch is not ready");
2132                         return -1;
2133                 }
2134         }
2135
        /*
         * The calls below operate on the mailbox, so take the lock to avoid
         * racing with the interrupt handler. Pushing a message into the
         * mailbox FIFO raises an interrupt to the PF/SM; its handler replies
         * with an interrupt back to our side, at which point the mailbox
         * FIFO is touched again.
         */
2143         fm10k_mbx_lock(hw);
2144         /* Enable port first */
2145         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2146
        /* Set unicast mode by default. The application can switch to another
         * mode through other API functions.
         */
2150         hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2151                                         FM10K_XCAST_MODE_NONE);
2152
2153         fm10k_mbx_unlock(hw);
2154
2156         return 0;
2157 }
2158
2159 /*
2160  * The set of PCI devices this driver supports. This driver will enable both PF
2161  * and SRIOV-VF devices.
2162  */
2163 static const struct rte_pci_id pci_id_fm10k_map[] = {
2164 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2165 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2166 #include "rte_pci_dev_ids.h"
2167         { .vendor_id = 0, /* sentinel */ },
2168 };
2169
2170 static struct eth_driver rte_pmd_fm10k = {
2171         .pci_drv = {
2172                 .name = "rte_pmd_fm10k",
2173                 .id_table = pci_id_fm10k_map,
2174                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
2175         },
2176         .eth_dev_init = eth_fm10k_dev_init,
2177         .dev_private_size = sizeof(struct fm10k_adapter),
2178 };
2179
2180 /*
2181  * Driver initialization routine.
2182  * Invoked once at EAL init time.
2183  * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
2184  */
2185 static int
2186 rte_pmd_fm10k_init(__rte_unused const char *name,
2187         __rte_unused const char *params)
2188 {
2189         PMD_INIT_FUNC_TRACE();
2190         rte_eth_driver_register(&rte_pmd_fm10k);
2191         return 0;
2192 }
2193
2194 static struct rte_driver rte_fm10k_driver = {
2195         .type = PMD_PDEV,
2196         .init = rte_pmd_fm10k_init,
2197 };
2198
2199 PMD_REGISTER_DRIVER(rte_fm10k_driver);