net/fm10k: remove RSS restriction with num of queues
[dpdk.git] drivers/net/fm10k/fm10k_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013-2016 Intel Corporation
 */

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Max try times to acquire switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US    100000
/* A period of quiescence for switch */
#define FM10K_SWITCH_QUIESCE_US 100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)

/* default 1:1 map from queue ID to interrupt vector ID */
#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])

/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM    128
#define GLORT_FD_Q_BASE  0x40
#define GLORT_PF_MASK    0xFFC0
#define GLORT_FD_MASK    GLORT_PF_MASK
#define GLORT_FD_INDEX   GLORT_FD_Q_BASE

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
        const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);

struct fm10k_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
        {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
        {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
        {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
        {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
        {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
        {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
        {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
        {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
                nodesc_drop)},
};

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
                sizeof(fm10k_hw_stats_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
        {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
                sizeof(fm10k_hw_stats_rx_q_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
                sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
                (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);

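/*
 * Mailbox lock helpers. The PF/VF mailbox is shared between the datapath
 * and the interrupt handler, so all mailbox traffic is serialized by a
 * spinlock; fm10k_mbx_lock() polls with a short delay
 * (FM10K_MBXLOCK_DELAY_US) instead of busy-waiting on the lock.
 */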
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
        __rte_unused void *rx_queue,
        __rte_unused struct rte_mbuf **rx_pkts,
        __rte_unused uint16_t nb_pkts)
{
        return 0;
}

uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
                __rte_unused void *rx_queue,
                __rte_unused struct rte_mbuf **rx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        return 0;
}

int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
        return -1;
}

void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
                __rte_unused struct fm10k_rx_queue *rxq)
{
        return;
}

void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
        return;
}

int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
        return -1;
}

uint16_t __attribute__((weak))
fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
                           __rte_unused struct rte_mbuf **tx_pkts,
                           __rte_unused uint16_t nb_pkts)
{
        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        static const union fm10k_rx_desc zero = {{0} };
        uint64_t dma_addr;
        int i, diag;
        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                                q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        /* initialize extra software ring entries. Space for these extra
         * entries is always allocated.
         */
        memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
        for (i = 0; i < q->nb_fake_desc; ++i) {
                q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
                q->hw_ring[q->nb_desc + i] = zero;
        }

        q->next_dd = 0;
        q->next_alloc = 0;
        q->next_trigger = q->alloc_thresh - 1;
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        q->rxrearm_start = 0;
        q->rxrearm_nb = 0;

        return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* zero faked descriptors */
        for (i = 0; i < q->nb_fake_desc; ++i)
                q->hw_ring[q->nb_desc + i] = zero;

        /* vPMD driver has a different way of releasing mbufs. */
        if (q->rx_using_sse) {
                fm10k_rx_queue_release_mbufs_vec(q);
                return;
        }

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. on re-configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. on re-configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

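/*
 * Validate the requested multi-queue (RSS/VMDQ) configuration: DCB is
 * rejected outright, VMDQ is PF-only, and the number of VMDQ pools must
 * fit both the pool bitmap width and the configured RX queue count.
 */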
static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
                PMD_INIT_LOG(ERR, "DCB mode is not supported.");
                return -EINVAL;
        }

        if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                return 0;

        if (hw->mac.type == fm10k_mac_vf) {
                PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
                return -EINVAL;
        }

        /* Check VMDQ queue pool number */
        if (vmdq_conf->nb_queue_pools >
                        sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
                        vmdq_conf->nb_queue_pools > nb_rx_q) {
                PMD_INIT_LOG(ERR, "Too many queue pools: %d",
                        vmdq_conf->nb_queue_pools);
                return -EINVAL;
        }

        return 0;
}

static const struct fm10k_txq_ops def_txq_ops = {
        .reset = tx_queue_reset,
};

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
        /* multiple queue mode checking */
        ret = fm10k_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
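/* e.g. fls(8) = 4, fls(7) = 3, fls(0) = 0 */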

static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!vmdq_conf->pool_map[i].pools)
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
                fm10k_mbx_unlock(hw);
        }
}

static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Add default mac address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);
}

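/*
 * Program the RSS hash key, the FM10K_MAX_RSS_INDICES-entry redirection
 * table (four entries per 32-bit RETA register) and the MRQC hash-type
 * selection. When RSS is not requested, MRQC is cleared to disable RSS.
 */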
static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
                dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
                FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
                return;
        }

        /* RSS key is rss_intel_key (default) or user-provided (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
        reta = 0;
        for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }

        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
                        "supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

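/*
 * Reset the xcast mode of every logical port backing this device to
 * unicast-only; the application can switch individual ports to other
 * modes later through the promiscuous/allmulticast ops.
 */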
static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i;

        for (i = 0; i < nb_lport_new; i++) {
                /* Set unicast mode by default. App can change
                 * to other mode in other API func.
                 */
                fm10k_mbx_lock(hw);
                hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
                        FM10K_XCAST_MODE_NONE);
                fm10k_mbx_unlock(hw);
        }
}

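/*
 * Top-level RX multi-queue setup: always program RSS, then (PF only)
 * rebuild the logical ports and reset MAC/VLAN filter state whenever
 * the number of VMDQ pools has changed.
 */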
static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct fm10k_macvlan_filter_info *macvlan;
        uint16_t nb_queue_pools = 0; /* pool number in configuration */
        uint16_t nb_lport_new;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        fm10k_dev_rss_configure(dev);

        /* only PF supports VMDQ */
        if (hw->mac.type != fm10k_mac_pf)
                return;

        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                nb_queue_pools = vmdq_conf->nb_queue_pools;

        /* no pool number change, no need to update logic port and VLAN/MAC */
        if (macvlan->nb_queue_pools == nb_queue_pools)
                return;

        nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
        fm10k_dev_logic_port_update(dev, nb_lport_new);

        /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
        memset(dev->data->mac_addrs, 0,
                ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                &dev->data->mac_addrs[0]);
        memset(macvlan, 0, sizeof(*macvlan));
        macvlan->nb_queue_pools = nb_queue_pools;

        if (nb_queue_pools)
                fm10k_dev_vmdq_rx_configure(dev);
        else
                fm10k_dev_pf_main_vsi_reset(dev);
}

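/*
 * One-time TX datapath init: mask TX interrupts, program each ring's
 * base address and size, optionally enable FTAG (PF only), and select
 * the TX burst function.
 */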
static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }
                /* Enable use of FTAG bit in TX descriptor, PFVTCTL
                 * register is read-only for VF.
                 */
                if (fm10k_check_ftag(dev->device->devargs)) {
                        if (hw->mac.type == fm10k_mac_pf) {
                                FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
                                                FM10K_PFVTCTL_FTAG_DESC_ENABLE);
                                PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
                        } else {
                                PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
                                return -ENOTSUP;
                        }
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);

                /* assign default SGLORT for each TX queue by PF */
                if (hw->mac.type == fm10k_mac_pf)
                        FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
        }

        /* set up vector or scalar TX function as appropriate */
        fm10k_set_tx_function(dev);

        return 0;
}

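/*
 * One-time RX datapath init: route or mask RX interrupts, program each
 * descriptor ring, size the RX buffers (allowing for the 512B mbuf data
 * alignment), then apply VMDQ/RSS and pick the RX burst function.
 */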
static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint32_t logic_port = hw->mac.dglort_map;
        uint16_t buf_size;
        uint16_t queue_stride = 0;

        /* enable RXINT for interrupt mode */
        i = 0;
        if (rte_intr_dp_is_en(intr_handle)) {
                for (; i < dev->data->nb_rx_queues; i++) {
                        FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
                        if (hw->mac.type == fm10k_mac_pf)
                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                        else
                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
                                        FM10K_ITR_AUTOMASK |
                                        FM10K_ITR_MASK_CLEAR);
                }
        }
        /* Disable other RXINT to avoid possible interrupt */
        for (; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                        3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

                /* Configure the Rx buffer size for one buff without split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As RX buffer is aligned to 512B within mbuf, some bytes are
                 * reserved for this purpose, and the worst case could be 511B.
                 * But SRR reg assumes all buffers have the same size. In order
                 * to fill the gap, we'll have to consider the worst case and
                 * assume 512B is reserved. If we don't do so, it's possible
                 * for HW to overwrite data to next mbuf.
                 */
                buf_size -= FM10K_RX_DATABUF_ALIGN;

                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
                                FM10K_SRRCTL_LOOPBACK_SUPPRESS);

                /* Account for two VLAN tags when deciding whether
                 * scattered RX is needed
                 */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }

        /* Configure VMDQ/RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);

        /* Decide the best RX function */
        fm10k_set_rx_function(dev);

        /* update RX_SGLORT for loopback suppress */
        if (hw->mac.type != fm10k_mac_pf)
                return 0;
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        if (macvlan->nb_queue_pools)
                queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                if (i && queue_stride && !(i % queue_stride))
                        logic_port++;
                FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
        }

        return 0;
}

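/*
 * Per-queue start: refill the ring with mbufs, then enable the queue
 * and program its head/tail pointers (written both before and after
 * enabling; see the note below about the emulation platform).
 */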
static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

                q->ops->reset(q);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                        FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        } else
                err = -1;

        return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, it doesn't make sense to enable
         * allmulticast and disable promiscuous since fm10k only can select
         * one of the modes.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
                        "no need to enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

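/*
 * Carve the device's GLORT range into a PF/VMDQ decoder entry and a
 * flow director entry (see MAX_LPORT_NUM above), and invalidate every
 * remaining DGLORT map slot. For example, with 4 VMDQ pools and 16 RX
 * queues: pool_len = fls(3) = 2 and rss_len = fls(15) - 2 = 2.
 */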
static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
        uint16_t nb_queue_pools;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_queue_pools = macvlan->nb_queue_pools;
        pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
        rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;

        /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
        dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
        dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
                        hw->mac.dglort_map;
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
        /* Configure VMDQ/RSS DGlort Decoder */
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

        /* Flow Director configurations, only queue number is valid. */
        dglortdec = fls(dev->data->nb_rx_queues - 1);
        dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
                        (hw->mac.dglort_map + GLORT_FD_Q_BASE);
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);

        /* Invalidate all other GLORT entries */
        for (i = 2; i < FM10K_DGLORT_COUNT; i++)
                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
                                FM10K_DGLORTMAP_NONE);
}

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
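/*
 * dev_start: reset and re-initialize the hardware, set up the TX and
 * RX datapaths, program the DGLORT map (PF only), then start every
 * queue that is not marked for deferred start.
 */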
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        /* stop, init, then start the hw */
        diag = fm10k_stop_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_start_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_dev_tx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
                return diag;
        }

        if (fm10k_dev_rxq_interrupt_setup(dev))
                return -EIO;

        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
                return diag;
        }

        if (hw->mac.type == fm10k_mac_pf)
                fm10k_dev_dglort_map_configure(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
                rxq = dev->data->rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
                diag = fm10k_dev_rx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
                txq = dev->data->tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                tx_queue_clean(dev->data->tx_queues[j]);
                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        /* Update default vlan when not in VMDQ mode */
        if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

        fm10k_link_update(dev, 0);

        return 0;
}

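/*
 * dev_stop: stop all queues, mask the datapath interrupts enabled in
 * fm10k_dev_rx_init(), and release the queue/vector mapping.
 */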
static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pdev->intr_handle;
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        fm10k_dev_tx_queue_stop(dev, i);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_dev_rx_queue_stop(dev, i);

        /* Disable datapath event */
        if (rte_intr_dp_is_en(intr_handle)) {
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                                3 << FM10K_RXINT_TIMER_SHIFT);
                        if (hw->mac.type == fm10k_mac_pf)
                                FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
                                        FM10K_ITR_MASK_SET);
                        else
                                FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
                                        FM10K_ITR_MASK_SET);
                }
        }
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        rte_free(intr_handle->intr_vec);
        intr_handle->intr_vec = NULL;
}

static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues) {
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

                        tx_queue_free(txq);
                }
        }

        if (dev->data->rx_queues) {
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_rx_queue_release(dev->data->rx_queues[i]);
        }
}

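/*
 * dev_close: take the logical ports down, give the switch time to
 * quiesce, then stop the mailbox service, the datapath and the HW.
 */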
static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        fm10k_mbx_lock(hw);
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                MAX_LPORT_NUM, false);
        fm10k_mbx_unlock(hw);

        /* allow 100ms for device to quiesce */
        rte_delay_us(FM10K_SWITCH_QUIESCE_US);

        /* Stop mailbox service first */
        fm10k_close_mbx_service(hw);
        fm10k_dev_stop(dev);
        fm10k_dev_queue_release(dev);
        fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
{
        struct fm10k_dev_info *dev_info =
                FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
        PMD_INIT_FUNC_TRACE();

        /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
         * leave the speed undefined since there is no 50Gbps Ethernet.
         */
        dev->data->dev_link.link_speed  = 0;
        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        dev->data->dev_link.link_status =
                dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;

        return 0;
}

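/*
 * xstats layout: FM10K_NB_HW_XSTATS global counters followed by
 * per-queue RX/TX counters for all FM10K_MAX_QUEUES_PF queues,
 * FM10K_NB_XSTATS entries in total.
 */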
static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
{
        unsigned i, q;
        unsigned count = 0;

        if (xstats_names != NULL) {
                /* Note: limit checked in rte_eth_xstats_names() */

                /* Global stats */
                for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                        snprintf(xstats_names[count].name,
                                sizeof(xstats_names[count].name),
                                "%s", fm10k_hw_stats_strings[count].name);
                        count++;
                }

                /* PF queue stats */
                for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                        for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "rx_q%u_%s", q,
                                        fm10k_hw_stats_rx_q_strings[i].name);
                                count++;
                        }
                        for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                                snprintf(xstats_names[count].name,
                                        sizeof(xstats_names[count].name),
                                        "tx_q%u_%s", q,
                                        fm10k_hw_stats_tx_q_strings[i].name);
                                count++;
                        }
                }
        }
        return FM10K_NB_XSTATS;
}

static int
fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                 unsigned n)
{
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        unsigned i, q, count = 0;

        if (n < FM10K_NB_XSTATS)
                return FM10K_NB_XSTATS;

        /* Global stats */
        for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        fm10k_hw_stats_strings[count].offset);
                xstats[count].id = count;
                count++;
        }

        /* PF queue stats */
        for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_rx_q_strings[i].offset);
                        xstats[count].id = count;
                        count++;
                }
                for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_tx_q_strings[i].offset);
                        xstats[count].id = count;
                        count++;
                }
        }

        return FM10K_NB_XSTATS;
}

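/*
 * Basic stats are aggregated from the per-queue HW counters; only the
 * first RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported individually.
 */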
1322 static int
1323 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1324 {
1325         uint64_t ipackets, opackets, ibytes, obytes;
1326         struct fm10k_hw *hw =
1327                 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1328         struct fm10k_hw_stats *hw_stats =
1329                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1330         int i;
1331
1332         PMD_INIT_FUNC_TRACE();
1333
1334         fm10k_update_hw_stats(hw, hw_stats);
1335
1336         ipackets = opackets = ibytes = obytes = 0;
1337         for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1338                 (i < hw->mac.max_queues); ++i) {
1339                 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1340                 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1341                 stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
1342                 stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
1343                 ipackets += stats->q_ipackets[i];
1344                 opackets += stats->q_opackets[i];
1345                 ibytes   += stats->q_ibytes[i];
1346                 obytes   += stats->q_obytes[i];
1347         }
1348         stats->ipackets = ipackets;
1349         stats->opackets = opackets;
1350         stats->ibytes = ibytes;
1351         stats->obytes = obytes;
1352         return 0;
1353 }
1354
1355 static void
1356 fm10k_stats_reset(struct rte_eth_dev *dev)
1357 {
1358         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1359         struct fm10k_hw_stats *hw_stats =
1360                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1361
1362         PMD_INIT_FUNC_TRACE();
1363
1364         memset(hw_stats, 0, sizeof(*hw_stats));
1365         fm10k_rebind_hw_stats(hw, hw_stats);
1366 }
1367
1368 static void
1369 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1370         struct rte_eth_dev_info *dev_info)
1371 {
1372         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1373         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1374
1375         PMD_INIT_FUNC_TRACE();
1376
1377         dev_info->pci_dev            = pdev;
1378         dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1379         dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1380         dev_info->max_rx_queues      = hw->mac.max_queues;
1381         dev_info->max_tx_queues      = hw->mac.max_queues;
1382         dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1383         dev_info->max_hash_mac_addrs = 0;
1384         dev_info->max_vfs            = pdev->max_vfs;
1385         dev_info->vmdq_pool_base     = 0;
1386         dev_info->vmdq_queue_base    = 0;
1387         dev_info->max_vmdq_pools     = ETH_32_POOLS;
1388         dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1389         dev_info->rx_offload_capa =
1390                 DEV_RX_OFFLOAD_VLAN_STRIP |
1391                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1392                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1393                 DEV_RX_OFFLOAD_TCP_CKSUM;
1394         dev_info->tx_offload_capa =
1395                 DEV_TX_OFFLOAD_VLAN_INSERT |
1396                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1397                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1398                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1399                 DEV_TX_OFFLOAD_TCP_TSO;
1400
1401         dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1402         dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1403
1404         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1405                 .rx_thresh = {
1406                         .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1407                         .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1408                         .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1409                 },
1410                 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1411                 .rx_drop_en = 0,
1412         };
1413
1414         dev_info->default_txconf = (struct rte_eth_txconf) {
1415                 .tx_thresh = {
1416                         .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1417                         .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1418                         .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1419                 },
1420                 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1421                 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1422                 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1423         };
1424
1425         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1426                 .nb_max = FM10K_MAX_RX_DESC,
1427                 .nb_min = FM10K_MIN_RX_DESC,
1428                 .nb_align = FM10K_MULT_RX_DESC,
1429         };
1430
1431         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1432                 .nb_max = FM10K_MAX_TX_DESC,
1433                 .nb_min = FM10K_MIN_TX_DESC,
1434                 .nb_align = FM10K_MULT_TX_DESC,
1435                 .nb_seg_max = FM10K_TX_MAX_SEG,
1436                 .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1437         };
1438
1439         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1440                         ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1441                         ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1442 }
1443
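/*
 * Illustrative sketch (editor's addition, not part of the driver): how an
 * application might consume the limits reported by fm10k_dev_infos_get()
 * above before configuring queues. The function name and the queue count
 * of 4 are hypothetical; rte_eth_dev_info_get() is the standard ethdev
 * entry point that lands in the callback above.
 */
static __rte_unused void
fm10k_example_query_limits(uint16_t port_id)
{
        struct rte_eth_dev_info info;
        uint16_t nb_rxq, nb_txq;

        rte_eth_dev_info_get(port_id, &info);

        /* clamp the desired queue counts to what the device reports */
        nb_rxq = RTE_MIN((uint16_t)4, info.max_rx_queues);
        nb_txq = RTE_MIN((uint16_t)4, info.max_tx_queues);
        PMD_INIT_LOG(INFO, "using %u Rx and %u Tx queues, reta_size %u",
                        nb_rxq, nb_txq, info.reta_size);
}
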
1444 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1445 static const uint32_t *
1446 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1447 {
1448         if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1449             dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1450                 static uint32_t ptypes[] = {
1451                         /* refers to rx_desc_to_ol_flags() */
1452                         RTE_PTYPE_L2_ETHER,
1453                         RTE_PTYPE_L3_IPV4,
1454                         RTE_PTYPE_L3_IPV4_EXT,
1455                         RTE_PTYPE_L3_IPV6,
1456                         RTE_PTYPE_L3_IPV6_EXT,
1457                         RTE_PTYPE_L4_TCP,
1458                         RTE_PTYPE_L4_UDP,
1459                         RTE_PTYPE_UNKNOWN
1460                 };
1461
1462                 return ptypes;
1463         } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1464                    dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1465                 static uint32_t ptypes_vec[] = {
1466                         /* refers to fm10k_desc_to_pktype_v() */
1467                         RTE_PTYPE_L3_IPV4,
1468                         RTE_PTYPE_L3_IPV4_EXT,
1469                         RTE_PTYPE_L3_IPV6,
1470                         RTE_PTYPE_L3_IPV6_EXT,
1471                         RTE_PTYPE_L4_TCP,
1472                         RTE_PTYPE_L4_UDP,
1473                         RTE_PTYPE_TUNNEL_GENEVE,
1474                         RTE_PTYPE_TUNNEL_NVGRE,
1475                         RTE_PTYPE_TUNNEL_VXLAN,
1476                         RTE_PTYPE_TUNNEL_GRE,
1477                         RTE_PTYPE_UNKNOWN
1478                 };
1479
1480                 return ptypes_vec;
1481         }
1482
1483         return NULL;
1484 }
1485 #else
1486 static const uint32_t *
1487 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1488 {
1489         return NULL;
1490 }
1491 #endif
1492
1493 static int
1494 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1495 {
1496         s32 result;
1497         uint16_t mac_num = 0;
1498         uint32_t vid_idx, vid_bit, mac_index;
1499         struct fm10k_hw *hw;
1500         struct fm10k_macvlan_filter_info *macvlan;
1501         struct rte_eth_dev_data *data = dev->data;
1502
1503         hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1504         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1505
1506         if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1507                 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1508                 return -EINVAL;
1509         }
1510
1511         if (vlan_id > ETH_VLAN_ID_MAX) {
1512                 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1513                 return -EINVAL;
1514         }
1515
1516         vid_idx = FM10K_VFTA_IDX(vlan_id);
1517         vid_bit = FM10K_VFTA_BIT(vlan_id);
1518         /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1519         if (on && (macvlan->vfta[vid_idx] & vid_bit))
1520                 return 0;
1521         /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1522         if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1523                 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1524                         "in the VLAN filter table");
1525                 return -EINVAL;
1526         }
1527
1528         fm10k_mbx_lock(hw);
1529         result = fm10k_update_vlan(hw, vlan_id, 0, on);
1530         fm10k_mbx_unlock(hw);
1531         if (result != FM10K_SUCCESS) {
1532                 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1533                 return -EIO;
1534         }
1535
1536         for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1537                         (result == FM10K_SUCCESS); mac_index++) {
1538                 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1539                         continue;
1540                 if (mac_num > macvlan->mac_num - 1) {
1541                         PMD_INIT_LOG(ERR, "MAC address count "
1542                                         "does not match");
1543                         break;
1544                 }
1545                 fm10k_mbx_lock(hw);
1546                 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1547                         data->mac_addrs[mac_index].addr_bytes,
1548                         vlan_id, on, 0);
1549                 fm10k_mbx_unlock(hw);
1550                 mac_num++;
1551         }
1552         if (result != FM10K_SUCCESS) {
1553                 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1554                 return -EIO;
1555         }
1556
1557         if (on) {
1558                 macvlan->vlan_num++;
1559                 macvlan->vfta[vid_idx] |= vid_bit;
1560         } else {
1561                 macvlan->vlan_num--;
1562                 macvlan->vfta[vid_idx] &= ~vid_bit;
1563         }
1564         return 0;
1565 }
1566
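/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * application-facing path into fm10k_vlan_filter_set() above. The port and
 * VLAN ID are hypothetical; the port must have been configured with VLAN
 * filtering (which fm10k keeps always on anyway).
 */
static __rte_unused int
fm10k_example_vlan_filter(uint16_t port_id)
{
        int ret;

        /* add VLAN 100; fails with -EINVAL in VMDQ mode (see above) */
        ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
        if (ret < 0)
                return ret;
        /* remove it again; removing an ID not in the table is -EINVAL */
        return rte_eth_dev_vlan_filter(port_id, 100, 0);
}
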
1567 static int
1568 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1569 {
1570         if (mask & ETH_VLAN_STRIP_MASK) {
1571                 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1572                         PMD_INIT_LOG(ERR, "VLAN stripping is "
1573                                         "always on in fm10k");
1574         }
1575
1576         if (mask & ETH_VLAN_EXTEND_MASK) {
1577                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1578                         PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1579                                         "supported in fm10k");
1580         }
1581
1582         if (mask & ETH_VLAN_FILTER_MASK) {
1583                 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1584                         PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1585         }
1586
1587         return 0;
1588 }
1589
1590 /* Add/Remove a MAC address, and update filters to main VSI */
1591 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1592                 const u8 *mac, bool add, uint32_t pool)
1593 {
1594         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1595         struct fm10k_macvlan_filter_info *macvlan;
1596         uint32_t i, j, k;
1597
1598         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1599
1600         if (pool != MAIN_VSI_POOL_NUMBER) {
1601                 PMD_DRV_LOG(ERR, "VMDQ not enabled, cannot set "
1602                         "MAC to pool %u", pool);
1603                 return;
1604         }
1605         for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1606                 if (!macvlan->vfta[j])
1607                         continue;
1608                 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1609                         if (!(macvlan->vfta[j] & (1 << k)))
1610                                 continue;
1611                         if (i + 1 > macvlan->vlan_num) {
1612                                 PMD_INIT_LOG(ERR, "VLAN count does not match");
1613                                 return;
1614                         }
1615                         fm10k_mbx_lock(hw);
1616                         fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1617                                 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1618                         fm10k_mbx_unlock(hw);
1619                         i++;
1620                 }
1621         }
1622 }
1623
1624 /* Add/Remove a MAC address, and update filters to VMDQ */
1625 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1626                 const u8 *mac, bool add, uint32_t pool)
1627 {
1628         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1629         struct fm10k_macvlan_filter_info *macvlan;
1630         struct rte_eth_vmdq_rx_conf *vmdq_conf;
1631         uint32_t i;
1632
1633         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1634         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1635
1636         if (pool > macvlan->nb_queue_pools) {
1637                 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1638                         " Max pool is %u",
1639                         pool, macvlan->nb_queue_pools);
1640                 return;
1641         }
1642         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1643                 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1644                         continue;
1645                 fm10k_mbx_lock(hw);
1646                 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1647                         vmdq_conf->pool_map[i].vlan_id, add, 0);
1648                 fm10k_mbx_unlock(hw);
1649         }
1650 }
1651
1652 /* Add/Remove a MAC address, and update filters */
1653 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1654                 const u8 *mac, bool add, uint32_t pool)
1655 {
1656         struct fm10k_macvlan_filter_info *macvlan;
1657
1658         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1659
1660         if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1661                 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1662         else
1663                 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1664
1665         if (add)
1666                 macvlan->mac_num++;
1667         else
1668                 macvlan->mac_num--;
1669 }
1670
1671 /* Add a MAC address, and update filters */
1672 static int
1673 fm10k_macaddr_add(struct rte_eth_dev *dev,
1674                 struct ether_addr *mac_addr,
1675                 uint32_t index,
1676                 uint32_t pool)
1677 {
1678         struct fm10k_macvlan_filter_info *macvlan;
1679
1680         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1681         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1682         macvlan->mac_vmdq_id[index] = pool;
1683         return 0;
1684 }
1685
1686 /* Remove a MAC address, and update filters */
1687 static void
1688 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1689 {
1690         struct rte_eth_dev_data *data = dev->data;
1691         struct fm10k_macvlan_filter_info *macvlan;
1692
1693         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1694         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1695                         FALSE, macvlan->mac_vmdq_id[index]);
1696         macvlan->mac_vmdq_id[index] = 0;
1697 }
1698
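/*
 * Illustrative sketch (editor's addition, not part of the driver): adding a
 * secondary MAC address through the standard ethdev API, which reaches
 * fm10k_macaddr_add() above. The address bytes are hypothetical; pool 0 is
 * the main VSI unless VMDQ pools are configured.
 */
static __rte_unused int
fm10k_example_add_mac(uint16_t port_id)
{
        struct ether_addr addr = {
                .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01},
        };

        return rte_eth_dev_mac_addr_add(port_id, &addr, MAIN_VSI_POOL_NUMBER);
}
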
1699 static inline int
1700 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1701 {
1702         if ((request < min) || (request > max) || ((request % mult) != 0))
1703                 return -1;
1704         else
1705                 return 0;
1706 }
1707
1708
1709 static inline int
1710 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1711 {
1712         if ((request < min) || (request > max) || ((div % request) != 0))
1713                 return -1;
1714         else
1715                 return 0;
1716 }
1717
1718 static inline int
1719 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1720 {
1721         uint16_t rx_free_thresh;
1722
1723         if (conf->rx_free_thresh == 0)
1724                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1725         else
1726                 rx_free_thresh = conf->rx_free_thresh;
1727
1728         /* make sure the requested threshold satisfies the constraints */
1729         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1730                         FM10K_RX_FREE_THRESH_MAX(q),
1731                         FM10K_RX_FREE_THRESH_DIV(q),
1732                         rx_free_thresh)) {
1733                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1734                         "less than or equal to %u, "
1735                         "greater than or equal to %u, "
1736                         "and a divisor of %u",
1737                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1738                         FM10K_RX_FREE_THRESH_MIN(q),
1739                         FM10K_RX_FREE_THRESH_DIV(q));
1740                 return -EINVAL;
1741         }
1742
1743         q->alloc_thresh = rx_free_thresh;
1744         q->drop_en = conf->rx_drop_en;
1745         q->rx_deferred_start = conf->rx_deferred_start;
1746
1747         return 0;
1748 }
1749
1750 /*
1751  * Hardware requires specific alignment for Rx packet buffers. At
1752  * least one of the following two conditions must be satisfied.
1753  *  1. Address is 512B aligned
1754  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1755  *
1756  * As such, the driver may need to adjust the DMA address within the
1757  * buffer by up to 512B.
1758  *
1759  * return 1 if the element size is valid, otherwise return 0.
1760  */
1761 static int
1762 mempool_element_size_valid(struct rte_mempool *mp)
1763 {
1764         uint32_t min_size;
1765
1766         /* elt_size includes mbuf header and headroom */
1767         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1768                         RTE_PKTMBUF_HEADROOM;
1769
1770         /* account for up to 512B of alignment */
1771         min_size -= FM10K_RX_DATABUF_ALIGN;
1772
1773         /* sanity check for overflow */
1774         if (min_size > mp->elt_size)
1775                 return 0;
1776
1777         /* size is valid */
1778         return 1;
1779 }
1780
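/*
 * Illustrative sketch (editor's addition, not part of the driver): sizing a
 * mempool so that mempool_element_size_valid() above accepts it. Because the
 * driver may shift the DMA address by up to 512B, the data room must exceed
 * the largest expected frame by FM10K_RX_DATABUF_ALIGN. The pool name,
 * element count, and 2KB buffer size are hypothetical.
 */
static __rte_unused struct rte_mempool *
fm10k_example_create_rx_pool(int socket_id)
{
        uint16_t data_room = RTE_PKTMBUF_HEADROOM +
                        FM10K_RX_DATABUF_ALIGN + 2048;

        return rte_pktmbuf_pool_create("rx_pool_example", 8192, 256, 0,
                        data_room, socket_id);
}
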
1781 static int
1782 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1783         uint16_t nb_desc, unsigned int socket_id,
1784         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1785 {
1786         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1787         struct fm10k_dev_info *dev_info =
1788                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1789         struct fm10k_rx_queue *q;
1790         const struct rte_memzone *mz;
1791
1792         PMD_INIT_FUNC_TRACE();
1793
1794         /* make sure the mempool element size can account for alignment. */
1795         if (!mempool_element_size_valid(mp)) {
1796                 PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
1797                 return -EINVAL;
1798         }
1799
1800         /* make sure a valid number of descriptors has been requested */
1801         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1802                                 FM10K_MULT_RX_DESC, nb_desc)) {
1803                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1804                         "less than or equal to %"PRIu32", "
1805                         "greater than or equal to %u, "
1806                         "and a multiple of %u",
1807                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1808                         FM10K_MULT_RX_DESC);
1809                 return -EINVAL;
1810         }
1811
1812         /*
1813          * If this queue already exists, free the associated memory. The
1814          * queue cannot be reused in case we need to allocate memory on a
1815          * different socket than was previously used.
1816          */
1817         if (dev->data->rx_queues[queue_id] != NULL) {
1818                 rx_queue_free(dev->data->rx_queues[queue_id]);
1819                 dev->data->rx_queues[queue_id] = NULL;
1820         }
1821
1822         /* allocate memory for the queue structure */
1823         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1824                                 socket_id);
1825         if (q == NULL) {
1826                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1827                 return -ENOMEM;
1828         }
1829
1830         /* setup queue */
1831         q->mp = mp;
1832         q->nb_desc = nb_desc;
1833         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1834         q->port_id = dev->data->port_id;
1835         q->queue_id = queue_id;
1836         q->tail_ptr = (volatile uint32_t *)
1837                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
        if (handle_rxconf(q, conf)) {
                rte_free(q);
                return -EINVAL;
        }
1840
1841         /* allocate memory for the software ring */
1842         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1843                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1844                         RTE_CACHE_LINE_SIZE, socket_id);
1845         if (q->sw_ring == NULL) {
1846                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1847                 rte_free(q);
1848                 return -ENOMEM;
1849         }
1850
1851         /*
1852          * allocate memory for the hardware descriptor ring. A memzone large
1853          * enough to hold the maximum ring size is requested to allow for
1854          * resizing in later calls to the queue setup function.
1855          */
1856         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1857                                       FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1858                                       socket_id);
1859         if (mz == NULL) {
1860                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1861                 rte_free(q->sw_ring);
1862                 rte_free(q);
1863                 return -ENOMEM;
1864         }
1865         q->hw_ring = mz->addr;
1866         q->hw_ring_phys_addr = mz->iova;
1867
1868         /* Check if the number of descs satisfies the vector Rx requirement */
1869         if (!rte_is_power_of_2(nb_desc)) {
1870                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1871                                     "preconditions - canceling the feature for "
1872                                     "the whole port[%d]",
1873                              q->queue_id, q->port_id);
1874                 dev_info->rx_vec_allowed = false;
1875         } else
1876                 fm10k_rxq_vec_setup(q);
1877
1878         dev->data->rx_queues[queue_id] = q;
1879         return 0;
1880 }
1881
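/*
 * Illustrative sketch (editor's addition, not part of the driver): an Rx
 * queue setup call that keeps vector Rx enabled. As checked above, nb_desc
 * must be a power of two (and a multiple of FM10K_MULT_RX_DESC) or vector Rx
 * is disabled for the whole port. The queue ID and descriptor count are
 * hypothetical; NULL selects the default rxconf reported in dev_infos_get.
 */
static __rte_unused int
fm10k_example_rx_setup(uint16_t port_id, struct rte_mempool *mp)
{
        return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                        NULL, mp);
}
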
1882 static void
1883 fm10k_rx_queue_release(void *queue)
1884 {
1885         PMD_INIT_FUNC_TRACE();
1886
1887         rx_queue_free(queue);
1888 }
1889
1890 static inline int
1891 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1892 {
1893         uint16_t tx_free_thresh;
1894         uint16_t tx_rs_thresh;
1895
1896         /* the constraint macros require that tx_free_thresh is
1897          * configured before tx_rs_thresh */
1898         if (conf->tx_free_thresh == 0)
1899                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1900         else
1901                 tx_free_thresh = conf->tx_free_thresh;
1902
1903         /* make sure the requested threshold satisfies the constraints */
1904         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1905                         FM10K_TX_FREE_THRESH_MAX(q),
1906                         FM10K_TX_FREE_THRESH_DIV(q),
1907                         tx_free_thresh)) {
1908                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1909                         "less than or equal to %u, "
1910                         "greater than or equal to %u, "
1911                         "and a divisor of %u",
1912                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1913                         FM10K_TX_FREE_THRESH_MIN(q),
1914                         FM10K_TX_FREE_THRESH_DIV(q));
1915                 return -EINVAL;
1916         }
1917
1918         q->free_thresh = tx_free_thresh;
1919
1920         if (conf->tx_rs_thresh == 0)
1921                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1922         else
1923                 tx_rs_thresh = conf->tx_rs_thresh;
1924
1925         q->tx_deferred_start = conf->tx_deferred_start;
1926
1927         /* make sure the requested threshold satisfies the constraints */
1928         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1929                         FM10K_TX_RS_THRESH_MAX(q),
1930                         FM10K_TX_RS_THRESH_DIV(q),
1931                         tx_rs_thresh)) {
1932                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1933                         "less than or equal to %u, "
1934                         "greater than or equal to %u, "
1935                         "and a divisor of %u",
1936                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1937                         FM10K_TX_RS_THRESH_MIN(q),
1938                         FM10K_TX_RS_THRESH_DIV(q));
1939                 return -EINVAL;
1940         }
1941
1942         q->rs_thresh = tx_rs_thresh;
1943
1944         return 0;
1945 }
1946
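/*
 * Illustrative sketch (editor's addition, not part of the driver): a txconf
 * that satisfies handle_txconf() above. The thresholds of 32 mirror the
 * driver defaults and evenly divide a 512-descriptor ring; the queue ID and
 * descriptor count are hypothetical.
 */
static __rte_unused int
fm10k_example_tx_setup(uint16_t port_id)
{
        struct rte_eth_txconf txconf = {
                .tx_free_thresh = 32,
                .tx_rs_thresh = 32,
                .txq_flags = FM10K_SIMPLE_TX_FLAG,
        };

        return rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
                        &txconf);
}
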
1947 static int
1948 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1949         uint16_t nb_desc, unsigned int socket_id,
1950         const struct rte_eth_txconf *conf)
1951 {
1952         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1953         struct fm10k_tx_queue *q;
1954         const struct rte_memzone *mz;
1955
1956         PMD_INIT_FUNC_TRACE();
1957
1958         /* make sure a valid number of descriptors has been requested */
1959         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1960                                 FM10K_MULT_TX_DESC, nb_desc)) {
1961                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1962                         "less than or equal to %"PRIu32", "
1963                         "greater than or equal to %u, "
1964                         "and a multiple of %u",
1965                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1966                         FM10K_MULT_TX_DESC);
1967                 return -EINVAL;
1968         }
1969
1970         /*
1971          * If this queue already exists, free the associated memory. The
1972          * queue cannot be reused in case we need to allocate memory on a
1973          * different socket than was previously used.
1974          */
1975         if (dev->data->tx_queues[queue_id] != NULL) {
1976                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1977
1978                 tx_queue_free(txq);
1979                 dev->data->tx_queues[queue_id] = NULL;
1980         }
1981
1982         /* allocate memory for the queue structure */
1983         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1984                                 socket_id);
1985         if (q == NULL) {
1986                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1987                 return -ENOMEM;
1988         }
1989
1990         /* setup queue */
1991         q->nb_desc = nb_desc;
1992         q->port_id = dev->data->port_id;
1993         q->queue_id = queue_id;
1994         q->txq_flags = conf->txq_flags;
1995         q->ops = &def_txq_ops;
1996         q->tail_ptr = (volatile uint32_t *)
1997                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
        if (handle_txconf(q, conf)) {
                rte_free(q);
                return -EINVAL;
        }
2000
2001         /* allocate memory for the software ring */
2002         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2003                                         nb_desc * sizeof(struct rte_mbuf *),
2004                                         RTE_CACHE_LINE_SIZE, socket_id);
2005         if (q->sw_ring == NULL) {
2006                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2007                 rte_free(q);
2008                 return -ENOMEM;
2009         }
2010
2011         /*
2012          * allocate memory for the hardware descriptor ring. A memzone large
2013          * enough to hold the maximum ring size is requested to allow for
2014          * resizing in later calls to the queue setup function.
2015          */
2016         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2017                                       FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2018                                       socket_id);
2019         if (mz == NULL) {
2020                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2021                 rte_free(q->sw_ring);
2022                 rte_free(q);
2023                 return -ENOMEM;
2024         }
2025         q->hw_ring = mz->addr;
2026         q->hw_ring_phys_addr = mz->iova;
2027
2028         /*
2029          * Allocate memory for the RS bit tracker. One slot is required for
2030          * the descriptor index of each RS bit that needs to be set.
2031          */
2032         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2033                                 ((nb_desc + 1) / q->rs_thresh) *
2034                                 sizeof(uint16_t),
2035                                 RTE_CACHE_LINE_SIZE, socket_id);
2036         if (q->rs_tracker.list == NULL) {
2037                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2038                 rte_free(q->sw_ring);
2039                 rte_free(q);
2040                 return -ENOMEM;
2041         }
2042
2043         dev->data->tx_queues[queue_id] = q;
2044         return 0;
2045 }
2046
2047 static void
2048 fm10k_tx_queue_release(void *queue)
2049 {
2050         struct fm10k_tx_queue *q = queue;
2051         PMD_INIT_FUNC_TRACE();
2052
2053         tx_queue_free(q);
2054 }
2055
2056 static int
2057 fm10k_reta_update(struct rte_eth_dev *dev,
2058                         struct rte_eth_rss_reta_entry64 *reta_conf,
2059                         uint16_t reta_size)
2060 {
2061         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2062         uint16_t i, j, idx, shift;
2063         uint8_t mask;
2064         uint32_t reta;
2065
2066         PMD_INIT_FUNC_TRACE();
2067
2068         if (reta_size > FM10K_MAX_RSS_INDICES) {
2069                 PMD_INIT_LOG(ERR, "The configured hash lookup table size "
2070                         "(%d) exceeds what the hardware can support "
2071                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2072                 return -EINVAL;
2073         }
2074
2075         /*
2076          * Update Redirection Table RETA[n], n=0..31. The redirection table
2077          * has 128 entries in 32 registers.
2078          */
2079         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2080                 idx = i / RTE_RETA_GROUP_SIZE;
2081                 shift = i % RTE_RETA_GROUP_SIZE;
2082                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2083                                 BIT_MASK_PER_UINT32);
2084                 if (mask == 0)
2085                         continue;
2086
2087                 reta = 0;
2088                 if (mask != BIT_MASK_PER_UINT32)
2089                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2090
2091                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2092                         if (mask & (0x1 << j)) {
2093                                 if (mask != 0xF)
2094                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
2095                                 reta |= reta_conf[idx].reta[shift + j] <<
2096                                                 (CHAR_BIT * j);
2097                         }
2098                 }
2099                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2100         }
2101
2102         return 0;
2103 }
2104
2105 static int
2106 fm10k_reta_query(struct rte_eth_dev *dev,
2107                         struct rte_eth_rss_reta_entry64 *reta_conf,
2108                         uint16_t reta_size)
2109 {
2110         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2111         uint16_t i, j, idx, shift;
2112         uint8_t mask;
2113         uint32_t reta;
2114
2115         PMD_INIT_FUNC_TRACE();
2116
2117         if (reta_size < FM10K_MAX_RSS_INDICES) {
2118                 PMD_INIT_LOG(ERR, "The configured hash lookup table size "
2119                         "(%d) is smaller than what the hardware supports "
2120                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2121                 return -EINVAL;
2122         }
2123
2124         /*
2125          * Read Redirection Table RETA[n], n=0..31. The redirection table
2126          * has 128 entries in 32 registers.
2127          */
2128         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2129                 idx = i / RTE_RETA_GROUP_SIZE;
2130                 shift = i % RTE_RETA_GROUP_SIZE;
2131                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2132                                 BIT_MASK_PER_UINT32);
2133                 if (mask == 0)
2134                         continue;
2135
2136                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2137                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2138                         if (mask & (0x1 << j))
2139                                 reta_conf[idx].reta[shift + j] = ((reta >>
2140                                         CHAR_BIT * j) & UINT8_MAX);
2141                 }
2142         }
2143
2144         return 0;
2145 }
2146
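/*
 * Illustrative sketch (editor's addition, not part of the driver): filling
 * the 128-entry fm10k RETA through the standard ethdev API, which reaches
 * fm10k_reta_update() above. Two rte_eth_rss_reta_entry64 groups of 64
 * entries cover the table; the round-robin spread over 4 queues is
 * hypothetical.
 */
static __rte_unused int
fm10k_example_reta_update(uint16_t port_id)
{
        struct rte_eth_rss_reta_entry64 reta_conf[2];
        uint16_t i;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < FM10K_MAX_RSS_INDICES; i++) {
                uint16_t g = i / RTE_RETA_GROUP_SIZE;

                reta_conf[g].mask |= 1ULL << (i % RTE_RETA_GROUP_SIZE);
                reta_conf[g].reta[i % RTE_RETA_GROUP_SIZE] = i % 4;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                        FM10K_MAX_RSS_INDICES);
}
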
2147 static int
2148 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2149         struct rte_eth_rss_conf *rss_conf)
2150 {
2151         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2152         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2153         uint32_t mrqc;
2154         uint64_t hf = rss_conf->rss_hf;
2155         int i;
2156
2157         PMD_INIT_FUNC_TRACE();
2158
2159         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2160                                 FM10K_RSSRK_ENTRIES_PER_REG))
2161                 return -EINVAL;
2162
2163         if (hf == 0)
2164                 return -EINVAL;
2165
2166         mrqc = 0;
2167         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2168         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2169         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2170         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2171         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2172         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2173         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2174         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2175         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
2176
2177         /* If none of the requested hash functions is supported, return */
2178         if (mrqc == 0)
2179                 return -EINVAL;
2180
2181         if (key != NULL)
2182                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2183                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2184
2185         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2186
2187         return 0;
2188 }
2189
2190 static int
2191 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2192         struct rte_eth_rss_conf *rss_conf)
2193 {
2194         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2195         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2196         uint32_t mrqc;
2197         uint64_t hf;
2198         int i;
2199
2200         PMD_INIT_FUNC_TRACE();
2201
2202         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2203                                 FM10K_RSSRK_ENTRIES_PER_REG))
2204                 return -EINVAL;
2205
2206         if (key != NULL)
2207                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2208                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2209
2210         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2211         hf = 0;
2212         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2213         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2214         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2215         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2216         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2217         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2218         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2219         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2220         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2221
2222         rss_conf->rss_hf = hf;
2223
2224         return 0;
2225 }
2226
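/*
 * Illustrative sketch (editor's addition, not part of the driver): updating
 * the RSS configuration through fm10k_rss_hash_update() above. The key must
 * be at least FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG bytes long; the
 * all-zero key here is illustrative only, a real application should supply a
 * random key.
 */
static __rte_unused int
fm10k_example_rss_update(uint16_t port_id)
{
        static uint8_t key[FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG];
        struct rte_eth_rss_conf conf = {
                .rss_key = key,
                .rss_key_len = sizeof(key),
                .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &conf);
}
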
2227 static void
2228 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2229 {
2230         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2231         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2232
2233         /* Bind all local non-queue interrupts to vector 0 */
2234         int_map |= FM10K_MISC_VEC_ID;
2235
2236         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2237         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2238         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2239         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2240         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2241         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2242
2243         /* Enable misc causes */
2244         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2245                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2246                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2247                                 FM10K_EIMR_ENABLE(MAILBOX) |
2248                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2249                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2250                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2251                                 FM10K_EIMR_ENABLE(VFLR));
2252
2253         /* Enable ITR 0 */
2254         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2255                                         FM10K_ITR_MASK_CLEAR);
2256         FM10K_WRITE_FLUSH(hw);
2257 }
2258
2259 static void
2260 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2261 {
2262         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2263         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2264
2265         int_map |= FM10K_MISC_VEC_ID;
2266
2267         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2268         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2269         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2270         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2271         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2272         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2273
2274         /* Disable misc causes */
2275         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2276                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2277                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2278                                 FM10K_EIMR_DISABLE(MAILBOX) |
2279                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2280                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2281                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2282                                 FM10K_EIMR_DISABLE(VFLR));
2283
2284         /* Disable ITR 0 */
2285         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2286         FM10K_WRITE_FLUSH(hw);
2287 }
2288
2289 static void
2290 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2291 {
2292         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2293         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2294
2295         /* Bind all local non-queue interrupts to vector 0 */
2296         int_map |= FM10K_MISC_VEC_ID;
2297
2298         /* Only INT 0 is available; the other 15 are reserved. */
2299         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2300
2301         /* Enable ITR 0 */
2302         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2303                                         FM10K_ITR_MASK_CLEAR);
2304         FM10K_WRITE_FLUSH(hw);
2305 }
2306
2307 static void
2308 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2309 {
2310         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2311         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2312
2313         int_map |= FM10K_MISC_VEC_ID;
2314
2315         /* Only INT 0 is available; the other 15 are reserved. */
2316         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2317
2318         /* Disable ITR 0 */
2319         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2320         FM10K_WRITE_FLUSH(hw);
2321 }
2322
2323 static int
2324 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2325 {
2326         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2327         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2328
2329         /* Enable ITR */
2330         if (hw->mac.type == fm10k_mac_pf)
2331                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2332                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2333         else
2334                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2335                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2336         rte_intr_enable(&pdev->intr_handle);
2337         return 0;
2338 }
2339
2340 static int
2341 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2342 {
2343         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2344         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2345
2346         /* Disable ITR */
2347         if (hw->mac.type == fm10k_mac_pf)
2348                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2349                         FM10K_ITR_MASK_SET);
2350         else
2351                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2352                         FM10K_ITR_MASK_SET);
2353         return 0;
2354 }
2355
2356 static int
2357 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2358 {
2359         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2360         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2361         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2362         uint32_t intr_vector, vec;
2363         uint16_t queue_id;
2364         int result = 0;
2365
2366         /* fm10k needs one separate interrupt for the mailbox,
2367          * so only drivers that support multiple interrupt vectors,
2368          * e.g. vfio-pci, can work in fm10k interrupt mode
2369          */
2370         if (!rte_intr_cap_multiple(intr_handle) ||
2371                         dev->data->dev_conf.intr_conf.rxq == 0)
2372                 return result;
2373
2374         intr_vector = dev->data->nb_rx_queues;
2375
2376         /* disable interrupt first */
2377         rte_intr_disable(intr_handle);
2378         if (hw->mac.type == fm10k_mac_pf)
2379                 fm10k_dev_disable_intr_pf(dev);
2380         else
2381                 fm10k_dev_disable_intr_vf(dev);
2382
2383         if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2384                 PMD_INIT_LOG(ERR, "Failed to init event fd");
2385                 result = -EIO;
2386         }
2387
2388         if (rte_intr_dp_is_en(intr_handle) && !result) {
2389                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2390                         dev->data->nb_rx_queues * sizeof(int), 0);
2391                 if (intr_handle->intr_vec) {
2392                         for (queue_id = 0, vec = FM10K_RX_VEC_START;
2393                                         queue_id < dev->data->nb_rx_queues;
2394                                         queue_id++) {
2395                                 intr_handle->intr_vec[queue_id] = vec;
2396                                 if (vec < intr_handle->nb_efd - 1
2397                                                 + FM10K_RX_VEC_START)
2398                                         vec++;
2399                         }
2400                 } else {
2401                         PMD_INIT_LOG(ERR, "Failed to allocate %d Rx queue"
2402                                 " intr_vec entries", dev->data->nb_rx_queues);
2403                         rte_intr_efd_disable(intr_handle);
2404                         result = -ENOMEM;
2405                 }
2406         }
2407
2408         if (hw->mac.type == fm10k_mac_pf)
2409                 fm10k_dev_enable_intr_pf(dev);
2410         else
2411                 fm10k_dev_enable_intr_vf(dev);
2412         rte_intr_enable(intr_handle);
2413         hw->mac.ops.update_int_moderator(hw);
2414         return result;
2415 }
2416
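/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * application side of the Rx interrupt plumbing configured above. It is only
 * valid when intr_conf.rxq was set at configure time and a multi-vector
 * driver such as vfio-pci is bound. rte_epoll_wait() and friends come from
 * rte_interrupts.h; the 100ms timeout is hypothetical.
 */
static __rte_unused void
fm10k_example_rx_intr_wait(uint16_t port_id, uint16_t queue_id)
{
        struct rte_epoll_event event[1];

        rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
                        RTE_INTR_EVENT_ADD, NULL);
        rte_eth_dev_rx_intr_enable(port_id, queue_id);
        /* block for up to 100ms waiting for the queue interrupt */
        rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, 1, 100);
        rte_eth_dev_rx_intr_disable(port_id, queue_id);
}
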
2417 static int
2418 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2419 {
2420         struct fm10k_fault fault;
2421         int err;
2422         const char *estr = "Unknown error";
2423
2424         /* Process PCA fault */
2425         if (eicr & FM10K_EICR_PCA_FAULT) {
2426                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2427                 if (err)
2428                         goto error;
2429                 switch (fault.type) {
2430                 case PCA_NO_FAULT:
2431                         estr = "PCA_NO_FAULT"; break;
2432                 case PCA_UNMAPPED_ADDR:
2433                         estr = "PCA_UNMAPPED_ADDR"; break;
2434                 case PCA_BAD_QACCESS_PF:
2435                         estr = "PCA_BAD_QACCESS_PF"; break;
2436                 case PCA_BAD_QACCESS_VF:
2437                         estr = "PCA_BAD_QACCESS_VF"; break;
2438                 case PCA_MALICIOUS_REQ:
2439                         estr = "PCA_MALICIOUS_REQ"; break;
2440                 case PCA_POISONED_TLP:
2441                         estr = "PCA_POISONED_TLP"; break;
2442                 case PCA_TLP_ABORT:
2443                         estr = "PCA_TLP_ABORT"; break;
2444                 default:
2445                         goto error;
2446                 }
2447                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2448                         estr, fault.func ? "VF" : "PF", fault.func,
2449                         fault.address, fault.specinfo);
2450         }
2451
2452         /* Process THI fault */
2453         if (eicr & FM10K_EICR_THI_FAULT) {
2454                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2455                 if (err)
2456                         goto error;
2457                 switch (fault.type) {
2458                 case THI_NO_FAULT:
2459                         estr = "THI_NO_FAULT"; break;
2460                 case THI_MAL_DIS_Q_FAULT:
2461                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2462                 default:
2463                         goto error;
2464                 }
2465                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2466                         estr, fault.func ? "VF" : "PF", fault.func,
2467                         fault.address, fault.specinfo);
2468         }
2469
2470         /* Process FUM fault */
2471         if (eicr & FM10K_EICR_FUM_FAULT) {
2472                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2473                 if (err)
2474                         goto error;
2475                 switch (fault.type) {
2476                 case FUM_NO_FAULT:
2477                         estr = "FUM_NO_FAULT"; break;
2478                 case FUM_UNMAPPED_ADDR:
2479                         estr = "FUM_UNMAPPED_ADDR"; break;
2480                 case FUM_POISONED_TLP:
2481                         estr = "FUM_POISONED_TLP"; break;
2482                 case FUM_BAD_VF_QACCESS:
2483                         estr = "FUM_BAD_VF_QACCESS"; break;
2484                 case FUM_ADD_DECODE_ERR:
2485                         estr = "FUM_ADD_DECODE_ERR"; break;
2486                 case FUM_RO_ERROR:
2487                         estr = "FUM_RO_ERROR"; break;
2488                 case FUM_QPRC_CRC_ERROR:
2489                         estr = "FUM_QPRC_CRC_ERROR"; break;
2490                 case FUM_CSR_TIMEOUT:
2491                         estr = "FUM_CSR_TIMEOUT"; break;
2492                 case FUM_INVALID_TYPE:
2493                         estr = "FUM_INVALID_TYPE"; break;
2494                 case FUM_INVALID_LENGTH:
2495                         estr = "FUM_INVALID_LENGTH"; break;
2496                 case FUM_INVALID_BE:
2497                         estr = "FUM_INVALID_BE"; break;
2498                 case FUM_INVALID_ALIGN:
2499                         estr = "FUM_INVALID_ALIGN"; break;
2500                 default:
2501                         goto error;
2502                 }
2503                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2504                         estr, fault.func ? "VF" : "PF", fault.func,
2505                         fault.address, fault.specinfo);
2506         }
2507
2508         return 0;
2509 error:
2510         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2511         return err;
2512 }
2513
2514 /**
2515  * PF interrupt handler triggered by NIC for handling specific interrupt.
2516  *
2517  * @param handle
2518  *  Pointer to interrupt handle.
2519  * @param param
2520  *  The address of parameter (struct rte_eth_dev *) registered before.
2521  *
2522  * @return
2523  *  void
2524  */
2525 static void
2526 fm10k_dev_interrupt_handler_pf(void *param)
2527 {
2528         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2529         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2530         uint32_t cause, status;
2531         struct fm10k_dev_info *dev_info =
2532                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2533         int status_mbx;
2534         s32 err;
2535
2536         if (hw->mac.type != fm10k_mac_pf)
2537                 return;
2538
2539         cause = FM10K_READ_REG(hw, FM10K_EICR);
2540
2541         /* Handle PCI fault cases */
2542         if (cause & FM10K_EICR_FAULT_MASK) {
2543                 PMD_INIT_LOG(ERR, "INT: fault detected!");
2544                 fm10k_dev_handle_fault(hw, cause);
2545         }
2546
2547         /* Handle switch up/down */
2548         if (cause & FM10K_EICR_SWITCHNOTREADY)
2549                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2550
2551         if (cause & FM10K_EICR_SWITCHREADY) {
2552                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2553                 if (dev_info->sm_down == 1) {
2554                         fm10k_mbx_lock(hw);
2555
2556                         /* For recreating logical ports */
2557                         status_mbx = hw->mac.ops.update_lport_state(hw,
2558                                         hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2559                         if (status_mbx == FM10K_SUCCESS)
2560                                 PMD_INIT_LOG(INFO,
2561                                         "INT: Recreated Logical port");
2562                         else
2563                                 PMD_INIT_LOG(INFO,
2564                                         "INT: Logical ports weren't recreated");
2565
2566                         status_mbx = hw->mac.ops.update_xcast_mode(hw,
2567                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2568                         if (status_mbx != FM10K_SUCCESS)
2569                                 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2570
2571                         fm10k_mbx_unlock(hw);
2572
2573                         /* first clear the internal SW recording structure */
2574                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2575                                                 ETH_MQ_RX_VMDQ_FLAG))
2576                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2577                                         false);
2578
2579                         fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2580                                         MAIN_VSI_POOL_NUMBER);
2581
2582                         /*
2583                          * Add the default MAC address and VLAN for the
2584                          * logical ports that have been created; leave it to
2585                          * the application to fully recover Rx filtering.
2586                          */
2587                         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2588                                         MAIN_VSI_POOL_NUMBER);
2589
2590                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2591                                                 ETH_MQ_RX_VMDQ_FLAG))
2592                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2593                                         true);
2594
2595                         dev_info->sm_down = 0;
2596                         _rte_eth_dev_callback_process(dev,
2597                                         RTE_ETH_EVENT_INTR_LSC,
2598                                         NULL, NULL);
2599                 }
2600         }
2601
2602         /* Handle mailbox message */
2603         fm10k_mbx_lock(hw);
2604         err = hw->mbx.ops.process(hw, &hw->mbx);
2605         fm10k_mbx_unlock(hw);
2606
2607         if (err == FM10K_ERR_RESET_REQUESTED) {
2608                 PMD_INIT_LOG(INFO, "INT: Switch is down");
2609                 dev_info->sm_down = 1;
2610                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2611                                 NULL, NULL);
2612         }
2613
2614         /* Handle SRAM error */
2615         if (cause & FM10K_EICR_SRAMERROR) {
2616                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2617
2618                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2619                 /* Write to clear pending bits */
2620                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2621
2622                 /* TODO: print out error message after shared code updates */
2623         }
2624
2625         /* Clear these 3 events if any are pending */
2626         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2627                  FM10K_EICR_SWITCHREADY;
2628         if (cause)
2629                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2630
2631         /* Re-enable interrupt from device side */
2632         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2633                                         FM10K_ITR_MASK_CLEAR);
2634         /* Re-enable interrupt from host side */
2635         rte_intr_enable(dev->intr_handle);
2636 }
2637
2638 /**
2639  * VF interrupt handler triggered by NIC for handling specific interrupt.
2640  *
2641  * @param handle
2642  *  Pointer to interrupt handle.
2643  * @param param
2644  *  The address of parameter (struct rte_eth_dev *) registered before.
2645  *
2646  * @return
2647  *  void
2648  */
2649 static void
2650 fm10k_dev_interrupt_handler_vf(void *param)
2651 {
2652         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2653         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2654         struct fm10k_mbx_info *mbx = &hw->mbx;
2655         struct fm10k_dev_info *dev_info =
2656                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2657         const enum fm10k_mbx_state state = mbx->state;
2658         int status_mbx;
2659
2660         if (hw->mac.type != fm10k_mac_vf)
2661                 return;
2662
2663         /* Handle mailbox message if lock is acquired */
2664         fm10k_mbx_lock(hw);
2665         hw->mbx.ops.process(hw, &hw->mbx);
2666         fm10k_mbx_unlock(hw);
2667
2668         if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2669                 PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2670
2671                 fm10k_mbx_lock(hw);
2672                 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2673                                 MAX_LPORT_NUM, 1);
2674                 fm10k_mbx_unlock(hw);
2675
2676                 /* Setting reset flag */
2677                 dev_info->sm_down = 1;
2678                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2679                                 NULL, NULL);
2680         }
2681
2682         if (dev_info->sm_down == 1 &&
2683                         hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2684                 PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2685                 fm10k_mbx_lock(hw);
2686                 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2687                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2688                 if (status_mbx != FM10K_SUCCESS)
2689                         PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2690                 fm10k_mbx_unlock(hw);
2691
2692                 /* first clear the internal SW recording structure */
2693                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2694                 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2695                                 MAIN_VSI_POOL_NUMBER);
2696
2697                 /*
2698                  * Add the default MAC address and VLAN for the logical ports
2699                  * that have been created; leave it to the application to
2700                  * fully recover Rx filtering.
2701                  */
2702                 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2703                                 MAIN_VSI_POOL_NUMBER);
2704                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2705
2706                 dev_info->sm_down = 0;
2707                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2708                                 NULL, NULL);
2709         }
2710
2711         /* Re-enable interrupt from device side */
2712         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2713                                         FM10K_ITR_MASK_CLEAR);
2714         /* Re-enable interrupt from host side */
2715         rte_intr_enable(dev->intr_handle);
2716 }
2717
2718 /* Mailbox message handler in VF */
2719 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2720         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2721         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2722         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2723         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2724 };
2725
2726 static int
2727 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2728 {
2729         int err = 0;
2730
2731         /* Initialize mailbox lock */
2732         fm10k_mbx_initlock(hw);
2733
2734         /* Replace default message handler with new ones */
2735         if (hw->mac.type == fm10k_mac_vf)
2736                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2737
2738         if (err) {
2739                 PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
2740                                 err);
2741                 return err;
2742         }
2743         /* Connect to SM for PF device or PF for VF device */
2744         return hw->mbx.ops.connect(hw, &hw->mbx);
2745 }
2746
2747 static void
2748 fm10k_close_mbx_service(struct fm10k_hw *hw)
2749 {
2750         /* Disconnect from SM for PF device or PF for VF device */
2751         hw->mbx.ops.disconnect(hw, &hw->mbx);
2752 }
2753
static const struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure		= fm10k_dev_configure,
	.dev_start		= fm10k_dev_start,
	.dev_stop		= fm10k_dev_stop,
	.dev_close		= fm10k_dev_close,
	.promiscuous_enable	= fm10k_dev_promiscuous_enable,
	.promiscuous_disable	= fm10k_dev_promiscuous_disable,
	.allmulticast_enable	= fm10k_dev_allmulticast_enable,
	.allmulticast_disable	= fm10k_dev_allmulticast_disable,
	.stats_get		= fm10k_stats_get,
	.xstats_get		= fm10k_xstats_get,
	.xstats_get_names	= fm10k_xstats_get_names,
	.stats_reset		= fm10k_stats_reset,
	.xstats_reset		= fm10k_stats_reset,
	.link_update		= fm10k_link_update,
	.dev_infos_get		= fm10k_dev_infos_get,
	.dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
	.vlan_filter_set	= fm10k_vlan_filter_set,
	.vlan_offload_set	= fm10k_vlan_offload_set,
	.mac_addr_add		= fm10k_macaddr_add,
	.mac_addr_remove	= fm10k_macaddr_remove,
	.rx_queue_start		= fm10k_dev_rx_queue_start,
	.rx_queue_stop		= fm10k_dev_rx_queue_stop,
	.tx_queue_start		= fm10k_dev_tx_queue_start,
	.tx_queue_stop		= fm10k_dev_tx_queue_stop,
	.rx_queue_setup		= fm10k_rx_queue_setup,
	.rx_queue_release	= fm10k_rx_queue_release,
	.tx_queue_setup		= fm10k_tx_queue_setup,
	.tx_queue_release	= fm10k_tx_queue_release,
	.rx_descriptor_done	= fm10k_dev_rx_descriptor_done,
	.rx_queue_intr_enable	= fm10k_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable	= fm10k_dev_rx_queue_intr_disable,
	.reta_update		= fm10k_reta_update,
	.reta_query		= fm10k_reta_query,
	.rss_hash_update	= fm10k_rss_hash_update,
	.rss_hash_conf_get	= fm10k_rss_hash_conf_get,
};

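/*
 * FTAG (Fabric Tag) support is requested through a device argument:
 * the key-value pair "enable_ftag=1" in the device's devargs enables
 * it on all Rx and Tx queues. As an illustration (assuming the legacy
 * PCI whitelist syntax), a device could be started with:
 *   -w <pci_addr>,enable_ftag=1
 */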
static int ftag_check_handler(__rte_unused const char *key,
		const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
fm10k_check_ftag(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *ftag_key = "enable_ftag";

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, ftag_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* FTAG is enabled when the key-value pair enable_ftag=1 is present */
	if (rte_kvargs_process(kvlist, ftag_key,
				ftag_check_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

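/*
 * Vector Tx wrapper: transmit the burst in chunks of at most rs_thresh
 * packets via the fixed-burst vector routine, stopping early once a
 * chunk is only partially accepted (typically because the Tx ring is
 * full).
 */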
static uint16_t
fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		    uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
		ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
						 num);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_tx;
}

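/*
 * Select the Tx burst function: the vector (SSE) path is used only when
 * every queue satisfies the vector conditions; otherwise fall back to
 * the scalar path. A secondary process mirrors the primary's choice by
 * re-checking queue 0.
 */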
static void __attribute__((cold))
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
	struct fm10k_tx_queue *txq;
	int i;
	int use_sse = 1;
	uint16_t tx_ftag_en = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* the primary process has set the ftag flag and txq_flags */
		txq = dev->data->tx_queues[0];
		if (fm10k_tx_vec_condition_check(txq)) {
			dev->tx_pkt_burst = fm10k_xmit_pkts;
			dev->tx_pkt_prepare = fm10k_prep_pkts;
			PMD_INIT_LOG(DEBUG, "Use regular Tx func");
		} else {
			PMD_INIT_LOG(DEBUG, "Use vector Tx func");
			dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
			dev->tx_pkt_prepare = NULL;
		}
		return;
	}

	if (fm10k_check_ftag(dev->device->devargs))
		tx_ftag_en = 1;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->tx_ftag_en = tx_ftag_en;
		/* Check if the vector Tx condition is satisfied */
		if (fm10k_tx_vec_condition_check(txq))
			use_sse = 0;
	}

	if (use_sse) {
		PMD_INIT_LOG(DEBUG, "Use vector Tx func");
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			txq = dev->data->tx_queues[i];
			fm10k_txq_vec_setup(txq);
		}
		dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
		dev->tx_pkt_prepare = NULL;
	} else {
		dev->tx_pkt_burst = fm10k_xmit_pkts;
		dev->tx_pkt_prepare = fm10k_prep_pkts;
		PMD_INIT_LOG(DEBUG, "Use regular Tx func");
	}
}

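/*
 * Select the Rx burst function: the vector (SSE) path is used when the
 * device-level vector conditions hold, vector Rx is allowed and FTAG is
 * disabled; the scattered variants are picked when scattered Rx is
 * configured. Only the primary process updates the per-queue flags.
 */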
static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	uint16_t i, rx_using_sse;
	uint16_t rx_ftag_en = 0;

	if (fm10k_check_ftag(dev->device->devargs))
		rx_ftag_en = 1;

	/* In order to allow Vector Rx there are a few configuration
	 * conditions to be met.
	 */
	if (!fm10k_rx_vec_condition_check(dev) &&
			dev_info->rx_vec_allowed && !rx_ftag_en) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
		else
			dev->rx_pkt_burst = fm10k_recv_pkts_vec;
	} else if (dev->data->scattered_rx)
		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = fm10k_recv_pkts;

	rx_using_sse =
		(dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
		dev->rx_pkt_burst == fm10k_recv_pkts_vec);

	if (rx_using_sse)
		PMD_INIT_LOG(DEBUG, "Use vector Rx func");
	else
		PMD_INIT_LOG(DEBUG, "Use regular Rx func");

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];

		rxq->rx_using_sse = rx_using_sse;
		rxq->rx_ftag_en = rx_ftag_en;
	}
}

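/* Fill in default bus parameters and allow vector Rx by default */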
static void
fm10k_params_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_dev_info *info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);

	/* Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4. Until
	 * this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

	info->rx_vec_allowed = true;
}

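/*
 * Per-port initialization: install the ethdev ops and burst functions,
 * initialize the shared code and the hardware, read (or randomize) the
 * MAC address, set up the mailbox and interrupts, and, on a PF, wait
 * for the Switch Manager and the default VID before adding the default
 * MAC filter.
 */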
static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
	int diag, i;
	struct fm10k_macvlan_filter_info *macvlan;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;
	dev->tx_pkt_prepare = &fm10k_prep_pkts;

	/*
	 * The primary process does the whole initialization; secondary
	 * processes just select the same Rx and Tx functions as the primary.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fm10k_set_rx_function(dev);
		fm10k_set_tx_function(dev);
		return 0;
	}

	rte_eth_copy_pci_info(dev, pdev);

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	memset(macvlan, 0, sizeof(*macvlan));
	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = pdev->id.device_id;
	hw->vendor_id = pdev->id.vendor_id;
	hw->subsystem_device_id = pdev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)pdev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* Initialize parameters */
	fm10k_params_init(dev);

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k",
			ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	if (diag != FM10K_SUCCESS ||
		!is_valid_assigned_ether_addr(dev->data->mac_addrs)) {

		/* Generate a random addr */
		eth_random_addr(hw->mac.addr);
		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		ether_addr_copy((const struct ether_addr *)hw->mac.addr,
				&dev->data->mac_addrs[0]);
	}

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return -EIO;
	}

	/* PF and VF have different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* register callback func with EAL lib */
		rte_intr_callback_register(intr_handle,
			fm10k_dev_interrupt_handler_pf, (void *)dev);

		/* enable MISC interrupt */
		fm10k_dev_enable_intr_pf(dev);
	} else { /* VF */
		rte_intr_callback_register(intr_handle,
			fm10k_dev_interrupt_handler_vf, (void *)dev);

		fm10k_dev_enable_intr_vf(dev);
	}

	/* Enable intr after callback registered */
	rte_intr_enable(intr_handle);

	hw->mac.ops.update_int_moderator(hw);

	/* Make sure Switch Manager is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		int switch_ready = 0;

		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			fm10k_mbx_lock(hw);
			hw->mac.ops.get_host_state(hw, &switch_ready);
			fm10k_mbx_unlock(hw);
			if (switch_ready)
				break;
			/* Delay some time to acquire async LPORT_MAP info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (switch_ready == 0) {
			PMD_INIT_LOG(ERR, "switch is not ready");
			return -1;
		}
	}

	/*
	 * The calls below trigger mailbox operations, so take the lock to
	 * avoid racing with the interrupt handler: an operation on the
	 * mailbox FIFO raises an interrupt to the PF/SM, whose handler in
	 * turn raises an interrupt back to our side, where the mailbox FIFO
	 * is touched again.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
					MAX_LPORT_NUM, 1);

	/* Set unicast mode by default. The application can switch to another
	 * mode through other API functions.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
					FM10K_XCAST_MODE_NONE);

	fm10k_mbx_unlock(hw);

	/* Make sure the default VID is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			if (hw->mac.default_vid)
				break;
			/* Delay some time to acquire async port VLAN info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (!hw->mac.default_vid) {
			PMD_INIT_LOG(ERR, "default VID is not ready");
			return -1;
		}
	}

	/* Add default mac address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);

	return 0;
}

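/*
 * Per-port teardown, done only in the primary process: close the
 * device, disable and unregister its interrupt handler, and release
 * the MAC address array.
 */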
static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	fm10k_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* disable uio/vfio intr */
	rte_intr_disable(intr_handle);

	/* PF and VF have different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* disable interrupt */
		fm10k_dev_disable_intr_pf(dev);

		/* unregister callback func from EAL lib */
		rte_intr_callback_unregister(intr_handle,
			fm10k_dev_interrupt_handler_pf, (void *)dev);
	} else {
		/* disable interrupt */
		fm10k_dev_disable_intr_vf(dev);

		rte_intr_callback_unregister(intr_handle,
			fm10k_dev_interrupt_handler_vf, (void *)dev);
	}

	/* free mac memory */
	if (dev->data->mac_addrs) {
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
	}

	memset(hw, 0, sizeof(*hw));

	return 0;
}

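/* PCI glue: allocate the per-port adapter on probe, tear it down on remove */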
static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
}

static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
}

/*
 * The set of PCI devices this driver supports. This driver will enable both PF
 * and SRIOV-VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_pmd_fm10k = {
	.id_table = pci_id_fm10k_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_fm10k_pci_probe,
	.remove = eth_fm10k_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");