/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013-2016 Intel Corporation
 */

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Maximum number of attempts to query switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval between switch status queries */
#define WAIT_SWITCH_MSG_US    100000
/* A period of quiescence for switch */
#define FM10K_SWITCH_QUIESCE_US 100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)

/* default 1:1 map from queue ID to interrupt vector ID */
#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
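
/* Q2V(pdev, i) reads the interrupt vector assigned to RX queue i from
 * the intr_handle mapping that is filled in during interrupt setup.
 */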

/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM    128
#define GLORT_FD_Q_BASE  0x40
#define GLORT_PF_MASK    0xFFC0
#define GLORT_FD_MASK    GLORT_PF_MASK
#define GLORT_FD_INDEX   GLORT_FD_Q_BASE
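
/*
 * Layout sketch (derived from the defines above): logical ports
 * dglort_map + 0x00..0x3F address the PF/VMDQ pools, while
 * dglort_map + 0x40..0x7F (starting at GLORT_FD_Q_BASE) are reserved
 * for the Flow Director queues; both ranges share the 0xFFC0 mask.
 */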

static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
	const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);

struct fm10k_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
	{"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
	{"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
	{"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
	{"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
	{"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
	{"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
	{"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
	{"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
		nodesc_drop)},
};

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
		sizeof(fm10k_hw_stats_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
	{"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
	{"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
	{"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
		sizeof(fm10k_hw_stats_rx_q_strings[0]))

struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
	{"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
	{"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
		sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
		(FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);

static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
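
/*
 * Usage pattern throughout this file: every mailbox request to the switch
 * manager is bracketed by fm10k_mbx_lock()/fm10k_mbx_unlock(); the lock is
 * a spinlock polled every FM10K_MBXLOCK_DELAY_US rather than a blocking
 * acquire.
 */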

/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
	return -1;
}

uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
	__rte_unused void *rx_queue,
	__rte_unused struct rte_mbuf **rx_pkts,
	__rte_unused uint16_t nb_pkts)
{
	return 0;
}

uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
		__rte_unused void *rx_queue,
		__rte_unused struct rte_mbuf **rx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	return 0;
}

int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
	return -1;
}

void __attribute__((weak))
fm10k_rx_queue_release_mbufs_vec(
		__rte_unused struct fm10k_rx_queue *rxq)
{
	return;
}

void __attribute__((weak))
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
	return;
}

int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
	return -1;
}

uint16_t __attribute__((weak))
fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
			   __rte_unused struct rte_mbuf **tx_pkts,
			   __rte_unused uint16_t nb_pkts)
{
	return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
	static const union fm10k_rx_desc zero = {{0} };
	uint64_t dma_addr;
	int i, diag;
	PMD_INIT_FUNC_TRACE();

	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
	if (diag != 0)
		return -ENOMEM;

	for (i = 0; i < q->nb_desc; ++i) {
		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
						q->nb_desc);
			return -EINVAL;
		}
		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
		q->hw_ring[i].q.pkt_addr = dma_addr;
		q->hw_ring[i].q.hdr_addr = dma_addr;
	}

	/* initialize extra software ring entries. Space for these extra
	 * entries is always allocated.
	 */
	memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
	for (i = 0; i < q->nb_fake_desc; ++i) {
		q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
		q->hw_ring[q->nb_desc + i] = zero;
	}

	q->next_dd = 0;
	q->next_alloc = 0;
	q->next_trigger = q->alloc_thresh - 1;
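	/* Hand the whole ring to HW: tail points at the last descriptor, so
	 * every buffer just programmed above is available for receive.
	 */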
	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
	q->rxrearm_start = 0;
	q->rxrearm_nb = 0;

	return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
	uint32_t i;
	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* zero faked descriptors */
	for (i = 0; i < q->nb_fake_desc; ++i)
		q->hw_ring[q->nb_desc + i] = zero;

	/* vPMD driver has a different way of releasing mbufs. */
	if (q->rx_using_sse) {
		fm10k_rx_queue_release_mbufs_vec(q);
		return;
	}

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}

/*
 * free all queue memory used when releasing the queue (i.e. during
 * reconfigure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
		rx_queue_clean(q);
		if (q->sw_ring) {
			rte_free(q->sw_ring);
			q->sw_ring = NULL;
		}
		rte_free(q);
		q = NULL;
	}
}

/*
 * disable RX queue, wait until HW finishes the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
			reg & ~FM10K_RXQCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
		if (!(reg & FM10K_RXQCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}

/*
 * reset queue to initial state, used when starting the device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	q->last_free = 0;
	q->next_free = 0;
	q->nb_used = 0;
	q->nb_free = q->nb_desc - 1;
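	/* rs_tracker needs one slot per descriptor group that can carry an
	 * RS bit, hence the (nb_desc + 1) / rs_thresh capacity below.
	 */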
	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
	uint32_t i;
	PMD_INIT_FUNC_TRACE();

	/* zero descriptor rings */
	for (i = 0; i < q->nb_desc; ++i)
		q->hw_ring[i] = zero;

	/* free software buffers */
	for (i = 0; i < q->nb_desc; ++i) {
		if (q->sw_ring[i]) {
			rte_pktmbuf_free_seg(q->sw_ring[i]);
			q->sw_ring[i] = NULL;
		}
	}
}

/*
 * free all queue memory used when releasing the queue (i.e. during
 * reconfigure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	if (q) {
		PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
		tx_queue_clean(q);
		if (q->rs_tracker.list) {
			rte_free(q->rs_tracker.list);
			q->rs_tracker.list = NULL;
		}
		if (q->sw_ring) {
			rte_free(q->sw_ring);
			q->sw_ring = NULL;
		}
		rte_free(q);
		q = NULL;
	}
}

/*
 * disable TX queue, wait until HW finishes the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
	uint32_t reg, i;

	reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
	FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
			reg & ~FM10K_TXDCTL_ENABLE);

	/* Wait 100us at most */
	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
		rte_delay_us(1);
		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
		if (!(reg & FM10K_TXDCTL_ENABLE))
			break;
	}

	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
		return -1;

	return 0;
}

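/*
 * Validate the requested multi-queue RX mode: DCB is not supported,
 * VMDQ is PF-only, and the VMDQ pool count must fit both the pool
 * bitmap width and the number of configured RX queues.
 */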
static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_vmdq_rx_conf *vmdq_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;

	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}

	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
		return 0;

	if (hw->mac.type == fm10k_mac_vf) {
		PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
		return -EINVAL;
	}

	/* Check VMDQ queue pool number */
	if (vmdq_conf->nb_queue_pools >
			sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
			vmdq_conf->nb_queue_pools > nb_rx_q) {
		PMD_INIT_LOG(ERR, "Too many queue pools: %d",
			vmdq_conf->nb_queue_pools);
		return -EINVAL;
	}

	return 0;
}

static const struct fm10k_txq_ops def_txq_ops = {
	.reset = tx_queue_reset,
};

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
		PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
	/* multiple queue mode checking */
	ret = fm10k_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
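
/* For reference: fls(0) == 0, fls(1) == 1, fls(8) == 4, fls(15) == 4. */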

static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_vmdq_rx_conf *vmdq_conf;
	uint32_t i;

	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
		if (!vmdq_conf->pool_map[i].pools)
			continue;
		fm10k_mbx_lock(hw);
		fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
		fm10k_mbx_unlock(hw);
	}
}

static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Add default mac address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);
}

static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint32_t mrqc, *key, i, reta, j;
	uint64_t hf;

#define RSS_KEY_SIZE 40
	static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
		0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
		0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
	};

	if (dev->data->nb_rx_queues == 1 ||
	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
	    dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
		return;
	}

	/* RSS key is rss_intel_key (default) or user-provided (rss_key) */
	if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
		key = (uint32_t *)rss_intel_key;
	else
		key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

	/* Now fill our hash function seeds, 4 bytes at a time */
	for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
		FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

	/*
	 * Fill in redirection table
	 * The byte-swap is needed because NIC registers are in
	 * little-endian order.
	 */
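	/* Example: with 4 RX queues the index pattern 0, 1, 2, 3 repeats
	 * through the table, four one-byte entries packed per register.
	 */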
	reta = 0;
	for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
		if (j == dev->data->nb_rx_queues)
			j = 0;
		reta = (reta << CHAR_BIT) | j;
		if ((i & 3) == 3)
			FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
					rte_bswap32(reta));
	}

	/*
	 * Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
	mrqc = 0;
	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

	if (mrqc == 0) {
		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
			"supported", hf);
		return;
	}

	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t i;

	for (i = 0; i < nb_lport_new; i++) {
		/* Set unicast mode by default. The application can switch
		 * to another mode via other API functions.
		 */
		fm10k_mbx_lock(hw);
		hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
			FM10K_XCAST_MODE_NONE);
		fm10k_mbx_unlock(hw);
	}
}

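/*
 * Program the RX multi-queue layout: RSS is configured first, then (PF
 * only) the logical-port count and the MAC/VLAN filter table are rebuilt
 * whenever the VMDQ pool count changes.
 */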
static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_vmdq_rx_conf *vmdq_conf;
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct fm10k_macvlan_filter_info *macvlan;
	uint16_t nb_queue_pools = 0; /* pool number in configuration */
	uint16_t nb_lport_new;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	fm10k_dev_rss_configure(dev);

	/* only PF supports VMDQ */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
		nb_queue_pools = vmdq_conf->nb_queue_pools;

	/* no pool number change, no need to update logic port and VLAN/MAC */
	if (macvlan->nb_queue_pools == nb_queue_pools)
		return;

	nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
	fm10k_dev_logic_port_update(dev, nb_lport_new);

	/* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
	memset(dev->data->mac_addrs, 0,
		ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
		&dev->data->mac_addrs[0]);
	memset(macvlan, 0, sizeof(*macvlan));
	macvlan->nb_queue_pools = nb_queue_pools;

	if (nb_queue_pools)
		fm10k_dev_vmdq_rx_configure(dev);
	else
		fm10k_dev_pf_main_vsi_reset(dev);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, ret;
	struct fm10k_tx_queue *txq;
	uint64_t base_addr;
	uint32_t size;

	/* Disable TXINT to avoid possible interrupt */
	for (i = 0; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_TXINT(i),
				3 << FM10K_TXINT_TIMER_SHIFT);

	/* Setup TX queue */
	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
		txq = dev->data->tx_queues[i];
		base_addr = txq->hw_ring_phys_addr;
		size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

		/* disable queue to avoid issues while updating state */
		ret = tx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}
		/* Enable use of FTAG bit in TX descriptor, PFVTCTL
		 * register is read-only for VF.
		 */
		if (fm10k_check_ftag(dev->device->devargs)) {
			if (hw->mac.type == fm10k_mac_pf) {
				FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
						FM10K_PFVTCTL_FTAG_DESC_ENABLE);
				PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
			} else {
				PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
				return -ENOTSUP;
			}
		}

		/* set location and size for descriptor ring */
		FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);

		/* assign default SGLORT for each TX queue by PF */
		if (hw->mac.type == fm10k_mac_pf)
			FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
	}

	/* set up vector or scalar TX function as appropriate */
	fm10k_set_tx_function(dev);

	return 0;
}

static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_macvlan_filter_info *macvlan;
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
	int i, ret;
	struct fm10k_rx_queue *rxq;
	uint64_t base_addr;
	uint32_t size;
	uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	uint32_t logic_port = hw->mac.dglort_map;
	uint16_t buf_size;
	uint16_t queue_stride = 0;

	/* enable RXINT for interrupt mode */
	i = 0;
	if (rte_intr_dp_is_en(intr_handle)) {
		for (; i < dev->data->nb_rx_queues; i++) {
			FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
			if (hw->mac.type == fm10k_mac_pf)
				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
					FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
			else
				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
					FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
		}
	}
	/* Disable other RXINT to avoid possible interrupt */
	for (; i < hw->mac.max_queues; i++)
		FM10K_WRITE_REG(hw, FM10K_RXINT(i),
			3 << FM10K_RXINT_TIMER_SHIFT);

	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		rxq = dev->data->rx_queues[i];
		base_addr = rxq->hw_ring_phys_addr;
		size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

		/* disable queue to avoid issues while updating state */
		ret = rx_queue_disable(hw, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
			return -1;
		}

		/* Setup the Base and Length of the Rx Descriptor Ring */
		FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
				base_addr & UINT64_LOWER_32BITS_MASK);
		FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
		FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

		/* Configure the Rx buffer size for one buffer without split */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
			RTE_PKTMBUF_HEADROOM);
		/* As RX buffer is aligned to 512B within mbuf, some bytes are
		 * reserved for this purpose, and the worst case could be 511B.
		 * But SRR reg assumes all buffers have the same size. In order
		 * to fill the gap, we'll have to consider the worst case and
		 * assume 512B is reserved. If we don't do so, it's possible
		 * for HW to overwrite data to next mbuf.
		 */
		buf_size -= FM10K_RX_DATABUF_ALIGN;

		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
				(buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
				FM10K_SRRCTL_LOOPBACK_SUPPRESS);

		/* Add dual VLAN tag length to support double VLAN tagging */
		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
			dev->data->dev_conf.rxmode.enable_scatter) {
			uint32_t reg;
			dev->data->scattered_rx = 1;
			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
			reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
			FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
		}

		/* Enable drop on empty, it's RO for VF */
		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
		FM10K_WRITE_FLUSH(hw);
	}

	/* Configure VMDQ/RSS if applicable */
	fm10k_dev_mq_rx_configure(dev);

	/* Decide the best RX function */
	fm10k_set_rx_function(dev);

	/* update RX_SGLORT for loopback suppression */
	if (hw->mac.type != fm10k_mac_pf)
		return 0;
	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	if (macvlan->nb_queue_pools)
		queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		if (i && queue_stride && !(i % queue_stride))
			logic_port++;
		FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
	}

	return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = -1;
	uint32_t reg;
	struct fm10k_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];
		err = rx_queue_reset(rxq);
		if (err == -ENOMEM) {
			PMD_INIT_LOG(ERR, "Failed to alloc memory: %d", err);
			return err;
		} else if (err == -EINVAL) {
			PMD_INIT_LOG(ERR,
				"Invalid buffer address alignment: %d", err);
			return err;
		}

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled on real
		 * hardware, but BEFORE the queue is enabled when using the
		 * emulation platform. Do it in both places for now and remove
		 * this comment and the following two register writes when the
		 * emulation platform is no longer being used.
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

		/* Set PF ownership flag for PF devices */
		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
		if (hw->mac.type == fm10k_mac_pf)
			reg |= FM10K_RXQCTL_PF;
		reg |= FM10K_RXQCTL_ENABLE;
		/* enable RX queue */
		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
		FM10K_WRITE_FLUSH(hw);

		/* Setup the HW Rx Head and Tail Descriptor Pointers
		 * Note: this must be done AFTER the queue is enabled
		 */
		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		/* Disable RX queue */
		rx_queue_disable(hw, rx_queue_id);

		/* Free mbuf and clean HW ring */
		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

		q->ops->reset(q);

		/* reset head and tail pointers */
		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

		/* enable TX queue */
		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
					FM10K_TXDCTL_ENABLE | txdctl);
		FM10K_WRITE_FLUSH(hw);
		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	} else
		err = -1;

	return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		tx_queue_disable(hw, tx_queue_id);
		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

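/*
 * The switch manager leaves FM10K_DGLORTMAP_NONE in the map until it has
 * assigned this port a glort range; treat that marker as "no valid glort".
 */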
static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
	return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
		!= FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if it didn't acquire valid glort range */
	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
		return;

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_PROMISC);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t mode;
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if it didn't acquire valid glort range */
	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
		return;

	if (dev->data->all_multicast == 1)
		mode = FM10K_XCAST_MODE_ALLMULTI;
	else
		mode = FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				mode);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if it didn't acquire valid glort range */
	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
		return;

	/* If promiscuous mode is enabled, it doesn't make sense to enable
	 * allmulticast and disable promiscuous since fm10k can only select
	 * one of the modes.
	 */
	if (dev->data->promiscuous) {
		PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
			"no need to enable allmulticast");
		return;
	}

	fm10k_mbx_lock(hw);
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_ALLMULTI);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int status;

	PMD_INIT_FUNC_TRACE();

	/* Return if it didn't acquire valid glort range */
	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
		return;

	if (dev->data->promiscuous) {
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
			"since promisc mode is enabled");
		return;
	}

	fm10k_mbx_lock(hw);
	/* Change mode to unicast mode */
	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
				FM10K_XCAST_MODE_NONE);
	fm10k_mbx_unlock(hw);

	if (status != FM10K_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
	uint16_t nb_queue_pools;
	struct fm10k_macvlan_filter_info *macvlan;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	nb_queue_pools = macvlan->nb_queue_pools;
	pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
	rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;

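	/* Worked example: 4 VMDQ pools over 16 RX queues gives
	 * pool_len = fls(3) = 2 and rss_len = fls(15) - 2 = 2, i.e. the
	 * DGLORT decodes into 2^2 pools of 2^2 RSS queues each.
	 */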
	/* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F are used by FD */
	dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
	dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
			hw->mac.dglort_map;
	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
	/* Configure VMDQ/RSS DGlort Decoder */
	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

	/* Flow Director configurations, only queue number is valid. */
	dglortdec = fls(dev->data->nb_rx_queues - 1);
	dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
			(hw->mac.dglort_map + GLORT_FD_Q_BASE);
	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);

	/* Invalidate all other GLORT entries */
	for (i = 2; i < FM10K_DGLORT_COUNT; i++)
		FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
				FM10K_DGLORTMAP_NONE);
}

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
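/*
 * Mask for rounding an RX buffer size up to the granularity implied by
 * FM10K_SRRCTL_BSIZEPKT_SHIFT; one illustrative use is
 * (len + BSIZEPKT_ROUNDUP) >> FM10K_SRRCTL_BSIZEPKT_SHIFT.
 */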
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i, diag;

	PMD_INIT_FUNC_TRACE();

	/* stop, init, then start the hw */
	diag = fm10k_stop_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_start_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
		return -EIO;
	}

	diag = fm10k_dev_tx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
		return diag;
	}

	if (fm10k_dev_rxq_interrupt_setup(dev))
		return -EIO;

	diag = fm10k_dev_rx_init(dev);
	if (diag) {
		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
		return diag;
	}

	if (hw->mac.type == fm10k_mac_pf)
		fm10k_dev_dglort_map_configure(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq;
		rxq = dev->data->rx_queues[i];

		if (rxq->rx_deferred_start)
			continue;
		diag = fm10k_dev_rx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < i; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct fm10k_tx_queue *txq;
		txq = dev->data->tx_queues[i];

		if (txq->tx_deferred_start)
			continue;
		diag = fm10k_dev_tx_queue_start(dev, i);
		if (diag != 0) {
			int j;
			for (j = 0; j < i; ++j)
				tx_queue_clean(dev->data->tx_queues[j]);
			for (j = 0; j < dev->data->nb_rx_queues; ++j)
				rx_queue_clean(dev->data->rx_queues[j]);
			return diag;
		}
	}

	/* Update default vlan when not in VMDQ mode */
	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

	fm10k_link_update(dev, 0);

	return 0;
}

static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
	int i;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->tx_queues)
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			fm10k_dev_tx_queue_stop(dev, i);

	if (dev->data->rx_queues)
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			fm10k_dev_rx_queue_stop(dev, i);

	/* Disable datapath event */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			FM10K_WRITE_REG(hw, FM10K_RXINT(i),
				3 << FM10K_RXINT_TIMER_SHIFT);
			if (hw->mac.type == fm10k_mac_pf)
				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
					FM10K_ITR_MASK_SET);
			else
				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
					FM10K_ITR_MASK_SET);
		}
	}
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_free(intr_handle->intr_vec);
	intr_handle->intr_vec = NULL;
}

static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->tx_queues) {
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

			tx_queue_free(txq);
		}
	}

	if (dev->data->rx_queues) {
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			fm10k_rx_queue_release(dev->data->rx_queues[i]);
	}
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

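	/* Release every logical port this PF owns: both the PF/VMDQ glorts
	 * and the Flow Director range, i.e. all MAX_LPORT_NUM entries, so
	 * the switch drops the full mapping rather than just the first port.
	 */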
	fm10k_mbx_lock(hw);
	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
		MAX_LPORT_NUM, false);
	fm10k_mbx_unlock(hw);

	/* allow 100ms for device to quiesce */
	rte_delay_us(FM10K_SWITCH_QUIESCE_US);

	/* Stop mailbox service first */
	fm10k_close_mbx_service(hw);
	fm10k_dev_stop(dev);
	fm10k_dev_queue_release(dev);
	fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete)
{
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	PMD_INIT_FUNC_TRACE();

	/* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
	 * leave the speed undefined since there is no 50Gbps Ethernet.
	 */
	dev->data->dev_link.link_speed  = 0;
	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	dev->data->dev_link.link_status =
		dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;

	return 0;
}

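/*
 * Per the ethdev xstats convention, the total statistic count is returned
 * regardless of the buffer; names are only written when xstats_names is
 * non-NULL (the size limit is validated by the ethdev layer).
 */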
static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
{
	unsigned i, q;
	unsigned count = 0;

	if (xstats_names != NULL) {
		/* Note: limit checked in rte_eth_xstats_names() */

		/* Global stats */
		for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
			snprintf(xstats_names[count].name,
				sizeof(xstats_names[count].name),
				"%s", fm10k_hw_stats_strings[count].name);
			count++;
		}

		/* PF queue stats */
		for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
			for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"rx_q%u_%s", q,
					fm10k_hw_stats_rx_q_strings[i].name);
				count++;
			}
			for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"tx_q%u_%s", q,
					fm10k_hw_stats_tx_q_strings[i].name);
				count++;
			}
		}
	}
	return FM10K_NB_XSTATS;
}

static int
fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		 unsigned n)
{
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	unsigned i, q, count = 0;

	if (n < FM10K_NB_XSTATS)
		return FM10K_NB_XSTATS;

	/* Global stats */
	for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
			fm10k_hw_stats_strings[count].offset);
		xstats[count].id = count;
		count++;
	}

	/* PF queue stats */
	for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
		for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
			xstats[count].value =
				*(uint64_t *)(((char *)&hw_stats->q[q]) +
				fm10k_hw_stats_rx_q_strings[i].offset);
			xstats[count].id = count;
			count++;
		}
		for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
			xstats[count].value =
				*(uint64_t *)(((char *)&hw_stats->q[q]) +
				fm10k_hw_stats_tx_q_strings[i].offset);
			xstats[count].id = count;
			count++;
		}
	}

	return FM10K_NB_XSTATS;
}

static int
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint64_t ipackets, opackets, ibytes, obytes;
	struct fm10k_hw *hw =
		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	fm10k_update_hw_stats(hw, hw_stats);

	ipackets = opackets = ibytes = obytes = 0;
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < hw->mac.max_queues); ++i) {
		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
		stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
		stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
		ipackets += stats->q_ipackets[i];
		opackets += stats->q_opackets[i];
		ibytes   += stats->q_ibytes[i];
		obytes   += stats->q_obytes[i];
	}
	stats->ipackets = ipackets;
	stats->opackets = opackets;
	stats->ibytes = ibytes;
	stats->obytes = obytes;
	return 0;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_hw_stats *hw_stats =
		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	memset(hw_stats, 0, sizeof(*hw_stats));
	fm10k_rebind_hw_stats(hw, hw_stats);
}
1368
1369 static void
1370 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1371         struct rte_eth_dev_info *dev_info)
1372 {
1373         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1374         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1375
1376         PMD_INIT_FUNC_TRACE();
1377
1378         dev_info->pci_dev            = pdev;
1379         dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1380         dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1381         dev_info->max_rx_queues      = hw->mac.max_queues;
1382         dev_info->max_tx_queues      = hw->mac.max_queues;
1383         dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1384         dev_info->max_hash_mac_addrs = 0;
1385         dev_info->max_vfs            = pdev->max_vfs;
1386         dev_info->vmdq_pool_base     = 0;
1387         dev_info->vmdq_queue_base    = 0;
1388         dev_info->max_vmdq_pools     = ETH_32_POOLS;
1389         dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1390         dev_info->rx_offload_capa =
1391                 DEV_RX_OFFLOAD_VLAN_STRIP |
1392                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1393                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1394                 DEV_RX_OFFLOAD_TCP_CKSUM;
1395         dev_info->tx_offload_capa =
1396                 DEV_TX_OFFLOAD_VLAN_INSERT |
1397                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1398                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1399                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1400                 DEV_TX_OFFLOAD_TCP_TSO;
1401
1402         dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1403         dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1404
1405         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1406                 .rx_thresh = {
1407                         .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1408                         .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1409                         .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1410                 },
1411                 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1412                 .rx_drop_en = 0,
1413         };
1414
1415         dev_info->default_txconf = (struct rte_eth_txconf) {
1416                 .tx_thresh = {
1417                         .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1418                         .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1419                         .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1420                 },
1421                 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1422                 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1423                 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1424         };
1425
1426         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1427                 .nb_max = FM10K_MAX_RX_DESC,
1428                 .nb_min = FM10K_MIN_RX_DESC,
1429                 .nb_align = FM10K_MULT_RX_DESC,
1430         };
1431
1432         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1433                 .nb_max = FM10K_MAX_TX_DESC,
1434                 .nb_min = FM10K_MIN_TX_DESC,
1435                 .nb_align = FM10K_MULT_TX_DESC,
1436                 .nb_seg_max = FM10K_TX_MAX_SEG,
1437                 .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1438         };
1439
1440         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1441                         ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1442                         ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1443 }
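
/*
 * Illustrative application-side sketch (hypothetical helper, not part of
 * this driver): query the capabilities advertised by fm10k_dev_infos_get()
 * above and test for TSO support before enabling it.
 */
static int
example_port_has_tso(uint16_t port_id)
{
        struct rte_eth_dev_info info;

        rte_eth_dev_info_get(port_id, &info);
        return (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) != 0;
}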
1444
1445 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1446 static const uint32_t *
1447 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1448 {
1449         if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1450             dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
                static const uint32_t ptypes[] = {
1452                         /* refers to rx_desc_to_ol_flags() */
1453                         RTE_PTYPE_L2_ETHER,
1454                         RTE_PTYPE_L3_IPV4,
1455                         RTE_PTYPE_L3_IPV4_EXT,
1456                         RTE_PTYPE_L3_IPV6,
1457                         RTE_PTYPE_L3_IPV6_EXT,
1458                         RTE_PTYPE_L4_TCP,
1459                         RTE_PTYPE_L4_UDP,
1460                         RTE_PTYPE_UNKNOWN
1461                 };
1462
1463                 return ptypes;
1464         } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1465                    dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
                static const uint32_t ptypes_vec[] = {
1467                         /* refers to fm10k_desc_to_pktype_v() */
1468                         RTE_PTYPE_L3_IPV4,
1469                         RTE_PTYPE_L3_IPV4_EXT,
1470                         RTE_PTYPE_L3_IPV6,
1471                         RTE_PTYPE_L3_IPV6_EXT,
1472                         RTE_PTYPE_L4_TCP,
1473                         RTE_PTYPE_L4_UDP,
1474                         RTE_PTYPE_TUNNEL_GENEVE,
1475                         RTE_PTYPE_TUNNEL_NVGRE,
1476                         RTE_PTYPE_TUNNEL_VXLAN,
1477                         RTE_PTYPE_TUNNEL_GRE,
1478                         RTE_PTYPE_UNKNOWN
1479                 };
1480
1481                 return ptypes_vec;
1482         }
1483
1484         return NULL;
1485 }
1486 #else
1487 static const uint32_t *
1488 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1489 {
1490         return NULL;
1491 }
1492 #endif
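
/*
 * Illustrative application-side sketch (hypothetical helper, not part of
 * this driver): fetch the packet types the active Rx burst function can
 * report, as returned by fm10k_dev_supported_ptypes_get() above. Returns
 * the number of supported L3 ptypes, or a negative errno.
 */
static int
example_count_l3_ptypes(uint16_t port_id)
{
        uint32_t ptypes[16];

        return rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
                                                ptypes, RTE_DIM(ptypes));
}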
1493
1494 static int
1495 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1496 {
1497         s32 result;
1498         uint16_t mac_num = 0;
1499         uint32_t vid_idx, vid_bit, mac_index;
1500         struct fm10k_hw *hw;
1501         struct fm10k_macvlan_filter_info *macvlan;
1502         struct rte_eth_dev_data *data = dev->data;
1503
1504         hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1505         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1506
1507         if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1508                 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1509                 return -EINVAL;
1510         }
1511
1512         if (vlan_id > ETH_VLAN_ID_MAX) {
1513                 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1514                 return -EINVAL;
1515         }
1516
1517         vid_idx = FM10K_VFTA_IDX(vlan_id);
1518         vid_bit = FM10K_VFTA_BIT(vlan_id);
1519         /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1520         if (on && (macvlan->vfta[vid_idx] & vid_bit))
1521                 return 0;
1522         /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1523         if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
                        "in the VLAN filter table");
1526                 return -EINVAL;
1527         }
1528
1529         fm10k_mbx_lock(hw);
1530         result = fm10k_update_vlan(hw, vlan_id, 0, on);
1531         fm10k_mbx_unlock(hw);
1532         if (result != FM10K_SUCCESS) {
1533                 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1534                 return -EIO;
1535         }
1536
1537         for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1538                         (result == FM10K_SUCCESS); mac_index++) {
1539                 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1540                         continue;
1541                 if (mac_num > macvlan->mac_num - 1) {
                        PMD_INIT_LOG(ERR, "MAC address count "
                                        "does not match");
1544                         break;
1545                 }
1546                 fm10k_mbx_lock(hw);
1547                 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1548                         data->mac_addrs[mac_index].addr_bytes,
1549                         vlan_id, on, 0);
1550                 fm10k_mbx_unlock(hw);
1551                 mac_num++;
1552         }
1553         if (result != FM10K_SUCCESS) {
1554                 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1555                 return -EIO;
1556         }
1557
1558         if (on) {
1559                 macvlan->vlan_num++;
1560                 macvlan->vfta[vid_idx] |= vid_bit;
1561         } else {
1562                 macvlan->vlan_num--;
1563                 macvlan->vfta[vid_idx] &= ~vid_bit;
1564         }
1565         return 0;
1566 }
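
/*
 * Illustrative application-side sketch (hypothetical helper, not part of
 * this driver): adding a VLAN through the ethdev API lands in
 * fm10k_vlan_filter_set() above, which rejects the call in VMDQ mode or
 * for vlan_id >= 4096.
 */
static int
example_allow_vlan(uint16_t port_id, uint16_t vlan_id)
{
        return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
}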
1567
1568 static int
1569 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1570 {
1571         if (mask & ETH_VLAN_STRIP_MASK) {
1572                 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1573                         PMD_INIT_LOG(ERR, "VLAN stripping is "
1574                                         "always on in fm10k");
1575         }
1576
1577         if (mask & ETH_VLAN_EXTEND_MASK) {
1578                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1579                         PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1580                                         "supported in fm10k");
1581         }
1582
1583         if (mask & ETH_VLAN_FILTER_MASK) {
1584                 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1585                         PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1586         }
1587
1588         return 0;
1589 }
1590
1591 /* Add/Remove a MAC address, and update filters to main VSI */
1592 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1593                 const u8 *mac, bool add, uint32_t pool)
1594 {
1595         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1596         struct fm10k_macvlan_filter_info *macvlan;
1597         uint32_t i, j, k;
1598
1599         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1600
1601         if (pool != MAIN_VSI_POOL_NUMBER) {
1602                 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1603                         "mac to pool %u", pool);
1604                 return;
1605         }
1606         for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1607                 if (!macvlan->vfta[j])
1608                         continue;
1609                 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1610                         if (!(macvlan->vfta[j] & (1 << k)))
1611                                 continue;
1612                         if (i + 1 > macvlan->vlan_num) {
                                PMD_INIT_LOG(ERR, "VLAN count does not match");
1614                                 return;
1615                         }
1616                         fm10k_mbx_lock(hw);
1617                         fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1618                                 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1619                         fm10k_mbx_unlock(hw);
1620                         i++;
1621                 }
1622         }
1623 }
1624
1625 /* Add/Remove a MAC address, and update filters to VMDQ */
1626 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1627                 const u8 *mac, bool add, uint32_t pool)
1628 {
1629         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1630         struct fm10k_macvlan_filter_info *macvlan;
1631         struct rte_eth_vmdq_rx_conf *vmdq_conf;
1632         uint32_t i;
1633
1634         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1635         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1636
1637         if (pool > macvlan->nb_queue_pools) {
1638                 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1639                         " Max pool is %u",
1640                         pool, macvlan->nb_queue_pools);
1641                 return;
1642         }
1643         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1644                 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1645                         continue;
1646                 fm10k_mbx_lock(hw);
1647                 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1648                         vmdq_conf->pool_map[i].vlan_id, add, 0);
1649                 fm10k_mbx_unlock(hw);
1650         }
1651 }
1652
1653 /* Add/Remove a MAC address, and update filters */
1654 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1655                 const u8 *mac, bool add, uint32_t pool)
1656 {
1657         struct fm10k_macvlan_filter_info *macvlan;
1658
1659         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1660
1661         if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1662                 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1663         else
1664                 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1665
1666         if (add)
1667                 macvlan->mac_num++;
1668         else
1669                 macvlan->mac_num--;
1670 }
1671
1672 /* Add a MAC address, and update filters */
1673 static int
1674 fm10k_macaddr_add(struct rte_eth_dev *dev,
1675                 struct ether_addr *mac_addr,
1676                 uint32_t index,
1677                 uint32_t pool)
1678 {
1679         struct fm10k_macvlan_filter_info *macvlan;
1680
1681         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1682         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1683         macvlan->mac_vmdq_id[index] = pool;
1684         return 0;
1685 }
1686
1687 /* Remove a MAC address, and update filters */
1688 static void
1689 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1690 {
1691         struct rte_eth_dev_data *data = dev->data;
1692         struct fm10k_macvlan_filter_info *macvlan;
1693
1694         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1695         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1696                         FALSE, macvlan->mac_vmdq_id[index]);
1697         macvlan->mac_vmdq_id[index] = 0;
1698 }
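
/*
 * Illustrative application-side sketch (hypothetical helper and MAC value,
 * not part of this driver): add a second unicast address to pool 0 (the
 * main VSI); fm10k_macaddr_add() records the pool in mac_vmdq_id[] so a
 * later remove targets the same pool.
 */
static int
example_add_secondary_mac(uint16_t port_id)
{
        struct ether_addr mac = {
                .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01}
        };

        return rte_eth_dev_mac_addr_add(port_id, &mac, 0);
}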
1699
1700 static inline int
1701 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1702 {
1703         if ((request < min) || (request > max) || ((request % mult) != 0))
1704                 return -1;
1705         else
1706                 return 0;
1707 }
1708
1709
1710 static inline int
1711 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1712 {
1713         if ((request < min) || (request > max) || ((div % request) != 0))
1714                 return -1;
1715         else
1716                 return 0;
1717 }
1718
1719 static inline int
1720 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1721 {
1722         uint16_t rx_free_thresh;
1723
1724         if (conf->rx_free_thresh == 0)
1725                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1726         else
1727                 rx_free_thresh = conf->rx_free_thresh;
1728
1729         /* make sure the requested threshold satisfies the constraints */
1730         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1731                         FM10K_RX_FREE_THRESH_MAX(q),
1732                         FM10K_RX_FREE_THRESH_DIV(q),
1733                         rx_free_thresh)) {
1734                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1735                         "less than or equal to %u, "
1736                         "greater than or equal to %u, "
1737                         "and a divisor of %u",
1738                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1739                         FM10K_RX_FREE_THRESH_MIN(q),
1740                         FM10K_RX_FREE_THRESH_DIV(q));
1741                 return -EINVAL;
1742         }
1743
1744         q->alloc_thresh = rx_free_thresh;
1745         q->drop_en = conf->rx_drop_en;
1746         q->rx_deferred_start = conf->rx_deferred_start;
1747
1748         return 0;
1749 }
1750
1751 /*
1752  * Hardware requires specific alignment for Rx packet buffers. At
1753  * least one of the following two conditions must be satisfied.
1754  *  1. Address is 512B aligned
1755  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1756  *
1757  * As such, the driver may need to adjust the DMA address within the
1758  * buffer by up to 512B.
1759  *
1760  * return 1 if the element size is valid, otherwise return 0.
1761  */
1762 static int
1763 mempool_element_size_valid(struct rte_mempool *mp)
1764 {
1765         uint32_t min_size;
1766
1767         /* elt_size includes mbuf header and headroom */
1768         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1769                         RTE_PKTMBUF_HEADROOM;
1770
1771         /* account for up to 512B of alignment */
1772         min_size -= FM10K_RX_DATABUF_ALIGN;
1773
1774         /* sanity check for overflow */
1775         if (min_size > mp->elt_size)
1776                 return 0;
1777
1778         /* size is valid */
1779         return 1;
1780 }
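
/*
 * Illustrative application-side sketch (hypothetical helper and sizes, not
 * part of this driver): size the mbuf data room with FM10K_RX_DATABUF_ALIGN
 * (512B) of slack on top of the headroom and a 1518B frame, so the
 * alignment rule described above can always be met by shifting the DMA
 * address within the buffer.
 */
static struct rte_mempool *
example_create_rx_pool(int socket_id)
{
        uint16_t data_room = RTE_PKTMBUF_HEADROOM +
                        FM10K_RX_DATABUF_ALIGN + 1518;

        return rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
                                       data_room, socket_id);
}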
1781
1782 static int
1783 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1784         uint16_t nb_desc, unsigned int socket_id,
1785         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1786 {
1787         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1788         struct fm10k_dev_info *dev_info =
1789                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1790         struct fm10k_rx_queue *q;
1791         const struct rte_memzone *mz;
1792
1793         PMD_INIT_FUNC_TRACE();
1794
1795         /* make sure the mempool element size can account for alignment. */
1796         if (!mempool_element_size_valid(mp)) {
                PMD_INIT_LOG(ERR, "Mempool element size is too small");
1798                 return -EINVAL;
1799         }
1800
1801         /* make sure a valid number of descriptors have been requested */
1802         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1803                                 FM10K_MULT_RX_DESC, nb_desc)) {
1804                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1805                         "less than or equal to %"PRIu32", "
1806                         "greater than or equal to %u, "
1807                         "and a multiple of %u",
1808                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1809                         FM10K_MULT_RX_DESC);
1810                 return -EINVAL;
1811         }
1812
1813         /*
1814          * if this queue existed already, free the associated memory. The
1815          * queue cannot be reused in case we need to allocate memory on
1816          * different socket than was previously used.
1817          */
1818         if (dev->data->rx_queues[queue_id] != NULL) {
1819                 rx_queue_free(dev->data->rx_queues[queue_id]);
1820                 dev->data->rx_queues[queue_id] = NULL;
1821         }
1822
1823         /* allocate memory for the queue structure */
1824         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1825                                 socket_id);
1826         if (q == NULL) {
1827                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1828                 return -ENOMEM;
1829         }
1830
1831         /* setup queue */
1832         q->mp = mp;
1833         q->nb_desc = nb_desc;
1834         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1835         q->port_id = dev->data->port_id;
1836         q->queue_id = queue_id;
1837         q->tail_ptr = (volatile uint32_t *)
1838                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
        if (handle_rxconf(q, conf)) {
                /* avoid leaking the queue structure on invalid thresholds */
                rte_free(q);
                return -EINVAL;
        }
1841
1842         /* allocate memory for the software ring */
1843         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1844                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1845                         RTE_CACHE_LINE_SIZE, socket_id);
1846         if (q->sw_ring == NULL) {
1847                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1848                 rte_free(q);
1849                 return -ENOMEM;
1850         }
1851
1852         /*
1853          * allocate memory for the hardware descriptor ring. A memzone large
1854          * enough to hold the maximum ring size is requested to allow for
1855          * resizing in later calls to the queue setup function.
1856          */
1857         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1858                                       FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1859                                       socket_id);
1860         if (mz == NULL) {
1861                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1862                 rte_free(q->sw_ring);
1863                 rte_free(q);
1864                 return -ENOMEM;
1865         }
1866         q->hw_ring = mz->addr;
1867         q->hw_ring_phys_addr = mz->iova;
1868
        /* Check if the number of descriptors satisfies the vector Rx requirement */
1870         if (!rte_is_power_of_2(nb_desc)) {
1871                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1872                                     "preconditions - canceling the feature for "
1873                                     "the whole port[%d]",
1874                              q->queue_id, q->port_id);
1875                 dev_info->rx_vec_allowed = false;
1876         } else
1877                 fm10k_rxq_vec_setup(q);
1878
1879         dev->data->rx_queues[queue_id] = q;
1880         return 0;
1881 }
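
/*
 * Illustrative application-side sketch (hypothetical helper and values, not
 * part of this driver): this call reaches fm10k_rx_queue_setup() above.
 * 512 descriptors keep nb_desc a power of two so vector Rx stays enabled,
 * and an rx_free_thresh of 32 (the driver default) divides the ring size.
 */
static int
example_setup_rxq(uint16_t port_id, struct rte_mempool *mp)
{
        struct rte_eth_rxconf rxconf = {
                .rx_free_thresh = 32,
                .rx_drop_en = 0,
        };

        return rte_eth_rx_queue_setup(port_id, 0, 512,
                        rte_eth_dev_socket_id(port_id), &rxconf, mp);
}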
1882
1883 static void
1884 fm10k_rx_queue_release(void *queue)
1885 {
1886         PMD_INIT_FUNC_TRACE();
1887
1888         rx_queue_free(queue);
1889 }
1890
1891 static inline int
1892 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1893 {
1894         uint16_t tx_free_thresh;
1895         uint16_t tx_rs_thresh;
1896
        /* the constraint macros require that tx_free_thresh is configured
         * before tx_rs_thresh */
1899         if (conf->tx_free_thresh == 0)
1900                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1901         else
1902                 tx_free_thresh = conf->tx_free_thresh;
1903
1904         /* make sure the requested threshold satisfies the constraints */
1905         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1906                         FM10K_TX_FREE_THRESH_MAX(q),
1907                         FM10K_TX_FREE_THRESH_DIV(q),
1908                         tx_free_thresh)) {
1909                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1910                         "less than or equal to %u, "
1911                         "greater than or equal to %u, "
1912                         "and a divisor of %u",
1913                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1914                         FM10K_TX_FREE_THRESH_MIN(q),
1915                         FM10K_TX_FREE_THRESH_DIV(q));
1916                 return -EINVAL;
1917         }
1918
1919         q->free_thresh = tx_free_thresh;
1920
1921         if (conf->tx_rs_thresh == 0)
1922                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1923         else
1924                 tx_rs_thresh = conf->tx_rs_thresh;
1925
1926         q->tx_deferred_start = conf->tx_deferred_start;
1927
1928         /* make sure the requested threshold satisfies the constraints */
1929         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1930                         FM10K_TX_RS_THRESH_MAX(q),
1931                         FM10K_TX_RS_THRESH_DIV(q),
1932                         tx_rs_thresh)) {
1933                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1934                         "less than or equal to %u, "
1935                         "greater than or equal to %u, "
1936                         "and a divisor of %u",
1937                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1938                         FM10K_TX_RS_THRESH_MIN(q),
1939                         FM10K_TX_RS_THRESH_DIV(q));
1940                 return -EINVAL;
1941         }
1942
1943         q->rs_thresh = tx_rs_thresh;
1944
1945         return 0;
1946 }
1947
1948 static int
1949 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1950         uint16_t nb_desc, unsigned int socket_id,
1951         const struct rte_eth_txconf *conf)
1952 {
1953         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1954         struct fm10k_tx_queue *q;
1955         const struct rte_memzone *mz;
1956
1957         PMD_INIT_FUNC_TRACE();
1958
1959         /* make sure a valid number of descriptors have been requested */
1960         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1961                                 FM10K_MULT_TX_DESC, nb_desc)) {
1962                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1963                         "less than or equal to %"PRIu32", "
1964                         "greater than or equal to %u, "
1965                         "and a multiple of %u",
1966                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1967                         FM10K_MULT_TX_DESC);
1968                 return -EINVAL;
1969         }
1970
1971         /*
1972          * if this queue existed already, free the associated memory. The
1973          * queue cannot be reused in case we need to allocate memory on
1974          * different socket than was previously used.
1975          */
1976         if (dev->data->tx_queues[queue_id] != NULL) {
1977                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1978
1979                 tx_queue_free(txq);
1980                 dev->data->tx_queues[queue_id] = NULL;
1981         }
1982
1983         /* allocate memory for the queue structure */
1984         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1985                                 socket_id);
1986         if (q == NULL) {
1987                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1988                 return -ENOMEM;
1989         }
1990
1991         /* setup queue */
1992         q->nb_desc = nb_desc;
1993         q->port_id = dev->data->port_id;
1994         q->queue_id = queue_id;
1995         q->txq_flags = conf->txq_flags;
1996         q->ops = &def_txq_ops;
1997         q->tail_ptr = (volatile uint32_t *)
1998                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
        if (handle_txconf(q, conf)) {
                /* avoid leaking the queue structure on invalid thresholds */
                rte_free(q);
                return -EINVAL;
        }
2001
2002         /* allocate memory for the software ring */
2003         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2004                                         nb_desc * sizeof(struct rte_mbuf *),
2005                                         RTE_CACHE_LINE_SIZE, socket_id);
2006         if (q->sw_ring == NULL) {
2007                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2008                 rte_free(q);
2009                 return -ENOMEM;
2010         }
2011
2012         /*
2013          * allocate memory for the hardware descriptor ring. A memzone large
2014          * enough to hold the maximum ring size is requested to allow for
2015          * resizing in later calls to the queue setup function.
2016          */
2017         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2018                                       FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2019                                       socket_id);
2020         if (mz == NULL) {
2021                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2022                 rte_free(q->sw_ring);
2023                 rte_free(q);
2024                 return -ENOMEM;
2025         }
2026         q->hw_ring = mz->addr;
2027         q->hw_ring_phys_addr = mz->iova;
2028
2029         /*
         * allocate memory for the RS bit tracker. Enough slots are required
         * to hold the descriptor index for each RS bit that needs to be set.
2032          */
2033         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2034                                 ((nb_desc + 1) / q->rs_thresh) *
2035                                 sizeof(uint16_t),
2036                                 RTE_CACHE_LINE_SIZE, socket_id);
2037         if (q->rs_tracker.list == NULL) {
2038                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2039                 rte_free(q->sw_ring);
2040                 rte_free(q);
2041                 return -ENOMEM;
2042         }
2043
2044         dev->data->tx_queues[queue_id] = q;
2045         return 0;
2046 }
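
/*
 * Illustrative application-side sketch (hypothetical helper, not part of
 * this driver): passing a NULL txconf makes the ethdev layer apply the
 * default_txconf reported by fm10k_dev_infos_get(), whose thresholds
 * already satisfy handle_txconf() above.
 */
static int
example_setup_txq(uint16_t port_id)
{
        return rte_eth_tx_queue_setup(port_id, 0, 512,
                        rte_eth_dev_socket_id(port_id), NULL);
}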
2047
2048 static void
2049 fm10k_tx_queue_release(void *queue)
2050 {
2051         struct fm10k_tx_queue *q = queue;
2052         PMD_INIT_FUNC_TRACE();
2053
2054         tx_queue_free(q);
2055 }
2056
2057 static int
2058 fm10k_reta_update(struct rte_eth_dev *dev,
2059                         struct rte_eth_rss_reta_entry64 *reta_conf,
2060                         uint16_t reta_size)
2061 {
2062         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2063         uint16_t i, j, idx, shift;
2064         uint8_t mask;
2065         uint32_t reta;
2066
2067         PMD_INIT_FUNC_TRACE();
2068
2069         if (reta_size > FM10K_MAX_RSS_INDICES) {
                PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
                        "(%d) doesn't match the number the hardware can support "
                        "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2073                 return -EINVAL;
2074         }
2075
2076         /*
2077          * Update Redirection Table RETA[n], n=0..31. The redirection table has
         * 128 entries in 32 registers
2079          */
2080         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2081                 idx = i / RTE_RETA_GROUP_SIZE;
2082                 shift = i % RTE_RETA_GROUP_SIZE;
2083                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2084                                 BIT_MASK_PER_UINT32);
2085                 if (mask == 0)
2086                         continue;
2087
2088                 reta = 0;
2089                 if (mask != BIT_MASK_PER_UINT32)
2090                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2091
2092                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2093                         if (mask & (0x1 << j)) {
                                if (mask != BIT_MASK_PER_UINT32)
2095                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
2096                                 reta |= reta_conf[idx].reta[shift + j] <<
2097                                                 (CHAR_BIT * j);
2098                         }
2099                 }
2100                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2101         }
2102
2103         return 0;
2104 }
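
/*
 * Illustrative application-side sketch (hypothetical helper, not part of
 * this driver): spread all FM10K_MAX_RSS_INDICES (128) redirection entries
 * round-robin over nb_rxq queues; fm10k_reta_update() above only rewrites
 * the register bytes selected by each group's mask.
 */
static int
example_spread_reta(uint16_t port_id, uint16_t nb_rxq)
{
        struct rte_eth_rss_reta_entry64 reta_conf[2];
        uint16_t i;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < 128; i++) {
                reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
                        1ULL << (i % RTE_RETA_GROUP_SIZE);
                reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
                        i % nb_rxq;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
}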
2105
2106 static int
2107 fm10k_reta_query(struct rte_eth_dev *dev,
2108                         struct rte_eth_rss_reta_entry64 *reta_conf,
2109                         uint16_t reta_size)
2110 {
2111         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2112         uint16_t i, j, idx, shift;
2113         uint8_t mask;
2114         uint32_t reta;
2115
2116         PMD_INIT_FUNC_TRACE();
2117
2118         if (reta_size < FM10K_MAX_RSS_INDICES) {
                PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
                        "(%d) doesn't match the number the hardware can support "
                        "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2122                 return -EINVAL;
2123         }
2124
2125         /*
2126          * Read Redirection Table RETA[n], n=0..31. The redirection table has
         * 128 entries in 32 registers
2128          */
2129         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2130                 idx = i / RTE_RETA_GROUP_SIZE;
2131                 shift = i % RTE_RETA_GROUP_SIZE;
2132                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2133                                 BIT_MASK_PER_UINT32);
2134                 if (mask == 0)
2135                         continue;
2136
2137                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2138                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2139                         if (mask & (0x1 << j))
2140                                 reta_conf[idx].reta[shift + j] = ((reta >>
2141                                         CHAR_BIT * j) & UINT8_MAX);
2142                 }
2143         }
2144
2145         return 0;
2146 }
2147
2148 static int
2149 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2150         struct rte_eth_rss_conf *rss_conf)
2151 {
2152         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2153         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2154         uint32_t mrqc;
2155         uint64_t hf = rss_conf->rss_hf;
2156         int i;
2157
2158         PMD_INIT_FUNC_TRACE();
2159
2160         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2161                                 FM10K_RSSRK_ENTRIES_PER_REG))
2162                 return -EINVAL;
2163
2164         if (hf == 0)
2165                 return -EINVAL;
2166
2167         mrqc = 0;
2168         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2169         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2170         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2171         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2172         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2173         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2174         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2175         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2176         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
2177
        /* If the flags don't map to any supported hash function, return */
2179         if (mrqc == 0)
2180                 return -EINVAL;
2181
2182         if (key != NULL)
2183                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2184                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2185
2186         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2187
2188         return 0;
2189 }
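
/*
 * Illustrative application-side sketch (hypothetical helper, not part of
 * this driver): select TCP hashing only, keeping the current RSS key by
 * passing a NULL rss_key; fm10k_rss_hash_update() above folds these flags
 * into MRQC.
 */
static int
example_enable_tcp_rss(uint16_t port_id)
{
        struct rte_eth_rss_conf conf = {
                .rss_key = NULL,
                .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &conf);
}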
2190
2191 static int
2192 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2193         struct rte_eth_rss_conf *rss_conf)
2194 {
2195         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2196         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2197         uint32_t mrqc;
2198         uint64_t hf;
2199         int i;
2200
2201         PMD_INIT_FUNC_TRACE();
2202
2203         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2204                                 FM10K_RSSRK_ENTRIES_PER_REG))
2205                 return -EINVAL;
2206
2207         if (key != NULL)
2208                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2209                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2210
2211         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2212         hf = 0;
2213         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2214         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2215         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2216         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2217         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2218         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2219         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2220         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2221         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2222
2223         rss_conf->rss_hf = hf;
2224
2225         return 0;
2226 }
2227
2228 static void
2229 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2230 {
2231         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2232         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2233
        /* Bind all local non-queue interrupts to vector 0 */
2235         int_map |= FM10K_MISC_VEC_ID;
2236
2237         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2238         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2239         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2240         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2241         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2242         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2243
2244         /* Enable misc causes */
2245         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2246                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2247                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2248                                 FM10K_EIMR_ENABLE(MAILBOX) |
2249                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2250                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2251                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2252                                 FM10K_EIMR_ENABLE(VFLR));
2253
2254         /* Enable ITR 0 */
2255         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2256                                         FM10K_ITR_MASK_CLEAR);
2257         FM10K_WRITE_FLUSH(hw);
2258 }
2259
2260 static void
2261 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2262 {
2263         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2264         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2265
2266         int_map |= FM10K_MISC_VEC_ID;
2267
2268         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2269         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2270         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2271         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2272         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2273         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2274
2275         /* Disable misc causes */
2276         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2277                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2278                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2279                                 FM10K_EIMR_DISABLE(MAILBOX) |
2280                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2281                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2282                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2283                                 FM10K_EIMR_DISABLE(VFLR));
2284
2285         /* Disable ITR 0 */
2286         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2287         FM10K_WRITE_FLUSH(hw);
2288 }
2289
2290 static void
2291 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2292 {
2293         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2294         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2295
        /* Bind all local non-queue interrupts to vector 0 */
2297         int_map |= FM10K_MISC_VEC_ID;
2298
        /* Only INT 0 is available; the other 15 are reserved. */
2300         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2301
2302         /* Enable ITR 0 */
2303         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2304                                         FM10K_ITR_MASK_CLEAR);
2305         FM10K_WRITE_FLUSH(hw);
2306 }
2307
2308 static void
2309 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2310 {
2311         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2312         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2313
2314         int_map |= FM10K_MISC_VEC_ID;
2315
        /* Only INT 0 is available; the other 15 are reserved. */
2317         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2318
2319         /* Disable ITR 0 */
2320         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2321         FM10K_WRITE_FLUSH(hw);
2322 }
2323
2324 static int
2325 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2326 {
2327         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2328         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2329
2330         /* Enable ITR */
2331         if (hw->mac.type == fm10k_mac_pf)
2332                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2333                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2334         else
2335                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2336                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2337         rte_intr_enable(&pdev->intr_handle);
2338         return 0;
2339 }
2340
2341 static int
2342 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2343 {
2344         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2345         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2346
2347         /* Disable ITR */
2348         if (hw->mac.type == fm10k_mac_pf)
2349                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2350                         FM10K_ITR_MASK_SET);
2351         else
2352                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2353                         FM10K_ITR_MASK_SET);
2354         return 0;
2355 }
2356
2357 static int
2358 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2359 {
2360         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2361         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2362         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2363         uint32_t intr_vector, vec;
2364         uint16_t queue_id;
2365         int result = 0;
2366
        /* fm10k needs one separate interrupt for the mailbox,
         * so only drivers that support multiple interrupt vectors
         * (e.g. vfio-pci) can work in fm10k interrupt mode
2370          */
2371         if (!rte_intr_cap_multiple(intr_handle) ||
2372                         dev->data->dev_conf.intr_conf.rxq == 0)
2373                 return result;
2374
2375         intr_vector = dev->data->nb_rx_queues;
2376
2377         /* disable interrupt first */
2378         rte_intr_disable(intr_handle);
2379         if (hw->mac.type == fm10k_mac_pf)
2380                 fm10k_dev_disable_intr_pf(dev);
2381         else
2382                 fm10k_dev_disable_intr_vf(dev);
2383
2384         if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2385                 PMD_INIT_LOG(ERR, "Failed to init event fd");
2386                 result = -EIO;
2387         }
2388
2389         if (rte_intr_dp_is_en(intr_handle) && !result) {
2390                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2391                         dev->data->nb_rx_queues * sizeof(int), 0);
2392                 if (intr_handle->intr_vec) {
2393                         for (queue_id = 0, vec = FM10K_RX_VEC_START;
2394                                         queue_id < dev->data->nb_rx_queues;
2395                                         queue_id++) {
2396                                 intr_handle->intr_vec[queue_id] = vec;
2397                                 if (vec < intr_handle->nb_efd - 1
2398                                                 + FM10K_RX_VEC_START)
2399                                         vec++;
2400                         }
2401                 } else {
                        PMD_INIT_LOG(ERR, "Failed to allocate intr_vec for"
                                " %d Rx queues", dev->data->nb_rx_queues);
2404                         rte_intr_efd_disable(intr_handle);
2405                         result = -ENOMEM;
2406                 }
2407         }
2408
2409         if (hw->mac.type == fm10k_mac_pf)
2410                 fm10k_dev_enable_intr_pf(dev);
2411         else
2412                 fm10k_dev_enable_intr_vf(dev);
2413         rte_intr_enable(intr_handle);
2414         hw->mac.ops.update_int_moderator(hw);
2415         return result;
2416 }
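
/*
 * Illustrative application-side sketch (hypothetical helper, not part of
 * this driver; assumes the device is bound to vfio-pci so the multi-vector
 * requirement above is met): register the queue with the per-thread epoll
 * instance, unmask its ITR and block until the NIC signals traffic.
 */
static int
example_wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
        struct rte_epoll_event ev;
        int n;

        rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
                                  RTE_INTR_EVENT_ADD, NULL);
        rte_eth_dev_rx_intr_enable(port_id, queue_id);
        n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
        rte_eth_dev_rx_intr_disable(port_id, queue_id);

        return n;
}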
2417
2418 static int
2419 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2420 {
2421         struct fm10k_fault fault;
2422         int err;
2423         const char *estr = "Unknown error";
2424
2425         /* Process PCA fault */
2426         if (eicr & FM10K_EICR_PCA_FAULT) {
2427                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2428                 if (err)
2429                         goto error;
2430                 switch (fault.type) {
2431                 case PCA_NO_FAULT:
2432                         estr = "PCA_NO_FAULT"; break;
2433                 case PCA_UNMAPPED_ADDR:
2434                         estr = "PCA_UNMAPPED_ADDR"; break;
2435                 case PCA_BAD_QACCESS_PF:
2436                         estr = "PCA_BAD_QACCESS_PF"; break;
2437                 case PCA_BAD_QACCESS_VF:
2438                         estr = "PCA_BAD_QACCESS_VF"; break;
2439                 case PCA_MALICIOUS_REQ:
2440                         estr = "PCA_MALICIOUS_REQ"; break;
2441                 case PCA_POISONED_TLP:
2442                         estr = "PCA_POISONED_TLP"; break;
2443                 case PCA_TLP_ABORT:
2444                         estr = "PCA_TLP_ABORT"; break;
2445                 default:
2446                         goto error;
2447                 }
2448                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2449                         estr, fault.func ? "VF" : "PF", fault.func,
2450                         fault.address, fault.specinfo);
2451         }
2452
2453         /* Process THI fault */
2454         if (eicr & FM10K_EICR_THI_FAULT) {
2455                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2456                 if (err)
2457                         goto error;
2458                 switch (fault.type) {
2459                 case THI_NO_FAULT:
2460                         estr = "THI_NO_FAULT"; break;
2461                 case THI_MAL_DIS_Q_FAULT:
2462                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2463                 default:
2464                         goto error;
2465                 }
2466                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2467                         estr, fault.func ? "VF" : "PF", fault.func,
2468                         fault.address, fault.specinfo);
2469         }
2470
2471         /* Process FUM fault */
2472         if (eicr & FM10K_EICR_FUM_FAULT) {
2473                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2474                 if (err)
2475                         goto error;
2476                 switch (fault.type) {
2477                 case FUM_NO_FAULT:
2478                         estr = "FUM_NO_FAULT"; break;
2479                 case FUM_UNMAPPED_ADDR:
2480                         estr = "FUM_UNMAPPED_ADDR"; break;
2481                 case FUM_POISONED_TLP:
2482                         estr = "FUM_POISONED_TLP"; break;
2483                 case FUM_BAD_VF_QACCESS:
2484                         estr = "FUM_BAD_VF_QACCESS"; break;
2485                 case FUM_ADD_DECODE_ERR:
2486                         estr = "FUM_ADD_DECODE_ERR"; break;
2487                 case FUM_RO_ERROR:
2488                         estr = "FUM_RO_ERROR"; break;
2489                 case FUM_QPRC_CRC_ERROR:
2490                         estr = "FUM_QPRC_CRC_ERROR"; break;
2491                 case FUM_CSR_TIMEOUT:
2492                         estr = "FUM_CSR_TIMEOUT"; break;
2493                 case FUM_INVALID_TYPE:
2494                         estr = "FUM_INVALID_TYPE"; break;
2495                 case FUM_INVALID_LENGTH:
2496                         estr = "FUM_INVALID_LENGTH"; break;
2497                 case FUM_INVALID_BE:
2498                         estr = "FUM_INVALID_BE"; break;
2499                 case FUM_INVALID_ALIGN:
2500                         estr = "FUM_INVALID_ALIGN"; break;
2501                 default:
2502                         goto error;
2503                 }
2504                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2505                         estr, fault.func ? "VF" : "PF", fault.func,
2506                         fault.address, fault.specinfo);
2507         }
2508
2509         return 0;
2510 error:
2511         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2512         return err;
2513 }
2514
2515 /**
2516  * PF interrupt handler triggered by NIC for handling specific interrupt.
2517  *
2518  * @param handle
2519  *  Pointer to interrupt handle.
2520  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
2522  *
2523  * @return
2524  *  void
2525  */
2526 static void
2527 fm10k_dev_interrupt_handler_pf(void *param)
2528 {
2529         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2530         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2531         uint32_t cause, status;
2532         struct fm10k_dev_info *dev_info =
2533                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2534         int status_mbx;
2535         s32 err;
2536
2537         if (hw->mac.type != fm10k_mac_pf)
2538                 return;
2539
2540         cause = FM10K_READ_REG(hw, FM10K_EICR);
2541
2542         /* Handle PCI fault cases */
2543         if (cause & FM10K_EICR_FAULT_MASK) {
                PMD_INIT_LOG(ERR, "INT: fault detected!");
2545                 fm10k_dev_handle_fault(hw, cause);
2546         }
2547
2548         /* Handle switch up/down */
2549         if (cause & FM10K_EICR_SWITCHNOTREADY)
2550                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2551
2552         if (cause & FM10K_EICR_SWITCHREADY) {
2553                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2554                 if (dev_info->sm_down == 1) {
2555                         fm10k_mbx_lock(hw);
2556
2557                         /* For recreating logical ports */
2558                         status_mbx = hw->mac.ops.update_lport_state(hw,
2559                                         hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2560                         if (status_mbx == FM10K_SUCCESS)
2561                                 PMD_INIT_LOG(INFO,
2562                                         "INT: Recreated Logical port");
2563                         else
2564                                 PMD_INIT_LOG(INFO,
2565                                         "INT: Logical ports weren't recreated");
2566
2567                         status_mbx = hw->mac.ops.update_xcast_mode(hw,
2568                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2569                         if (status_mbx != FM10K_SUCCESS)
2570                                 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2571
2572                         fm10k_mbx_unlock(hw);
2573
2574                         /* first clear the internal SW recording structure */
2575                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2576                                                 ETH_MQ_RX_VMDQ_FLAG))
2577                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2578                                         false);
2579
2580                         fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2581                                         MAIN_VSI_POOL_NUMBER);
2582
2583                         /*
                         * Add default MAC address and VLAN for the logical
                         * ports that have been created, leaving it to the
                         * application to fully recover Rx filtering.
2587                          */
2588                         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2589                                         MAIN_VSI_POOL_NUMBER);
2590
2591                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2592                                                 ETH_MQ_RX_VMDQ_FLAG))
2593                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2594                                         true);
2595
2596                         dev_info->sm_down = 0;
2597                         _rte_eth_dev_callback_process(dev,
2598                                         RTE_ETH_EVENT_INTR_LSC,
2599                                         NULL, NULL);
2600                 }
2601         }
2602
2603         /* Handle mailbox message */
2604         fm10k_mbx_lock(hw);
2605         err = hw->mbx.ops.process(hw, &hw->mbx);
2606         fm10k_mbx_unlock(hw);
2607
2608         if (err == FM10K_ERR_RESET_REQUESTED) {
2609                 PMD_INIT_LOG(INFO, "INT: Switch is down");
2610                 dev_info->sm_down = 1;
2611                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2612                                 NULL, NULL);
2613         }
2614
2615         /* Handle SRAM error */
2616         if (cause & FM10K_EICR_SRAMERROR) {
2617                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2618
2619                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2620                 /* Write to clear pending bits */
2621                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2622
                /* TODO: print out error message after shared code updates */
2624         }
2625
        /* Clear these 3 events if any are pending */
2627         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2628                  FM10K_EICR_SWITCHREADY;
2629         if (cause)
2630                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2631
2632         /* Re-enable interrupt from device side */
2633         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2634                                         FM10K_ITR_MASK_CLEAR);
2635         /* Re-enable interrupt from host side */
2636         rte_intr_enable(dev->intr_handle);
2637 }
2638
2639 /**
2640  * VF interrupt handler triggered by NIC for handling specific interrupt.
2641  *
2642  * @param handle
2643  *  Pointer to interrupt handle.
2644  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
2646  *
2647  * @return
2648  *  void
2649  */
2650 static void
2651 fm10k_dev_interrupt_handler_vf(void *param)
2652 {
2653         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2654         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2655         struct fm10k_mbx_info *mbx = &hw->mbx;
2656         struct fm10k_dev_info *dev_info =
2657                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2658         const enum fm10k_mbx_state state = mbx->state;
2659         int status_mbx;
2660
2661         if (hw->mac.type != fm10k_mac_vf)
2662                 return;
2663
        /* Handle mailbox message; the lock is acquired before processing */
2665         fm10k_mbx_lock(hw);
2666         hw->mbx.ops.process(hw, &hw->mbx);
2667         fm10k_mbx_unlock(hw);
2668
2669         if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2670                 PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2671
2672                 fm10k_mbx_lock(hw);
2673                 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2674                                 MAX_LPORT_NUM, 1);
2675                 fm10k_mbx_unlock(hw);
2676
2677                 /* Setting reset flag */
2678                 dev_info->sm_down = 1;
2679                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2680                                 NULL, NULL);
2681         }
2682
2683         if (dev_info->sm_down == 1 &&
2684                         hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2685                 PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2686                 fm10k_mbx_lock(hw);
2687                 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2688                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2689                 if (status_mbx != FM10K_SUCCESS)
2690                         PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2691                 fm10k_mbx_unlock(hw);
2692
2693                 /* first clear the internal SW recording structure */
2694                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2695                 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2696                                 MAIN_VSI_POOL_NUMBER);
2697
2698                 /*
                 * Add default MAC address and VLAN for the logical ports that
                 * have been created, leaving it to the application to fully recover
2701                  * Rx filtering.
2702                  */
2703                 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2704                                 MAIN_VSI_POOL_NUMBER);
2705                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2706
2707                 dev_info->sm_down = 0;
2708                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2709                                 NULL, NULL);
2710         }
2711
2712         /* Re-enable interrupt from device side */
2713         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2714                                         FM10K_ITR_MASK_CLEAR);
2715         /* Re-enable interrupt from host side */
2716         rte_intr_enable(dev->intr_handle);
2717 }
2718
2719 /* Mailbox message handler in VF */
2720 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2721         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2722         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2723         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2724         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2725 };
2726
2727 static int
2728 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2729 {
2730         int err = 0;
2731
2732         /* Initialize mailbox lock */
2733         fm10k_mbx_initlock(hw);
2734
2735         /* Replace default message handler with new ones */
2736         if (hw->mac.type == fm10k_mac_vf)
2737                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2738
2739         if (err) {
                PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
                                err);
2742                 return err;
2743         }
2744         /* Connect to SM for PF device or PF for VF device */
2745         return hw->mbx.ops.connect(hw, &hw->mbx);
2746 }
2747
2748 static void
2749 fm10k_close_mbx_service(struct fm10k_hw *hw)
2750 {
2751         /* Disconnect from SM for PF device or PF for VF device */
2752         hw->mbx.ops.disconnect(hw, &hw->mbx);
2753 }
2754
2755 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2756         .dev_configure          = fm10k_dev_configure,
2757         .dev_start              = fm10k_dev_start,
2758         .dev_stop               = fm10k_dev_stop,
2759         .dev_close              = fm10k_dev_close,
2760         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
2761         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
2762         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
2763         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
2764         .stats_get              = fm10k_stats_get,
2765         .xstats_get             = fm10k_xstats_get,
2766         .xstats_get_names       = fm10k_xstats_get_names,
2767         .stats_reset            = fm10k_stats_reset,
2768         .xstats_reset           = fm10k_stats_reset,
2769         .link_update            = fm10k_link_update,
2770         .dev_infos_get          = fm10k_dev_infos_get,
2771         .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2772         .vlan_filter_set        = fm10k_vlan_filter_set,
2773         .vlan_offload_set       = fm10k_vlan_offload_set,
2774         .mac_addr_add           = fm10k_macaddr_add,
2775         .mac_addr_remove        = fm10k_macaddr_remove,
2776         .rx_queue_start         = fm10k_dev_rx_queue_start,
2777         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
2778         .tx_queue_start         = fm10k_dev_tx_queue_start,
2779         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
2780         .rx_queue_setup         = fm10k_rx_queue_setup,
2781         .rx_queue_release       = fm10k_rx_queue_release,
2782         .tx_queue_setup         = fm10k_tx_queue_setup,
2783         .tx_queue_release       = fm10k_tx_queue_release,
2784         .rx_descriptor_done     = fm10k_dev_rx_descriptor_done,
2785         .rx_queue_intr_enable   = fm10k_dev_rx_queue_intr_enable,
2786         .rx_queue_intr_disable  = fm10k_dev_rx_queue_intr_disable,
2787         .reta_update            = fm10k_reta_update,
2788         .reta_query             = fm10k_reta_query,
2789         .rss_hash_update        = fm10k_rss_hash_update,
2790         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
2791 };
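
/*
 * Note (an assumption about the surrounding framework, not this file):
 * these callbacks are reached through the generic ethdev API, e.g.
 * rte_eth_dev_configure() lands in fm10k_dev_configure() and
 * rte_eth_rx_queue_setup() in fm10k_rx_queue_setup() for ports backed by
 * this driver.
 */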
2792
2793 static int ftag_check_handler(__rte_unused const char *key,
2794                 const char *value, __rte_unused void *opaque)
2795 {
2796         if (strcmp(value, "1"))
2797                 return -1;
2798
2799         return 0;
2800 }
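
/*
 * rte_kvargs_process() invokes the callback above once per occurrence of
 * the key, and a negative return aborts processing; fm10k_check_ftag()
 * below relies on that to treat any value other than "1" as FTAG disabled.
 */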
2801
2802 static int
2803 fm10k_check_ftag(struct rte_devargs *devargs)
2804 {
2805         struct rte_kvargs *kvlist;
2806         const char *ftag_key = "enable_ftag";
2807
2808         if (devargs == NULL)
2809                 return 0;
2810
2811         kvlist = rte_kvargs_parse(devargs->args, NULL);
2812         if (kvlist == NULL)
2813                 return 0;
2814
2815         if (!rte_kvargs_count(kvlist, ftag_key)) {
2816                 rte_kvargs_free(kvlist);
2817                 return 0;
2818         }
2819         /* FTAG is enabled when there's key-value pair: enable_ftag=1 */
2820         if (rte_kvargs_process(kvlist, ftag_key,
2821                                 ftag_check_handler, NULL) < 0) {
2822                 rte_kvargs_free(kvlist);
2823                 return 0;
2824         }
2825         rte_kvargs_free(kvlist);
2826
2827         return 1;
2828 }
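
/*
 * Illustrative usage (the PCI address is a placeholder, not taken from
 * this file): FTAG is requested through a device argument on the EAL
 * command line, e.g. when whitelisting the port for testpmd:
 *
 *     testpmd -w 0000:84:00.0,enable_ftag=1 -- -i
 *
 * Any other value for enable_ftag, or a missing key, leaves FTAG disabled.
 */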
2829
2830 static uint16_t
2831 fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
2832                     uint16_t nb_pkts)
2833 {
2834         uint16_t nb_tx = 0;
2835         struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
2836
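
	/* fm10k_xmit_fixed_burst_vec() transmits at most rs_thresh packets
	 * per call, so feed it rs_thresh-sized chunks; a partially-sent
	 * chunk (ret < num) means the ring is full, so stop early.
	 */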
2837         while (nb_pkts) {
2838                 uint16_t ret, num;
2839
2840                 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
2841                 ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
2842                                                  num);
2843                 nb_tx += ret;
2844                 nb_pkts -= ret;
2845                 if (ret < num)
2846                         break;
2847         }
2848
2849         return nb_tx;
2850 }
2851
2852 static void __attribute__((cold))
2853 fm10k_set_tx_function(struct rte_eth_dev *dev)
2854 {
2855         struct fm10k_tx_queue *txq;
2856         int i;
2857         int use_sse = 1;
2858         uint16_t tx_ftag_en = 0;
2859
2860         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2861                 /* primary process has set the ftag flag and txq_flags */
2862                 txq = dev->data->tx_queues[0];
2863                 if (fm10k_tx_vec_condition_check(txq)) {
2864                         dev->tx_pkt_burst = fm10k_xmit_pkts;
2865                         dev->tx_pkt_prepare = fm10k_prep_pkts;
2866                         PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2867                 } else {
2868                         PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2869                         dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2870                         dev->tx_pkt_prepare = NULL;
2871                 }
2872                 return;
2873         }
2874
2875         if (fm10k_check_ftag(dev->device->devargs))
2876                 tx_ftag_en = 1;
2877
2878         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2879                 txq = dev->data->tx_queues[i];
2880                 txq->tx_ftag_en = tx_ftag_en;
2881                 /* Check if Vector Tx is satisfied */
2882                 if (fm10k_tx_vec_condition_check(txq))
2883                         use_sse = 0;
2884         }
2885
2886         if (use_sse) {
2887                 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2888                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2889                         txq = dev->data->tx_queues[i];
2890                         fm10k_txq_vec_setup(txq);
2891                 }
2892                 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2893                 dev->tx_pkt_prepare = NULL;
2894         } else {
2895                 dev->tx_pkt_burst = fm10k_xmit_pkts;
2896                 dev->tx_pkt_prepare = fm10k_prep_pkts;
2897                 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2898         }
2899 }
2900
2901 static void __attribute__((cold))
2902 fm10k_set_rx_function(struct rte_eth_dev *dev)
2903 {
2904         struct fm10k_dev_info *dev_info =
2905                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2906         uint16_t i, rx_using_sse;
2907         uint16_t rx_ftag_en = 0;
2908
2909         if (fm10k_check_ftag(dev->device->devargs))
2910                 rx_ftag_en = 1;
2911
2912         /* In order to allow Vector Rx there are a few configuration
2913          * conditions to be met.
2914          */
2915         if (!fm10k_rx_vec_condition_check(dev) &&
2916                         dev_info->rx_vec_allowed && !rx_ftag_en) {
2917                 if (dev->data->scattered_rx)
2918                         dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2919                 else
2920                         dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2921         } else if (dev->data->scattered_rx)
2922                 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2923         else
2924                 dev->rx_pkt_burst = fm10k_recv_pkts;
2925
2926         rx_using_sse =
2927                 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2928                 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
2929
2930         if (rx_using_sse)
2931                 PMD_INIT_LOG(DEBUG, "Use vector Rx func");
2932         else
2933                 PMD_INIT_LOG(DEBUG, "Use regular Rx func");
2934
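	/* The per-queue flags below are written only by the primary
	 * process; a secondary process shares the queue structures and just
	 * picks the matching burst function above.
	 */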
2935         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2936                 return;
2937
2938         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2939                 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2940
2941                 rxq->rx_using_sse = rx_using_sse;
2942                 rxq->rx_ftag_en = rx_ftag_en;
2943         }
2944 }
2945
2946 static void
2947 fm10k_params_init(struct rte_eth_dev *dev)
2948 {
2949         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2950         struct fm10k_dev_info *info =
2951                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2952
2953         /* Initialize bus info. Normally we would call fm10k_get_bus_info(),
2954          * but there is no way to get link status without reading BAR4. Until this
2955          * works, assume we have maximum bandwidth.
2956          * @todo - fix bus info
2957          */
2958         hw->bus_caps.speed = fm10k_bus_speed_8000;
2959         hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2960         hw->bus_caps.payload = fm10k_bus_payload_512;
2961         hw->bus.speed = fm10k_bus_speed_8000;
2962         hw->bus.width = fm10k_bus_width_pcie_x8;
2963         hw->bus.payload = fm10k_bus_payload_256;
2964
2965         info->rx_vec_allowed = true;
2966 }
2967
2968 static int
2969 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2970 {
2971         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2972         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2973         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2974         int diag, i;
2975         struct fm10k_macvlan_filter_info *macvlan;
2976
2977         PMD_INIT_FUNC_TRACE();
2978
2979         dev->dev_ops = &fm10k_eth_dev_ops;
2980         dev->rx_pkt_burst = &fm10k_recv_pkts;
2981         dev->tx_pkt_burst = &fm10k_xmit_pkts;
2982         dev->tx_pkt_prepare = &fm10k_prep_pkts;
2983
2984         /*
2985          * The primary process does the whole initialization; secondary
2986          * processes just select the same Rx and Tx functions as the primary.
2987          */
2988         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2989                 fm10k_set_rx_function(dev);
2990                 fm10k_set_tx_function(dev);
2991                 return 0;
2992         }
2993
2994         rte_eth_copy_pci_info(dev, pdev);
2995
2996         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2997         memset(macvlan, 0, sizeof(*macvlan));
2998         /* Vendor and Device ID need to be set before init of shared code */
2999         memset(hw, 0, sizeof(*hw));
3000         hw->device_id = pdev->id.device_id;
3001         hw->vendor_id = pdev->id.vendor_id;
3002         hw->subsystem_device_id = pdev->id.subsystem_device_id;
3003         hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
3004         hw->revision_id = 0;
3005         hw->hw_addr = (void *)pdev->mem_resource[0].addr;
3006         if (hw->hw_addr == NULL) {
3007                 PMD_INIT_LOG(ERR, "Bad memory resource."
3008                         " Try blacklisting unused devices.");
3009                 return -EIO;
3010         }
3011
3012         /* Store fm10k_adapter pointer */
3013         hw->back = dev->data->dev_private;
3014
3015         /* Initialize the shared code */
3016         diag = fm10k_init_shared_code(hw);
3017         if (diag != FM10K_SUCCESS) {
3018                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
3019                 return -EIO;
3020         }
3021
3022         /* Initialize parameters */
3023         fm10k_params_init(dev);
3024
3025         /* Initialize the hw */
3026         diag = fm10k_init_hw(hw);
3027         if (diag != FM10K_SUCCESS) {
3028                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
3029                 return -EIO;
3030         }
3031
3032         /* Initialize MAC address(es) */
3033         dev->data->mac_addrs = rte_zmalloc("fm10k",
3034                         ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
3035         if (dev->data->mac_addrs == NULL) {
3036                 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
3037                 return -ENOMEM;
3038         }
3039
3040         diag = fm10k_read_mac_addr(hw);
3041
3042         ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3043                         &dev->data->mac_addrs[0]);
3044
3045         if (diag != FM10K_SUCCESS ||
3046                 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
3047
3048                 /* Generate a random addr */
3049                 eth_random_addr(hw->mac.addr);
3050                 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
3051                 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
3052                                 &dev->data->mac_addrs[0]);
3053         }
3054
3055         /* Reset the hw statistics */
3056         fm10k_stats_reset(dev);
3057
3058         /* Reset the hw */
3059         diag = fm10k_reset_hw(hw);
3060         if (diag != FM10K_SUCCESS) {
3061                 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
3062                 return -EIO;
3063         }
3064
3065         /* Setup mailbox service */
3066         diag = fm10k_setup_mbx_service(hw);
3067         if (diag != FM10K_SUCCESS) {
3068                 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
3069                 return -EIO;
3070         }
3071
3072         /* PF and VF have different interrupt handling mechanisms */
3073         if (hw->mac.type == fm10k_mac_pf) {
3074                 /* register callback func to eal lib */
3075                 rte_intr_callback_register(intr_handle,
3076                         fm10k_dev_interrupt_handler_pf, (void *)dev);
3077
3078                 /* enable MISC interrupt */
3079                 fm10k_dev_enable_intr_pf(dev);
3080         } else { /* VF */
3081                 rte_intr_callback_register(intr_handle,
3082                         fm10k_dev_interrupt_handler_vf, (void *)dev);
3083
3084                 fm10k_dev_enable_intr_vf(dev);
3085         }
3086
3087         /* Enable intr after callback registered */
3088         rte_intr_enable(intr_handle);
3089
3090         hw->mac.ops.update_int_moderator(hw);
3091
3092         /* Make sure Switch Manager is ready before going forward. */
3093         if (hw->mac.type == fm10k_mac_pf) {
3094                 int switch_ready = 0;
3095
3096                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3097                         fm10k_mbx_lock(hw);
3098                         hw->mac.ops.get_host_state(hw, &switch_ready);
3099                         fm10k_mbx_unlock(hw);
3100                         if (switch_ready)
3101                                 break;
3102                         /* Delay some time to acquire async LPORT_MAP info. */
3103                         rte_delay_us(WAIT_SWITCH_MSG_US);
3104                 }
3105
3106                 if (switch_ready == 0) {
3107                         PMD_INIT_LOG(ERR, "switch is not ready");
3108                         return -1;
3109                 }
3110         }
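
	/* Worst case, the polling loop above (like the default-VID loop
	 * further below) waits MAX_QUERY_SWITCH_STATE_TIMES *
	 * WAIT_SWITCH_MSG_US, i.e. 10 * 100 ms = roughly one second, before
	 * giving up.
	 */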
3111
3112         /*
3113          * The calls below operate on the mailbox, so take the lock to
3114          * avoid racing with the interrupt handler. Writes to the mailbox
3115          * FIFO raise an interrupt to the PF/SM; the handler there
3116          * processes the message and interrupts our side in reply, which
3117          * touches the mailbox FIFO again.
3118          */
3119         fm10k_mbx_lock(hw);
3120         /* Enable port first */
3121         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
3122                                         MAX_LPORT_NUM, 1);
3123
3124         /* Default to unicast-only (xcast mode "none"); the application
3125          * can switch to another mode through the relevant API call.
3126          */
3127         hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
3128                                         FM10K_XCAST_MODE_NONE);
3129
3130         fm10k_mbx_unlock(hw);
3131
3132         /* Make sure default VID is ready before going forward. */
3133         if (hw->mac.type == fm10k_mac_pf) {
3134                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3135                         if (hw->mac.default_vid)
3136                                 break;
3137                         /* Delay some time to acquire async port VLAN info. */
3138                         rte_delay_us(WAIT_SWITCH_MSG_US);
3139                 }
3140
3141                 if (!hw->mac.default_vid) {
3142                         PMD_INIT_LOG(ERR, "default VID is not ready");
3143                         return -1;
3144                 }
3145         }
3146
3147         /* Add default mac address */
3148         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
3149                 MAIN_VSI_POOL_NUMBER);
3150
3151         return 0;
3152 }
3153
3154 static int
3155 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
3156 {
3157         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3158         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3159         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3160         PMD_INIT_FUNC_TRACE();
3161
3162         /* only uninitialize in the primary process */
3163         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3164                 return 0;
3165
3166         /* safe to close dev here */
3167         fm10k_dev_close(dev);
3168
3169         dev->dev_ops = NULL;
3170         dev->rx_pkt_burst = NULL;
3171         dev->tx_pkt_burst = NULL;
3172
3173         /* disable uio/vfio intr */
3174         rte_intr_disable(intr_handle);
3175
3176         /* PF and VF have different interrupt handling mechanisms */
3177         if (hw->mac.type == fm10k_mac_pf) {
3178                 /* disable interrupt */
3179                 fm10k_dev_disable_intr_pf(dev);
3180
3181                 /* unregister callback func to eal lib */
3182                 rte_intr_callback_unregister(intr_handle,
3183                         fm10k_dev_interrupt_handler_pf, (void *)dev);
3184         } else {
3185                 /* disable interrupt */
3186                 fm10k_dev_disable_intr_vf(dev);
3187
3188                 rte_intr_callback_unregister(intr_handle,
3189                         fm10k_dev_interrupt_handler_vf, (void *)dev);
3190         }
3191
3192         /* free mac memory */
3193         if (dev->data->mac_addrs) {
3194                 rte_free(dev->data->mac_addrs);
3195                 dev->data->mac_addrs = NULL;
3196         }
3197
3198         memset(hw, 0, sizeof(*hw));
3199
3200         return 0;
3201 }
3202
3203 static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3204         struct rte_pci_device *pci_dev)
3205 {
3206         return rte_eth_dev_pci_generic_probe(pci_dev,
3207                 sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
3208 }
3209
3210 static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
3211 {
3212         return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
3213 }
3214
3215 /*
3216  * The set of PCI devices this driver supports. This driver enables both
3217  * PF and SR-IOV VF devices.
3218  */
3219 static const struct rte_pci_id pci_id_fm10k_map[] = {
3220         { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
3221         { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
3222         { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
3223         { .vendor_id = 0, /* sentinel */ },
3224 };
3225
3226 static struct rte_pci_driver rte_pmd_fm10k = {
3227         .id_table = pci_id_fm10k_map,
3228         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3229                      RTE_PCI_DRV_IOVA_AS_VA,
3230         .probe = eth_fm10k_pci_probe,
3231         .remove = eth_fm10k_pci_remove,
3232 };
3233
3234 RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
3235 RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
3236 RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
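
/*
 * Illustrative note (an assumption, not part of the original file): the
 * kernel-module dependency above means the device must be bound to
 * igb_uio, uio_pci_generic or vfio-pci before the PMD can claim it, e.g.:
 *
 *     usertools/dpdk-devbind.py --bind=vfio-pci 0000:84:00.0
 *
 * where the PCI address is a placeholder.
 */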