0c13fd5c37f9e7d75d1c9f213bddc74f65369430
[dpdk.git] / drivers / net / fm10k / fm10k_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2013-2016 Intel Corporation
3  */
4
5 #include <rte_ethdev_driver.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_malloc.h>
8 #include <rte_memzone.h>
9 #include <rte_string_fns.h>
10 #include <rte_dev.h>
11 #include <rte_spinlock.h>
12 #include <rte_kvargs.h>
13
14 #include "fm10k.h"
15 #include "base/fm10k_api.h"
16
17 /* Default delay to acquire mailbox lock */
18 #define FM10K_MBXLOCK_DELAY_US 20
19 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
20
21 #define MAIN_VSI_POOL_NUMBER 0
22
23 /* Max try times to acquire switch status */
24 #define MAX_QUERY_SWITCH_STATE_TIMES 10
25 /* Wait interval to get switch status */
26 #define WAIT_SWITCH_MSG_US    100000
27 /* A period of quiescence for switch */
28 #define FM10K_SWITCH_QUIESCE_US 100000
29 /* Number of chars per uint32 type */
30 #define CHARS_PER_UINT32 (sizeof(uint32_t))
31 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
32
33 /* default 1:1 map from queue ID to interrupt vector ID */
34 #define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
35
36 /* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
37 #define MAX_LPORT_NUM    128
38 #define GLORT_FD_Q_BASE  0x40
39 #define GLORT_PF_MASK    0xFFC0
40 #define GLORT_FD_MASK    GLORT_PF_MASK
41 #define GLORT_FD_INDEX   GLORT_FD_Q_BASE
42
43 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
44 static int fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
45 static int fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
46 static int fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
47 static int fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
48 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
49 static int
50 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
51 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
52         const u8 *mac, bool add, uint32_t pool);
53 static void fm10k_tx_queue_release(void *queue);
54 static void fm10k_rx_queue_release(void *queue);
55 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
56 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
57 static int fm10k_check_ftag(struct rte_devargs *devargs);
58 static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
59
60 static int fm10k_dev_infos_get(struct rte_eth_dev *dev,
61                                struct rte_eth_dev_info *dev_info);
62 static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
63 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
64 static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
65 static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
66
67 struct fm10k_xstats_name_off {
68         char name[RTE_ETH_XSTATS_NAME_SIZE];
69         unsigned offset;
70 };
71
72 static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
73         {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
74         {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
75         {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
76         {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
77         {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
78         {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
79         {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
80         {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
81                 nodesc_drop)},
82 };
83
84 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
85                 sizeof(fm10k_hw_stats_strings[0]))
86
87 static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
88         {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
89         {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
90         {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
91 };
92
93 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
94                 sizeof(fm10k_hw_stats_rx_q_strings[0]))
95
96 static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
97         {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
98         {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
99 };
100
101 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
102                 sizeof(fm10k_hw_stats_tx_q_strings[0]))
103
104 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
105                 (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
106 static int
107 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
108
109 static void
110 fm10k_mbx_initlock(struct fm10k_hw *hw)
111 {
112         rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
113 }
114
115 static void
116 fm10k_mbx_lock(struct fm10k_hw *hw)
117 {
118         while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
119                 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
120 }
121
122 static void
123 fm10k_mbx_unlock(struct fm10k_hw *hw)
124 {
125         rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
126 }
127
128 /* Stubs needed for linkage when vPMD is disabled */
129 __rte_weak int
130 fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
131 {
132         return -1;
133 }
134
135 __rte_weak uint16_t
136 fm10k_recv_pkts_vec(
137         __rte_unused void *rx_queue,
138         __rte_unused struct rte_mbuf **rx_pkts,
139         __rte_unused uint16_t nb_pkts)
140 {
141         return 0;
142 }
143
144 __rte_weak uint16_t
145 fm10k_recv_scattered_pkts_vec(
146                 __rte_unused void *rx_queue,
147                 __rte_unused struct rte_mbuf **rx_pkts,
148                 __rte_unused uint16_t nb_pkts)
149 {
150         return 0;
151 }
152
153 __rte_weak int
154 fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
155
156 {
157         return -1;
158 }
159
160 __rte_weak void
161 fm10k_rx_queue_release_mbufs_vec(
162                 __rte_unused struct fm10k_rx_queue *rxq)
163 {
164         return;
165 }
166
167 __rte_weak void
168 fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
169 {
170         return;
171 }
172
173 __rte_weak int
174 fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
175 {
176         return -1;
177 }
178
179 __rte_weak uint16_t
180 fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
181                            __rte_unused struct rte_mbuf **tx_pkts,
182                            __rte_unused uint16_t nb_pkts)
183 {
184         return 0;
185 }
186
187 /*
188  * reset queue to initial state, allocate software buffers used when starting
189  * device.
190  * return 0 on success
191  * return -ENOMEM if buffers cannot be allocated
192  * return -EINVAL if buffers do not satisfy alignment condition
193  */
194 static inline int
195 rx_queue_reset(struct fm10k_rx_queue *q)
196 {
197         static const union fm10k_rx_desc zero = {{0} };
198         uint64_t dma_addr;
199         int i, diag;
200         PMD_INIT_FUNC_TRACE();
201
202         diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
203         if (diag != 0)
204                 return -ENOMEM;
205
206         for (i = 0; i < q->nb_desc; ++i) {
207                 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
208                 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
209                         rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
210                                                 q->nb_desc);
211                         return -EINVAL;
212                 }
213                 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
214                 q->hw_ring[i].q.pkt_addr = dma_addr;
215                 q->hw_ring[i].q.hdr_addr = dma_addr;
216         }
217
218         /* initialize extra software ring entries. Space for these extra
219          * entries is always allocated.
220          */
221         memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
222         for (i = 0; i < q->nb_fake_desc; ++i) {
223                 q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
224                 q->hw_ring[q->nb_desc + i] = zero;
225         }
226
227         q->next_dd = 0;
228         q->next_alloc = 0;
229         q->next_trigger = q->alloc_thresh - 1;
230         FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
231         q->rxrearm_start = 0;
232         q->rxrearm_nb = 0;
233
234         return 0;
235 }
236
237 /*
238  * clean queue, descriptor rings, free software buffers used when stopping
239  * device.
240  */
241 static inline void
242 rx_queue_clean(struct fm10k_rx_queue *q)
243 {
244         union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
245         uint32_t i;
246         PMD_INIT_FUNC_TRACE();
247
248         /* zero descriptor rings */
249         for (i = 0; i < q->nb_desc; ++i)
250                 q->hw_ring[i] = zero;
251
252         /* zero faked descriptors */
253         for (i = 0; i < q->nb_fake_desc; ++i)
254                 q->hw_ring[q->nb_desc + i] = zero;
255
256         /* vPMD driver has a different way of releasing mbufs. */
257         if (q->rx_using_sse) {
258                 fm10k_rx_queue_release_mbufs_vec(q);
259                 return;
260         }
261
262         /* free software buffers */
263         for (i = 0; i < q->nb_desc; ++i) {
264                 if (q->sw_ring[i]) {
265                         rte_pktmbuf_free_seg(q->sw_ring[i]);
266                         q->sw_ring[i] = NULL;
267                 }
268         }
269 }
270
271 /*
272  * free all queue memory used when releasing the queue (i.e. configure)
273  */
274 static inline void
275 rx_queue_free(struct fm10k_rx_queue *q)
276 {
277         PMD_INIT_FUNC_TRACE();
278         if (q) {
279                 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
280                 rx_queue_clean(q);
281                 if (q->sw_ring) {
282                         rte_free(q->sw_ring);
283                         q->sw_ring = NULL;
284                 }
285                 rte_free(q);
286                 q = NULL;
287         }
288 }
289
290 /*
291  * disable RX queue, wait unitl HW finished necessary flush operation
292  */
293 static inline int
294 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
295 {
296         uint32_t reg, i;
297
298         reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
299         FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
300                         reg & ~FM10K_RXQCTL_ENABLE);
301
302         /* Wait 100us at most */
303         for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
304                 rte_delay_us(1);
305                 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
306                 if (!(reg & FM10K_RXQCTL_ENABLE))
307                         break;
308         }
309
310         if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
311                 return -1;
312
313         return 0;
314 }
315
316 /*
317  * reset queue to initial state, allocate software buffers used when starting
318  * device
319  */
320 static inline void
321 tx_queue_reset(struct fm10k_tx_queue *q)
322 {
323         PMD_INIT_FUNC_TRACE();
324         q->last_free = 0;
325         q->next_free = 0;
326         q->nb_used = 0;
327         q->nb_free = q->nb_desc - 1;
328         fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
329         FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
330 }
331
332 /*
333  * clean queue, descriptor rings, free software buffers used when stopping
334  * device
335  */
336 static inline void
337 tx_queue_clean(struct fm10k_tx_queue *q)
338 {
339         struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
340         uint32_t i;
341         PMD_INIT_FUNC_TRACE();
342
343         /* zero descriptor rings */
344         for (i = 0; i < q->nb_desc; ++i)
345                 q->hw_ring[i] = zero;
346
347         /* free software buffers */
348         for (i = 0; i < q->nb_desc; ++i) {
349                 if (q->sw_ring[i]) {
350                         rte_pktmbuf_free_seg(q->sw_ring[i]);
351                         q->sw_ring[i] = NULL;
352                 }
353         }
354 }
355
356 /*
357  * free all queue memory used when releasing the queue (i.e. configure)
358  */
359 static inline void
360 tx_queue_free(struct fm10k_tx_queue *q)
361 {
362         PMD_INIT_FUNC_TRACE();
363         if (q) {
364                 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
365                 tx_queue_clean(q);
366                 if (q->rs_tracker.list) {
367                         rte_free(q->rs_tracker.list);
368                         q->rs_tracker.list = NULL;
369                 }
370                 if (q->sw_ring) {
371                         rte_free(q->sw_ring);
372                         q->sw_ring = NULL;
373                 }
374                 rte_free(q);
375                 q = NULL;
376         }
377 }
378
379 /*
380  * disable TX queue, wait unitl HW finished necessary flush operation
381  */
382 static inline int
383 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
384 {
385         uint32_t reg, i;
386
387         reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
388         FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
389                         reg & ~FM10K_TXDCTL_ENABLE);
390
391         /* Wait 100us at most */
392         for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
393                 rte_delay_us(1);
394                 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
395                 if (!(reg & FM10K_TXDCTL_ENABLE))
396                         break;
397         }
398
399         if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
400                 return -1;
401
402         return 0;
403 }
404
405 static int
406 fm10k_check_mq_mode(struct rte_eth_dev *dev)
407 {
408         enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
409         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
410         struct rte_eth_vmdq_rx_conf *vmdq_conf;
411         uint16_t nb_rx_q = dev->data->nb_rx_queues;
412
413         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
414
415         if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
416                 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
417                 return -EINVAL;
418         }
419
420         if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
421                 return 0;
422
423         if (hw->mac.type == fm10k_mac_vf) {
424                 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
425                 return -EINVAL;
426         }
427
428         /* Check VMDQ queue pool number */
429         if (vmdq_conf->nb_queue_pools >
430                         sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
431                         vmdq_conf->nb_queue_pools > nb_rx_q) {
432                 PMD_INIT_LOG(ERR, "Too many of queue pools: %d",
433                         vmdq_conf->nb_queue_pools);
434                 return -EINVAL;
435         }
436
437         return 0;
438 }
439
440 static const struct fm10k_txq_ops def_txq_ops = {
441         .reset = tx_queue_reset,
442 };
443
444 static int
445 fm10k_dev_configure(struct rte_eth_dev *dev)
446 {
447         int ret;
448
449         PMD_INIT_FUNC_TRACE();
450
451         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
452                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
453
454         /* multipe queue mode checking */
455         ret  = fm10k_check_mq_mode(dev);
456         if (ret != 0) {
457                 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
458                             ret);
459                 return ret;
460         }
461
462         dev->data->scattered_rx = 0;
463
464         return 0;
465 }
466
467 static void
468 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
469 {
470         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
471         struct rte_eth_vmdq_rx_conf *vmdq_conf;
472         uint32_t i;
473
474         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
475
476         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
477                 if (!vmdq_conf->pool_map[i].pools)
478                         continue;
479                 fm10k_mbx_lock(hw);
480                 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
481                 fm10k_mbx_unlock(hw);
482         }
483 }
484
485 static void
486 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
487 {
488         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
489
490         /* Add default mac address */
491         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
492                 MAIN_VSI_POOL_NUMBER);
493 }
494
495 static void
496 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
497 {
498         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
499         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
500         uint32_t mrqc, *key, i, reta, j;
501         uint64_t hf;
502
503 #define RSS_KEY_SIZE 40
504         static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
505                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
506                 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
507                 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
508                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
509                 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
510         };
511
512         if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
513                 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
514                 FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
515                 return;
516         }
517
518         /* random key is rss_intel_key (default) or user provided (rss_key) */
519         if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
520                 key = (uint32_t *)rss_intel_key;
521         else
522                 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
523
524         /* Now fill our hash function seeds, 4 bytes at a time */
525         for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
526                 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
527
528         /*
529          * Fill in redirection table
530          * The byte-swap is needed because NIC registers are in
531          * little-endian order.
532          */
533         reta = 0;
534         for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
535                 if (j == dev->data->nb_rx_queues)
536                         j = 0;
537                 reta = (reta << CHAR_BIT) | j;
538                 if ((i & 3) == 3)
539                         FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
540                                         rte_bswap32(reta));
541         }
542
543         /*
544          * Generate RSS hash based on packet types, TCP/UDP
545          * port numbers and/or IPv4/v6 src and dst addresses
546          */
547         hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
548         mrqc = 0;
549         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
550         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
551         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
552         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
553         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
554         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
555         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
556         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
557         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
558
559         if (mrqc == 0) {
560                 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
561                         "supported", hf);
562                 return;
563         }
564
565         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
566 }
567
568 static void
569 fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
570 {
571         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
572         uint32_t i;
573
574         for (i = 0; i < nb_lport_new; i++) {
575                 /* Set unicast mode by default. App can change
576                  * to other mode in other API func.
577                  */
578                 fm10k_mbx_lock(hw);
579                 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
580                         FM10K_XCAST_MODE_NONE);
581                 fm10k_mbx_unlock(hw);
582         }
583 }
584
585 static void
586 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
587 {
588         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
589         struct rte_eth_vmdq_rx_conf *vmdq_conf;
590         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
591         struct fm10k_macvlan_filter_info *macvlan;
592         uint16_t nb_queue_pools = 0; /* pool number in configuration */
593         uint16_t nb_lport_new;
594
595         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
596         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
597
598         fm10k_dev_rss_configure(dev);
599
600         /* only PF supports VMDQ */
601         if (hw->mac.type != fm10k_mac_pf)
602                 return;
603
604         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
605                 nb_queue_pools = vmdq_conf->nb_queue_pools;
606
607         /* no pool number change, no need to update logic port and VLAN/MAC */
608         if (macvlan->nb_queue_pools == nb_queue_pools)
609                 return;
610
611         nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
612         fm10k_dev_logic_port_update(dev, nb_lport_new);
613
614         /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
615         memset(dev->data->mac_addrs, 0,
616                 RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
617         rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
618                 &dev->data->mac_addrs[0]);
619         memset(macvlan, 0, sizeof(*macvlan));
620         macvlan->nb_queue_pools = nb_queue_pools;
621
622         if (nb_queue_pools)
623                 fm10k_dev_vmdq_rx_configure(dev);
624         else
625                 fm10k_dev_pf_main_vsi_reset(dev);
626 }
627
628 static int
629 fm10k_dev_tx_init(struct rte_eth_dev *dev)
630 {
631         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
632         int i, ret;
633         struct fm10k_tx_queue *txq;
634         uint64_t base_addr;
635         uint32_t size;
636
637         /* Disable TXINT to avoid possible interrupt */
638         for (i = 0; i < hw->mac.max_queues; i++)
639                 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
640                                 3 << FM10K_TXINT_TIMER_SHIFT);
641
642         /* Setup TX queue */
643         for (i = 0; i < dev->data->nb_tx_queues; ++i) {
644                 txq = dev->data->tx_queues[i];
645                 base_addr = txq->hw_ring_phys_addr;
646                 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
647
648                 /* disable queue to avoid issues while updating state */
649                 ret = tx_queue_disable(hw, i);
650                 if (ret) {
651                         PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
652                         return -1;
653                 }
654                 /* Enable use of FTAG bit in TX descriptor, PFVTCTL
655                  * register is read-only for VF.
656                  */
657                 if (fm10k_check_ftag(dev->device->devargs)) {
658                         if (hw->mac.type == fm10k_mac_pf) {
659                                 FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
660                                                 FM10K_PFVTCTL_FTAG_DESC_ENABLE);
661                                 PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
662                         } else {
663                                 PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
664                                 return -ENOTSUP;
665                         }
666                 }
667
668                 /* set location and size for descriptor ring */
669                 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
670                                 base_addr & UINT64_LOWER_32BITS_MASK);
671                 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
672                                 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
673                 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
674
675                 /* assign default SGLORT for each TX queue by PF */
676                 if (hw->mac.type == fm10k_mac_pf)
677                         FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
678         }
679
680         /* set up vector or scalar TX function as appropriate */
681         fm10k_set_tx_function(dev);
682
683         return 0;
684 }
685
686 static int
687 fm10k_dev_rx_init(struct rte_eth_dev *dev)
688 {
689         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
690         struct fm10k_macvlan_filter_info *macvlan;
691         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
692         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
693         int i, ret;
694         struct fm10k_rx_queue *rxq;
695         uint64_t base_addr;
696         uint32_t size;
697         uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
698         uint32_t logic_port = hw->mac.dglort_map;
699         uint16_t buf_size;
700         uint16_t queue_stride = 0;
701
702         /* enable RXINT for interrupt mode */
703         i = 0;
704         if (rte_intr_dp_is_en(intr_handle)) {
705                 for (; i < dev->data->nb_rx_queues; i++) {
706                         FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
707                         if (hw->mac.type == fm10k_mac_pf)
708                                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
709                                         FM10K_ITR_AUTOMASK |
710                                         FM10K_ITR_MASK_CLEAR);
711                         else
712                                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
713                                         FM10K_ITR_AUTOMASK |
714                                         FM10K_ITR_MASK_CLEAR);
715                 }
716         }
717         /* Disable other RXINT to avoid possible interrupt */
718         for (; i < hw->mac.max_queues; i++)
719                 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
720                         3 << FM10K_RXINT_TIMER_SHIFT);
721
722         /* Setup RX queues */
723         for (i = 0; i < dev->data->nb_rx_queues; ++i) {
724                 rxq = dev->data->rx_queues[i];
725                 base_addr = rxq->hw_ring_phys_addr;
726                 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
727
728                 /* disable queue to avoid issues while updating state */
729                 ret = rx_queue_disable(hw, i);
730                 if (ret) {
731                         PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
732                         return -1;
733                 }
734
735                 /* Setup the Base and Length of the Rx Descriptor Ring */
736                 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
737                                 base_addr & UINT64_LOWER_32BITS_MASK);
738                 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
739                                 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
740                 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
741
742                 /* Configure the Rx buffer size for one buff without split */
743                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
744                         RTE_PKTMBUF_HEADROOM);
745                 /* As RX buffer is aligned to 512B within mbuf, some bytes are
746                  * reserved for this purpose, and the worst case could be 511B.
747                  * But SRR reg assumes all buffers have the same size. In order
748                  * to fill the gap, we'll have to consider the worst case and
749                  * assume 512B is reserved. If we don't do so, it's possible
750                  * for HW to overwrite data to next mbuf.
751                  */
752                 buf_size -= FM10K_RX_DATABUF_ALIGN;
753
754                 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
755                                 (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
756                                 FM10K_SRRCTL_LOOPBACK_SUPPRESS);
757
758                 /* It adds dual VLAN length for supporting dual VLAN */
759                 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
760                                 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
761                         rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
762                         uint32_t reg;
763                         dev->data->scattered_rx = 1;
764                         reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
765                         reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
766                         FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
767                 }
768
769                 /* Enable drop on empty, it's RO for VF */
770                 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
771                         rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
772
773                 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
774                 FM10K_WRITE_FLUSH(hw);
775         }
776
777         /* Configure VMDQ/RSS if applicable */
778         fm10k_dev_mq_rx_configure(dev);
779
780         /* Decide the best RX function */
781         fm10k_set_rx_function(dev);
782
783         /* update RX_SGLORT for loopback suppress*/
784         if (hw->mac.type != fm10k_mac_pf)
785                 return 0;
786         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
787         if (macvlan->nb_queue_pools)
788                 queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
789         for (i = 0; i < dev->data->nb_rx_queues; ++i) {
790                 if (i && queue_stride && !(i % queue_stride))
791                         logic_port++;
792                 FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
793         }
794
795         return 0;
796 }
797
798 static int
799 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
800 {
801         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
802         int err;
803         uint32_t reg;
804         struct fm10k_rx_queue *rxq;
805
806         PMD_INIT_FUNC_TRACE();
807
808         rxq = dev->data->rx_queues[rx_queue_id];
809         err = rx_queue_reset(rxq);
810         if (err == -ENOMEM) {
811                 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
812                 return err;
813         } else if (err == -EINVAL) {
814                 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
815                         " %d", err);
816                 return err;
817         }
818
819         /* Setup the HW Rx Head and Tail Descriptor Pointers
820          * Note: this must be done AFTER the queue is enabled on real
821          * hardware, but BEFORE the queue is enabled when using the
822          * emulation platform. Do it in both places for now and remove
823          * this comment and the following two register writes when the
824          * emulation platform is no longer being used.
825          */
826         FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
827         FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
828
829         /* Set PF ownership flag for PF devices */
830         reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
831         if (hw->mac.type == fm10k_mac_pf)
832                 reg |= FM10K_RXQCTL_PF;
833         reg |= FM10K_RXQCTL_ENABLE;
834         /* enable RX queue */
835         FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
836         FM10K_WRITE_FLUSH(hw);
837
838         /* Setup the HW Rx Head and Tail Descriptor Pointers
839          * Note: this must be done AFTER the queue is enabled
840          */
841         FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
842         FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
843         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
844
845         return 0;
846 }
847
848 static int
849 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
850 {
851         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
852
853         PMD_INIT_FUNC_TRACE();
854
855         /* Disable RX queue */
856         rx_queue_disable(hw, rx_queue_id);
857
858         /* Free mbuf and clean HW ring */
859         rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
860         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
861
862         return 0;
863 }
864
865 static int
866 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
867 {
868         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
869         /** @todo - this should be defined in the shared code */
870 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
871         uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
872         struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
873
874         PMD_INIT_FUNC_TRACE();
875
876         q->ops->reset(q);
877
878         /* reset head and tail pointers */
879         FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
880         FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
881
882         /* enable TX queue */
883         FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
884                                 FM10K_TXDCTL_ENABLE | txdctl);
885         FM10K_WRITE_FLUSH(hw);
886         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
887
888         return 0;
889 }
890
891 static int
892 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
893 {
894         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
895
896         PMD_INIT_FUNC_TRACE();
897
898         tx_queue_disable(hw, tx_queue_id);
899         tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
900         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
901
902         return 0;
903 }
904
905 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
906 {
907         return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
908                 != FM10K_DGLORTMAP_NONE);
909 }
910
911 static int
912 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
913 {
914         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
915         int status;
916
917         PMD_INIT_FUNC_TRACE();
918
919         /* Return if it didn't acquire valid glort range */
920         if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
921                 return 0;
922
923         fm10k_mbx_lock(hw);
924         status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
925                                 FM10K_XCAST_MODE_PROMISC);
926         fm10k_mbx_unlock(hw);
927
928         if (status != FM10K_SUCCESS) {
929                 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
930                 return -EAGAIN;
931         }
932
933         return 0;
934 }
935
936 static int
937 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
938 {
939         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
940         uint8_t mode;
941         int status;
942
943         PMD_INIT_FUNC_TRACE();
944
945         /* Return if it didn't acquire valid glort range */
946         if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
947                 return 0;
948
949         if (dev->data->all_multicast == 1)
950                 mode = FM10K_XCAST_MODE_ALLMULTI;
951         else
952                 mode = FM10K_XCAST_MODE_NONE;
953
954         fm10k_mbx_lock(hw);
955         status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
956                                 mode);
957         fm10k_mbx_unlock(hw);
958
959         if (status != FM10K_SUCCESS) {
960                 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
961                 return -EAGAIN;
962         }
963
964         return 0;
965 }
966
967 static int
968 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
969 {
970         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
971         int status;
972
973         PMD_INIT_FUNC_TRACE();
974
975         /* Return if it didn't acquire valid glort range */
976         if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
977                 return 0;
978
979         /* If promiscuous mode is enabled, it doesn't make sense to enable
980          * allmulticast and disable promiscuous since fm10k only can select
981          * one of the modes.
982          */
983         if (dev->data->promiscuous) {
984                 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
985                         "needn't enable allmulticast");
986                 return 0;
987         }
988
989         fm10k_mbx_lock(hw);
990         status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
991                                 FM10K_XCAST_MODE_ALLMULTI);
992         fm10k_mbx_unlock(hw);
993
994         if (status != FM10K_SUCCESS) {
995                 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
996                 return -EAGAIN;
997         }
998
999         return 0;
1000 }
1001
1002 static int
1003 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
1004 {
1005         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1006         int status;
1007
1008         PMD_INIT_FUNC_TRACE();
1009
1010         /* Return if it didn't acquire valid glort range */
1011         if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1012                 return 0;
1013
1014         if (dev->data->promiscuous) {
1015                 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
1016                         "since promisc mode is enabled");
1017                 return -EINVAL;
1018         }
1019
1020         fm10k_mbx_lock(hw);
1021         /* Change mode to unicast mode */
1022         status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1023                                 FM10K_XCAST_MODE_NONE);
1024         fm10k_mbx_unlock(hw);
1025
1026         if (status != FM10K_SUCCESS) {
1027                 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
1028                 return -EAGAIN;
1029         }
1030
1031         return 0;
1032 }
1033
1034 static void
1035 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
1036 {
1037         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1038         uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
1039         uint16_t nb_queue_pools;
1040         struct fm10k_macvlan_filter_info *macvlan;
1041
1042         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1043         nb_queue_pools = macvlan->nb_queue_pools;
1044         pool_len = nb_queue_pools ? rte_fls_u32(nb_queue_pools - 1) : 0;
1045         rss_len = rte_fls_u32(dev->data->nb_rx_queues - 1) - pool_len;
1046
1047         /* GLORT 0x0-0x3F are used by PF and VMDQ,  0x40-0x7F used by FD */
1048         dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
1049         dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1050                         hw->mac.dglort_map;
1051         FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1052         /* Configure VMDQ/RSS DGlort Decoder */
1053         FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
1054
1055         /* Flow Director configurations, only queue number is valid. */
1056         dglortdec = rte_fls_u32(dev->data->nb_rx_queues - 1);
1057         dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1058                         (hw->mac.dglort_map + GLORT_FD_Q_BASE);
1059         FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1060         FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1061
1062         /* Invalidate all other GLORT entries */
1063         for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1064                 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1065                                 FM10K_DGLORTMAP_NONE);
1066 }
1067
1068 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
1069 static int
1070 fm10k_dev_start(struct rte_eth_dev *dev)
1071 {
1072         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1073         int i, diag;
1074
1075         PMD_INIT_FUNC_TRACE();
1076
1077         /* stop, init, then start the hw */
1078         diag = fm10k_stop_hw(hw);
1079         if (diag != FM10K_SUCCESS) {
1080                 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1081                 return -EIO;
1082         }
1083
1084         diag = fm10k_init_hw(hw);
1085         if (diag != FM10K_SUCCESS) {
1086                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1087                 return -EIO;
1088         }
1089
1090         diag = fm10k_start_hw(hw);
1091         if (diag != FM10K_SUCCESS) {
1092                 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1093                 return -EIO;
1094         }
1095
1096         diag = fm10k_dev_tx_init(dev);
1097         if (diag) {
1098                 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1099                 return diag;
1100         }
1101
1102         if (fm10k_dev_rxq_interrupt_setup(dev))
1103                 return -EIO;
1104
1105         diag = fm10k_dev_rx_init(dev);
1106         if (diag) {
1107                 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1108                 return diag;
1109         }
1110
1111         if (hw->mac.type == fm10k_mac_pf)
1112                 fm10k_dev_dglort_map_configure(dev);
1113
1114         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1115                 struct fm10k_rx_queue *rxq;
1116                 rxq = dev->data->rx_queues[i];
1117
1118                 if (rxq->rx_deferred_start)
1119                         continue;
1120                 diag = fm10k_dev_rx_queue_start(dev, i);
1121                 if (diag != 0) {
1122                         int j;
1123                         for (j = 0; j < i; ++j)
1124                                 rx_queue_clean(dev->data->rx_queues[j]);
1125                         return diag;
1126                 }
1127         }
1128
1129         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1130                 struct fm10k_tx_queue *txq;
1131                 txq = dev->data->tx_queues[i];
1132
1133                 if (txq->tx_deferred_start)
1134                         continue;
1135                 diag = fm10k_dev_tx_queue_start(dev, i);
1136                 if (diag != 0) {
1137                         int j;
1138                         for (j = 0; j < i; ++j)
1139                                 tx_queue_clean(dev->data->tx_queues[j]);
1140                         for (j = 0; j < dev->data->nb_rx_queues; ++j)
1141                                 rx_queue_clean(dev->data->rx_queues[j]);
1142                         return diag;
1143                 }
1144         }
1145
1146         /* Update default vlan when not in VMDQ mode */
1147         if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1148                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1149
1150         fm10k_link_update(dev, 0);
1151
1152         return 0;
1153 }
1154
1155 static void
1156 fm10k_dev_stop(struct rte_eth_dev *dev)
1157 {
1158         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1159         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1160         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
1161         int i;
1162
1163         PMD_INIT_FUNC_TRACE();
1164         dev->data->dev_started = 0;
1165
1166         if (dev->data->tx_queues)
1167                 for (i = 0; i < dev->data->nb_tx_queues; i++)
1168                         fm10k_dev_tx_queue_stop(dev, i);
1169
1170         if (dev->data->rx_queues)
1171                 for (i = 0; i < dev->data->nb_rx_queues; i++)
1172                         fm10k_dev_rx_queue_stop(dev, i);
1173
1174         /* Disable datapath event */
1175         if (rte_intr_dp_is_en(intr_handle)) {
1176                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1177                         FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1178                                 3 << FM10K_RXINT_TIMER_SHIFT);
1179                         if (hw->mac.type == fm10k_mac_pf)
1180                                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
1181                                         FM10K_ITR_MASK_SET);
1182                         else
1183                                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
1184                                         FM10K_ITR_MASK_SET);
1185                 }
1186         }
1187         /* Clean datapath event and queue/vec mapping */
1188         rte_intr_efd_disable(intr_handle);
1189         rte_free(intr_handle->intr_vec);
1190         intr_handle->intr_vec = NULL;
1191 }
1192
1193 static void
1194 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1195 {
1196         int i;
1197
1198         PMD_INIT_FUNC_TRACE();
1199
1200         if (dev->data->tx_queues) {
1201                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1202                         struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1203
1204                         tx_queue_free(txq);
1205                 }
1206         }
1207
1208         if (dev->data->rx_queues) {
1209                 for (i = 0; i < dev->data->nb_rx_queues; i++)
1210                         fm10k_rx_queue_release(dev->data->rx_queues[i]);
1211         }
1212 }
1213
1214 static int
1215 fm10k_link_update(struct rte_eth_dev *dev,
1216         __rte_unused int wait_to_complete)
1217 {
1218         struct fm10k_dev_info *dev_info =
1219                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1220         PMD_INIT_FUNC_TRACE();
1221
1222         dev->data->dev_link.link_speed  = ETH_SPEED_NUM_50G;
1223         dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1224         dev->data->dev_link.link_status =
1225                 dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
1226         dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
1227
1228         return 0;
1229 }
1230
1231 static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1232         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
1233 {
1234         unsigned i, q;
1235         unsigned count = 0;
1236
1237         if (xstats_names != NULL) {
1238                 /* Note: limit checked in rte_eth_xstats_names() */
1239
1240                 /* Global stats */
1241                 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1242                         snprintf(xstats_names[count].name,
1243                                 sizeof(xstats_names[count].name),
1244                                 "%s", fm10k_hw_stats_strings[count].name);
1245                         count++;
1246                 }
1247
1248                 /* PF queue stats */
1249                 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1250                         for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1251                                 snprintf(xstats_names[count].name,
1252                                         sizeof(xstats_names[count].name),
1253                                         "rx_q%u_%s", q,
1254                                         fm10k_hw_stats_rx_q_strings[i].name);
1255                                 count++;
1256                         }
1257                         for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1258                                 snprintf(xstats_names[count].name,
1259                                         sizeof(xstats_names[count].name),
1260                                         "tx_q%u_%s", q,
1261                                         fm10k_hw_stats_tx_q_strings[i].name);
1262                                 count++;
1263                         }
1264                 }
1265         }
1266         return FM10K_NB_XSTATS;
1267 }
1268
1269 static int
1270 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1271                  unsigned n)
1272 {
1273         struct fm10k_hw_stats *hw_stats =
1274                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1275         unsigned i, q, count = 0;
1276
1277         if (n < FM10K_NB_XSTATS)
1278                 return FM10K_NB_XSTATS;
1279
1280         /* Global stats */
1281         for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1282                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1283                         fm10k_hw_stats_strings[count].offset);
1284                 xstats[count].id = count;
1285                 count++;
1286         }
1287
1288         /* PF queue stats */
1289         for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1290                 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1291                         xstats[count].value =
1292                                 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1293                                 fm10k_hw_stats_rx_q_strings[i].offset);
1294                         xstats[count].id = count;
1295                         count++;
1296                 }
1297                 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1298                         xstats[count].value =
1299                                 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1300                                 fm10k_hw_stats_tx_q_strings[i].offset);
1301                         xstats[count].id = count;
1302                         count++;
1303                 }
1304         }
1305
1306         return FM10K_NB_XSTATS;
1307 }
1308
1309 static int
1310 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1311 {
1312         uint64_t ipackets, opackets, ibytes, obytes, imissed;
1313         struct fm10k_hw *hw =
1314                 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1315         struct fm10k_hw_stats *hw_stats =
1316                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1317         int i;
1318
1319         PMD_INIT_FUNC_TRACE();
1320
1321         fm10k_update_hw_stats(hw, hw_stats);
1322
1323         ipackets = opackets = ibytes = obytes = imissed = 0;
1324         for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1325                 (i < hw->mac.max_queues); ++i) {
1326                 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1327                 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1328                 stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
1329                 stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
1330                 stats->q_errors[i]   = hw_stats->q[i].rx_drops.count;
1331                 ipackets += stats->q_ipackets[i];
1332                 opackets += stats->q_opackets[i];
1333                 ibytes   += stats->q_ibytes[i];
1334                 obytes   += stats->q_obytes[i];
1335                 imissed  += stats->q_errors[i];
1336         }
1337         stats->ipackets = ipackets;
1338         stats->opackets = opackets;
1339         stats->ibytes = ibytes;
1340         stats->obytes = obytes;
1341         stats->imissed = imissed;
1342         return 0;
1343 }
1344
1345 static int
1346 fm10k_stats_reset(struct rte_eth_dev *dev)
1347 {
1348         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1349         struct fm10k_hw_stats *hw_stats =
1350                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1351
1352         PMD_INIT_FUNC_TRACE();
1353
1354         memset(hw_stats, 0, sizeof(*hw_stats));
1355         fm10k_rebind_hw_stats(hw, hw_stats);
1356
1357         return 0;
1358 }
1359
1360 static int
1361 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1362         struct rte_eth_dev_info *dev_info)
1363 {
1364         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1365         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1366
1367         PMD_INIT_FUNC_TRACE();
1368
1369         dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1370         dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1371         dev_info->max_rx_queues      = hw->mac.max_queues;
1372         dev_info->max_tx_queues      = hw->mac.max_queues;
1373         dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1374         dev_info->max_hash_mac_addrs = 0;
1375         dev_info->max_vfs            = pdev->max_vfs;
1376         dev_info->vmdq_pool_base     = 0;
1377         dev_info->vmdq_queue_base    = 0;
1378         dev_info->max_vmdq_pools     = ETH_32_POOLS;
1379         dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1380         dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
1381         dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
1382                                     dev_info->rx_queue_offload_capa;
1383         dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
1384         dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
1385                                     dev_info->tx_queue_offload_capa;
1386
1387         dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1388         dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1389         dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1390                                         ETH_RSS_IPV6 |
1391                                         ETH_RSS_IPV6_EX |
1392                                         ETH_RSS_NONFRAG_IPV4_TCP |
1393                                         ETH_RSS_NONFRAG_IPV6_TCP |
1394                                         ETH_RSS_IPV6_TCP_EX |
1395                                         ETH_RSS_NONFRAG_IPV4_UDP |
1396                                         ETH_RSS_NONFRAG_IPV6_UDP |
1397                                         ETH_RSS_IPV6_UDP_EX;
1398
1399         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1400                 .rx_thresh = {
1401                         .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1402                         .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1403                         .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1404                 },
1405                 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1406                 .rx_drop_en = 0,
1407                 .offloads = 0,
1408         };
1409
1410         dev_info->default_txconf = (struct rte_eth_txconf) {
1411                 .tx_thresh = {
1412                         .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1413                         .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1414                         .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1415                 },
1416                 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1417                 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1418                 .offloads = 0,
1419         };
1420
1421         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1422                 .nb_max = FM10K_MAX_RX_DESC,
1423                 .nb_min = FM10K_MIN_RX_DESC,
1424                 .nb_align = FM10K_MULT_RX_DESC,
1425         };
1426
1427         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1428                 .nb_max = FM10K_MAX_TX_DESC,
1429                 .nb_min = FM10K_MIN_TX_DESC,
1430                 .nb_align = FM10K_MULT_TX_DESC,
1431                 .nb_seg_max = FM10K_TX_MAX_SEG,
1432                 .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1433         };
1434
1435         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1436                         ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1437                         ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1438
1439         return 0;
1440 }
1441
1442 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1443 static const uint32_t *
1444 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1445 {
1446         if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1447             dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1448                 static uint32_t ptypes[] = {
1449                         /* refers to rx_desc_to_ol_flags() */
1450                         RTE_PTYPE_L2_ETHER,
1451                         RTE_PTYPE_L3_IPV4,
1452                         RTE_PTYPE_L3_IPV4_EXT,
1453                         RTE_PTYPE_L3_IPV6,
1454                         RTE_PTYPE_L3_IPV6_EXT,
1455                         RTE_PTYPE_L4_TCP,
1456                         RTE_PTYPE_L4_UDP,
1457                         RTE_PTYPE_UNKNOWN
1458                 };
1459
1460                 return ptypes;
1461         } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1462                    dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1463                 static uint32_t ptypes_vec[] = {
1464                         /* refers to fm10k_desc_to_pktype_v() */
1465                         RTE_PTYPE_L3_IPV4,
1466                         RTE_PTYPE_L3_IPV4_EXT,
1467                         RTE_PTYPE_L3_IPV6,
1468                         RTE_PTYPE_L3_IPV6_EXT,
1469                         RTE_PTYPE_L4_TCP,
1470                         RTE_PTYPE_L4_UDP,
1471                         RTE_PTYPE_TUNNEL_GENEVE,
1472                         RTE_PTYPE_TUNNEL_NVGRE,
1473                         RTE_PTYPE_TUNNEL_VXLAN,
1474                         RTE_PTYPE_TUNNEL_GRE,
1475                         RTE_PTYPE_UNKNOWN
1476                 };
1477
1478                 return ptypes_vec;
1479         }
1480
1481         return NULL;
1482 }
1483 #else
1484 static const uint32_t *
1485 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1486 {
1487         return NULL;
1488 }
1489 #endif
1490
1491 static int
1492 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1493 {
1494         s32 result;
1495         uint16_t mac_num = 0;
1496         uint32_t vid_idx, vid_bit, mac_index;
1497         struct fm10k_hw *hw;
1498         struct fm10k_macvlan_filter_info *macvlan;
1499         struct rte_eth_dev_data *data = dev->data;
1500
1501         hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1502         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1503
1504         if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1505                 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1506                 return -EINVAL;
1507         }
1508
1509         if (vlan_id > ETH_VLAN_ID_MAX) {
1510                 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1511                 return -EINVAL;
1512         }
1513
1514         vid_idx = FM10K_VFTA_IDX(vlan_id);
1515         vid_bit = FM10K_VFTA_BIT(vlan_id);
1516         /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1517         if (on && (macvlan->vfta[vid_idx] & vid_bit))
1518                 return 0;
1519         /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1520         if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1521                 PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
1522                         "in the VLAN filter table");
1523                 return -EINVAL;
1524         }
1525
1526         fm10k_mbx_lock(hw);
1527         result = fm10k_update_vlan(hw, vlan_id, 0, on);
1528         fm10k_mbx_unlock(hw);
1529         if (result != FM10K_SUCCESS) {
1530                 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1531                 return -EIO;
1532         }
1533
1534         for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1535                         (result == FM10K_SUCCESS); mac_index++) {
1536                 if (rte_is_zero_ether_addr(&data->mac_addrs[mac_index]))
1537                         continue;
1538                 if (mac_num > macvlan->mac_num - 1) {
1539                         PMD_INIT_LOG(ERR, "MAC address number "
1540                                         "not match");
1541                         break;
1542                 }
1543                 fm10k_mbx_lock(hw);
1544                 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1545                         data->mac_addrs[mac_index].addr_bytes,
1546                         vlan_id, on, 0);
1547                 fm10k_mbx_unlock(hw);
1548                 mac_num++;
1549         }
1550         if (result != FM10K_SUCCESS) {
1551                 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1552                 return -EIO;
1553         }
1554
1555         if (on) {
1556                 macvlan->vlan_num++;
1557                 macvlan->vfta[vid_idx] |= vid_bit;
1558         } else {
1559                 macvlan->vlan_num--;
1560                 macvlan->vfta[vid_idx] &= ~vid_bit;
1561         }
1562         return 0;
1563 }
1564
1565 static int
1566 fm10k_vlan_offload_set(struct rte_eth_dev *dev __rte_unused,
1567                        int mask __rte_unused)
1568 {
1569         return 0;
1570 }
1571
1572 /* Add/Remove a MAC address, and update filters to main VSI */
1573 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1574                 const u8 *mac, bool add, uint32_t pool)
1575 {
1576         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1577         struct fm10k_macvlan_filter_info *macvlan;
1578         uint32_t i, j, k;
1579
1580         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1581
1582         if (pool != MAIN_VSI_POOL_NUMBER) {
1583                 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1584                         "mac to pool %u", pool);
1585                 return;
1586         }
1587         for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1588                 if (!macvlan->vfta[j])
1589                         continue;
1590                 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1591                         if (!(macvlan->vfta[j] & (1 << k)))
1592                                 continue;
1593                         if (i + 1 > macvlan->vlan_num) {
1594                                 PMD_INIT_LOG(ERR, "vlan number not match");
1595                                 return;
1596                         }
1597                         fm10k_mbx_lock(hw);
1598                         fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1599                                 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1600                         fm10k_mbx_unlock(hw);
1601                         i++;
1602                 }
1603         }
1604 }
1605
1606 /* Add/Remove a MAC address, and update filters to VMDQ */
1607 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1608                 const u8 *mac, bool add, uint32_t pool)
1609 {
1610         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1611         struct fm10k_macvlan_filter_info *macvlan;
1612         struct rte_eth_vmdq_rx_conf *vmdq_conf;
1613         uint32_t i;
1614
1615         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1616         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1617
1618         if (pool > macvlan->nb_queue_pools) {
1619                 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1620                         " Max pool is %u",
1621                         pool, macvlan->nb_queue_pools);
1622                 return;
1623         }
1624         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1625                 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1626                         continue;
1627                 fm10k_mbx_lock(hw);
1628                 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1629                         vmdq_conf->pool_map[i].vlan_id, add, 0);
1630                 fm10k_mbx_unlock(hw);
1631         }
1632 }
1633
1634 /* Add/Remove a MAC address, and update filters */
1635 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1636                 const u8 *mac, bool add, uint32_t pool)
1637 {
1638         struct fm10k_macvlan_filter_info *macvlan;
1639
1640         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1641
1642         if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1643                 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1644         else
1645                 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1646
1647         if (add)
1648                 macvlan->mac_num++;
1649         else
1650                 macvlan->mac_num--;
1651 }
1652
1653 /* Add a MAC address, and update filters */
1654 static int
1655 fm10k_macaddr_add(struct rte_eth_dev *dev,
1656                 struct rte_ether_addr *mac_addr,
1657                 uint32_t index,
1658                 uint32_t pool)
1659 {
1660         struct fm10k_macvlan_filter_info *macvlan;
1661
1662         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1663         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1664         macvlan->mac_vmdq_id[index] = pool;
1665         return 0;
1666 }
1667
1668 /* Remove a MAC address, and update filters */
1669 static void
1670 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1671 {
1672         struct rte_eth_dev_data *data = dev->data;
1673         struct fm10k_macvlan_filter_info *macvlan;
1674
1675         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1676         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1677                         FALSE, macvlan->mac_vmdq_id[index]);
1678         macvlan->mac_vmdq_id[index] = 0;
1679 }
1680
1681 static inline int
1682 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1683 {
1684         if ((request < min) || (request > max) || ((request % mult) != 0))
1685                 return -1;
1686         else
1687                 return 0;
1688 }
1689
1690
1691 static inline int
1692 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1693 {
1694         if ((request < min) || (request > max) || ((div % request) != 0))
1695                 return -1;
1696         else
1697                 return 0;
1698 }
1699
1700 static inline int
1701 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1702 {
1703         uint16_t rx_free_thresh;
1704
1705         if (conf->rx_free_thresh == 0)
1706                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1707         else
1708                 rx_free_thresh = conf->rx_free_thresh;
1709
1710         /* make sure the requested threshold satisfies the constraints */
1711         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1712                         FM10K_RX_FREE_THRESH_MAX(q),
1713                         FM10K_RX_FREE_THRESH_DIV(q),
1714                         rx_free_thresh)) {
1715                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1716                         "less than or equal to %u, "
1717                         "greater than or equal to %u, "
1718                         "and a divisor of %u",
1719                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1720                         FM10K_RX_FREE_THRESH_MIN(q),
1721                         FM10K_RX_FREE_THRESH_DIV(q));
1722                 return -EINVAL;
1723         }
1724
1725         q->alloc_thresh = rx_free_thresh;
1726         q->drop_en = conf->rx_drop_en;
1727         q->rx_deferred_start = conf->rx_deferred_start;
1728
1729         return 0;
1730 }
1731
1732 /*
1733  * Hardware requires specific alignment for Rx packet buffers. At
1734  * least one of the following two conditions must be satisfied.
1735  *  1. Address is 512B aligned
1736  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1737  *
1738  * As such, the driver may need to adjust the DMA address within the
1739  * buffer by up to 512B.
1740  *
1741  * return 1 if the element size is valid, otherwise return 0.
1742  */
1743 static int
1744 mempool_element_size_valid(struct rte_mempool *mp)
1745 {
1746         uint32_t min_size;
1747
1748         /* elt_size includes mbuf header and headroom */
1749         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1750                         RTE_PKTMBUF_HEADROOM;
1751
1752         /* account for up to 512B of alignment */
1753         min_size -= FM10K_RX_DATABUF_ALIGN;
1754
1755         /* sanity check for overflow */
1756         if (min_size > mp->elt_size)
1757                 return 0;
1758
1759         /* size is valid */
1760         return 1;
1761 }
1762
1763 static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1764 {
1765         RTE_SET_USED(dev);
1766
1767         return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
1768 }
1769
1770 static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1771 {
1772         RTE_SET_USED(dev);
1773
1774         return  (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP  |
1775                            DEV_RX_OFFLOAD_VLAN_FILTER |
1776                            DEV_RX_OFFLOAD_IPV4_CKSUM  |
1777                            DEV_RX_OFFLOAD_UDP_CKSUM   |
1778                            DEV_RX_OFFLOAD_TCP_CKSUM   |
1779                            DEV_RX_OFFLOAD_JUMBO_FRAME |
1780                            DEV_RX_OFFLOAD_HEADER_SPLIT |
1781                            DEV_RX_OFFLOAD_RSS_HASH);
1782 }
1783
1784 static int
1785 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1786         uint16_t nb_desc, unsigned int socket_id,
1787         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1788 {
1789         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1790         struct fm10k_dev_info *dev_info =
1791                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1792         struct fm10k_rx_queue *q;
1793         const struct rte_memzone *mz;
1794         uint64_t offloads;
1795
1796         PMD_INIT_FUNC_TRACE();
1797
1798         offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1799
1800         /* make sure the mempool element size can account for alignment. */
1801         if (!mempool_element_size_valid(mp)) {
1802                 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1803                 return -EINVAL;
1804         }
1805
1806         /* make sure a valid number of descriptors have been requested */
1807         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1808                                 FM10K_MULT_RX_DESC, nb_desc)) {
1809                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1810                         "less than or equal to %"PRIu32", "
1811                         "greater than or equal to %u, "
1812                         "and a multiple of %u",
1813                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1814                         FM10K_MULT_RX_DESC);
1815                 return -EINVAL;
1816         }
1817
1818         /*
1819          * if this queue existed already, free the associated memory. The
1820          * queue cannot be reused in case we need to allocate memory on
1821          * different socket than was previously used.
1822          */
1823         if (dev->data->rx_queues[queue_id] != NULL) {
1824                 rx_queue_free(dev->data->rx_queues[queue_id]);
1825                 dev->data->rx_queues[queue_id] = NULL;
1826         }
1827
1828         /* allocate memory for the queue structure */
1829         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1830                                 socket_id);
1831         if (q == NULL) {
1832                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1833                 return -ENOMEM;
1834         }
1835
1836         /* setup queue */
1837         q->mp = mp;
1838         q->nb_desc = nb_desc;
1839         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1840         q->port_id = dev->data->port_id;
1841         q->queue_id = queue_id;
1842         q->tail_ptr = (volatile uint32_t *)
1843                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1844         q->offloads = offloads;
1845         if (handle_rxconf(q, conf)) {
1846                 rte_free(q);
1847                 return -EINVAL;
1848         }
1849         /* allocate memory for the software ring */
1850         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1851                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1852                         RTE_CACHE_LINE_SIZE, socket_id);
1853         if (q->sw_ring == NULL) {
1854                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1855                 rte_free(q);
1856                 return -ENOMEM;
1857         }
1858
1859         /*
1860          * allocate memory for the hardware descriptor ring. A memzone large
1861          * enough to hold the maximum ring size is requested to allow for
1862          * resizing in later calls to the queue setup function.
1863          */
1864         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1865                                       FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1866                                       socket_id);
1867         if (mz == NULL) {
1868                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1869                 rte_free(q->sw_ring);
1870                 rte_free(q);
1871                 return -ENOMEM;
1872         }
1873         q->hw_ring = mz->addr;
1874         q->hw_ring_phys_addr = mz->iova;
1875
1876         /* Check if number of descs satisfied Vector requirement */
1877         if (!rte_is_power_of_2(nb_desc)) {
1878                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1879                                     "preconditions - canceling the feature for "
1880                                     "the whole port[%d]",
1881                              q->queue_id, q->port_id);
1882                 dev_info->rx_vec_allowed = false;
1883         } else
1884                 fm10k_rxq_vec_setup(q);
1885
1886         dev->data->rx_queues[queue_id] = q;
1887         return 0;
1888 }
1889
1890 static void
1891 fm10k_rx_queue_release(void *queue)
1892 {
1893         PMD_INIT_FUNC_TRACE();
1894
1895         rx_queue_free(queue);
1896 }
1897
1898 static inline int
1899 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1900 {
1901         uint16_t tx_free_thresh;
1902         uint16_t tx_rs_thresh;
1903
1904         /* constraint MACROs require that tx_free_thresh is configured
1905          * before tx_rs_thresh */
1906         if (conf->tx_free_thresh == 0)
1907                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1908         else
1909                 tx_free_thresh = conf->tx_free_thresh;
1910
1911         /* make sure the requested threshold satisfies the constraints */
1912         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1913                         FM10K_TX_FREE_THRESH_MAX(q),
1914                         FM10K_TX_FREE_THRESH_DIV(q),
1915                         tx_free_thresh)) {
1916                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1917                         "less than or equal to %u, "
1918                         "greater than or equal to %u, "
1919                         "and a divisor of %u",
1920                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1921                         FM10K_TX_FREE_THRESH_MIN(q),
1922                         FM10K_TX_FREE_THRESH_DIV(q));
1923                 return -EINVAL;
1924         }
1925
1926         q->free_thresh = tx_free_thresh;
1927
1928         if (conf->tx_rs_thresh == 0)
1929                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1930         else
1931                 tx_rs_thresh = conf->tx_rs_thresh;
1932
1933         q->tx_deferred_start = conf->tx_deferred_start;
1934
1935         /* make sure the requested threshold satisfies the constraints */
1936         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1937                         FM10K_TX_RS_THRESH_MAX(q),
1938                         FM10K_TX_RS_THRESH_DIV(q),
1939                         tx_rs_thresh)) {
1940                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1941                         "less than or equal to %u, "
1942                         "greater than or equal to %u, "
1943                         "and a divisor of %u",
1944                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1945                         FM10K_TX_RS_THRESH_MIN(q),
1946                         FM10K_TX_RS_THRESH_DIV(q));
1947                 return -EINVAL;
1948         }
1949
1950         q->rs_thresh = tx_rs_thresh;
1951
1952         return 0;
1953 }
1954
1955 static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1956 {
1957         RTE_SET_USED(dev);
1958
1959         return 0;
1960 }
1961
1962 static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1963 {
1964         RTE_SET_USED(dev);
1965
1966         return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
1967                           DEV_TX_OFFLOAD_MULTI_SEGS  |
1968                           DEV_TX_OFFLOAD_IPV4_CKSUM  |
1969                           DEV_TX_OFFLOAD_UDP_CKSUM   |
1970                           DEV_TX_OFFLOAD_TCP_CKSUM   |
1971                           DEV_TX_OFFLOAD_TCP_TSO);
1972 }
1973
1974 static int
1975 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1976         uint16_t nb_desc, unsigned int socket_id,
1977         const struct rte_eth_txconf *conf)
1978 {
1979         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1980         struct fm10k_tx_queue *q;
1981         const struct rte_memzone *mz;
1982         uint64_t offloads;
1983
1984         PMD_INIT_FUNC_TRACE();
1985
1986         offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
1987
1988         /* make sure a valid number of descriptors have been requested */
1989         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1990                                 FM10K_MULT_TX_DESC, nb_desc)) {
1991                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1992                         "less than or equal to %"PRIu32", "
1993                         "greater than or equal to %u, "
1994                         "and a multiple of %u",
1995                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1996                         FM10K_MULT_TX_DESC);
1997                 return -EINVAL;
1998         }
1999
2000         /*
2001          * if this queue existed already, free the associated memory. The
2002          * queue cannot be reused in case we need to allocate memory on
2003          * different socket than was previously used.
2004          */
2005         if (dev->data->tx_queues[queue_id] != NULL) {
2006                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
2007
2008                 tx_queue_free(txq);
2009                 dev->data->tx_queues[queue_id] = NULL;
2010         }
2011
2012         /* allocate memory for the queue structure */
2013         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
2014                                 socket_id);
2015         if (q == NULL) {
2016                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
2017                 return -ENOMEM;
2018         }
2019
2020         /* setup queue */
2021         q->nb_desc = nb_desc;
2022         q->port_id = dev->data->port_id;
2023         q->queue_id = queue_id;
2024         q->offloads = offloads;
2025         q->ops = &def_txq_ops;
2026         q->tail_ptr = (volatile uint32_t *)
2027                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2028         if (handle_txconf(q, conf)) {
2029                 rte_free(q);
2030                 return -EINVAL;
2031         }
2032
2033         /* allocate memory for the software ring */
2034         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2035                                         nb_desc * sizeof(struct rte_mbuf *),
2036                                         RTE_CACHE_LINE_SIZE, socket_id);
2037         if (q->sw_ring == NULL) {
2038                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2039                 rte_free(q);
2040                 return -ENOMEM;
2041         }
2042
2043         /*
2044          * allocate memory for the hardware descriptor ring. A memzone large
2045          * enough to hold the maximum ring size is requested to allow for
2046          * resizing in later calls to the queue setup function.
2047          */
2048         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2049                                       FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2050                                       socket_id);
2051         if (mz == NULL) {
2052                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2053                 rte_free(q->sw_ring);
2054                 rte_free(q);
2055                 return -ENOMEM;
2056         }
2057         q->hw_ring = mz->addr;
2058         q->hw_ring_phys_addr = mz->iova;
2059
2060         /*
2061          * allocate memory for the RS bit tracker. Enough slots to hold the
2062          * descriptor index for each RS bit needing to be set are required.
2063          */
2064         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2065                                 ((nb_desc + 1) / q->rs_thresh) *
2066                                 sizeof(uint16_t),
2067                                 RTE_CACHE_LINE_SIZE, socket_id);
2068         if (q->rs_tracker.list == NULL) {
2069                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2070                 rte_free(q->sw_ring);
2071                 rte_free(q);
2072                 return -ENOMEM;
2073         }
2074
2075         dev->data->tx_queues[queue_id] = q;
2076         return 0;
2077 }
2078
2079 static void
2080 fm10k_tx_queue_release(void *queue)
2081 {
2082         struct fm10k_tx_queue *q = queue;
2083         PMD_INIT_FUNC_TRACE();
2084
2085         tx_queue_free(q);
2086 }
2087
2088 static int
2089 fm10k_reta_update(struct rte_eth_dev *dev,
2090                         struct rte_eth_rss_reta_entry64 *reta_conf,
2091                         uint16_t reta_size)
2092 {
2093         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2094         uint16_t i, j, idx, shift;
2095         uint8_t mask;
2096         uint32_t reta;
2097
2098         PMD_INIT_FUNC_TRACE();
2099
2100         if (reta_size > FM10K_MAX_RSS_INDICES) {
2101                 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2102                         "(%d) doesn't match the number hardware can supported "
2103                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2104                 return -EINVAL;
2105         }
2106
2107         /*
2108          * Update Redirection Table RETA[n], n=0..31. The redirection table has
2109          * 128-entries in 32 registers
2110          */
2111         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2112                 idx = i / RTE_RETA_GROUP_SIZE;
2113                 shift = i % RTE_RETA_GROUP_SIZE;
2114                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2115                                 BIT_MASK_PER_UINT32);
2116                 if (mask == 0)
2117                         continue;
2118
2119                 reta = 0;
2120                 if (mask != BIT_MASK_PER_UINT32)
2121                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2122
2123                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2124                         if (mask & (0x1 << j)) {
2125                                 if (mask != 0xF)
2126                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
2127                                 reta |= reta_conf[idx].reta[shift + j] <<
2128                                                 (CHAR_BIT * j);
2129                         }
2130                 }
2131                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2132         }
2133
2134         return 0;
2135 }
2136
2137 static int
2138 fm10k_reta_query(struct rte_eth_dev *dev,
2139                         struct rte_eth_rss_reta_entry64 *reta_conf,
2140                         uint16_t reta_size)
2141 {
2142         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2143         uint16_t i, j, idx, shift;
2144         uint8_t mask;
2145         uint32_t reta;
2146
2147         PMD_INIT_FUNC_TRACE();
2148
2149         if (reta_size < FM10K_MAX_RSS_INDICES) {
2150                 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2151                         "(%d) doesn't match the number hardware can supported "
2152                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2153                 return -EINVAL;
2154         }
2155
2156         /*
2157          * Read Redirection Table RETA[n], n=0..31. The redirection table has
2158          * 128-entries in 32 registers
2159          */
2160         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2161                 idx = i / RTE_RETA_GROUP_SIZE;
2162                 shift = i % RTE_RETA_GROUP_SIZE;
2163                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2164                                 BIT_MASK_PER_UINT32);
2165                 if (mask == 0)
2166                         continue;
2167
2168                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2169                 for (j = 0; j < CHARS_PER_UINT32; j++) {
2170                         if (mask & (0x1 << j))
2171                                 reta_conf[idx].reta[shift + j] = ((reta >>
2172                                         CHAR_BIT * j) & UINT8_MAX);
2173                 }
2174         }
2175
2176         return 0;
2177 }
2178
2179 static int
2180 fm10k_rss_hash_update(struct rte_eth_dev *dev,
2181         struct rte_eth_rss_conf *rss_conf)
2182 {
2183         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2184         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2185         uint32_t mrqc;
2186         uint64_t hf = rss_conf->rss_hf;
2187         int i;
2188
2189         PMD_INIT_FUNC_TRACE();
2190
2191         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2192                                 FM10K_RSSRK_ENTRIES_PER_REG))
2193                 return -EINVAL;
2194
2195         if (hf == 0)
2196                 return -EINVAL;
2197
2198         mrqc = 0;
2199         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
2200         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
2201         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
2202         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
2203         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
2204         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
2205         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
2206         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
2207         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
2208
2209         /* If the mapping doesn't fit any supported, return */
2210         if (mrqc == 0)
2211                 return -EINVAL;
2212
2213         if (key != NULL)
2214                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2215                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2216
2217         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2218
2219         return 0;
2220 }
2221
2222 static int
2223 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2224         struct rte_eth_rss_conf *rss_conf)
2225 {
2226         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2227         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2228         uint32_t mrqc;
2229         uint64_t hf;
2230         int i;
2231
2232         PMD_INIT_FUNC_TRACE();
2233
2234         if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2235                                 FM10K_RSSRK_ENTRIES_PER_REG))
2236                 return -EINVAL;
2237
2238         if (key != NULL)
2239                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2240                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2241
2242         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2243         hf = 0;
2244         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2245         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2246         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2247         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2248         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2249         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2250         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2251         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2252         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2253
2254         rss_conf->rss_hf = hf;
2255
2256         return 0;
2257 }
2258
2259 static void
2260 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2261 {
2262         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2263         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2264
2265         /* Bind all local non-queue interrupt to vector 0 */
2266         int_map |= FM10K_MISC_VEC_ID;
2267
2268         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2269         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2270         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2271         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2272         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2273         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2274
2275         /* Enable misc causes */
2276         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2277                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2278                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2279                                 FM10K_EIMR_ENABLE(MAILBOX) |
2280                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2281                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2282                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2283                                 FM10K_EIMR_ENABLE(VFLR));
2284
2285         /* Enable ITR 0 */
2286         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2287                                         FM10K_ITR_MASK_CLEAR);
2288         FM10K_WRITE_FLUSH(hw);
2289 }
2290
2291 static void
2292 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2293 {
2294         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2295         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2296
2297         int_map |= FM10K_MISC_VEC_ID;
2298
2299         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
2300         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
2301         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
2302         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
2303         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
2304         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);
2305
2306         /* Disable misc causes */
2307         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2308                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2309                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2310                                 FM10K_EIMR_DISABLE(MAILBOX) |
2311                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2312                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2313                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2314                                 FM10K_EIMR_DISABLE(VFLR));
2315
2316         /* Disable ITR 0 */
2317         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2318         FM10K_WRITE_FLUSH(hw);
2319 }
2320
2321 static void
2322 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2323 {
2324         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2325         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2326
2327         /* Bind all local non-queue interrupt to vector 0 */
2328         int_map |= FM10K_MISC_VEC_ID;
2329
2330         /* Only INT 0 available, other 15 are reserved. */
2331         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2332
2333         /* Enable ITR 0 */
2334         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2335                                         FM10K_ITR_MASK_CLEAR);
2336         FM10K_WRITE_FLUSH(hw);
2337 }
2338
2339 static void
2340 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2341 {
2342         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2343         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2344
2345         int_map |= FM10K_MISC_VEC_ID;
2346
2347         /* Only INT 0 available, other 15 are reserved. */
2348         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2349
2350         /* Disable ITR 0 */
2351         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2352         FM10K_WRITE_FLUSH(hw);
2353 }
2354
2355 static int
2356 fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2357 {
2358         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2359         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2360
2361         /* Enable ITR */
2362         if (hw->mac.type == fm10k_mac_pf)
2363                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2364                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2365         else
2366                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2367                         FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
2368         rte_intr_ack(&pdev->intr_handle);
2369         return 0;
2370 }
2371
2372 static int
2373 fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2374 {
2375         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2376         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2377
2378         /* Disable ITR */
2379         if (hw->mac.type == fm10k_mac_pf)
2380                 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
2381                         FM10K_ITR_MASK_SET);
2382         else
2383                 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
2384                         FM10K_ITR_MASK_SET);
2385         return 0;
2386 }
2387
2388 static int
2389 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2390 {
2391         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2392         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2393         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2394         uint32_t intr_vector, vec;
2395         uint16_t queue_id;
2396         int result = 0;
2397
2398         /* fm10k needs one separate interrupt for mailbox,
2399          * so only drivers which support multiple interrupt vectors
2400          * e.g. vfio-pci can work for fm10k interrupt mode
2401          */
2402         if (!rte_intr_cap_multiple(intr_handle) ||
2403                         dev->data->dev_conf.intr_conf.rxq == 0)
2404                 return result;
2405
2406         intr_vector = dev->data->nb_rx_queues;
2407
2408         /* disable interrupt first */
2409         rte_intr_disable(intr_handle);
2410         if (hw->mac.type == fm10k_mac_pf)
2411                 fm10k_dev_disable_intr_pf(dev);
2412         else
2413                 fm10k_dev_disable_intr_vf(dev);
2414
2415         if (rte_intr_efd_enable(intr_handle, intr_vector)) {
2416                 PMD_INIT_LOG(ERR, "Failed to init event fd");
2417                 result = -EIO;
2418         }
2419
2420         if (rte_intr_dp_is_en(intr_handle) && !result) {
2421                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
2422                         dev->data->nb_rx_queues * sizeof(int), 0);
2423                 if (intr_handle->intr_vec) {
2424                         for (queue_id = 0, vec = FM10K_RX_VEC_START;
2425                                         queue_id < dev->data->nb_rx_queues;
2426                                         queue_id++) {
2427                                 intr_handle->intr_vec[queue_id] = vec;
2428                                 if (vec < intr_handle->nb_efd - 1
2429                                                 + FM10K_RX_VEC_START)
2430                                         vec++;
2431                         }
2432                 } else {
2433                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2434                                 " intr_vec", dev->data->nb_rx_queues);
2435                         rte_intr_efd_disable(intr_handle);
2436                         result = -ENOMEM;
2437                 }
2438         }
2439
2440         if (hw->mac.type == fm10k_mac_pf)
2441                 fm10k_dev_enable_intr_pf(dev);
2442         else
2443                 fm10k_dev_enable_intr_vf(dev);
2444         rte_intr_enable(intr_handle);
2445         hw->mac.ops.update_int_moderator(hw);
2446         return result;
2447 }
2448
2449 static int
2450 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2451 {
2452         struct fm10k_fault fault;
2453         int err;
2454         const char *estr = "Unknown error";
2455
2456         /* Process PCA fault */
2457         if (eicr & FM10K_EICR_PCA_FAULT) {
2458                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2459                 if (err)
2460                         goto error;
2461                 switch (fault.type) {
2462                 case PCA_NO_FAULT:
2463                         estr = "PCA_NO_FAULT"; break;
2464                 case PCA_UNMAPPED_ADDR:
2465                         estr = "PCA_UNMAPPED_ADDR"; break;
2466                 case PCA_BAD_QACCESS_PF:
2467                         estr = "PCA_BAD_QACCESS_PF"; break;
2468                 case PCA_BAD_QACCESS_VF:
2469                         estr = "PCA_BAD_QACCESS_VF"; break;
2470                 case PCA_MALICIOUS_REQ:
2471                         estr = "PCA_MALICIOUS_REQ"; break;
2472                 case PCA_POISONED_TLP:
2473                         estr = "PCA_POISONED_TLP"; break;
2474                 case PCA_TLP_ABORT:
2475                         estr = "PCA_TLP_ABORT"; break;
2476                 default:
2477                         goto error;
2478                 }
2479                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2480                         estr, fault.func ? "VF" : "PF", fault.func,
2481                         fault.address, fault.specinfo);
2482         }
2483
2484         /* Process THI fault */
2485         if (eicr & FM10K_EICR_THI_FAULT) {
2486                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2487                 if (err)
2488                         goto error;
2489                 switch (fault.type) {
2490                 case THI_NO_FAULT:
2491                         estr = "THI_NO_FAULT"; break;
2492                 case THI_MAL_DIS_Q_FAULT:
2493                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2494                 default:
2495                         goto error;
2496                 }
2497                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2498                         estr, fault.func ? "VF" : "PF", fault.func,
2499                         fault.address, fault.specinfo);
2500         }
2501
2502         /* Process FUM fault */
2503         if (eicr & FM10K_EICR_FUM_FAULT) {
2504                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2505                 if (err)
2506                         goto error;
2507                 switch (fault.type) {
2508                 case FUM_NO_FAULT:
2509                         estr = "FUM_NO_FAULT"; break;
2510                 case FUM_UNMAPPED_ADDR:
2511                         estr = "FUM_UNMAPPED_ADDR"; break;
2512                 case FUM_POISONED_TLP:
2513                         estr = "FUM_POISONED_TLP"; break;
2514                 case FUM_BAD_VF_QACCESS:
2515                         estr = "FUM_BAD_VF_QACCESS"; break;
2516                 case FUM_ADD_DECODE_ERR:
2517                         estr = "FUM_ADD_DECODE_ERR"; break;
2518                 case FUM_RO_ERROR:
2519                         estr = "FUM_RO_ERROR"; break;
2520                 case FUM_QPRC_CRC_ERROR:
2521                         estr = "FUM_QPRC_CRC_ERROR"; break;
2522                 case FUM_CSR_TIMEOUT:
2523                         estr = "FUM_CSR_TIMEOUT"; break;
2524                 case FUM_INVALID_TYPE:
2525                         estr = "FUM_INVALID_TYPE"; break;
2526                 case FUM_INVALID_LENGTH:
2527                         estr = "FUM_INVALID_LENGTH"; break;
2528                 case FUM_INVALID_BE:
2529                         estr = "FUM_INVALID_BE"; break;
2530                 case FUM_INVALID_ALIGN:
2531                         estr = "FUM_INVALID_ALIGN"; break;
2532                 default:
2533                         goto error;
2534                 }
2535                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2536                         estr, fault.func ? "VF" : "PF", fault.func,
2537                         fault.address, fault.specinfo);
2538         }
2539
2540         return 0;
2541 error:
2542         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2543         return err;
2544 }
2545
2546 /**
2547  * PF interrupt handler triggered by NIC for handling specific interrupt.
2548  *
2549  * @param handle
2550  *  Pointer to interrupt handle.
2551  * @param param
2552  *  The address of parameter (struct rte_eth_dev *) regsitered before.
2553  *
2554  * @return
2555  *  void
2556  */
2557 static void
2558 fm10k_dev_interrupt_handler_pf(void *param)
2559 {
2560         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2561         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2562         uint32_t cause, status;
2563         struct fm10k_dev_info *dev_info =
2564                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2565         int status_mbx;
2566         s32 err;
2567
2568         if (hw->mac.type != fm10k_mac_pf)
2569                 return;
2570
2571         cause = FM10K_READ_REG(hw, FM10K_EICR);
2572
2573         /* Handle PCI fault cases */
2574         if (cause & FM10K_EICR_FAULT_MASK) {
2575                 PMD_INIT_LOG(ERR, "INT: find fault!");
2576                 fm10k_dev_handle_fault(hw, cause);
2577         }
2578
2579         /* Handle switch up/down */
2580         if (cause & FM10K_EICR_SWITCHNOTREADY)
2581                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2582
2583         if (cause & FM10K_EICR_SWITCHREADY) {
2584                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2585                 if (dev_info->sm_down == 1) {
2586                         fm10k_mbx_lock(hw);
2587
2588                         /* For recreating logical ports */
2589                         status_mbx = hw->mac.ops.update_lport_state(hw,
2590                                         hw->mac.dglort_map, MAX_LPORT_NUM, 1);
2591                         if (status_mbx == FM10K_SUCCESS)
2592                                 PMD_INIT_LOG(INFO,
2593                                         "INT: Recreated Logical port");
2594                         else
2595                                 PMD_INIT_LOG(INFO,
2596                                         "INT: Logical ports weren't recreated");
2597
2598                         status_mbx = hw->mac.ops.update_xcast_mode(hw,
2599                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2600                         if (status_mbx != FM10K_SUCCESS)
2601                                 PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2602
2603                         fm10k_mbx_unlock(hw);
2604
2605                         /* first clear the internal SW recording structure */
2606                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2607                                                 ETH_MQ_RX_VMDQ_FLAG))
2608                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2609                                         false);
2610
2611                         fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2612                                         MAIN_VSI_POOL_NUMBER);
2613
2614                         /*
2615                          * Add default mac address and vlan for the logical
2616                          * ports that have been created, leave to the
2617                          * application to fully recover Rx filtering.
2618                          */
2619                         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2620                                         MAIN_VSI_POOL_NUMBER);
2621
2622                         if (!(dev->data->dev_conf.rxmode.mq_mode &
2623                                                 ETH_MQ_RX_VMDQ_FLAG))
2624                                 fm10k_vlan_filter_set(dev, hw->mac.default_vid,
2625                                         true);
2626
2627                         dev_info->sm_down = 0;
2628                         rte_eth_dev_callback_process(dev,
2629                                         RTE_ETH_EVENT_INTR_LSC,
2630                                         NULL);
2631                 }
2632         }
2633
2634         /* Handle mailbox message */
2635         fm10k_mbx_lock(hw);
2636         err = hw->mbx.ops.process(hw, &hw->mbx);
2637         fm10k_mbx_unlock(hw);
2638
2639         if (err == FM10K_ERR_RESET_REQUESTED) {
2640                 PMD_INIT_LOG(INFO, "INT: Switch is down");
2641                 dev_info->sm_down = 1;
2642                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2643         }
2644
2645         /* Handle SRAM error */
2646         if (cause & FM10K_EICR_SRAMERROR) {
2647                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2648
2649                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2650                 /* Write to clear pending bits */
2651                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2652
2653                 /* Todo: print out error message after shared code  updates */
2654         }
2655
2656         /* Clear these 3 events if having any */
2657         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2658                  FM10K_EICR_SWITCHREADY;
2659         if (cause)
2660                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2661
2662         /* Re-enable interrupt from device side */
2663         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2664                                         FM10K_ITR_MASK_CLEAR);
2665         /* Re-enable interrupt from host side */
2666         rte_intr_ack(dev->intr_handle);
2667 }
2668
2669 /**
2670  * VF interrupt handler triggered by NIC for handling specific interrupt.
2671  *
2672  * @param handle
2673  *  Pointer to interrupt handle.
2674  * @param param
2675  *  The address of parameter (struct rte_eth_dev *) regsitered before.
2676  *
2677  * @return
2678  *  void
2679  */
2680 static void
2681 fm10k_dev_interrupt_handler_vf(void *param)
2682 {
2683         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2684         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2685         struct fm10k_mbx_info *mbx = &hw->mbx;
2686         struct fm10k_dev_info *dev_info =
2687                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2688         const enum fm10k_mbx_state state = mbx->state;
2689         int status_mbx;
2690
2691         if (hw->mac.type != fm10k_mac_vf)
2692                 return;
2693
2694         /* Handle mailbox message if lock is acquired */
2695         fm10k_mbx_lock(hw);
2696         hw->mbx.ops.process(hw, &hw->mbx);
2697         fm10k_mbx_unlock(hw);
2698
2699         if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
2700                 PMD_INIT_LOG(INFO, "INT: Switch has gone down");
2701
2702                 fm10k_mbx_lock(hw);
2703                 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2704                                 MAX_LPORT_NUM, 1);
2705                 fm10k_mbx_unlock(hw);
2706
2707                 /* Setting reset flag */
2708                 dev_info->sm_down = 1;
2709                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2710         }
2711
2712         if (dev_info->sm_down == 1 &&
2713                         hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
2714                 PMD_INIT_LOG(INFO, "INT: Switch has gone up");
2715                 fm10k_mbx_lock(hw);
2716                 status_mbx = hw->mac.ops.update_xcast_mode(hw,
2717                                 hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
2718                 if (status_mbx != FM10K_SUCCESS)
2719                         PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
2720                 fm10k_mbx_unlock(hw);
2721
2722                 /* first clear the internal SW recording structure */
2723                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
2724                 fm10k_MAC_filter_set(dev, hw->mac.addr, false,
2725                                 MAIN_VSI_POOL_NUMBER);
2726
2727                 /*
2728                  * Add default mac address and vlan for the logical ports that
2729                  * have been created, leave to the application to fully recover
2730                  * Rx filtering.
2731                  */
2732                 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2733                                 MAIN_VSI_POOL_NUMBER);
2734                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
2735
2736                 dev_info->sm_down = 0;
2737                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2738         }
2739
2740         /* Re-enable interrupt from device side */
2741         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2742                                         FM10K_ITR_MASK_CLEAR);
2743         /* Re-enable interrupt from host side */
2744         rte_intr_ack(dev->intr_handle);
2745 }
2746
2747 /* Mailbox message handler in VF */
2748 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2749         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2750         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2751         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2752         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2753 };
2754
2755 static int
2756 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2757 {
2758         int err = 0;
2759
2760         /* Initialize mailbox lock */
2761         fm10k_mbx_initlock(hw);
2762
2763         /* Replace default message handler with new ones */
2764         if (hw->mac.type == fm10k_mac_vf)
2765                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2766
2767         if (err) {
2768                 PMD_INIT_LOG(ERR, "Failed to register mailbox handler.err:%d",
2769                                 err);
2770                 return err;
2771         }
2772         /* Connect to SM for PF device or PF for VF device */
2773         return hw->mbx.ops.connect(hw, &hw->mbx);
2774 }
2775
2776 static void
2777 fm10k_close_mbx_service(struct fm10k_hw *hw)
2778 {
2779         /* Disconnect from SM for PF device or PF for VF device */
2780         hw->mbx.ops.disconnect(hw, &hw->mbx);
2781 }
2782
2783 static int
2784 fm10k_dev_close(struct rte_eth_dev *dev)
2785 {
2786         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2787         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
2788         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
2789
2790         PMD_INIT_FUNC_TRACE();
2791         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2792                 return 0;
2793
2794         fm10k_mbx_lock(hw);
2795         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
2796                 MAX_LPORT_NUM, false);
2797         fm10k_mbx_unlock(hw);
2798
2799         /* allow 100ms for device to quiesce */
2800         rte_delay_us(FM10K_SWITCH_QUIESCE_US);
2801
2802         /* Stop mailbox service first */
2803         fm10k_close_mbx_service(hw);
2804         fm10k_dev_stop(dev);
2805         fm10k_dev_queue_release(dev);
2806         fm10k_stop_hw(hw);
2807
2808         dev->dev_ops = NULL;
2809         dev->rx_pkt_burst = NULL;
2810         dev->tx_pkt_burst = NULL;
2811
2812         /* disable uio/vfio intr */
2813         rte_intr_disable(intr_handle);
2814
2815         /*PF/VF has different interrupt handling mechanism */
2816         if (hw->mac.type == fm10k_mac_pf) {
2817                 /* disable interrupt */
2818                 fm10k_dev_disable_intr_pf(dev);
2819
2820                 /* unregister callback func to eal lib */
2821                 rte_intr_callback_unregister(intr_handle,
2822                         fm10k_dev_interrupt_handler_pf, (void *)dev);
2823         } else {
2824                 /* disable interrupt */
2825                 fm10k_dev_disable_intr_vf(dev);
2826
2827                 rte_intr_callback_unregister(intr_handle,
2828                         fm10k_dev_interrupt_handler_vf, (void *)dev);
2829         }
2830
2831         return 0;
2832 }
2833
2834 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2835         .dev_configure          = fm10k_dev_configure,
2836         .dev_start              = fm10k_dev_start,
2837         .dev_stop               = fm10k_dev_stop,
2838         .dev_close              = fm10k_dev_close,
2839         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
2840         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
2841         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
2842         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
2843         .stats_get              = fm10k_stats_get,
2844         .xstats_get             = fm10k_xstats_get,
2845         .xstats_get_names       = fm10k_xstats_get_names,
2846         .stats_reset            = fm10k_stats_reset,
2847         .xstats_reset           = fm10k_stats_reset,
2848         .link_update            = fm10k_link_update,
2849         .dev_infos_get          = fm10k_dev_infos_get,
2850         .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
2851         .vlan_filter_set        = fm10k_vlan_filter_set,
2852         .vlan_offload_set       = fm10k_vlan_offload_set,
2853         .mac_addr_add           = fm10k_macaddr_add,
2854         .mac_addr_remove        = fm10k_macaddr_remove,
2855         .rx_queue_start         = fm10k_dev_rx_queue_start,
2856         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
2857         .tx_queue_start         = fm10k_dev_tx_queue_start,
2858         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
2859         .rx_queue_setup         = fm10k_rx_queue_setup,
2860         .rx_queue_release       = fm10k_rx_queue_release,
2861         .tx_queue_setup         = fm10k_tx_queue_setup,
2862         .tx_queue_release       = fm10k_tx_queue_release,
2863         .rx_queue_intr_enable   = fm10k_dev_rx_queue_intr_enable,
2864         .rx_queue_intr_disable  = fm10k_dev_rx_queue_intr_disable,
2865         .reta_update            = fm10k_reta_update,
2866         .reta_query             = fm10k_reta_query,
2867         .rss_hash_update        = fm10k_rss_hash_update,
2868         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
2869 };
2870
2871 static int ftag_check_handler(__rte_unused const char *key,
2872                 const char *value, __rte_unused void *opaque)
2873 {
2874         if (strcmp(value, "1"))
2875                 return -1;
2876
2877         return 0;
2878 }
2879
2880 static int
2881 fm10k_check_ftag(struct rte_devargs *devargs)
2882 {
2883         struct rte_kvargs *kvlist;
2884         const char *ftag_key = "enable_ftag";
2885
2886         if (devargs == NULL)
2887                 return 0;
2888
2889         kvlist = rte_kvargs_parse(devargs->args, NULL);
2890         if (kvlist == NULL)
2891                 return 0;
2892
2893         if (!rte_kvargs_count(kvlist, ftag_key)) {
2894                 rte_kvargs_free(kvlist);
2895                 return 0;
2896         }
2897         /* FTAG is enabled when there's key-value pair: enable_ftag=1 */
2898         if (rte_kvargs_process(kvlist, ftag_key,
2899                                 ftag_check_handler, NULL) < 0) {
2900                 rte_kvargs_free(kvlist);
2901                 return 0;
2902         }
2903         rte_kvargs_free(kvlist);
2904
2905         return 1;
2906 }
2907
2908 static uint16_t
2909 fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
2910                     uint16_t nb_pkts)
2911 {
2912         uint16_t nb_tx = 0;
2913         struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
2914
2915         while (nb_pkts) {
2916                 uint16_t ret, num;
2917
2918                 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
2919                 ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
2920                                                  num);
2921                 nb_tx += ret;
2922                 nb_pkts -= ret;
2923                 if (ret < num)
2924                         break;
2925         }
2926
2927         return nb_tx;
2928 }
2929
2930 static void __rte_cold
2931 fm10k_set_tx_function(struct rte_eth_dev *dev)
2932 {
2933         struct fm10k_tx_queue *txq;
2934         int i;
2935         int use_sse = 1;
2936         uint16_t tx_ftag_en = 0;
2937
2938         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2939                 /* primary process has set the ftag flag and offloads */
2940                 txq = dev->data->tx_queues[0];
2941                 if (fm10k_tx_vec_condition_check(txq)) {
2942                         dev->tx_pkt_burst = fm10k_xmit_pkts;
2943                         dev->tx_pkt_prepare = fm10k_prep_pkts;
2944                         PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2945                 } else {
2946                         PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2947                         dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2948                         dev->tx_pkt_prepare = NULL;
2949                 }
2950                 return;
2951         }
2952
2953         if (fm10k_check_ftag(dev->device->devargs))
2954                 tx_ftag_en = 1;
2955
2956         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2957                 txq = dev->data->tx_queues[i];
2958                 txq->tx_ftag_en = tx_ftag_en;
2959                 /* Check if Vector Tx is satisfied */
2960                 if (fm10k_tx_vec_condition_check(txq))
2961                         use_sse = 0;
2962         }
2963
2964         if (use_sse) {
2965                 PMD_INIT_LOG(DEBUG, "Use vector Tx func");
2966                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2967                         txq = dev->data->tx_queues[i];
2968                         fm10k_txq_vec_setup(txq);
2969                 }
2970                 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2971                 dev->tx_pkt_prepare = NULL;
2972         } else {
2973                 dev->tx_pkt_burst = fm10k_xmit_pkts;
2974                 dev->tx_pkt_prepare = fm10k_prep_pkts;
2975                 PMD_INIT_LOG(DEBUG, "Use regular Tx func");
2976         }
2977 }
2978
2979 static void __rte_cold
2980 fm10k_set_rx_function(struct rte_eth_dev *dev)
2981 {
2982         struct fm10k_dev_info *dev_info =
2983                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
2984         uint16_t i, rx_using_sse;
2985         uint16_t rx_ftag_en = 0;
2986
2987         if (fm10k_check_ftag(dev->device->devargs))
2988                 rx_ftag_en = 1;
2989
2990         /* In order to allow Vector Rx there are a few configuration
2991          * conditions to be met.
2992          */
2993         if (!fm10k_rx_vec_condition_check(dev) &&
2994                         dev_info->rx_vec_allowed && !rx_ftag_en) {
2995                 if (dev->data->scattered_rx)
2996                         dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2997                 else
2998                         dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2999         } else if (dev->data->scattered_rx)
3000                 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
3001         else
3002                 dev->rx_pkt_burst = fm10k_recv_pkts;
3003
3004         rx_using_sse =
3005                 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
3006                 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
3007
3008         if (rx_using_sse)
3009                 PMD_INIT_LOG(DEBUG, "Use vector Rx func");
3010         else
3011                 PMD_INIT_LOG(DEBUG, "Use regular Rx func");
3012
3013         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3014                 return;
3015
3016         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3017                 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
3018
3019                 rxq->rx_using_sse = rx_using_sse;
3020                 rxq->rx_ftag_en = rx_ftag_en;
3021         }
3022 }
3023
3024 static void
3025 fm10k_params_init(struct rte_eth_dev *dev)
3026 {
3027         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3028         struct fm10k_dev_info *info =
3029                 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
3030
3031         /* Inialize bus info. Normally we would call fm10k_get_bus_info(), but
3032          * there is no way to get link status without reading BAR4.  Until this
3033          * works, assume we have maximum bandwidth.
3034          * @todo - fix bus info
3035          */
3036         hw->bus_caps.speed = fm10k_bus_speed_8000;
3037         hw->bus_caps.width = fm10k_bus_width_pcie_x8;
3038         hw->bus_caps.payload = fm10k_bus_payload_512;
3039         hw->bus.speed = fm10k_bus_speed_8000;
3040         hw->bus.width = fm10k_bus_width_pcie_x8;
3041         hw->bus.payload = fm10k_bus_payload_256;
3042
3043         info->rx_vec_allowed = true;
3044         info->sm_down = false;
3045 }
3046
3047 static int
3048 eth_fm10k_dev_init(struct rte_eth_dev *dev)
3049 {
3050         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3051         struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
3052         struct rte_intr_handle *intr_handle = &pdev->intr_handle;
3053         int diag, i;
3054         struct fm10k_macvlan_filter_info *macvlan;
3055
3056         PMD_INIT_FUNC_TRACE();
3057
3058         dev->dev_ops = &fm10k_eth_dev_ops;
3059         dev->rx_queue_count = fm10k_dev_rx_queue_count;
3060         dev->rx_descriptor_done = fm10k_dev_rx_descriptor_done;
3061         dev->rx_descriptor_status = fm10k_dev_rx_descriptor_status;
3062         dev->tx_descriptor_status = fm10k_dev_tx_descriptor_status;
3063         dev->rx_pkt_burst = &fm10k_recv_pkts;
3064         dev->tx_pkt_burst = &fm10k_xmit_pkts;
3065         dev->tx_pkt_prepare = &fm10k_prep_pkts;
3066
3067         /*
3068          * Primary process does the whole initialization, for secondary
3069          * processes, we just select the same Rx and Tx function as primary.
3070          */
3071         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3072                 fm10k_set_rx_function(dev);
3073                 fm10k_set_tx_function(dev);
3074                 return 0;
3075         }
3076
3077         rte_eth_copy_pci_info(dev, pdev);
3078
3079         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
3080         memset(macvlan, 0, sizeof(*macvlan));
3081         /* Vendor and Device ID need to be set before init of shared code */
3082         memset(hw, 0, sizeof(*hw));
3083         hw->device_id = pdev->id.device_id;
3084         hw->vendor_id = pdev->id.vendor_id;
3085         hw->subsystem_device_id = pdev->id.subsystem_device_id;
3086         hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
3087         hw->revision_id = 0;
3088         hw->hw_addr = (void *)pdev->mem_resource[0].addr;
3089         if (hw->hw_addr == NULL) {
3090                 PMD_INIT_LOG(ERR, "Bad mem resource."
3091                         " Try to refuse unused devices.");
3092                 return -EIO;
3093         }
3094
3095         /* Store fm10k_adapter pointer */
3096         hw->back = dev->data->dev_private;
3097
3098         /* Initialize the shared code */
3099         diag = fm10k_init_shared_code(hw);
3100         if (diag != FM10K_SUCCESS) {
3101                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
3102                 return -EIO;
3103         }
3104
3105         /* Initialize parameters */
3106         fm10k_params_init(dev);
3107
3108         /* Initialize the hw */
3109         diag = fm10k_init_hw(hw);
3110         if (diag != FM10K_SUCCESS) {
3111                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
3112                 return -EIO;
3113         }
3114
3115         /* Initialize MAC address(es) */
3116         dev->data->mac_addrs = rte_zmalloc("fm10k",
3117                         RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
3118         if (dev->data->mac_addrs == NULL) {
3119                 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
3120                 return -ENOMEM;
3121         }
3122
3123         diag = fm10k_read_mac_addr(hw);
3124
3125         rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
3126                         &dev->data->mac_addrs[0]);
3127
3128         if (diag != FM10K_SUCCESS ||
3129                 !rte_is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
3130
3131                 /* Generate a random addr */
3132                 rte_eth_random_addr(hw->mac.addr);
3133                 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
3134                 rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
3135                 &dev->data->mac_addrs[0]);
3136         }
3137
3138         /* Reset the hw statistics */
3139         diag = fm10k_stats_reset(dev);
3140         if (diag != 0) {
3141                 PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag);
3142                 return diag;
3143         }
3144
3145         /* Reset the hw */
3146         diag = fm10k_reset_hw(hw);
3147         if (diag != FM10K_SUCCESS) {
3148                 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
3149                 return -EIO;
3150         }
3151
3152         /* Setup mailbox service */
3153         diag = fm10k_setup_mbx_service(hw);
3154         if (diag != FM10K_SUCCESS) {
3155                 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
3156                 return -EIO;
3157         }
3158
3159         /*PF/VF has different interrupt handling mechanism */
3160         if (hw->mac.type == fm10k_mac_pf) {
3161                 /* register callback func to eal lib */
3162                 rte_intr_callback_register(intr_handle,
3163                         fm10k_dev_interrupt_handler_pf, (void *)dev);
3164
3165                 /* enable MISC interrupt */
3166                 fm10k_dev_enable_intr_pf(dev);
3167         } else { /* VF */
3168                 rte_intr_callback_register(intr_handle,
3169                         fm10k_dev_interrupt_handler_vf, (void *)dev);
3170
3171                 fm10k_dev_enable_intr_vf(dev);
3172         }
3173
3174         /* Enable intr after callback registered */
3175         rte_intr_enable(intr_handle);
3176
3177         hw->mac.ops.update_int_moderator(hw);
3178
3179         /* Make sure Switch Manager is ready before going forward. */
3180         if (hw->mac.type == fm10k_mac_pf) {
3181                 bool switch_ready = false;
3182
3183                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3184                         fm10k_mbx_lock(hw);
3185                         hw->mac.ops.get_host_state(hw, &switch_ready);
3186                         fm10k_mbx_unlock(hw);
3187                         if (switch_ready == true)
3188                                 break;
3189                         /* Delay some time to acquire async LPORT_MAP info. */
3190                         rte_delay_us(WAIT_SWITCH_MSG_US);
3191                 }
3192
3193                 if (switch_ready == false) {
3194                         PMD_INIT_LOG(ERR, "switch is not ready");
3195                         return -1;
3196                 }
3197         }
3198
3199         /*
3200          * Below function will trigger operations on mailbox, acquire lock to
3201          * avoid race condition from interrupt handler. Operations on mailbox
3202          * FIFO will trigger interrupt to PF/SM, in which interrupt handler
3203          * will handle and generate an interrupt to our side. Then,  FIFO in
3204          * mailbox will be touched.
3205          */
3206         fm10k_mbx_lock(hw);
3207         /* Enable port first */
3208         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
3209                                         MAX_LPORT_NUM, 1);
3210
3211         /* Set unicast mode by default. App can change to other mode in other
3212          * API func.
3213          */
3214         hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
3215                                         FM10K_XCAST_MODE_NONE);
3216
3217         fm10k_mbx_unlock(hw);
3218
3219         /* Make sure default VID is ready before going forward. */
3220         if (hw->mac.type == fm10k_mac_pf) {
3221                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
3222                         if (hw->mac.default_vid)
3223                                 break;
3224                         /* Delay some time to acquire async port VLAN info. */
3225                         rte_delay_us(WAIT_SWITCH_MSG_US);
3226                 }
3227
3228                 if (!hw->mac.default_vid) {
3229                         PMD_INIT_LOG(ERR, "default VID is not ready");
3230                         return -1;
3231                 }
3232         }
3233
3234         /* Add default mac address */
3235         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
3236                 MAIN_VSI_POOL_NUMBER);
3237
3238         return 0;
3239 }
3240
3241 static int
3242 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
3243 {
3244         PMD_INIT_FUNC_TRACE();
3245         fm10k_dev_close(dev);
3246         return 0;
3247 }
3248
3249 static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3250         struct rte_pci_device *pci_dev)
3251 {
3252         return rte_eth_dev_pci_generic_probe(pci_dev,
3253                 sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
3254 }
3255
3256 static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
3257 {
3258         return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
3259 }
3260
3261 /*
3262  * The set of PCI devices this driver supports. This driver will enable both PF
3263  * and SRIOV-VF devices.
3264  */
3265 static const struct rte_pci_id pci_id_fm10k_map[] = {
3266         { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
3267         { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
3268         { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
3269         { .vendor_id = 0, /* sentinel */ },
3270 };
3271
3272 static struct rte_pci_driver rte_pmd_fm10k = {
3273         .id_table = pci_id_fm10k_map,
3274         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3275         .probe = eth_fm10k_pci_probe,
3276         .remove = eth_fm10k_pci_remove,
3277 };
3278
3279 RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
3280 RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
3281 RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
3282 RTE_LOG_REGISTER(fm10k_logtype_init, pmd.net.fm10k.init, NOTICE);
3283 RTE_LOG_REGISTER(fm10k_logtype_driver, pmd.net.fm10k.driver, NOTICE);
3284 #ifdef RTE_LIBRTE_FM10K_DEBUG_RX
3285 RTE_LOG_REGISTER(fm10k_logtype_rx, pmd.net.fm10k.rx, DEBUG);
3286 #endif
3287 #ifdef RTE_LIBRTE_FM10K_DEBUG_TX
3288 RTE_LOG_REGISTER(fm10k_logtype_tx, pmd.net.fm10k.tx, DEBUG);
3289 #endif
3290 #ifdef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
3291 RTE_LOG_REGISTER(fm10k_logtype_tx_free, pmd.net.fm10k.tx_free, DEBUG);
3292 #endif