drivers: copy PCI device info to ethdev data
[dpdk.git] drivers/net/fm10k/fm10k_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_ethdev.h>
35 #include <rte_malloc.h>
36 #include <rte_memzone.h>
37 #include <rte_string_fns.h>
38 #include <rte_dev.h>
39 #include <rte_spinlock.h>
40
41 #include "fm10k.h"
42 #include "base/fm10k_api.h"
43
44 /* Default delay to acquire mailbox lock */
45 #define FM10K_MBXLOCK_DELAY_US 20
46 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
47
48 #define MAIN_VSI_POOL_NUMBER 0
49
50 /* Max number of attempts to query switch status */
51 #define MAX_QUERY_SWITCH_STATE_TIMES 10
52 /* Wait interval to get switch status */
53 #define WAIT_SWITCH_MSG_US    100000
54 /* Number of chars per uint32 type */
55 #define CHARS_PER_UINT32 (sizeof(uint32_t))
56 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
57
58 #define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
59                                 ETH_TXQ_FLAGS_NOOFFLOADS)
60
61 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
62 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
63 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
64 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
65 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
66 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
67 static int
68 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
69 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
70         const u8 *mac, bool add, uint32_t pool);
71 static void fm10k_tx_queue_release(void *queue);
72 static void fm10k_rx_queue_release(void *queue);
73 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
74 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
75
76 struct fm10k_xstats_name_off {
77         char name[RTE_ETH_XSTATS_NAME_SIZE];
78         unsigned offset;
79 };
80
81 struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
82         {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
83         {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
84         {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
85         {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
86         {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
87         {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
88         {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
89         {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
90                 nodesc_drop)},
91 };
92
93 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
94                 sizeof(fm10k_hw_stats_strings[0]))
95
96 struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
97         {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
98         {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
99         {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
100 };
101
102 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
103                 sizeof(fm10k_hw_stats_rx_q_strings[0]))
104
105 struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
106         {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
107         {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
108 };
109
110 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
111                 sizeof(fm10k_hw_stats_tx_q_strings[0]))
112
113 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
114                 (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
115
116 static void
117 fm10k_mbx_initlock(struct fm10k_hw *hw)
118 {
119         rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
120 }
121
122 static void
123 fm10k_mbx_lock(struct fm10k_hw *hw)
124 {
125         while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
126                 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
127 }
128
129 static void
130 fm10k_mbx_unlock(struct fm10k_hw *hw)
131 {
132         rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
133 }
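
/*
 * Editor's note (illustrative, not part of the driver): every mailbox
 * operation in this file follows the same pattern around these helpers:
 *
 *     fm10k_mbx_lock(hw);
 *     status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, mode);
 *     fm10k_mbx_unlock(hw);
 *
 * The trylock-plus-delay loop above backs off for FM10K_MBXLOCK_DELAY_US
 * between attempts instead of spinning on the lock word at full speed.
 */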
134
135 /*
136  * reset queue to initial state, allocate software buffers used when starting
137  * device.
138  * return 0 on success
139  * return -ENOMEM if buffers cannot be allocated
140  * return -EINVAL if buffers do not satisfy alignment condition
141  */
142 static inline int
143 rx_queue_reset(struct fm10k_rx_queue *q)
144 {
145         static const union fm10k_rx_desc zero = {{0} };
146         uint64_t dma_addr;
147         int i, diag;
148         PMD_INIT_FUNC_TRACE();
149
150         diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
151         if (diag != 0)
152                 return -ENOMEM;
153
154         for (i = 0; i < q->nb_desc; ++i) {
155                 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
156                 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
157                         rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
158                                                 q->nb_desc);
159                         return -EINVAL;
160                 }
161                 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
162                 q->hw_ring[i].q.pkt_addr = dma_addr;
163                 q->hw_ring[i].q.hdr_addr = dma_addr;
164         }
165
166         /* initialize extra software ring entries. Space for these extra
167          * entries is always allocated.
168          */
169         memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
170         for (i = 0; i < q->nb_fake_desc; ++i) {
171                 q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
172                 q->hw_ring[q->nb_desc + i] = zero;
173         }
174
175         q->next_dd = 0;
176         q->next_alloc = 0;
177         q->next_trigger = q->alloc_thresh - 1;
178         FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
179         q->rxrearm_start = 0;
180         q->rxrearm_nb = 0;
181
182         return 0;
183 }
184
185 /*
186  * clean queue, descriptor rings, free software buffers used when stopping
187  * device.
188  */
189 static inline void
190 rx_queue_clean(struct fm10k_rx_queue *q)
191 {
192         union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
193         uint32_t i;
194         PMD_INIT_FUNC_TRACE();
195
196         /* zero descriptor rings */
197         for (i = 0; i < q->nb_desc; ++i)
198                 q->hw_ring[i] = zero;
199
200         /* zero faked descriptors */
201         for (i = 0; i < q->nb_fake_desc; ++i)
202                 q->hw_ring[q->nb_desc + i] = zero;
203
204         /* vPMD driver has a different way of releasing mbufs. */
205         if (q->rx_using_sse) {
206                 fm10k_rx_queue_release_mbufs_vec(q);
207                 return;
208         }
209
210         /* free software buffers */
211         for (i = 0; i < q->nb_desc; ++i) {
212                 if (q->sw_ring[i]) {
213                         rte_pktmbuf_free_seg(q->sw_ring[i]);
214                         q->sw_ring[i] = NULL;
215                 }
216         }
217 }
218
219 /*
220  * free all queue memory used when releasing the queue (e.g. on reconfigure)
221  */
222 static inline void
223 rx_queue_free(struct fm10k_rx_queue *q)
224 {
225         PMD_INIT_FUNC_TRACE();
226         if (q) {
227                 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
228                 rx_queue_clean(q);
229                 if (q->sw_ring) {
230                         rte_free(q->sw_ring);
231                         q->sw_ring = NULL;
232                 }
233                 rte_free(q);
234                 q = NULL;
235         }
236 }
237
238 /*
239  * disable RX queue, wait until HW finishes the necessary flush operation
240  */
241 static inline int
242 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
243 {
244         uint32_t reg, i;
245
246         reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
247         FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
248                         reg & ~FM10K_RXQCTL_ENABLE);
249
250         /* Wait 100us at most */
251         for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
252                 rte_delay_us(1);
253                 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
254                 if (!(reg & FM10K_RXQCTL_ENABLE))
255                         break;
256         }
257
258         if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
259                 return -1;
260
261         return 0;
262 }
263
264 /*
265  * reset queue to initial state, allocate software buffers used when starting
266  * device
267  */
268 static inline void
269 tx_queue_reset(struct fm10k_tx_queue *q)
270 {
271         PMD_INIT_FUNC_TRACE();
272         q->last_free = 0;
273         q->next_free = 0;
274         q->nb_used = 0;
275         q->nb_free = q->nb_desc - 1;
276         fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
277         FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
278 }
279
280 /*
281  * clean queue, descriptor rings, free software buffers used when stopping
282  * device
283  */
284 static inline void
285 tx_queue_clean(struct fm10k_tx_queue *q)
286 {
287         struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
288         uint32_t i;
289         PMD_INIT_FUNC_TRACE();
290
291         /* zero descriptor rings */
292         for (i = 0; i < q->nb_desc; ++i)
293                 q->hw_ring[i] = zero;
294
295         /* free software buffers */
296         for (i = 0; i < q->nb_desc; ++i) {
297                 if (q->sw_ring[i]) {
298                         rte_pktmbuf_free_seg(q->sw_ring[i]);
299                         q->sw_ring[i] = NULL;
300                 }
301         }
302 }
303
304 /*
305  * free all queue memory used when releasing the queue (e.g. on reconfigure)
306  */
307 static inline void
308 tx_queue_free(struct fm10k_tx_queue *q)
309 {
310         PMD_INIT_FUNC_TRACE();
311         if (q) {
312                 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
313                 tx_queue_clean(q);
314                 if (q->rs_tracker.list) {
315                         rte_free(q->rs_tracker.list);
316                         q->rs_tracker.list = NULL;
317                 }
318                 if (q->sw_ring) {
319                         rte_free(q->sw_ring);
320                         q->sw_ring = NULL;
321                 }
322                 rte_free(q);
323                 q = NULL;
324         }
325 }
326
327 /*
328  * disable TX queue, wait until HW finishes the necessary flush operation
329  */
330 static inline int
331 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
332 {
333         uint32_t reg, i;
334
335         reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
336         FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
337                         reg & ~FM10K_TXDCTL_ENABLE);
338
339         /* Wait 100us at most */
340         for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
341                 rte_delay_us(1);
342                 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
343                 if (!(reg & FM10K_TXDCTL_ENABLE))
344                         break;
345         }
346
347         if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
348                 return -1;
349
350         return 0;
351 }
352
353 static int
354 fm10k_check_mq_mode(struct rte_eth_dev *dev)
355 {
356         enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
357         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
358         struct rte_eth_vmdq_rx_conf *vmdq_conf;
359         uint16_t nb_rx_q = dev->data->nb_rx_queues;
360
361         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
362
363         if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
364                 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
365                 return -EINVAL;
366         }
367
368         if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
369                 return 0;
370
371         if (hw->mac.type == fm10k_mac_vf) {
372                 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
373                 return -EINVAL;
374         }
375
376         /* Check VMDQ queue pool number */
377         if (vmdq_conf->nb_queue_pools >
378                         sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
379                         vmdq_conf->nb_queue_pools > nb_rx_q) {
380                 PMD_INIT_LOG(ERR, "Too many queue pools: %d",
381                         vmdq_conf->nb_queue_pools);
382                 return -EINVAL;
383         }
384
385         return 0;
386 }
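
/*
 * Editor's sketch (hypothetical application code, not part of the driver):
 * a minimal VMDQ RX configuration that passes fm10k_check_mq_mode() on a
 * PF. The port id, VLAN id and queue counts are made up for illustration.
 */
static __rte_unused int
fm10k_example_vmdq_config(uint8_t port_id)
{
        struct rte_eth_conf conf = {
                .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_ONLY },
        };
        struct rte_eth_vmdq_rx_conf *vmdq = &conf.rx_adv_conf.vmdq_rx_conf;

        vmdq->nb_queue_pools = ETH_8_POOLS;     /* <= 64 and <= nb_rx_q */
        vmdq->nb_pool_maps = 1;
        vmdq->pool_map[0].vlan_id = 100;        /* steer VLAN 100 ... */
        vmdq->pool_map[0].pools = 1ULL << 0;    /* ... to pool 0 */

        /* 8 RX queues, so the nb_queue_pools <= nb_rx_q check holds */
        return rte_eth_dev_configure(port_id, 8, 8, &conf);
}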
387
388 static const struct fm10k_txq_ops def_txq_ops = {
389         .release_mbufs = tx_queue_free,
390         .reset = tx_queue_reset,
391 };
392
393 static int
394 fm10k_dev_configure(struct rte_eth_dev *dev)
395 {
396         int ret;
397
398         PMD_INIT_FUNC_TRACE();
399
400         if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
401                 PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
402         /* multiple queue mode checking */
403         ret = fm10k_check_mq_mode(dev);
404         if (ret != 0) {
405                 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
406                             ret);
407                 return ret;
408         }
409
410         return 0;
411 }
412
413 /* fls = find last set bit = 32 minus the number of leading zeros */
414 #ifndef fls
415 #define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
416 #endif
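
/*
 * Editor's note: e.g. fls(0) == 0, fls(1) == 1, fls(7) == 3 and
 * fls(0x80000000) == 32. It is used below to turn pool/queue counts into
 * the bit-field widths programmed into the DGLORT decoder.
 */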
417
418 static void
419 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
420 {
421         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
422         struct rte_eth_vmdq_rx_conf *vmdq_conf;
423         uint32_t i;
424
425         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
426
427         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
428                 if (!vmdq_conf->pool_map[i].pools)
429                         continue;
430                 fm10k_mbx_lock(hw);
431                 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
432                 fm10k_mbx_unlock(hw);
433         }
434 }
435
436 static void
437 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
438 {
439         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
440
441         /* Add default mac address */
442         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
443                 MAIN_VSI_POOL_NUMBER);
444 }
445
446 static void
447 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
448 {
449         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
450         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
451         uint32_t mrqc, *key, i, reta, j;
452         uint64_t hf;
453
454 #define RSS_KEY_SIZE 40
455         static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
456                 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
457                 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
458                 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
459                 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
460                 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
461         };
462
463         if (dev->data->nb_rx_queues == 1 ||
464             dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
465             dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
466                 return;
467
468         /* RSS key is rss_intel_key (default) or user-provided (rss_key) */
469         if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
470                 key = (uint32_t *)rss_intel_key;
471         else
472                 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
473
474         /* Now fill our hash function seeds, 4 bytes at a time */
475         for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
476                 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
477
478         /*
479          * Fill in redirection table
480          * The byte-swap is needed because NIC registers are in
481          * little-endian order.
482          */
483         reta = 0;
484         for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
485                 if (j == dev->data->nb_rx_queues)
486                         j = 0;
487                 reta = (reta << CHAR_BIT) | j;
488                 if ((i & 3) == 3)
489                         FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
490                                         rte_bswap32(reta));
491         }
492
493         /*
494          * Generate RSS hash based on packet types, TCP/UDP
495          * port numbers and/or IPv4/v6 src and dst addresses
496          */
497         hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
498         mrqc = 0;
499         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
500         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
501         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
502         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
503         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
504         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
505         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
506         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
507         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
508
509         if (mrqc == 0) {
510                 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
511                         "supported", hf);
512                 return;
513         }
514
515         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
516 }
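
/*
 * Editor's sketch (hypothetical application code, not part of the driver):
 * the RSS path above is taken only with more than one RX queue, mq_mode
 * ETH_MQ_RX_RSS and a non-zero rss_hf; rss_key may stay NULL to use the
 * built-in rss_intel_key.
 */
static __rte_unused int
fm10k_example_rss_config(uint8_t port_id)
{
        struct rte_eth_conf conf = {
                .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
                .rx_adv_conf = {
                        .rss_conf = {
                                .rss_key = NULL, /* use the default 40B key */
                                .rss_hf = ETH_RSS_IPV4 |
                                          ETH_RSS_NONFRAG_IPV4_TCP,
                        },
                },
        };

        /* with 4 RX queues, the RETA loop spreads indices 0..3 round-robin */
        return rte_eth_dev_configure(port_id, 4, 1, &conf);
}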
517
518 static void
519 fm10k_dev_logic_port_update(struct rte_eth_dev *dev,
520         uint16_t nb_lport_old, uint16_t nb_lport_new)
521 {
522         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
523         uint32_t i;
524
525         fm10k_mbx_lock(hw);
526         /* Disable previous logic ports */
527         if (nb_lport_old)
528                 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
529                         nb_lport_old, false);
530         /* Enable new logic ports */
531         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
532                 nb_lport_new, true);
533         fm10k_mbx_unlock(hw);
534
535         for (i = 0; i < nb_lport_new; i++) {
536                 /* Set unicast mode by default. The application can
537                  * switch to another mode via other API calls.
538                  */
539                 fm10k_mbx_lock(hw);
540                 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
541                         FM10K_XCAST_MODE_NONE);
542                 fm10k_mbx_unlock(hw);
543         }
544 }
545
546 static void
547 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
548 {
549         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
550         struct rte_eth_vmdq_rx_conf *vmdq_conf;
551         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
552         struct fm10k_macvlan_filter_info *macvlan;
553         uint16_t nb_queue_pools = 0; /* pool number in configuration */
554         uint16_t nb_lport_new, nb_lport_old;
555
556         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
557         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
558
559         fm10k_dev_rss_configure(dev);
560
561         /* only PF supports VMDQ */
562         if (hw->mac.type != fm10k_mac_pf)
563                 return;
564
565         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
566                 nb_queue_pools = vmdq_conf->nb_queue_pools;
567
568         /* no pool number change, no need to update logic port and VLAN/MAC */
569         if (macvlan->nb_queue_pools == nb_queue_pools)
570                 return;
571
572         nb_lport_old = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
573         nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
574         fm10k_dev_logic_port_update(dev, nb_lport_old, nb_lport_new);
575
576         /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
577         memset(dev->data->mac_addrs, 0,
578                 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
579         ether_addr_copy((const struct ether_addr *)hw->mac.addr,
580                 &dev->data->mac_addrs[0]);
581         memset(macvlan, 0, sizeof(*macvlan));
582         macvlan->nb_queue_pools = nb_queue_pools;
583
584         if (nb_queue_pools)
585                 fm10k_dev_vmdq_rx_configure(dev);
586         else
587                 fm10k_dev_pf_main_vsi_reset(dev);
588 }
589
590 static int
591 fm10k_dev_tx_init(struct rte_eth_dev *dev)
592 {
593         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
594         int i, ret;
595         struct fm10k_tx_queue *txq;
596         uint64_t base_addr;
597         uint32_t size;
598
599         /* Disable TXINT to avoid possible interrupt */
600         for (i = 0; i < hw->mac.max_queues; i++)
601                 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
602                                 3 << FM10K_TXINT_TIMER_SHIFT);
603
604         /* Setup TX queue */
605         for (i = 0; i < dev->data->nb_tx_queues; ++i) {
606                 txq = dev->data->tx_queues[i];
607                 base_addr = txq->hw_ring_phys_addr;
608                 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
609
610                 /* disable queue to avoid issues while updating state */
611                 ret = tx_queue_disable(hw, i);
612                 if (ret) {
613                         PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
614                         return -1;
615                 }
616
617                 /* set location and size for descriptor ring */
618                 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
619                                 base_addr & UINT64_LOWER_32BITS_MASK);
620                 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
621                                 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
622                 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
623         }
624
625         /* set up vector or scalar TX function as appropriate */
626         fm10k_set_tx_function(dev);
627
628         return 0;
629 }
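
/*
 * Editor's note, worked example for the TDBAL/TDBAH split above: with
 * base_addr = 0x123456000, TDBAL is written with
 * base_addr & 0xffffffff = 0x23456000 and TDBAH with base_addr >> 32 = 0x1;
 * CHAR_BIT * sizeof(uint32_t) is simply a spelled-out 32.
 */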
630
631 static int
632 fm10k_dev_rx_init(struct rte_eth_dev *dev)
633 {
634         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
635         int i, ret;
636         struct fm10k_rx_queue *rxq;
637         uint64_t base_addr;
638         uint32_t size;
639         uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
640         uint16_t buf_size;
641
642         /* Disable RXINT to avoid possible interrupt */
643         for (i = 0; i < hw->mac.max_queues; i++)
644                 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
645                                 3 << FM10K_RXINT_TIMER_SHIFT);
646
647         /* Setup RX queues */
648         for (i = 0; i < dev->data->nb_rx_queues; ++i) {
649                 rxq = dev->data->rx_queues[i];
650                 base_addr = rxq->hw_ring_phys_addr;
651                 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
652
653                 /* disable queue to avoid issues while updating state */
654                 ret = rx_queue_disable(hw, i);
655                 if (ret) {
656                         PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
657                         return -1;
658                 }
659
660                 /* Setup the Base and Length of the Rx Descriptor Ring */
661                 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
662                                 base_addr & UINT64_LOWER_32BITS_MASK);
663                 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
664                                 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
665                 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
666
667                 /* Configure the Rx buffer size for one buffer without split */
668                 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
669                         RTE_PKTMBUF_HEADROOM);
670                 /* As RX buffer is aligned to 512B within mbuf, some bytes are
671                  * reserved for this purpose, and the worst case could be 511B.
672                  * But SRR reg assumes all buffers have the same size. In order
673                  * to fill the gap, we'll have to consider the worst case and
674                  * assume 512B is reserved. Otherwise the HW could write
675                  * past the buffer into the next mbuf.
676                  */
677                 buf_size -= FM10K_RX_DATABUF_ALIGN;
678
679                 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
680                                 buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
681
682                 /* Add dual VLAN tag length to support dual VLAN (QinQ) */
683                 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
684                                 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
685                         dev->data->dev_conf.rxmode.enable_scatter) {
686                         uint32_t reg;
687                         dev->data->scattered_rx = 1;
688                         reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
689                         reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
690                         FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
691                 }
692
693                 /* Enable drop on empty, it's RO for VF */
694                 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
695                         rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
696
697                 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
698                 FM10K_WRITE_FLUSH(hw);
699         }
700
701         /* Configure VMDQ/RSS if applicable */
702         fm10k_dev_mq_rx_configure(dev);
703
704         /* Decide the best RX function */
705         fm10k_set_rx_function(dev);
706
707         return 0;
708 }
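
/*
 * Editor's note, worked example for the SRRCTL sizing above (typical but
 * hypothetical numbers): a mempool data room of 2048B minus
 * RTE_PKTMBUF_HEADROOM (128B) gives buf_size = 1920B; subtracting the 512B
 * (FM10K_RX_DATABUF_ALIGN) worst-case alignment reserve leaves 1408B, which
 * is programmed in FM10K_SRRCTL_BSIZEPKT_SHIFT units. A max_rx_pkt_len
 * larger than that (plus 8B for two VLAN tags) enables scattered RX.
 */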
709
710 static int
711 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
712 {
713         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
714         int err = -1;
715         uint32_t reg;
716         struct fm10k_rx_queue *rxq;
717
718         PMD_INIT_FUNC_TRACE();
719
720         if (rx_queue_id < dev->data->nb_rx_queues) {
721                 rxq = dev->data->rx_queues[rx_queue_id];
722                 err = rx_queue_reset(rxq);
723                 if (err == -ENOMEM) {
724                         PMD_INIT_LOG(ERR, "Failed to alloc memory: %d", err);
725                         return err;
726                 } else if (err == -EINVAL) {
727                         PMD_INIT_LOG(ERR, "Invalid buffer address alignment:"
728                                 " %d", err);
729                         return err;
730                 }
731
732                 /* Setup the HW Rx Head and Tail Descriptor Pointers
733                  * Note: this must be done AFTER the queue is enabled on real
734                  * hardware, but BEFORE the queue is enabled when using the
735                  * emulation platform. Do it in both places for now and remove
736                  * this comment and the following two register writes when the
737                  * emulation platform is no longer being used.
738                  */
739                 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
740                 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
741
742                 /* Set PF ownership flag for PF devices */
743                 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
744                 if (hw->mac.type == fm10k_mac_pf)
745                         reg |= FM10K_RXQCTL_PF;
746                 reg |= FM10K_RXQCTL_ENABLE;
747                 /* enable RX queue */
748                 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
749                 FM10K_WRITE_FLUSH(hw);
750
751                 /* Setup the HW Rx Head and Tail Descriptor Pointers
752                  * Note: this must be done AFTER the queue is enabled
753                  */
754                 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
755                 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
756         }
757
758         return err;
759 }
760
761 static int
762 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
763 {
764         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
765
766         PMD_INIT_FUNC_TRACE();
767
768         if (rx_queue_id < dev->data->nb_rx_queues) {
769                 /* Disable RX queue */
770                 rx_queue_disable(hw, rx_queue_id);
771
772                 /* Free mbuf and clean HW ring */
773                 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
774         }
775
776         return 0;
777 }
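
/*
 * Editor's sketch (hypothetical application code): queues configured with
 * rx_deferred_start in their rte_eth_rxconf are skipped by dev_start and
 * reach fm10k_dev_rx_queue_start() above through this generic call.
 */
static __rte_unused int
fm10k_example_deferred_rx_start(uint8_t port_id, uint16_t queue_id)
{
        return rte_eth_dev_rx_queue_start(port_id, queue_id);
}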
778
779 static int
780 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
781 {
782         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
783         /** @todo - this should be defined in the shared code */
784 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
785         uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
786         int err = 0;
787
788         PMD_INIT_FUNC_TRACE();
789
790         if (tx_queue_id < dev->data->nb_tx_queues) {
791                 struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
792
793                 q->ops->reset(q);
794
795                 /* reset head and tail pointers */
796                 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
797                 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
798
799                 /* enable TX queue */
800                 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
801                                         FM10K_TXDCTL_ENABLE | txdctl);
802                 FM10K_WRITE_FLUSH(hw);
803         } else
804                 err = -1;
805
806         return err;
807 }
808
809 static int
810 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
811 {
812         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
813
814         PMD_INIT_FUNC_TRACE();
815
816         if (tx_queue_id < dev->data->nb_tx_queues) {
817                 tx_queue_disable(hw, tx_queue_id);
818                 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
819         }
820
821         return 0;
822 }
823
824 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
825 {
826         return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
827                 != FM10K_DGLORTMAP_NONE);
828 }
829
830 static void
831 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
832 {
833         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
834         int status;
835
836         PMD_INIT_FUNC_TRACE();
837
838         /* Return if a valid glort range has not been acquired */
839         if (!fm10k_glort_valid(hw))
840                 return;
841
842         fm10k_mbx_lock(hw);
843         status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
844                                 FM10K_XCAST_MODE_PROMISC);
845         fm10k_mbx_unlock(hw);
846
847         if (status != FM10K_SUCCESS)
848                 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
849 }
850
851 static void
852 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
853 {
854         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
855         uint8_t mode;
856         int status;
857
858         PMD_INIT_FUNC_TRACE();
859
860         /* Return if a valid glort range has not been acquired */
861         if (!fm10k_glort_valid(hw))
862                 return;
863
864         if (dev->data->all_multicast == 1)
865                 mode = FM10K_XCAST_MODE_ALLMULTI;
866         else
867                 mode = FM10K_XCAST_MODE_NONE;
868
869         fm10k_mbx_lock(hw);
870         status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
871                                 mode);
872         fm10k_mbx_unlock(hw);
873
874         if (status != FM10K_SUCCESS)
875                 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
876 }
877
878 static void
879 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
880 {
881         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
882         int status;
883
884         PMD_INIT_FUNC_TRACE();
885
886         /* Return if a valid glort range has not been acquired */
887         if (!fm10k_glort_valid(hw))
888                 return;
889
890         /* fm10k can only select one xcast mode at a time; enabling
891          * allmulticast here would implicitly disable promiscuous,
892          * which is not the intent.
893          */
894         if (dev->data->promiscuous) {
895                 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
896                         "no need to enable allmulticast");
897                 return;
898         }
899
900         fm10k_mbx_lock(hw);
901         status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
902                                 FM10K_XCAST_MODE_ALLMULTI);
903         fm10k_mbx_unlock(hw);
904
905         if (status != FM10K_SUCCESS)
906                 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
907 }
908
909 static void
910 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
911 {
912         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
913         int status;
914
915         PMD_INIT_FUNC_TRACE();
916
917         /* Return if a valid glort range has not been acquired */
918         if (!fm10k_glort_valid(hw))
919                 return;
920
921         if (dev->data->promiscuous) {
922                 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
923                         "since promisc mode is enabled");
924                 return;
925         }
926
927         fm10k_mbx_lock(hw);
928         /* Change mode to unicast mode */
929         status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
930                                 FM10K_XCAST_MODE_NONE);
931         fm10k_mbx_unlock(hw);
932
933         if (status != FM10K_SUCCESS)
934                 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
935 }
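
/*
 * Editor's sketch (hypothetical application code): the xcast handlers above
 * are reached through the generic ethdev API. fm10k keeps a single xcast
 * mode per logical port, which is why they cross-check
 * dev->data->promiscuous and dev->data->all_multicast.
 */
static __rte_unused void
fm10k_example_xcast_usage(uint8_t port_id)
{
        rte_eth_promiscuous_enable(port_id);  /* -> FM10K_XCAST_MODE_PROMISC */
        rte_eth_allmulticast_enable(port_id); /* logged as unnecessary here */
        rte_eth_promiscuous_disable(port_id); /* -> ALLMULTI, since the
                                               * all_multicast flag is set */
}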
936
937 static void
938 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
939 {
940         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
941         uint32_t dglortdec, pool_len, rss_len, i;
942         uint16_t nb_queue_pools;
943         struct fm10k_macvlan_filter_info *macvlan;
944
945         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
946         nb_queue_pools = macvlan->nb_queue_pools;
947         pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
948         rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
949         dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
950
951         /* Establish only MAP 0 as valid */
952         FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);
953
954         /* Configure VMDQ/RSS DGlort Decoder */
955         FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
956
957         /* Invalidate all other GLORT entries */
958         for (i = 1; i < FM10K_DGLORT_COUNT; i++)
959                 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
960                                 FM10K_DGLORTMAP_NONE);
961 }
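
/*
 * Editor's note, worked example for the decoder setup above: with
 * nb_queue_pools = 8 and nb_rx_queues = 32, pool_len = fls(7) = 3 and
 * rss_len = fls(31) - pool_len = 5 - 3 = 2, i.e. the low 3 DGLORT bits
 * select the pool and the next 2 bits select the RSS queue within it.
 */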
962
963 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
964 static int
965 fm10k_dev_start(struct rte_eth_dev *dev)
966 {
967         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
968         int i, diag;
969
970         PMD_INIT_FUNC_TRACE();
971
972         /* stop, init, then start the hw */
973         diag = fm10k_stop_hw(hw);
974         if (diag != FM10K_SUCCESS) {
975                 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
976                 return -EIO;
977         }
978
979         diag = fm10k_init_hw(hw);
980         if (diag != FM10K_SUCCESS) {
981                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
982                 return -EIO;
983         }
984
985         diag = fm10k_start_hw(hw);
986         if (diag != FM10K_SUCCESS) {
987                 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
988                 return -EIO;
989         }
990
991         diag = fm10k_dev_tx_init(dev);
992         if (diag) {
993                 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
994                 return diag;
995         }
996
997         diag = fm10k_dev_rx_init(dev);
998         if (diag) {
999                 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1000                 return diag;
1001         }
1002
1003         if (hw->mac.type == fm10k_mac_pf)
1004                 fm10k_dev_dglort_map_configure(dev);
1005
1006         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1007                 struct fm10k_rx_queue *rxq;
1008                 rxq = dev->data->rx_queues[i];
1009
1010                 if (rxq->rx_deferred_start)
1011                         continue;
1012                 diag = fm10k_dev_rx_queue_start(dev, i);
1013                 if (diag != 0) {
1014                         int j;
1015                         for (j = 0; j < i; ++j)
1016                                 rx_queue_clean(dev->data->rx_queues[j]);
1017                         return diag;
1018                 }
1019         }
1020
1021         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1022                 struct fm10k_tx_queue *txq;
1023                 txq = dev->data->tx_queues[i];
1024
1025                 if (txq->tx_deferred_start)
1026                         continue;
1027                 diag = fm10k_dev_tx_queue_start(dev, i);
1028                 if (diag != 0) {
1029                         int j;
1030                         for (j = 0; j < i; ++j)
1031                                 tx_queue_clean(dev->data->tx_queues[j]);
1032                         for (j = 0; j < dev->data->nb_rx_queues; ++j)
1033                                 rx_queue_clean(dev->data->rx_queues[j]);
1034                         return diag;
1035                 }
1036         }
1037
1038         /* Update default vlan when not in VMDQ mode */
1039         if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1040                 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1041
1042         return 0;
1043 }
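
/*
 * Editor's sketch (hypothetical application code, not part of the driver):
 * the usual bring-up sequence that ends in fm10k_dev_start() above. The
 * mbuf pool, socket id 0 and single-queue setup are illustrative only.
 */
static __rte_unused int
fm10k_example_port_start(uint8_t port_id, struct rte_mempool *mbuf_pool)
{
        struct rte_eth_conf conf = {
                .rxmode = { .mq_mode = ETH_MQ_RX_NONE },
        };
        int ret;

        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret < 0)
                return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, FM10K_MIN_RX_DESC,
                        0 /* socket id */, NULL, mbuf_pool);
        if (ret < 0)
                return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, FM10K_MIN_TX_DESC,
                        0 /* socket id */, NULL);
        if (ret < 0)
                return ret;
        return rte_eth_dev_start(port_id); /* runs fm10k_dev_start() */
}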
1044
1045 static void
1046 fm10k_dev_stop(struct rte_eth_dev *dev)
1047 {
1048         int i;
1049
1050         PMD_INIT_FUNC_TRACE();
1051
1052         if (dev->data->tx_queues)
1053                 for (i = 0; i < dev->data->nb_tx_queues; i++)
1054                         fm10k_dev_tx_queue_stop(dev, i);
1055
1056         if (dev->data->rx_queues)
1057                 for (i = 0; i < dev->data->nb_rx_queues; i++)
1058                         fm10k_dev_rx_queue_stop(dev, i);
1059 }
1060
1061 static void
1062 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1063 {
1064         int i;
1065
1066         PMD_INIT_FUNC_TRACE();
1067
1068         if (dev->data->tx_queues) {
1069                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1070                         struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1071
1072                         txq->ops->release_mbufs(txq);
1073                 }
1074         }
1075
1076         if (dev->data->rx_queues) {
1077                 for (i = 0; i < dev->data->nb_rx_queues; i++)
1078                         fm10k_rx_queue_release(dev->data->rx_queues[i]);
1079         }
1080 }
1081
1082 static void
1083 fm10k_dev_close(struct rte_eth_dev *dev)
1084 {
1085         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1086         uint16_t nb_lport;
1087         struct fm10k_macvlan_filter_info *macvlan;
1088
1089         PMD_INIT_FUNC_TRACE();
1090
1091         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1092         nb_lport = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
1093         fm10k_mbx_lock(hw);
1094         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1095                 nb_lport, false);
1096         fm10k_mbx_unlock(hw);
1097
1098         /* Stop mailbox service first */
1099         fm10k_close_mbx_service(hw);
1100         fm10k_dev_stop(dev);
1101         fm10k_dev_queue_release(dev);
1102         fm10k_stop_hw(hw);
1103 }
1104
1105 static int
1106 fm10k_link_update(struct rte_eth_dev *dev,
1107         __rte_unused int wait_to_complete)
1108 {
1109         PMD_INIT_FUNC_TRACE();
1110
1111         /* The host-interface link is always up.  The speed is ~50Gbps per Gen3
1112          * x8 PCIe interface. For now, we leave the speed undefined since there
1113          * is no 50Gbps Ethernet. */
1114         dev->data->dev_link.link_speed  = 0;
1115         dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1116         dev->data->dev_link.link_status = 1;
1117
1118         return 0;
1119 }
1120
1121 static int
1122 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
1123                  unsigned n)
1124 {
1125         struct fm10k_hw_stats *hw_stats =
1126                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1127         unsigned i, q, count = 0;
1128
1129         if (n < FM10K_NB_XSTATS)
1130                 return FM10K_NB_XSTATS;
1131
1132         /* Global stats */
1133         for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1134                 snprintf(xstats[count].name, sizeof(xstats[count].name),
1135                          "%s", fm10k_hw_stats_strings[count].name);
1136                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1137                         fm10k_hw_stats_strings[count].offset);
1138                 count++;
1139         }
1140
1141         /* PF queue stats */
1142         for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1143                 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1144                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1145                                  "rx_q%u_%s", q,
1146                                  fm10k_hw_stats_rx_q_strings[i].name);
1147                         xstats[count].value =
1148                                 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1149                                 fm10k_hw_stats_rx_q_strings[i].offset);
1150                         count++;
1151                 }
1152                 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1153                         snprintf(xstats[count].name, sizeof(xstats[count].name),
1154                                  "tx_q%u_%s", q,
1155                                  fm10k_hw_stats_tx_q_strings[i].name);
1156                         xstats[count].value =
1157                                 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1158                                 fm10k_hw_stats_tx_q_strings[i].offset);
1159                         count++;
1160                 }
1161         }
1162
1163         return FM10K_NB_XSTATS;
1164 }
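
/*
 * Editor's sketch (hypothetical application code): the n < FM10K_NB_XSTATS
 * early return above implies the usual two-call pattern; with n == 0 the
 * driver op only reports the required array size.
 */
static __rte_unused void
fm10k_example_read_xstats(uint8_t port_id)
{
        int n = rte_eth_xstats_get(port_id, NULL, 0);
        struct rte_eth_xstats *xs;

        if (n <= 0)
                return;
        xs = rte_malloc(NULL, n * sizeof(*xs), 0);
        if (xs == NULL)
                return;
        n = rte_eth_xstats_get(port_id, xs, n);
        /* ... consume xs[0..n-1].name / xs[0..n-1].value ... */
        rte_free(xs);
}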
1165
1166 static void
1167 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1168 {
1169         uint64_t ipackets, opackets, ibytes, obytes;
1170         struct fm10k_hw *hw =
1171                 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1172         struct fm10k_hw_stats *hw_stats =
1173                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1174         int i;
1175
1176         PMD_INIT_FUNC_TRACE();
1177
1178         fm10k_update_hw_stats(hw, hw_stats);
1179
1180         ipackets = opackets = ibytes = obytes = 0;
1181         for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1182                 (i < hw->mac.max_queues); ++i) {
1183                 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1184                 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1185                 stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
1186                 stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
1187                 ipackets += stats->q_ipackets[i];
1188                 opackets += stats->q_opackets[i];
1189                 ibytes   += stats->q_ibytes[i];
1190                 obytes   += stats->q_obytes[i];
1191         }
1192         stats->ipackets = ipackets;
1193         stats->opackets = opackets;
1194         stats->ibytes = ibytes;
1195         stats->obytes = obytes;
1196 }
1197
1198 static void
1199 fm10k_stats_reset(struct rte_eth_dev *dev)
1200 {
1201         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1202         struct fm10k_hw_stats *hw_stats =
1203                 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1204
1205         PMD_INIT_FUNC_TRACE();
1206
1207         memset(hw_stats, 0, sizeof(*hw_stats));
1208         fm10k_rebind_hw_stats(hw, hw_stats);
1209 }
1210
1211 static void
1212 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1213         struct rte_eth_dev_info *dev_info)
1214 {
1215         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1216
1217         PMD_INIT_FUNC_TRACE();
1218
1219         dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1220         dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1221         dev_info->max_rx_queues      = hw->mac.max_queues;
1222         dev_info->max_tx_queues      = hw->mac.max_queues;
1223         dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1224         dev_info->max_hash_mac_addrs = 0;
1225         dev_info->max_vfs            = dev->pci_dev->max_vfs;
1226         dev_info->vmdq_pool_base     = 0;
1227         dev_info->vmdq_queue_base    = 0;
1228         dev_info->max_vmdq_pools     = ETH_32_POOLS;
1229         dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1230         dev_info->rx_offload_capa =
1231                 DEV_RX_OFFLOAD_VLAN_STRIP |
1232                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1233                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1234                 DEV_RX_OFFLOAD_TCP_CKSUM;
1235         dev_info->tx_offload_capa =
1236                 DEV_TX_OFFLOAD_VLAN_INSERT |
1237                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1238                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1239                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1240                 DEV_TX_OFFLOAD_TCP_TSO;
1241
1242         dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1243         dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1244
1245         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1246                 .rx_thresh = {
1247                         .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1248                         .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1249                         .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1250                 },
1251                 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1252                 .rx_drop_en = 0,
1253         };
1254
1255         dev_info->default_txconf = (struct rte_eth_txconf) {
1256                 .tx_thresh = {
1257                         .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1258                         .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1259                         .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1260                 },
1261                 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1262                 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1263                 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1264         };
1265
1266         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1267                 .nb_max = FM10K_MAX_RX_DESC,
1268                 .nb_min = FM10K_MIN_RX_DESC,
1269                 .nb_align = FM10K_MULT_RX_DESC,
1270         };
1271
1272         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1273                 .nb_max = FM10K_MAX_TX_DESC,
1274                 .nb_min = FM10K_MIN_TX_DESC,
1275                 .nb_align = FM10K_MULT_TX_DESC,
1276         };
1277 }
1278
1279 static int
1280 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1281 {
1282         s32 result;
1283         uint16_t mac_num = 0;
1284         uint32_t vid_idx, vid_bit, mac_index;
1285         struct fm10k_hw *hw;
1286         struct fm10k_macvlan_filter_info *macvlan;
1287         struct rte_eth_dev_data *data = dev->data;
1288
1289         hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1290         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1291
1292         if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1293                 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1294                 return (-EINVAL);
1295         }
1296
1297         if (vlan_id > ETH_VLAN_ID_MAX) {
1298                 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1299                 return (-EINVAL);
1300         }
1301
1302         vid_idx = FM10K_VFTA_IDX(vlan_id);
1303         vid_bit = FM10K_VFTA_BIT(vlan_id);
1304         /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1305         if (on && (macvlan->vfta[vid_idx] & vid_bit))
1306                 return 0;
1307         /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1308         if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1309                 PMD_INIT_LOG(ERR, "Invalid vlan_id: not present "
1310                         "in the VLAN filter table");
1311                 return (-EINVAL);
1312         }
1313
1314         fm10k_mbx_lock(hw);
1315         result = fm10k_update_vlan(hw, vlan_id, 0, on);
1316         fm10k_mbx_unlock(hw);
1317         if (result != FM10K_SUCCESS) {
1318                 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1319                 return (-EIO);
1320         }
1321
1322         for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1323                         (result == FM10K_SUCCESS); mac_index++) {
1324                 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1325                         continue;
1326                 if (mac_num > macvlan->mac_num - 1) {
1327                         PMD_INIT_LOG(ERR, "MAC address count "
1328                                         "does not match");
1329                         break;
1330                 }
1331                 fm10k_mbx_lock(hw);
1332                 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1333                         data->mac_addrs[mac_index].addr_bytes,
1334                         vlan_id, on, 0);
1335                 fm10k_mbx_unlock(hw);
1336                 mac_num++;
1337         }
1338         if (result != FM10K_SUCCESS) {
1339                 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1340                 return (-EIO);
1341         }
1342
1343         if (on) {
1344                 macvlan->vlan_num++;
1345                 macvlan->vfta[vid_idx] |= vid_bit;
1346         } else {
1347                 macvlan->vlan_num--;
1348                 macvlan->vfta[vid_idx] &= ~vid_bit;
1349         }
1350         return 0;
1351 }
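
/*
 * Editor's sketch (hypothetical application code): VLAN filters reach the
 * function above through the generic ethdev API; in VMDQ mode the call
 * fails with -EINVAL as shown above.
 */
static __rte_unused int
fm10k_example_vlan_add(uint8_t port_id)
{
        return rte_eth_dev_vlan_filter(port_id, 100, 1); /* add VLAN 100 */
}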
1352
1353 static void
1354 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1355 {
1356         if (mask & ETH_VLAN_STRIP_MASK) {
1357                 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1358                         PMD_INIT_LOG(ERR, "VLAN stripping is "
1359                                         "always on in fm10k");
1360         }
1361
1362         if (mask & ETH_VLAN_EXTEND_MASK) {
1363                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1364                         PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1365                                         "supported in fm10k");
1366         }
1367
1368         if (mask & ETH_VLAN_FILTER_MASK) {
1369                 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1370                         PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1371         }
1372 }
1373
1374 /* Add/Remove a MAC address, and update filters to main VSI */
1375 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1376                 const u8 *mac, bool add, uint32_t pool)
1377 {
1378         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1379         struct fm10k_macvlan_filter_info *macvlan;
1380         uint32_t i, j, k;
1381
1382         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1383
1384         if (pool != MAIN_VSI_POOL_NUMBER) {
1385                 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1386                         "MAC to pool %u", pool);
1387                 return;
1388         }
1389         for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1390                 if (!macvlan->vfta[j])
1391                         continue;
1392                 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1393                         if (!(macvlan->vfta[j] & (1 << k)))
1394                                 continue;
1395                         if (i + 1 > macvlan->vlan_num) {
1396                                 PMD_INIT_LOG(ERR, "VLAN count does not match");
1397                                 return;
1398                         }
1399                         fm10k_mbx_lock(hw);
1400                         fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1401                                 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1402                         fm10k_mbx_unlock(hw);
1403                         i++;
1404                 }
1405         }
1406 }
1407
1408 /* Add/Remove a MAC address, and update filters to VMDQ */
1409 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1410                 const u8 *mac, bool add, uint32_t pool)
1411 {
1412         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1413         struct fm10k_macvlan_filter_info *macvlan;
1414         struct rte_eth_vmdq_rx_conf *vmdq_conf;
1415         uint32_t i;
1416
1417         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1418         vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1419
1420         if (pool > macvlan->nb_queue_pools) {
1421                 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1422                         " Max pool is %u",
1423                         pool, macvlan->nb_queue_pools);
1424                 return;
1425         }
1426         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1427                 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1428                         continue;
1429                 fm10k_mbx_lock(hw);
1430                 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1431                         vmdq_conf->pool_map[i].vlan_id, add, 0);
1432                 fm10k_mbx_unlock(hw);
1433         }
1434 }
1435
1436 /* Add/Remove a MAC address, and update filters */
1437 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1438                 const u8 *mac, bool add, uint32_t pool)
1439 {
1440         struct fm10k_macvlan_filter_info *macvlan;
1441
1442         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1443
1444         if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1445                 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1446         else
1447                 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1448
1449         if (add)
1450                 macvlan->mac_num++;
1451         else
1452                 macvlan->mac_num--;
1453 }
1454
1455 /* Add a MAC address, and update filters */
1456 static void
1457 fm10k_macaddr_add(struct rte_eth_dev *dev,
1458                 struct ether_addr *mac_addr,
1459                 uint32_t index,
1460                 uint32_t pool)
1461 {
1462         struct fm10k_macvlan_filter_info *macvlan;
1463
1464         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1465         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1466         macvlan->mac_vmdq_id[index] = pool;
1467 }
1468
1469 /* Remove a MAC address, and update filters */
1470 static void
1471 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1472 {
1473         struct rte_eth_dev_data *data = dev->data;
1474         struct fm10k_macvlan_filter_info *macvlan;
1475
1476         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1477         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1478                         FALSE, macvlan->mac_vmdq_id[index]);
1479         macvlan->mac_vmdq_id[index] = 0;
1480 }
1481
1482 static inline int
1483 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1484 {
1485         if ((request < min) || (request > max) || ((request % mult) != 0))
1486                 return -1;
1487         else
1488                 return 0;
1489 }
1490
1491 /*
1492  * Create a memzone for hardware descriptor rings. Malloc cannot be used since
1493  * the physical address is required. If the memzone is already created, then
1494  * this function returns a pointer to the existing memzone.
1495  */
1496 static inline const struct rte_memzone *
1497 allocate_hw_ring(const char *driver_name, const char *ring_name,
1498         uint8_t port_id, uint16_t queue_id, int socket_id,
1499         uint32_t size, uint32_t align)
1500 {
1501         char name[RTE_MEMZONE_NAMESIZE];
1502         const struct rte_memzone *mz;
1503
1504         snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
1505                  driver_name, ring_name, port_id, queue_id, socket_id);
1506
1507         /* return the memzone if it already exists */
1508         mz = rte_memzone_lookup(name);
1509         if (mz)
1510                 return mz;
1511
1512 #ifdef RTE_LIBRTE_XEN_DOM0
1513         return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
1514                                            RTE_PGSIZE_2M);
1515 #else
1516         return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
1517 #endif
1518 }
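/*
 * Usage sketch (illustrative only): both queue setup paths below request a
 * maximum-size ring, so a later reconfiguration with a different descriptor
 * count finds and reuses the same memzone, e.g.:
 *
 *   mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
 *                         dev->data->port_id, queue_id, socket_id,
 *                         FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
 */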
1519
1520 static inline int
1521 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1522 {
1523         if ((request < min) || (request > max) || ((div % request) != 0))
1524                 return -1;
1525         else
1526                 return 0;
1527 }
1528
1529 static inline int
1530 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1531 {
1532         uint16_t rx_free_thresh;
1533
1534         if (conf->rx_free_thresh == 0)
1535                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1536         else
1537                 rx_free_thresh = conf->rx_free_thresh;
1538
1539         /* make sure the requested threshold satisfies the constraints */
1540         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1541                         FM10K_RX_FREE_THRESH_MAX(q),
1542                         FM10K_RX_FREE_THRESH_DIV(q),
1543                         rx_free_thresh)) {
1544                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1545                         "less than or equal to %u, "
1546                         "greater than or equal to %u, "
1547                         "and a divisor of %u",
1548                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1549                         FM10K_RX_FREE_THRESH_MIN(q),
1550                         FM10K_RX_FREE_THRESH_DIV(q));
1551                 return (-EINVAL);
1552         }
1553
1554         q->alloc_thresh = rx_free_thresh;
1555         q->drop_en = conf->rx_drop_en;
1556         q->rx_deferred_start = conf->rx_deferred_start;
1557
1558         return 0;
1559 }
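/*
 * Application-side sketch (illustrative, not part of this driver): passing
 * rx_free_thresh == 0 selects the driver default; a non-zero value must lie
 * in [FM10K_RX_FREE_THRESH_MIN(q), FM10K_RX_FREE_THRESH_MAX(q)] and evenly
 * divide FM10K_RX_FREE_THRESH_DIV(q), e.g.:
 *
 *   struct rte_eth_rxconf rx_conf = {
 *           .rx_free_thresh = 32,  // assumed to satisfy check_thresh()
 *           .rx_drop_en = 0,
 *   };
 */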
1560
1561 /*
1562  * Hardware requires specific alignment for Rx packet buffers. At
1563  * least one of the following two conditions must be satisfied.
1564  *  1. Address is 512B aligned
1565  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1566  *
1567  * As such, the driver may need to adjust the DMA address within the
1568  * buffer by up to 512B.
1569  *
1570  * return 1 if the element size is valid, otherwise return 0.
1571  */
1572 static int
1573 mempool_element_size_valid(struct rte_mempool *mp)
1574 {
1575         uint32_t min_size;
1576
1577         /* elt_size includes mbuf header and headroom */
1578         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1579                         RTE_PKTMBUF_HEADROOM;
1580
1581         /* account for up to 512B of alignment */
1582         min_size -= FM10K_RX_DATABUF_ALIGN;
1583
1584         /* sanity check for overflow */
1585         if (min_size > mp->elt_size)
1586                 return 0;
1587
1588         /* size is valid */
1589         return 1;
1590 }
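/*
 * Sizing sketch (illustrative): when creating the mbuf pool for this PMD,
 * reserve FM10K_RX_DATABUF_ALIGN extra bytes of data room so the worst-case
 * 512B realignment still leaves space for the frame, e.g.:
 *
 *   mp = rte_pktmbuf_pool_create("fm10k_rx_pool", 8192, 256, 0,
 *           RTE_PKTMBUF_HEADROOM + FM10K_RX_DATABUF_ALIGN + 2048,
 *           rte_socket_id());
 *
 * The pool name, element count, cache size and 2048B payload are
 * assumptions made for this example.
 */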
1591
1592 static int
1593 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1594         uint16_t nb_desc, unsigned int socket_id,
1595         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1596 {
1597         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1598         struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
1599         struct fm10k_rx_queue *q;
1600         const struct rte_memzone *mz;
1601
1602         PMD_INIT_FUNC_TRACE();
1603
1604         /* make sure the mempool element size can account for alignment. */
1605         if (!mempool_element_size_valid(mp)) {
1606                 PMD_INIT_LOG(ERR, "Error: Mempool element size is too small");
1607                 return (-EINVAL);
1608         }
1609
1610         /* make sure a valid number of descriptors has been requested */
1611         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1612                                 FM10K_MULT_RX_DESC, nb_desc)) {
1613                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1614                         "less than or equal to %"PRIu32", "
1615                         "greater than or equal to %u, "
1616                         "and a multiple of %u",
1617                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1618                         FM10K_MULT_RX_DESC);
1619                 return (-EINVAL);
1620         }
1621
1622         /*
1623          * if this queue already exists, free the associated memory. The
1624          * queue cannot be reused in case we need to allocate memory on a
1625          * different socket than was previously used.
1626          */
1627         if (dev->data->rx_queues[queue_id] != NULL) {
1628                 rx_queue_free(dev->data->rx_queues[queue_id]);
1629                 dev->data->rx_queues[queue_id] = NULL;
1630         }
1631
1632         /* allocate memory for the queue structure */
1633         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1634                                 socket_id);
1635         if (q == NULL) {
1636                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1637                 return (-ENOMEM);
1638         }
1639
1640         /* setup queue */
1641         q->mp = mp;
1642         q->nb_desc = nb_desc;
1643         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1644         q->port_id = dev->data->port_id;
1645         q->queue_id = queue_id;
1646         q->tail_ptr = (volatile uint32_t *)
1647                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1648         if (handle_rxconf(q, conf)) {
1649                 rte_free(q);
                     return (-EINVAL);
             }
1650
1651         /* allocate memory for the software ring */
1652         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1653                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1654                         RTE_CACHE_LINE_SIZE, socket_id);
1655         if (q->sw_ring == NULL) {
1656                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1657                 rte_free(q);
1658                 return (-ENOMEM);
1659         }
1660
1661         /*
1662          * allocate memory for the hardware descriptor ring. A memzone large
1663          * enough to hold the maximum ring size is requested to allow for
1664          * resizing in later calls to the queue setup function.
1665          */
1666         mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
1667                                 dev->data->port_id, queue_id, socket_id,
1668                                 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
1669         if (mz == NULL) {
1670                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1671                 rte_free(q->sw_ring);
1672                 rte_free(q);
1673                 return (-ENOMEM);
1674         }
1675         q->hw_ring = mz->addr;
1676 #ifdef RTE_LIBRTE_XEN_DOM0
1677         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1678 #else
1679         q->hw_ring_phys_addr = mz->phys_addr;
1680 #endif
1681
1682         /* Check if the number of descriptors satisfies the vector Rx requirement */
1683         if (!rte_is_power_of_2(nb_desc)) {
1684                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1685                                     "preconditions - canceling the feature for "
1686                                     "the whole port[%d]",
1687                              q->queue_id, q->port_id);
1688                 dev_info->rx_vec_allowed = false;
1689         } else
1690                 fm10k_rxq_vec_setup(q);
1691
1692         dev->data->rx_queues[queue_id] = q;
1693         return 0;
1694 }
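/*
 * Application-side sketch (illustrative): nb_desc must be a multiple of
 * FM10K_MULT_RX_DESC within [FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC]; a
 * power-of-2 count additionally keeps vector Rx enabled for the port, e.g.:
 *
 *   ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                NULL, mp);  // NULL rx_conf selects defaults
 */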
1695
1696 static void
1697 fm10k_rx_queue_release(void *queue)
1698 {
1699         PMD_INIT_FUNC_TRACE();
1700
1701         rx_queue_free(queue);
1702 }
1703
1704 static inline int
1705 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1706 {
1707         uint16_t tx_free_thresh;
1708         uint16_t tx_rs_thresh;
1709
1710         /* the constraint macros require that tx_free_thresh is configured
1711          * before tx_rs_thresh */
1712         if (conf->tx_free_thresh == 0)
1713                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1714         else
1715                 tx_free_thresh = conf->tx_free_thresh;
1716
1717         /* make sure the requested threshold satisfies the constraints */
1718         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1719                         FM10K_TX_FREE_THRESH_MAX(q),
1720                         FM10K_TX_FREE_THRESH_DIV(q),
1721                         tx_free_thresh)) {
1722                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1723                         "less than or equal to %u, "
1724                         "greater than or equal to %u, "
1725                         "and a divisor of %u",
1726                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1727                         FM10K_TX_FREE_THRESH_MIN(q),
1728                         FM10K_TX_FREE_THRESH_DIV(q));
1729                 return (-EINVAL);
1730         }
1731
1732         q->free_thresh = tx_free_thresh;
1733
1734         if (conf->tx_rs_thresh == 0)
1735                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1736         else
1737                 tx_rs_thresh = conf->tx_rs_thresh;
1738
1739         q->tx_deferred_start = conf->tx_deferred_start;
1740
1741         /* make sure the requested threshold satisfies the constraints */
1742         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1743                         FM10K_TX_RS_THRESH_MAX(q),
1744                         FM10K_TX_RS_THRESH_DIV(q),
1745                         tx_rs_thresh)) {
1746                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1747                         "less than or equal to %u, "
1748                         "greater than or equal to %u, "
1749                         "and a divisor of %u",
1750                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1751                         FM10K_TX_RS_THRESH_MIN(q),
1752                         FM10K_TX_RS_THRESH_DIV(q));
1753                 return (-EINVAL);
1754         }
1755
1756         q->rs_thresh = tx_rs_thresh;
1757
1758         return 0;
1759 }
1760
1761 static int
1762 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1763         uint16_t nb_desc, unsigned int socket_id,
1764         const struct rte_eth_txconf *conf)
1765 {
1766         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1767         struct fm10k_tx_queue *q;
1768         const struct rte_memzone *mz;
1769
1770         PMD_INIT_FUNC_TRACE();
1771
1772         /* make sure a valid number of descriptors has been requested */
1773         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1774                                 FM10K_MULT_TX_DESC, nb_desc)) {
1775                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1776                         "less than or equal to %"PRIu32", "
1777                         "greater than or equal to %u, "
1778                         "and a multiple of %u",
1779                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1780                         FM10K_MULT_TX_DESC);
1781                 return (-EINVAL);
1782         }
1783
1784         /*
1785          * if this queue already exists, free the associated memory. The
1786          * queue cannot be reused in case we need to allocate memory on a
1787          * different socket than was previously used.
1788          */
1789         if (dev->data->tx_queues[queue_id] != NULL) {
1790                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1791
1792                 txq->ops->release_mbufs(txq);
1793                 dev->data->tx_queues[queue_id] = NULL;
1794         }
1795
1796         /* allocate memory for the queue structure */
1797         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1798                                 socket_id);
1799         if (q == NULL) {
1800                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1801                 return (-ENOMEM);
1802         }
1803
1804         /* setup queue */
1805         q->nb_desc = nb_desc;
1806         q->port_id = dev->data->port_id;
1807         q->queue_id = queue_id;
1808         q->txq_flags = conf->txq_flags;
1809         q->ops = &def_txq_ops;
1810         q->tail_ptr = (volatile uint32_t *)
1811                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1812         if (handle_txconf(q, conf)) {
1813                 rte_free(q);
                     return (-EINVAL);
             }
1814
1815         /* allocate memory for the software ring */
1816         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1817                                         nb_desc * sizeof(struct rte_mbuf *),
1818                                         RTE_CACHE_LINE_SIZE, socket_id);
1819         if (q->sw_ring == NULL) {
1820                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1821                 rte_free(q);
1822                 return (-ENOMEM);
1823         }
1824
1825         /*
1826          * allocate memory for the hardware descriptor ring. A memzone large
1827          * enough to hold the maximum ring size is requested to allow for
1828          * resizing in later calls to the queue setup function.
1829          */
1830         mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
1831                                 dev->data->port_id, queue_id, socket_id,
1832                                 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
1833         if (mz == NULL) {
1834                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1835                 rte_free(q->sw_ring);
1836                 rte_free(q);
1837                 return (-ENOMEM);
1838         }
1839         q->hw_ring = mz->addr;
1840 #ifdef RTE_LIBRTE_XEN_DOM0
1841         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1842 #else
1843         q->hw_ring_phys_addr = mz->phys_addr;
1844 #endif
1845
1846         /*
1847          * allocate memory for the RS bit tracker. Enough slots are required
1848          * to hold the descriptor index for each RS bit that needs to be set.
1849          */
1850         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1851                                 ((nb_desc + 1) / q->rs_thresh) *
1852                                 sizeof(uint16_t),
1853                                 RTE_CACHE_LINE_SIZE, socket_id);
1854         if (q->rs_tracker.list == NULL) {
1855                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1856                 rte_free(q->sw_ring);
1857                 rte_free(q);
1858                 return (-ENOMEM);
1859         }
1860
1861         dev->data->tx_queues[queue_id] = q;
1862         return 0;
1863 }
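/*
 * Application-side sketch (illustrative): setting FM10K_SIMPLE_TX_FLAG on
 * every queue lets fm10k_set_tx_function() select the vector Tx path;
 * otherwise the full-featured fm10k_xmit_pkts path is used, e.g.:
 *
 *   struct rte_eth_txconf tx_conf = {
 *           .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
 *                        ETH_TXQ_FLAGS_NOOFFLOADS,
 *   };
 *   ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                &tx_conf);
 */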
1864
1865 static void
1866 fm10k_tx_queue_release(void *queue)
1867 {
1868         struct fm10k_tx_queue *q = queue;
1869         PMD_INIT_FUNC_TRACE();
1870
1871         q->ops->release_mbufs(q);
1872 }
1873
1874 static int
1875 fm10k_reta_update(struct rte_eth_dev *dev,
1876                         struct rte_eth_rss_reta_entry64 *reta_conf,
1877                         uint16_t reta_size)
1878 {
1879         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1880         uint16_t i, j, idx, shift;
1881         uint8_t mask;
1882         uint32_t reta;
1883
1884         PMD_INIT_FUNC_TRACE();
1885
1886         if (reta_size > FM10K_MAX_RSS_INDICES) {
1887                 PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
1888                         "(%d) doesn't match what the hardware can support "
1889                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1890                 return -EINVAL;
1891         }
1892
1893         /*
1894          * Update Redirection Table RETA[n], n=0..31. The redirection table
1895          * has 128 entries in 32 registers.
1896          */
1897         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1898                 idx = i / RTE_RETA_GROUP_SIZE;
1899                 shift = i % RTE_RETA_GROUP_SIZE;
1900                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1901                                 BIT_MASK_PER_UINT32);
1902                 if (mask == 0)
1903                         continue;
1904
1905                 reta = 0;
1906                 if (mask != BIT_MASK_PER_UINT32)
1907                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1908
1909                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1910                         if (mask & (0x1 << j)) {
1911                                 if (mask != 0xF)
1912                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
1913                                 reta |= reta_conf[idx].reta[shift + j] <<
1914                                                 (CHAR_BIT * j);
1915                         }
1916                 }
1917                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
1918         }
1919
1920         return 0;
1921 }
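/*
 * Application-side sketch (illustrative, not part of this driver): the
 * 128-entry table spans two rte_eth_rss_reta_entry64 groups; bits set in
 * .mask select which of the 64 entries per group get written, e.g. a
 * round-robin spread over nb_rx_queues (variable names are assumptions):
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[2] = { {0} };
 *   uint16_t i;
 *
 *   reta_conf[0].mask = UINT64_MAX;
 *   reta_conf[1].mask = UINT64_MAX;
 *   for (i = 0; i < 128; i++)
 *           reta_conf[i / 64].reta[i % 64] = i % nb_rx_queues;
 *   ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */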
1922
1923 static int
1924 fm10k_reta_query(struct rte_eth_dev *dev,
1925                         struct rte_eth_rss_reta_entry64 *reta_conf,
1926                         uint16_t reta_size)
1927 {
1928         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1929         uint16_t i, j, idx, shift;
1930         uint8_t mask;
1931         uint32_t reta;
1932
1933         PMD_INIT_FUNC_TRACE();
1934
1935         if (reta_size < FM10K_MAX_RSS_INDICES) {
1936                 PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
1937                         "(%d) doesn't match what the hardware can support "
1938                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1939                 return -EINVAL;
1940         }
1941
1942         /*
1943          * Read Redirection Table RETA[n], n=0..31. The redirection table
1944          * has 128 entries in 32 registers.
1945          */
1946         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1947                 idx = i / RTE_RETA_GROUP_SIZE;
1948                 shift = i % RTE_RETA_GROUP_SIZE;
1949                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1950                                 BIT_MASK_PER_UINT32);
1951                 if (mask == 0)
1952                         continue;
1953
1954                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1955                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1956                         if (mask & (0x1 << j))
1957                                 reta_conf[idx].reta[shift + j] = ((reta >>
1958                                         CHAR_BIT * j) & UINT8_MAX);
1959                 }
1960         }
1961
1962         return 0;
1963 }
1964
1965 static int
1966 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1967         struct rte_eth_rss_conf *rss_conf)
1968 {
1969         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1970         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1971         uint32_t mrqc;
1972         uint64_t hf = rss_conf->rss_hf;
1973         int i;
1974
1975         PMD_INIT_FUNC_TRACE();
1976
1977         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1978                 FM10K_RSSRK_ENTRIES_PER_REG)
1979                 return -EINVAL;
1980
1981         if (hf == 0)
1982                 return -EINVAL;
1983
1984         mrqc = 0;
1985         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
1986         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
1987         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
1988         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
1989         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
1990         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
1991         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
1992         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
1993         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
1994
1995         /* If the flags don't map to any supported hash function, return */
1996         if (mrqc == 0)
1997                 return -EINVAL;
1998
1999         if (key != NULL)
2000                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2001                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
2002
2003         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
2004
2005         return 0;
2006 }
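/*
 * Application-side sketch (illustrative): the key must provide at least
 * FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG bytes (40 is assumed
 * here) and rss_hf must contain at least one flag that maps to MRQC, e.g.:
 *
 *   static uint8_t rss_key[40];  // fill with a real 40-byte RSS key
 *   struct rte_eth_rss_conf rss_conf = {
 *           .rss_key = rss_key,
 *           .rss_key_len = sizeof(rss_key),
 *           .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *   };
 *   ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */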
2007
2008 static int
2009 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
2010         struct rte_eth_rss_conf *rss_conf)
2011 {
2012         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2013         uint32_t *key = (uint32_t *)rss_conf->rss_key;
2014         uint32_t mrqc;
2015         uint64_t hf;
2016         int i;
2017
2018         PMD_INIT_FUNC_TRACE();
2019
2020         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
2021                                 FM10K_RSSRK_ENTRIES_PER_REG)
2022                 return -EINVAL;
2023
2024         if (key != NULL)
2025                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
2026                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
2027
2028         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
2029         hf = 0;
2030         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
2031         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
2032         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2033         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2034         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2035         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2036         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2037         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2038         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2039
2040         rss_conf->rss_hf = hf;
2041
2042         return 0;
2043 }
2044
2045 static void
2046 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2047 {
2048         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2049         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2050
2051         /* Bind all local non-queue interrupts to vector 0 */
2052         int_map |= 0;
2053
2054         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2055         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2056         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2057         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2058         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2059         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2060
2061         /* Enable misc causes */
2062         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2063                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2064                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2065                                 FM10K_EIMR_ENABLE(MAILBOX) |
2066                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2067                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2068                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2069                                 FM10K_EIMR_ENABLE(VFLR));
2070
2071         /* Enable ITR 0 */
2072         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2073                                         FM10K_ITR_MASK_CLEAR);
2074         FM10K_WRITE_FLUSH(hw);
2075 }
2076
2077 static void
2078 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2079 {
2080         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2081         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2082
2083         int_map |= 0;
2084
2085         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2086         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2087         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2088         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2089         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2090         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2091
2092         /* Disable misc causes */
2093         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2094                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2095                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2096                                 FM10K_EIMR_DISABLE(MAILBOX) |
2097                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2098                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2099                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2100                                 FM10K_EIMR_DISABLE(VFLR));
2101
2102         /* Disable ITR 0 */
2103         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2104         FM10K_WRITE_FLUSH(hw);
2105 }
2106
2107 static void
2108 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2109 {
2110         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2111         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2112
2113         /* Bind all local non-queue interrupts to vector 0 */
2114         int_map |= 0;
2115
2116         /* Only INT 0 is available; the other 15 are reserved. */
2117         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2118
2119         /* Enable ITR 0 */
2120         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2121                                         FM10K_ITR_MASK_CLEAR);
2122         FM10K_WRITE_FLUSH(hw);
2123 }
2124
2125 static void
2126 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2127 {
2128         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2129         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2130
2131         int_map |= 0;
2132
2133         /* Only INT 0 is available; the other 15 are reserved. */
2134         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2135
2136         /* Disable ITR 0 */
2137         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2138         FM10K_WRITE_FLUSH(hw);
2139 }
2140
2141 static int
2142 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2143 {
2144         struct fm10k_fault fault;
2145         int err;
2146         const char *estr = "Unknown error";
2147
2148         /* Process PCA fault */
2149         if (eicr & FM10K_EICR_PCA_FAULT) {
2150                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2151                 if (err)
2152                         goto error;
2153                 switch (fault.type) {
2154                 case PCA_NO_FAULT:
2155                         estr = "PCA_NO_FAULT"; break;
2156                 case PCA_UNMAPPED_ADDR:
2157                         estr = "PCA_UNMAPPED_ADDR"; break;
2158                 case PCA_BAD_QACCESS_PF:
2159                         estr = "PCA_BAD_QACCESS_PF"; break;
2160                 case PCA_BAD_QACCESS_VF:
2161                         estr = "PCA_BAD_QACCESS_VF"; break;
2162                 case PCA_MALICIOUS_REQ:
2163                         estr = "PCA_MALICIOUS_REQ"; break;
2164                 case PCA_POISONED_TLP:
2165                         estr = "PCA_POISONED_TLP"; break;
2166                 case PCA_TLP_ABORT:
2167                         estr = "PCA_TLP_ABORT"; break;
2168                 default:
2169                         goto error;
2170                 }
2171                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2172                         estr, fault.func ? "VF" : "PF", fault.func,
2173                         fault.address, fault.specinfo);
2174         }
2175
2176         /* Process THI fault */
2177         if (eicr & FM10K_EICR_THI_FAULT) {
2178                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2179                 if (err)
2180                         goto error;
2181                 switch (fault.type) {
2182                 case THI_NO_FAULT:
2183                         estr = "THI_NO_FAULT"; break;
2184                 case THI_MAL_DIS_Q_FAULT:
2185                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2186                 default:
2187                         goto error;
2188                 }
2189                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2190                         estr, fault.func ? "VF" : "PF", fault.func,
2191                         fault.address, fault.specinfo);
2192         }
2193
2194         /* Process FUM fault */
2195         if (eicr & FM10K_EICR_FUM_FAULT) {
2196                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2197                 if (err)
2198                         goto error;
2199                 switch (fault.type) {
2200                 case FUM_NO_FAULT:
2201                         estr = "FUM_NO_FAULT"; break;
2202                 case FUM_UNMAPPED_ADDR:
2203                         estr = "FUM_UNMAPPED_ADDR"; break;
2204                 case FUM_POISONED_TLP:
2205                         estr = "FUM_POISONED_TLP"; break;
2206                 case FUM_BAD_VF_QACCESS:
2207                         estr = "FUM_BAD_VF_QACCESS"; break;
2208                 case FUM_ADD_DECODE_ERR:
2209                         estr = "FUM_ADD_DECODE_ERR"; break;
2210                 case FUM_RO_ERROR:
2211                         estr = "FUM_RO_ERROR"; break;
2212                 case FUM_QPRC_CRC_ERROR:
2213                         estr = "FUM_QPRC_CRC_ERROR"; break;
2214                 case FUM_CSR_TIMEOUT:
2215                         estr = "FUM_CSR_TIMEOUT"; break;
2216                 case FUM_INVALID_TYPE:
2217                         estr = "FUM_INVALID_TYPE"; break;
2218                 case FUM_INVALID_LENGTH:
2219                         estr = "FUM_INVALID_LENGTH"; break;
2220                 case FUM_INVALID_BE:
2221                         estr = "FUM_INVALID_BE"; break;
2222                 case FUM_INVALID_ALIGN:
2223                         estr = "FUM_INVALID_ALIGN"; break;
2224                 default:
2225                         goto error;
2226                 }
2227                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2228                         estr, fault.func ? "VF" : "PF", fault.func,
2229                         fault.address, fault.specinfo);
2230         }
2231
2232         return 0;
2233 error:
2234         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2235         return err;
2236 }
2237
2238 /**
2239  * PF interrupt handler triggered by the NIC to handle specific interrupts.
2240  *
2241  * @param handle
2242  *  Pointer to interrupt handle.
2243  * @param param
2244  *  The address of parameter (struct rte_eth_dev *) registered before.
2245  *
2246  * @return
2247  *  void
2248  */
2249 static void
2250 fm10k_dev_interrupt_handler_pf(
2251                         __rte_unused struct rte_intr_handle *handle,
2252                         void *param)
2253 {
2254         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2255         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2256         uint32_t cause, status;
2257
2258         if (hw->mac.type != fm10k_mac_pf)
2259                 return;
2260
2261         cause = FM10K_READ_REG(hw, FM10K_EICR);
2262
2263         /* Handle PCI fault cases */
2264         if (cause & FM10K_EICR_FAULT_MASK) {
2265                 PMD_INIT_LOG(ERR, "INT: fault detected!");
2266                 fm10k_dev_handle_fault(hw, cause);
2267         }
2268
2269         /* Handle switch up/down */
2270         if (cause & FM10K_EICR_SWITCHNOTREADY)
2271                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2272
2273         if (cause & FM10K_EICR_SWITCHREADY)
2274                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2275
2276         /* Handle mailbox message */
2277         fm10k_mbx_lock(hw);
2278         hw->mbx.ops.process(hw, &hw->mbx);
2279         fm10k_mbx_unlock(hw);
2280
2281         /* Handle SRAM error */
2282         if (cause & FM10K_EICR_SRAMERROR) {
2283                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2284
2285                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2286                 /* Write to clear pending bits */
2287                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2288
2289                 /* TODO: print out error message after shared code updates */
2290         }
2291
2292         /* Clear these 3 events if any are pending */
2293         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2294                  FM10K_EICR_SWITCHREADY;
2295         if (cause)
2296                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2297
2298         /* Re-enable interrupt from device side */
2299         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2300                                         FM10K_ITR_MASK_CLEAR);
2301         /* Re-enable interrupt from host side */
2302         rte_intr_enable(&(dev->pci_dev->intr_handle));
2303 }
2304
2305 /**
2306  * VF interrupt handler triggered by the NIC to handle specific interrupts.
2307  *
2308  * @param handle
2309  *  Pointer to interrupt handle.
2310  * @param param
2311  *  The address of parameter (struct rte_eth_dev *) registered before.
2312  *
2313  * @return
2314  *  void
2315  */
2316 static void
2317 fm10k_dev_interrupt_handler_vf(
2318                         __rte_unused struct rte_intr_handle *handle,
2319                         void *param)
2320 {
2321         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2322         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2323
2324         if (hw->mac.type != fm10k_mac_vf)
2325                 return;
2326
2327         /* Handle mailbox messages while holding the lock */
2328         fm10k_mbx_lock(hw);
2329         hw->mbx.ops.process(hw, &hw->mbx);
2330         fm10k_mbx_unlock(hw);
2331
2332         /* Re-enable interrupt from device side */
2333         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2334                                         FM10K_ITR_MASK_CLEAR);
2335         /* Re-enable interrupt from host side */
2336         rte_intr_enable(&(dev->pci_dev->intr_handle));
2337 }
2338
2339 /* Mailbox message handler in VF */
2340 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2341         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2342         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2343         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2344         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2345 };
2346
2347 /* Mailbox message handler in PF */
2348 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
2349         FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
2350         FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
2351         FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
2352         FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
2353         FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
2354         FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
2355         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2356 };
2357
2358 static int
2359 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2360 {
2361         int err;
2362
2363         /* Initialize mailbox lock */
2364         fm10k_mbx_initlock(hw);
2365
2366         /* Replace default message handler with new ones */
2367         if (hw->mac.type == fm10k_mac_pf)
2368                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
2369         else
2370                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2371
2372         if (err) {
2373                 PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
2374                                 err);
2375                 return err;
2376         }
2377         /* Connect to SM for PF device or PF for VF device */
2378         return hw->mbx.ops.connect(hw, &hw->mbx);
2379 }
2380
2381 static void
2382 fm10k_close_mbx_service(struct fm10k_hw *hw)
2383 {
2384         /* Disconnect from SM for PF device or PF for VF device */
2385         hw->mbx.ops.disconnect(hw, &hw->mbx);
2386 }
2387
2388 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2389         .dev_configure          = fm10k_dev_configure,
2390         .dev_start              = fm10k_dev_start,
2391         .dev_stop               = fm10k_dev_stop,
2392         .dev_close              = fm10k_dev_close,
2393         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
2394         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
2395         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
2396         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
2397         .stats_get              = fm10k_stats_get,
2398         .xstats_get             = fm10k_xstats_get,
2399         .stats_reset            = fm10k_stats_reset,
2400         .xstats_reset           = fm10k_stats_reset,
2401         .link_update            = fm10k_link_update,
2402         .dev_infos_get          = fm10k_dev_infos_get,
2403         .vlan_filter_set        = fm10k_vlan_filter_set,
2404         .vlan_offload_set       = fm10k_vlan_offload_set,
2405         .mac_addr_add           = fm10k_macaddr_add,
2406         .mac_addr_remove        = fm10k_macaddr_remove,
2407         .rx_queue_start         = fm10k_dev_rx_queue_start,
2408         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
2409         .tx_queue_start         = fm10k_dev_tx_queue_start,
2410         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
2411         .rx_queue_setup         = fm10k_rx_queue_setup,
2412         .rx_queue_release       = fm10k_rx_queue_release,
2413         .tx_queue_setup         = fm10k_tx_queue_setup,
2414         .tx_queue_release       = fm10k_tx_queue_release,
2415         .reta_update            = fm10k_reta_update,
2416         .reta_query             = fm10k_reta_query,
2417         .rss_hash_update        = fm10k_rss_hash_update,
2418         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
2419 };
2420
2421 static void __attribute__((cold))
2422 fm10k_set_tx_function(struct rte_eth_dev *dev)
2423 {
2424         struct fm10k_tx_queue *txq;
2425         int i;
2426         int use_sse = 1;
2427
2428         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2429                 txq = dev->data->tx_queues[i];
2430                 if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) !=
2431                         FM10K_SIMPLE_TX_FLAG) {
2432                         use_sse = 0;
2433                         break;
2434                 }
2435         }
2436
2437         if (use_sse) {
2438                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2439                         txq = dev->data->tx_queues[i];
2440                         fm10k_txq_vec_setup(txq);
2441                 }
2442                 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2443         } else
2444                 dev->tx_pkt_burst = fm10k_xmit_pkts;
2445 }
2446
2447 static void __attribute__((cold))
2448 fm10k_set_rx_function(struct rte_eth_dev *dev)
2449 {
2450         struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2451         uint16_t i, rx_using_sse;
2452
2453         /* In order to allow Vector Rx there are a few configuration
2454          * conditions to be met.
2455          */
2456         if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) {
2457                 if (dev->data->scattered_rx)
2458                         dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2459                 else
2460                         dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2461         } else if (dev->data->scattered_rx)
2462                 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2463
2464         rx_using_sse =
2465                 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2466                 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
2467
2468         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2469                 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2470
2471                 rxq->rx_using_sse = rx_using_sse;
2472         }
2473 }
2474
2475 static void
2476 fm10k_params_init(struct rte_eth_dev *dev)
2477 {
2478         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2479         struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2480
2481         /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2482          * there is no way to get link status without reading BAR4.  Until this
2483          * works, assume we have maximum bandwidth.
2484          * @todo - fix bus info
2485          */
2486         hw->bus_caps.speed = fm10k_bus_speed_8000;
2487         hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2488         hw->bus_caps.payload = fm10k_bus_payload_512;
2489         hw->bus.speed = fm10k_bus_speed_8000;
2490         hw->bus.width = fm10k_bus_width_pcie_x8;
2491         hw->bus.payload = fm10k_bus_payload_256;
2492
2493         info->rx_vec_allowed = true;
2494 }
2495
2496 static int
2497 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2498 {
2499         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2500         int diag;
2501         struct fm10k_macvlan_filter_info *macvlan;
2502
2503         PMD_INIT_FUNC_TRACE();
2504
2505         dev->dev_ops = &fm10k_eth_dev_ops;
2506         dev->rx_pkt_burst = &fm10k_recv_pkts;
2507         dev->tx_pkt_burst = &fm10k_xmit_pkts;
2508
2509         /* only initialize in the primary process */
2510         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2511                 return 0;
2512
2513         rte_eth_copy_pci_info(dev, dev->pci_dev);
2514
2515         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2516         memset(macvlan, 0, sizeof(*macvlan));
2517         /* Vendor and Device ID need to be set before init of shared code */
2518         memset(hw, 0, sizeof(*hw));
2519         hw->device_id = dev->pci_dev->id.device_id;
2520         hw->vendor_id = dev->pci_dev->id.vendor_id;
2521         hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2522         hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2523         hw->revision_id = 0;
2524         hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2525         if (hw->hw_addr == NULL) {
2526                 PMD_INIT_LOG(ERR, "Bad mem resource."
2527                         " Try to blacklist unused devices.");
2528                 return -EIO;
2529         }
2530
2531         /* Store fm10k_adapter pointer */
2532         hw->back = dev->data->dev_private;
2533
2534         /* Initialize the shared code */
2535         diag = fm10k_init_shared_code(hw);
2536         if (diag != FM10K_SUCCESS) {
2537                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2538                 return -EIO;
2539         }
2540
2541         /* Initialize parameters */
2542         fm10k_params_init(dev);
2543
2544         /* Initialize the hw */
2545         diag = fm10k_init_hw(hw);
2546         if (diag != FM10K_SUCCESS) {
2547                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2548                 return -EIO;
2549         }
2550
2551         /* Initialize MAC address(es) */
2552         dev->data->mac_addrs = rte_zmalloc("fm10k",
2553                         ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2554         if (dev->data->mac_addrs == NULL) {
2555                 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2556                 return -ENOMEM;
2557         }
2558
2559         diag = fm10k_read_mac_addr(hw);
2560
2561         ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2562                         &dev->data->mac_addrs[0]);
2563
2564         if (diag != FM10K_SUCCESS ||
2565                 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2566
2567                 /* Generate a random addr */
2568                 eth_random_addr(hw->mac.addr);
2569                 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
2570                 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2571                 &dev->data->mac_addrs[0]);
2572         }
2573
2574         /* Reset the hw statistics */
2575         fm10k_stats_reset(dev);
2576
2577         /* Reset the hw */
2578         diag = fm10k_reset_hw(hw);
2579         if (diag != FM10K_SUCCESS) {
2580                 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2581                 return -EIO;
2582         }
2583
2584         /* Setup mailbox service */
2585         diag = fm10k_setup_mbx_service(hw);
2586         if (diag != FM10K_SUCCESS) {
2587                 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2588                 return -EIO;
2589         }
2590
2591         /* PF and VF have different interrupt handling mechanisms */
2592         if (hw->mac.type == fm10k_mac_pf) {
2593                 /* register callback func to eal lib */
2594                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2595                         fm10k_dev_interrupt_handler_pf, (void *)dev);
2596
2597                 /* enable MISC interrupt */
2598                 fm10k_dev_enable_intr_pf(dev);
2599         } else { /* VF */
2600                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2601                         fm10k_dev_interrupt_handler_vf, (void *)dev);
2602
2603                 fm10k_dev_enable_intr_vf(dev);
2604         }
2605
2606         /* Enable uio intr after callback registered */
2607         rte_intr_enable(&(dev->pci_dev->intr_handle));
2608
2609         hw->mac.ops.update_int_moderator(hw);
2610
2611         /* Make sure Switch Manager is ready before going forward. */
2612         if (hw->mac.type == fm10k_mac_pf) {
2613                 int switch_ready = 0;
2614                 int i;
2615
2616                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2617                         fm10k_mbx_lock(hw);
2618                         hw->mac.ops.get_host_state(hw, &switch_ready);
2619                         fm10k_mbx_unlock(hw);
2620                         if (switch_ready)
2621                                 break;
2622                         /* Delay some time to acquire async LPORT_MAP info. */
2623                         rte_delay_us(WAIT_SWITCH_MSG_US);
2624                 }
2625
2626                 if (switch_ready == 0) {
2627                         PMD_INIT_LOG(ERR, "switch is not ready");
2628                         return -1;
2629                 }
2630         }
2631
2632         /*
2633          * The calls below trigger mailbox operations; acquire the lock to
2634          * avoid a race condition with the interrupt handler. Operations on
2635          * the mailbox FIFO will trigger an interrupt to the PF/SM, whose
2636          * interrupt handler will process the message and generate an
2637          * interrupt back to our side, at which point the mailbox FIFO will
2638          * be touched.
2639          */
2639         fm10k_mbx_lock(hw);
2640         /* Enable port first */
2641         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2642
2643         /* Set unicast mode by default. The application can switch to another
2644          * mode via other API functions.
2645          */
2646         hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2647                                         FM10K_XCAST_MODE_NONE);
2648
2649         fm10k_mbx_unlock(hw);
2650
2651         /* Add default mac address */
2652         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2653                 MAIN_VSI_POOL_NUMBER);
2654
2655         return 0;
2656 }
2657
2658 static int
2659 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
2660 {
2661         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2662
2663         PMD_INIT_FUNC_TRACE();
2664
2665         /* only uninitialize in the primary process */
2666         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2667                 return 0;
2668
2669         /* safe to close dev here */
2670         fm10k_dev_close(dev);
2671
2672         dev->dev_ops = NULL;
2673         dev->rx_pkt_burst = NULL;
2674         dev->tx_pkt_burst = NULL;
2675
2676         /* disable uio/vfio intr */
2677         rte_intr_disable(&(dev->pci_dev->intr_handle));
2678
2679         /* PF and VF have different interrupt handling mechanisms */
2680         if (hw->mac.type == fm10k_mac_pf) {
2681                 /* disable interrupt */
2682                 fm10k_dev_disable_intr_pf(dev);
2683
2684                 /* unregister callback func to eal lib */
2685                 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2686                         fm10k_dev_interrupt_handler_pf, (void *)dev);
2687         } else {
2688                 /* disable interrupt */
2689                 fm10k_dev_disable_intr_vf(dev);
2690
2691                 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2692                         fm10k_dev_interrupt_handler_vf, (void *)dev);
2693         }
2694
2695         /* free mac memory */
2696         if (dev->data->mac_addrs) {
2697                 rte_free(dev->data->mac_addrs);
2698                 dev->data->mac_addrs = NULL;
2699         }
2700
2701         memset(hw, 0, sizeof(*hw));
2702
2703         return 0;
2704 }
2705
2706 /*
2707  * The set of PCI devices this driver supports. This driver will enable both PF
2708  * and SRIOV-VF devices.
2709  */
2710 static const struct rte_pci_id pci_id_fm10k_map[] = {
2711 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2712 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2713 #include "rte_pci_dev_ids.h"
2714         { .vendor_id = 0, /* sentinel */ },
2715 };
2716
2717 static struct eth_driver rte_pmd_fm10k = {
2718         .pci_drv = {
2719                 .name = "rte_pmd_fm10k",
2720                 .id_table = pci_id_fm10k_map,
2721                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
2722         },
2723         .eth_dev_init = eth_fm10k_dev_init,
2724         .eth_dev_uninit = eth_fm10k_dev_uninit,
2725         .dev_private_size = sizeof(struct fm10k_adapter),
2726 };
2727
2728 /*
2729  * Driver initialization routine.
2730  * Invoked once at EAL init time.
2731  * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
2732  */
2733 static int
2734 rte_pmd_fm10k_init(__rte_unused const char *name,
2735         __rte_unused const char *params)
2736 {
2737         PMD_INIT_FUNC_TRACE();
2738         rte_eth_driver_register(&rte_pmd_fm10k);
2739         return 0;
2740 }
2741
2742 static struct rte_driver rte_fm10k_driver = {
2743         .type = PMD_PDEV,
2744         .init = rte_pmd_fm10k_init,
2745 };
2746
2747 PMD_REGISTER_DRIVER(rte_fm10k_driver);