drivers/net/fm10k/fm10k_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_spinlock.h>

#include "fm10k.h"
#include "base/fm10k_api.h"

/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

#define MAIN_VSI_POOL_NUMBER 0

/* Maximum number of attempts to query switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US    100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)

#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
                                ETH_TXQ_FLAGS_NOOFFLOADS)
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
        const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);

struct fm10k_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
        {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
        {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
        {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
        {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
        {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
        {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
        {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
        {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
                nodesc_drop)},
};

#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
                sizeof(fm10k_hw_stats_strings[0]))

static const struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
        {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
                sizeof(fm10k_hw_stats_rx_q_strings[0]))

static const struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
        {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
        {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
                sizeof(fm10k_hw_stats_tx_q_strings[0]))

#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
                (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))

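/*
 * Mailbox lock helpers: every hw->mac.ops call that exchanges messages with
 * the switch manager is bracketed by fm10k_mbx_lock()/fm10k_mbx_unlock().
 * The trylock loop below backs off for FM10K_MBXLOCK_DELAY_US between
 * attempts rather than spinning. A typical call site in this file looks like:
 *
 *	fm10k_mbx_lock(hw);
 *	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map, mode);
 *	fm10k_mbx_unlock(hw);
 */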
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
        rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

static void
fm10k_mbx_lock(struct fm10k_hw *hw)
{
        while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
                rte_delay_us(FM10K_MBXLOCK_DELAY_US);
}

static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
        rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
        static const union fm10k_rx_desc zero = {{0} };
        uint64_t dma_addr;
        int i, diag;
        PMD_INIT_FUNC_TRACE();

        diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
        if (diag != 0)
                return -ENOMEM;

        for (i = 0; i < q->nb_desc; ++i) {
                fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
                if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
                        rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
                                                q->nb_desc);
                        return -EINVAL;
                }
                dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
                q->hw_ring[i].q.pkt_addr = dma_addr;
                q->hw_ring[i].q.hdr_addr = dma_addr;
        }

        /* initialize extra software ring entries. Space for these extra
         * entries is always allocated.
         */
        memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
        for (i = 0; i < q->nb_fake_desc; ++i) {
                q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
                q->hw_ring[q->nb_desc + i] = zero;
        }

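        /* Reset the software indices and publish the tail pointer; handing
         * the HW all but one descriptor is the usual ring convention for
         * keeping a full ring distinguishable from an empty one.
         */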
        q->next_dd = 0;
        q->next_alloc = 0;
        q->next_trigger = q->alloc_thresh - 1;
        FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
        q->rxrearm_start = 0;
        q->rxrearm_nb = 0;

        return 0;
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device.
 */
static inline void
rx_queue_clean(struct fm10k_rx_queue *q)
{
        union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* zero faked descriptors */
        for (i = 0; i < q->nb_fake_desc; ++i)
                q->hw_ring[q->nb_desc + i] = zero;

        /* vPMD driver has a different way of releasing mbufs. */
        if (q->rx_using_sse) {
                fm10k_rx_queue_release_mbufs_vec(q);
                return;
        }

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
rx_queue_free(struct fm10k_rx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
                rx_queue_clean(q);
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable RX queue, wait until HW has finished the necessary flush operation
 */
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
                        reg & ~FM10K_RXQCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
                if (!(reg & FM10K_RXQCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        q->last_free = 0;
        q->next_free = 0;
        q->nb_used = 0;
        q->nb_free = q->nb_desc - 1;
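        /* Size the RS tracker FIFO for the worst case of one outstanding
         * report-status entry per rs_thresh descriptors in the ring.
         */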
        fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
        FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}

/*
 * clean queue, descriptor rings, free software buffers used when stopping
 * device
 */
static inline void
tx_queue_clean(struct fm10k_tx_queue *q)
{
        struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
        uint32_t i;
        PMD_INIT_FUNC_TRACE();

        /* zero descriptor rings */
        for (i = 0; i < q->nb_desc; ++i)
                q->hw_ring[i] = zero;

        /* free software buffers */
        for (i = 0; i < q->nb_desc; ++i) {
                if (q->sw_ring[i]) {
                        rte_pktmbuf_free_seg(q->sw_ring[i]);
                        q->sw_ring[i] = NULL;
                }
        }
}

/*
 * free all queue memory used when releasing the queue (i.e. configure)
 */
static inline void
tx_queue_free(struct fm10k_tx_queue *q)
{
        PMD_INIT_FUNC_TRACE();
        if (q) {
                PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
                tx_queue_clean(q);
                if (q->rs_tracker.list) {
                        rte_free(q->rs_tracker.list);
                        q->rs_tracker.list = NULL;
                }
                if (q->sw_ring) {
                        rte_free(q->sw_ring);
                        q->sw_ring = NULL;
                }
                rte_free(q);
                q = NULL;
        }
}

/*
 * disable TX queue, wait until HW has finished the necessary flush operation
 */
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
{
        uint32_t reg, i;

        reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
        FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
                        reg & ~FM10K_TXDCTL_ENABLE);

        /* Wait 100us at most */
        for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
                rte_delay_us(1);
                reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
                if (!(reg & FM10K_TXDCTL_ENABLE))
                        break;
        }

        if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
                return -1;

        return 0;
}

static int
fm10k_check_mq_mode(struct rte_eth_dev *dev)
{
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
                PMD_INIT_LOG(ERR, "DCB mode is not supported.");
                return -EINVAL;
        }

        if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                return 0;

        if (hw->mac.type == fm10k_mac_vf) {
                PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
                return -EINVAL;
        }

        /* Check VMDQ queue pool number */
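        /* pool_map[].pools is a 64-bit bitmap, so at most
         * sizeof(uint64_t) * CHAR_BIT = 64 pools can be mapped, and the pool
         * count may not exceed the number of configured RX queues.
         */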
        if (vmdq_conf->nb_queue_pools >
                        sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
                        vmdq_conf->nb_queue_pools > nb_rx_q) {
                PMD_INIT_LOG(ERR, "Too many queue pools: %d",
                        vmdq_conf->nb_queue_pools);
                return -EINVAL;
        }

        return 0;
}

static const struct fm10k_txq_ops def_txq_ops = {
        .reset = tx_queue_reset,
};

static int
fm10k_dev_configure(struct rte_eth_dev *dev)
{
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
        /* multiple queue mode checking */
        ret = fm10k_check_mq_mode(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
                            ret);
                return ret;
        }

        return 0;
}

/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
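/* e.g. fls(0) = 0, fls(1) = 1, fls(8) = 4 and fls(15) = 4, so fls(n - 1)
 * yields the number of bits needed to index n entries.
 */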

static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!vmdq_conf->pool_map[i].pools)
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
                fm10k_mbx_unlock(hw);
        }
}

static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Add default mac address */
        fm10k_MAC_filter_set(dev, hw->mac.addr, true,
                MAIN_VSI_POOL_NUMBER);
}

static void
fm10k_dev_rss_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

#define RSS_KEY_SIZE 40
        static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
                0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
                0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

        if (dev->data->nb_rx_queues == 1 ||
            dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
            dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
                return;

        /* random key is rss_intel_key (default) or user provided (rss_key) */
        if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
                key = (uint32_t *)rss_intel_key;
        else
                key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;

        /* Now fill our hash function seeds, 4 bytes at a time */
        for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
                FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

        /*
         * Fill in redirection table
         * The byte-swap is needed because NIC registers are in
         * little-endian order.
         */
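        /* Each 32-bit RETA register packs four one-byte queue indices, so a
         * register write is issued only on every fourth iteration below.
         */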
        reta = 0;
        for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
                        FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
                                        rte_bswap32(reta));
        }

        /*
         * Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
        mrqc = 0;
        mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
        mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
        mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
        mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

        if (mrqc == 0) {
                PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64" is not "
                        "supported", hf);
                return;
        }

        FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
}

static void
fm10k_dev_logic_port_update(struct rte_eth_dev *dev,
        uint16_t nb_lport_old, uint16_t nb_lport_new)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t i;

        fm10k_mbx_lock(hw);
        /* Disable previous logic ports */
        if (nb_lport_old)
                hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                        nb_lport_old, false);
        /* Enable new logic ports */
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                nb_lport_new, true);
        fm10k_mbx_unlock(hw);

        for (i = 0; i < nb_lport_new; i++) {
                /* Set unicast mode by default. The application can change
                 * to another mode via other API functions.
                 */
                fm10k_mbx_lock(hw);
                hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
                        FM10K_XCAST_MODE_NONE);
                fm10k_mbx_unlock(hw);
        }
}

static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
        struct fm10k_macvlan_filter_info *macvlan;
        uint16_t nb_queue_pools = 0; /* pool number in configuration */
        uint16_t nb_lport_new, nb_lport_old;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        fm10k_dev_rss_configure(dev);

        /* only PF supports VMDQ */
        if (hw->mac.type != fm10k_mac_pf)
                return;

        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                nb_queue_pools = vmdq_conf->nb_queue_pools;

        /* no pool number change, no need to update logic port and VLAN/MAC */
        if (macvlan->nb_queue_pools == nb_queue_pools)
                return;

        nb_lport_old = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
        nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
        fm10k_dev_logic_port_update(dev, nb_lport_old, nb_lport_new);

        /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
        memset(dev->data->mac_addrs, 0,
                ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
                &dev->data->mac_addrs[0]);
        memset(macvlan, 0, sizeof(*macvlan));
        macvlan->nb_queue_pools = nb_queue_pools;

        if (nb_queue_pools)
                fm10k_dev_vmdq_rx_configure(dev);
        else
                fm10k_dev_pf_main_vsi_reset(dev);
}

static int
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
        uint32_t size;

        /* Disable TXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_TXINT(i),
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
        for (i = 0; i < dev->data->nb_tx_queues; ++i) {
                txq = dev->data->tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

                /* disable queue to avoid issues while updating state */
                ret = tx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* set location and size for descriptor ring */
                FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
        }

        /* set up vector or scalar TX function as appropriate */
        fm10k_set_tx_function(dev);

        return 0;
}

static int
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
        uint32_t size;
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint16_t buf_size;

        /* Disable RXINT to avoid possible interrupt */
        for (i = 0; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
                                3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                rxq = dev->data->rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

                /* disable queue to avoid issues while updating state */
                ret = rx_queue_disable(hw, i);
                if (ret) {
                        PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
                        return -1;
                }

                /* Setup the Base and Length of the Rx Descriptor Ring */
                FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
                                base_addr & UINT64_LOWER_32BITS_MASK);
                FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
                                base_addr >> (CHAR_BIT * sizeof(uint32_t)));
                FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);

                /* Configure the Rx buffer size for one buffer without split */
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                        RTE_PKTMBUF_HEADROOM);
                /* As RX buffer is aligned to 512B within mbuf, some bytes are
                 * reserved for this purpose, and the worst case could be 511B.
                 * But SRR reg assumes all buffers have the same size. In order
                 * to fill the gap, we'll have to consider the worst case and
                 * assume 512B is reserved. If we don't do so, it's possible
                 * for HW to overwrite data to next mbuf.
                 */
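                /* Worked example (illustrative): with a 2048B data room and
                 * the default 128B RTE_PKTMBUF_HEADROOM, buf_size ends up as
                 * 2048 - 128 - 512 = 1408B before being programmed below.
                 */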
                buf_size -= FM10K_RX_DATABUF_ALIGN;

                FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
                                buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

                /* Add dual VLAN tag length to support dual VLAN */
                if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        dev->data->dev_conf.rxmode.enable_scatter) {
                        uint32_t reg;
                        dev->data->scattered_rx = 1;
                        reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
                        reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
                        FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
                }

                /* Enable drop on empty, it's RO for VF */
                if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
                        rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

                FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
                FM10K_WRITE_FLUSH(hw);
        }

        /* Configure VMDQ/RSS if applicable */
        fm10k_dev_mq_rx_configure(dev);

        /* Decide the best RX function */
        fm10k_set_rx_function(dev);

        return 0;
}

static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                rxq = dev->data->rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
                        return err;
                } else if (err == -EINVAL) {
                        PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
                                " %d", err);
                        return err;
                }

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled on real
                 * hardware, but BEFORE the queue is enabled when using the
                 * emulation platform. Do it in both places for now and remove
                 * this comment and the following two register writes when the
                 * emulation platform is no longer being used.
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

                /* Set PF ownership flag for PF devices */
                reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
                if (hw->mac.type == fm10k_mac_pf)
                        reg |= FM10K_RXQCTL_PF;
                reg |= FM10K_RXQCTL_ENABLE;
                /* enable RX queue */
                FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
                FM10K_WRITE_FLUSH(hw);

                /* Setup the HW Rx Head and Tail Descriptor Pointers
                 * Note: this must be done AFTER the queue is enabled
                 */
                FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return err;
}

static int
fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (rx_queue_id < dev->data->nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
                rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
                dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY       0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

                q->ops->reset(q);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
                FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

                /* enable TX queue */
                FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
                                        FM10K_TXDCTL_ENABLE | txdctl);
                FM10K_WRITE_FLUSH(hw);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        } else
                err = -1;

        return err;
}

static int
fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        if (tx_queue_id < dev->data->nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
                tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
                dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

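/* The switch manager assigns the device's glort range via DGLORTMAP; until
 * then the map still reads FM10K_DGLORTMAP_NONE, so this helper reports
 * whether a valid range has been acquired yet.
 */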
static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
        return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
                != FM10K_DGLORTMAP_NONE);
}

static void
fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if (!fm10k_glort_valid(hw))
                return;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_PROMISC);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
}

static void
fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint8_t mode;
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->all_multicast == 1)
                mode = FM10K_XCAST_MODE_ALLMULTI;
        else
                mode = FM10K_XCAST_MODE_NONE;

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                mode);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
}

static void
fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if (!fm10k_glort_valid(hw))
                return;

        /* If promiscuous mode is enabled, it doesn't make sense to enable
         * allmulticast and disable promiscuous since fm10k can only select
         * one of the modes.
         */
        if (dev->data->promiscuous) {
                PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "
                        "no need to enable allmulticast");
                return;
        }

        fm10k_mbx_lock(hw);
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_ALLMULTI);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
}

static void
fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int status;

        PMD_INIT_FUNC_TRACE();

        /* Return if it didn't acquire valid glort range */
        if (!fm10k_glort_valid(hw))
                return;

        if (dev->data->promiscuous) {
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "
                        "since promisc mode is enabled");
                return;
        }

        fm10k_mbx_lock(hw);
        /* Change mode to unicast mode */
        status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
                                FM10K_XCAST_MODE_NONE);
        fm10k_mbx_unlock(hw);

        if (status != FM10K_SUCCESS)
                PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
}

static void
fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t dglortdec, pool_len, rss_len, i;
        uint16_t nb_queue_pools;
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_queue_pools = macvlan->nb_queue_pools;
        pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
        rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
        dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
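        /* Illustrative example: with 4 VMDQ pools over 16 RX queues,
         * pool_len = fls(3) = 2 and rss_len = fls(15) - 2 = 2, i.e. two
         * GLORT bits select the pool and two select the RSS queue within it.
         */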

        /* Establish only MAP 0 as valid */
        FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY);

        /* Configure VMDQ/RSS DGlort Decoder */
        FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);

        /* Invalidate all other GLORT entries */
        for (i = 1; i < FM10K_DGLORT_COUNT; i++)
                FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
                                FM10K_DGLORTMAP_NONE);
}

#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
static int
fm10k_dev_start(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();

        /* stop, init, then start the hw */
        diag = fm10k_stop_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_init_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_start_hw(hw);
        if (diag != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
                return -EIO;
        }

        diag = fm10k_dev_tx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
                return diag;
        }

        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
                return diag;
        }

        if (hw->mac.type == fm10k_mac_pf)
                fm10k_dev_dglort_map_configure(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
                rxq = dev->data->rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
                diag = fm10k_dev_rx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
                txq = dev->data->tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
                                tx_queue_clean(dev->data->tx_queues[j]);
                        for (j = 0; j < dev->data->nb_rx_queues; ++j)
                                rx_queue_clean(dev->data->rx_queues[j]);
                        return diag;
                }
        }

        /* Update default vlan when not in VMDQ mode */
        if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
                fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

        return 0;
}

static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        fm10k_dev_tx_queue_stop(dev, i);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_dev_rx_queue_stop(dev, i);
}

static void
fm10k_dev_queue_release(struct rte_eth_dev *dev)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues) {
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        struct fm10k_tx_queue *txq = dev->data->tx_queues[i];

                        tx_queue_free(txq);
                }
        }

        if (dev->data->rx_queues) {
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_rx_queue_release(dev->data->rx_queues[i]);
        }
}

static void
fm10k_dev_close(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t nb_lport;
        struct fm10k_macvlan_filter_info *macvlan;

        PMD_INIT_FUNC_TRACE();

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        nb_lport = macvlan->nb_queue_pools ? macvlan->nb_queue_pools : 1;
        fm10k_mbx_lock(hw);
        hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
                nb_lport, false);
        fm10k_mbx_unlock(hw);

        /* Stop mailbox service first */
        fm10k_close_mbx_service(hw);
        fm10k_dev_stop(dev);
        fm10k_dev_queue_release(dev);
        fm10k_stop_hw(hw);
}

static int
fm10k_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
{
        PMD_INIT_FUNC_TRACE();

        /* The host-interface link is always up. The speed is ~50Gbps per
         * Gen3 x8 PCIe interface. For now, we leave the speed undefined
         * since there is no 50Gbps Ethernet.
         */
        dev->data->dev_link.link_speed  = 0;
        dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        dev->data->dev_link.link_status = 1;

        return 0;
}

static int
fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
                 unsigned n)
{
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        unsigned i, q, count = 0;

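        /* Per the rte_eth_xstats_get() convention, report the required
         * array size (without filling entries) when the caller's buffer
         * is too small.
         */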
        if (n < FM10K_NB_XSTATS)
                return FM10K_NB_XSTATS;

        /* Global stats */
        for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                snprintf(xstats[count].name, sizeof(xstats[count].name),
                         "%s", fm10k_hw_stats_strings[count].name);
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        fm10k_hw_stats_strings[count].offset);
                count++;
        }

        /* PF queue stats */
        for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
                for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
                        snprintf(xstats[count].name, sizeof(xstats[count].name),
                                 "rx_q%u_%s", q,
                                 fm10k_hw_stats_rx_q_strings[i].name);
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_rx_q_strings[i].offset);
                        count++;
                }
                for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                        snprintf(xstats[count].name, sizeof(xstats[count].name),
                                 "tx_q%u_%s", q,
                                 fm10k_hw_stats_tx_q_strings[i].name);
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_tx_q_strings[i].offset);
                        count++;
                }
        }

        return FM10K_NB_XSTATS;
}

static void
fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        uint64_t ipackets, opackets, ibytes, obytes;
        struct fm10k_hw *hw =
                FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int i;

        PMD_INIT_FUNC_TRACE();

        fm10k_update_hw_stats(hw, hw_stats);

        ipackets = opackets = ibytes = obytes = 0;
        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
                (i < hw->mac.max_queues); ++i) {
                stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
                stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
                stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
                stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
                ipackets += stats->q_ipackets[i];
                opackets += stats->q_opackets[i];
                ibytes   += stats->q_ibytes[i];
                obytes   += stats->q_obytes[i];
        }
        stats->ipackets = ipackets;
        stats->opackets = opackets;
        stats->ibytes = ibytes;
        stats->obytes = obytes;
}

static void
fm10k_stats_reset(struct rte_eth_dev *dev)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_hw_stats *hw_stats =
                FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        memset(hw_stats, 0, sizeof(*hw_stats));
        fm10k_rebind_hw_stats(hw, hw_stats);
}

static void
fm10k_dev_infos_get(struct rte_eth_dev *dev,
        struct rte_eth_dev_info *dev_info)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
        dev_info->max_rx_queues      = hw->mac.max_queues;
        dev_info->max_tx_queues      = hw->mac.max_queues;
        dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs            = dev->pci_dev->max_vfs;
        dev_info->vmdq_pool_base     = 0;
        dev_info->vmdq_queue_base    = 0;
        dev_info->max_vmdq_pools     = ETH_32_POOLS;
        dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_TSO;

        dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
        dev_info->reta_size = FM10K_MAX_RSS_INDICES;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = FM10K_DEFAULT_RX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_RX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = FM10K_DEFAULT_TX_PTHRESH,
                        .hthresh = FM10K_DEFAULT_TX_HTHRESH,
                        .wthresh = FM10K_DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
                .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
                .txq_flags = FM10K_SIMPLE_TX_FLAG,
        };

        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = FM10K_MAX_RX_DESC,
                .nb_min = FM10K_MIN_RX_DESC,
                .nb_align = FM10K_MULT_RX_DESC,
        };

        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = FM10K_MAX_TX_DESC,
                .nb_min = FM10K_MIN_TX_DESC,
                .nb_align = FM10K_MULT_TX_DESC,
        };
}

static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        s32 result;
        uint16_t mac_num = 0;
        uint32_t vid_idx, vid_bit, mac_index;
        struct fm10k_hw *hw;
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_eth_dev_data *data = dev->data;

        hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
                PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
                return -EINVAL;
        }

        if (vlan_id > ETH_VLAN_ID_MAX) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
                return -EINVAL;
        }

        vid_idx = FM10K_VFTA_IDX(vlan_id);
        vid_bit = FM10K_VFTA_BIT(vlan_id);
        /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
        if (on && (macvlan->vfta[vid_idx] & vid_bit))
                return 0;
        /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
        if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
                        "in the VLAN filter table");
                return -EINVAL;
        }

        fm10k_mbx_lock(hw);
        result = fm10k_update_vlan(hw, vlan_id, 0, on);
        fm10k_mbx_unlock(hw);
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
                return -EIO;
        }

        for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
                        (result == FM10K_SUCCESS); mac_index++) {
                if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
                        continue;
                if (mac_num > macvlan->mac_num - 1) {
                        PMD_INIT_LOG(ERR, "MAC address number "
                                        "does not match");
                        break;
                }
                fm10k_mbx_lock(hw);
                result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
                        data->mac_addrs[mac_index].addr_bytes,
                        vlan_id, on, 0);
                fm10k_mbx_unlock(hw);
                mac_num++;
        }
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
                return -EIO;
        }

        if (on) {
                macvlan->vlan_num++;
                macvlan->vfta[vid_idx] |= vid_bit;
        } else {
                macvlan->vlan_num--;
                macvlan->vfta[vid_idx] &= ~vid_bit;
        }
        return 0;
}

static void
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        if (mask & ETH_VLAN_STRIP_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
                        PMD_INIT_LOG(ERR, "VLAN stripping is "
                                        "always on in fm10k");
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        PMD_INIT_LOG(ERR, "VLAN QinQ is not "
                                        "supported in fm10k");
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
                        PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
        }
}

/* Add/Remove a MAC address, and update filters to main VSI */
static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
                const u8 *mac, bool add, uint32_t pool)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        uint32_t i, j, k;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (pool != MAIN_VSI_POOL_NUMBER) {
                PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
                        "mac to pool %u", pool);
                return;
        }
        for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
                if (!macvlan->vfta[j])
                        continue;
                for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
                        if (!(macvlan->vfta[j] & (1 << k)))
                                continue;
                        if (i + 1 > macvlan->vlan_num) {
                                PMD_INIT_LOG(ERR, "VLAN number does not match");
                                return;
                        }
                        fm10k_mbx_lock(hw);
                        fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
                                j * FM10K_UINT32_BIT_SIZE + k, add, 0);
                        fm10k_mbx_unlock(hw);
                        i++;
                }
        }
}

/* Add/Remove a MAC address, and update filters to VMDQ */
static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
                const u8 *mac, bool add, uint32_t pool)
{
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct fm10k_macvlan_filter_info *macvlan;
        struct rte_eth_vmdq_rx_conf *vmdq_conf;
        uint32_t i;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
        vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

        if (pool > macvlan->nb_queue_pools) {
                PMD_DRV_LOG(ERR, "Pool number %u invalid."
                        " Max pool is %u",
                        pool, macvlan->nb_queue_pools);
                return;
        }
        for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
                if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
                        continue;
                fm10k_mbx_lock(hw);
                fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
                        vmdq_conf->pool_map[i].vlan_id, add, 0);
                fm10k_mbx_unlock(hw);
        }
}

/* Add/Remove a MAC address, and update filters */
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
                const u8 *mac, bool add, uint32_t pool)
{
        struct fm10k_macvlan_filter_info *macvlan;

        macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);

        if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
                fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
        else
                fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);

        if (add)
                macvlan->mac_num++;
        else
                macvlan->mac_num--;
}

1458 /* Add a MAC address, and update filters */
1459 static void
1460 fm10k_macaddr_add(struct rte_eth_dev *dev,
1461                 struct ether_addr *mac_addr,
1462                 uint32_t index,
1463                 uint32_t pool)
1464 {
1465         struct fm10k_macvlan_filter_info *macvlan;
1466
1467         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1468         fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1469         macvlan->mac_vmdq_id[index] = pool;
1470 }
1471
1472 /* Remove a MAC address, and update filters */
1473 static void
1474 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1475 {
1476         struct rte_eth_dev_data *data = dev->data;
1477         struct fm10k_macvlan_filter_info *macvlan;
1478
1479         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1480         fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1481                         FALSE, macvlan->mac_vmdq_id[index]);
1482         macvlan->mac_vmdq_id[index] = 0;
1483 }
1484
1485 static inline int
1486 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1487 {
1488         if ((request < min) || (request > max) || ((request % mult) != 0))
1489                 return -1;
1490         else
1491                 return 0;
1492 }
1493
1495 static inline int
1496 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1497 {
1498         if ((request < min) || (request > max) || ((div % request) != 0))
1499                 return -1;
1500         else
1501                 return 0;
1502 }
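
/*
 * Worked example (hypothetical values): check_nb_desc(128, 4096, 32, 512)
 * accepts 512 because 128 <= 512 <= 4096 and 512 % 32 == 0, while
 * check_thresh(1, 512, 512, 48) rejects 48 because 512 % 48 != 0 -- a
 * threshold must evenly divide "div", not merely fall within [min, max].
 */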
1503
1504 static inline int
1505 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1506 {
1507         uint16_t rx_free_thresh;
1508
1509         if (conf->rx_free_thresh == 0)
1510                 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1511         else
1512                 rx_free_thresh = conf->rx_free_thresh;
1513
1514         /* make sure the requested threshold satisfies the constraints */
1515         if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1516                         FM10K_RX_FREE_THRESH_MAX(q),
1517                         FM10K_RX_FREE_THRESH_DIV(q),
1518                         rx_free_thresh)) {
1519                 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1520                         "less than or equal to %u, "
1521                         "greater than or equal to %u, "
1522                         "and a divisor of %u",
1523                         rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1524                         FM10K_RX_FREE_THRESH_MIN(q),
1525                         FM10K_RX_FREE_THRESH_DIV(q));
1526                 return (-EINVAL);
1527         }
1528
1529         q->alloc_thresh = rx_free_thresh;
1530         q->drop_en = conf->rx_drop_en;
1531         q->rx_deferred_start = conf->rx_deferred_start;
1532
1533         return 0;
1534 }
1535
1536 /*
1537  * Hardware requires specific alignment for Rx packet buffers. At
1538  * least one of the following two conditions must be satisfied.
1539  *  1. Address is 512B aligned
1540  *  2. Address is 8B aligned and buffer does not cross 4K boundary.
1541  *
1542  * As such, the driver may need to adjust the DMA address within the
1543  * buffer by up to 512B.
1544  *
1545  * return 1 if the element size is valid, otherwise return 0.
1546  */
1547 static int
1548 mempool_element_size_valid(struct rte_mempool *mp)
1549 {
1550         uint32_t min_size;
1551
1552         /* elt_size includes mbuf header and headroom */
1553         min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1554                         RTE_PKTMBUF_HEADROOM;
1555
1556         /* account for up to 512B of alignment */
1557         min_size -= FM10K_RX_DATABUF_ALIGN;
1558
1559         /* sanity check for overflow */
1560         if (min_size > mp->elt_size)
1561                 return 0;
1562
1563         /* size is valid */
1564         return 1;
1565 }
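
/*
 * Worked example (assuming the default 128B RTE_PKTMBUF_HEADROOM): a pool
 * created with elt_size = sizeof(struct rte_mbuf) + 128 + 2048 leaves
 * min_size = 2048 - 512 = 1536B of guaranteed data room after the
 * worst-case FM10K_RX_DATABUF_ALIGN adjustment, so it is accepted. A pool
 * whose data room is below 512B underflows min_size and is rejected by
 * the overflow check above.
 */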
1566
1567 static int
1568 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1569         uint16_t nb_desc, unsigned int socket_id,
1570         const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1571 {
1572         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1573         struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
1574         struct fm10k_rx_queue *q;
1575         const struct rte_memzone *mz;
1576
1577         PMD_INIT_FUNC_TRACE();
1578
1579         /* make sure the mempool element size can account for alignment. */
1580         if (!mempool_element_size_valid(mp)) {
1581                 PMD_INIT_LOG(ERR, "Mempool element size is too small");
1582                 return (-EINVAL);
1583         }
1584
1585         /* make sure a valid number of descriptors has been requested */
1586         if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1587                                 FM10K_MULT_RX_DESC, nb_desc)) {
1588                 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1589                         "less than or equal to %"PRIu32", "
1590                         "greater than or equal to %u, "
1591                         "and a multiple of %u",
1592                         nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1593                         FM10K_MULT_RX_DESC);
1594                 return (-EINVAL);
1595         }
1596
1597         /*
1598          * If this queue already exists, free the associated memory. The
1599          * queue cannot be reused in case we need to allocate memory on a
1600          * different socket than was previously used.
1601          */
1602         if (dev->data->rx_queues[queue_id] != NULL) {
1603                 rx_queue_free(dev->data->rx_queues[queue_id]);
1604                 dev->data->rx_queues[queue_id] = NULL;
1605         }
1606
1607         /* allocate memory for the queue structure */
1608         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1609                                 socket_id);
1610         if (q == NULL) {
1611                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1612                 return (-ENOMEM);
1613         }
1614
1615         /* setup queue */
1616         q->mp = mp;
1617         q->nb_desc = nb_desc;
1618         q->nb_fake_desc = FM10K_MULT_RX_DESC;
1619         q->port_id = dev->data->port_id;
1620         q->queue_id = queue_id;
1621         q->tail_ptr = (volatile uint32_t *)
1622                 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1623         if (handle_rxconf(q, conf)) {
1624                 rte_free(q);
                     return (-EINVAL);
             }
1625
1626         /* allocate memory for the software ring */
1627         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1628                         (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1629                         RTE_CACHE_LINE_SIZE, socket_id);
1630         if (q->sw_ring == NULL) {
1631                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1632                 rte_free(q);
1633                 return (-ENOMEM);
1634         }
1635
1636         /*
1637          * allocate memory for the hardware descriptor ring. A memzone large
1638          * enough to hold the maximum ring size is requested to allow for
1639          * resizing in later calls to the queue setup function.
1640          */
1641         mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1642                                       FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1643                                       socket_id);
1644         if (mz == NULL) {
1645                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1646                 rte_free(q->sw_ring);
1647                 rte_free(q);
1648                 return (-ENOMEM);
1649         }
1650         q->hw_ring = mz->addr;
1651         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1652
1653         /* Check if the number of descriptors satisfies the Vector Rx requirement */
1654         if (!rte_is_power_of_2(nb_desc)) {
1655                 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1656                                     "preconditions - canceling the feature for "
1657                                     "the whole port[%d]",
1658                              q->queue_id, q->port_id);
1659                 dev_info->rx_vec_allowed = false;
1660         } else
1661                 fm10k_rxq_vec_setup(q);
1662
1663         dev->data->rx_queues[queue_id] = q;
1664         return 0;
1665 }
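
/*
 * Application-side sketch (illustrative; names and values are the
 * caller's, not this driver's): this function is reached through the
 * generic ethdev API, e.g.
 *
 *     struct rte_eth_rxconf rxconf = { .rx_free_thresh = 32 };
 *
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                  &rxconf, mbuf_pool);
 *
 * where 512 must satisfy check_nb_desc() and 32 must satisfy
 * check_thresh() as enforced above; a power-of-two nb_desc also keeps
 * Vector Rx enabled for the port.
 */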
1666
1667 static void
1668 fm10k_rx_queue_release(void *queue)
1669 {
1670         PMD_INIT_FUNC_TRACE();
1671
1672         rx_queue_free(queue);
1673 }
1674
1675 static inline int
1676 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1677 {
1678         uint16_t tx_free_thresh;
1679         uint16_t tx_rs_thresh;
1680
1681         /* the constraint macros require that tx_free_thresh is configured
1682          * before tx_rs_thresh */
1683         if (conf->tx_free_thresh == 0)
1684                 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1685         else
1686                 tx_free_thresh = conf->tx_free_thresh;
1687
1688         /* make sure the requested threshold satisfies the constraints */
1689         if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1690                         FM10K_TX_FREE_THRESH_MAX(q),
1691                         FM10K_TX_FREE_THRESH_DIV(q),
1692                         tx_free_thresh)) {
1693                 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1694                         "less than or equal to %u, "
1695                         "greater than or equal to %u, "
1696                         "and a divisor of %u",
1697                         tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1698                         FM10K_TX_FREE_THRESH_MIN(q),
1699                         FM10K_TX_FREE_THRESH_DIV(q));
1700                 return (-EINVAL);
1701         }
1702
1703         q->free_thresh = tx_free_thresh;
1704
1705         if (conf->tx_rs_thresh == 0)
1706                 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1707         else
1708                 tx_rs_thresh = conf->tx_rs_thresh;
1709
1710         q->tx_deferred_start = conf->tx_deferred_start;
1711
1712         /* make sure the requested threshold satisfies the constraints */
1713         if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1714                         FM10K_TX_RS_THRESH_MAX(q),
1715                         FM10K_TX_RS_THRESH_DIV(q),
1716                         tx_rs_thresh)) {
1717                 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1718                         "less than or equal to %u, "
1719                         "greater than or equal to %u, "
1720                         "and a divisor of %u",
1721                         tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1722                         FM10K_TX_RS_THRESH_MIN(q),
1723                         FM10K_TX_RS_THRESH_DIV(q));
1724                 return (-EINVAL);
1725         }
1726
1727         q->rs_thresh = tx_rs_thresh;
1728
1729         return 0;
1730 }
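
/*
 * Illustrative note: tx_free_thresh controls when transmitted mbufs are
 * reclaimed, while tx_rs_thresh controls how often the RS (Report
 * Status) bit is set in descriptors. With hypothetical values
 * nb_desc = 512 and tx_rs_thresh = 32, completion status is requested
 * once every 32 descriptors; both thresholds must evenly divide their
 * respective FM10K_TX_*_THRESH_DIV quantities, as checked above.
 */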
1731
1732 static int
1733 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1734         uint16_t nb_desc, unsigned int socket_id,
1735         const struct rte_eth_txconf *conf)
1736 {
1737         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1738         struct fm10k_tx_queue *q;
1739         const struct rte_memzone *mz;
1740
1741         PMD_INIT_FUNC_TRACE();
1742
1743         /* make sure a valid number of descriptors has been requested */
1744         if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1745                                 FM10K_MULT_TX_DESC, nb_desc)) {
1746                 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1747                         "less than or equal to %"PRIu32", "
1748                         "greater than or equal to %u, "
1749                         "and a multiple of %u",
1750                         nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1751                         FM10K_MULT_TX_DESC);
1752                 return (-EINVAL);
1753         }
1754
1755         /*
1756          * If this queue already exists, free the associated memory. The
1757          * queue cannot be reused in case we need to allocate memory on a
1758          * different socket than was previously used.
1759          */
1760         if (dev->data->tx_queues[queue_id] != NULL) {
1761                 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
1762
1763                 tx_queue_free(txq);
1764                 dev->data->tx_queues[queue_id] = NULL;
1765         }
1766
1767         /* allocate memory for the queue structure */
1768         q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1769                                 socket_id);
1770         if (q == NULL) {
1771                 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1772                 return (-ENOMEM);
1773         }
1774
1775         /* setup queue */
1776         q->nb_desc = nb_desc;
1777         q->port_id = dev->data->port_id;
1778         q->queue_id = queue_id;
1779         q->txq_flags = conf->txq_flags;
1780         q->ops = &def_txq_ops;
1781         q->tail_ptr = (volatile uint32_t *)
1782                 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
1783         if (handle_txconf(q, conf)) {
1784                 rte_free(q);
                     return (-EINVAL);
             }
1785
1786         /* allocate memory for the software ring */
1787         q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1788                                         nb_desc * sizeof(struct rte_mbuf *),
1789                                         RTE_CACHE_LINE_SIZE, socket_id);
1790         if (q->sw_ring == NULL) {
1791                 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1792                 rte_free(q);
1793                 return (-ENOMEM);
1794         }
1795
1796         /*
1797          * allocate memory for the hardware descriptor ring. A memzone large
1798          * enough to hold the maximum ring size is requested to allow for
1799          * resizing in later calls to the queue setup function.
1800          */
1801         mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
1802                                       FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
1803                                       socket_id);
1804         if (mz == NULL) {
1805                 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1806                 rte_free(q->sw_ring);
1807                 rte_free(q);
1808                 return (-ENOMEM);
1809         }
1810         q->hw_ring = mz->addr;
1811         q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1812
1813         /*
1814          * allocate memory for the RS bit tracker. Enough slots are required
1815          * to hold the descriptor index for each RS bit that needs to be set.
1816          */
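             /* e.g. (hypothetical values): nb_desc = 512 with rs_thresh = 32
              * yields (512 + 1) / 32 = 16 tracker slots.
              */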
1817         q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
1818                                 ((nb_desc + 1) / q->rs_thresh) *
1819                                 sizeof(uint16_t),
1820                                 RTE_CACHE_LINE_SIZE, socket_id);
1821         if (q->rs_tracker.list == NULL) {
1822                 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
1823                 rte_free(q->sw_ring);
1824                 rte_free(q);
1825                 return (-ENOMEM);
1826         }
1827
1828         dev->data->tx_queues[queue_id] = q;
1829         return 0;
1830 }
1831
1832 static void
1833 fm10k_tx_queue_release(void *queue)
1834 {
1835         struct fm10k_tx_queue *q = queue;
1836         PMD_INIT_FUNC_TRACE();
1837
1838         tx_queue_free(q);
1839 }
1840
1841 static int
1842 fm10k_reta_update(struct rte_eth_dev *dev,
1843                         struct rte_eth_rss_reta_entry64 *reta_conf,
1844                         uint16_t reta_size)
1845 {
1846         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1847         uint16_t i, j, idx, shift;
1848         uint8_t mask;
1849         uint32_t reta;
1850
1851         PMD_INIT_FUNC_TRACE();
1852
1853         if (reta_size > FM10K_MAX_RSS_INDICES) {
1854                 PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
1855                         "(%d) doesn't match the number supported by hardware "
1856                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1857                 return -EINVAL;
1858         }
1859
1860         /*
1861          * Update Redirection Table RETA[n], n=0..31. The redirection table has
1862          * 128 entries in 32 registers
1863          */
1864         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1865                 idx = i / RTE_RETA_GROUP_SIZE;
1866                 shift = i % RTE_RETA_GROUP_SIZE;
1867                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1868                                 BIT_MASK_PER_UINT32);
1869                 if (mask == 0)
1870                         continue;
1871
1872                 reta = 0;
1873                 if (mask != BIT_MASK_PER_UINT32)
1874                         reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1875
1876                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1877                         if (mask & (0x1 << j)) {
1878                                 if (mask != BIT_MASK_PER_UINT32)
1879                                         reta &= ~(UINT8_MAX << CHAR_BIT * j);
1880                                 reta |= reta_conf[idx].reta[shift + j] <<
1881                                                 (CHAR_BIT * j);
1882                         }
1883                 }
1884                 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
1885         }
1886
1887         return 0;
1888 }
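
/*
 * Worked example (illustrative): the 128 RETA entries are packed four
 * 8-bit entries per 32-bit register, so entry 6 lives in byte 2 of
 * FM10K_RETA(0, 1). In reta_conf[] terms, entry 70 is addressed with
 * idx = 70 / RTE_RETA_GROUP_SIZE = 1 and shift = 70 % 64 = 6.
 */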
1889
1890 static int
1891 fm10k_reta_query(struct rte_eth_dev *dev,
1892                         struct rte_eth_rss_reta_entry64 *reta_conf,
1893                         uint16_t reta_size)
1894 {
1895         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1896         uint16_t i, j, idx, shift;
1897         uint8_t mask;
1898         uint32_t reta;
1899
1900         PMD_INIT_FUNC_TRACE();
1901
1902         if (reta_size < FM10K_MAX_RSS_INDICES) {
1903                 PMD_INIT_LOG(ERR, "The size of the configured hash lookup table "
1904                         "(%d) doesn't match the number supported by hardware "
1905                         "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
1906                 return -EINVAL;
1907         }
1908
1909         /*
1910          * Read Redirection Table RETA[n], n=0..31. The redirection table has
1911          * 128 entries in 32 registers
1912          */
1913         for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
1914                 idx = i / RTE_RETA_GROUP_SIZE;
1915                 shift = i % RTE_RETA_GROUP_SIZE;
1916                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1917                                 BIT_MASK_PER_UINT32);
1918                 if (mask == 0)
1919                         continue;
1920
1921                 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
1922                 for (j = 0; j < CHARS_PER_UINT32; j++) {
1923                         if (mask & (0x1 << j))
1924                                 reta_conf[idx].reta[shift + j] = ((reta >>
1925                                         CHAR_BIT * j) & UINT8_MAX);
1926                 }
1927         }
1928
1929         return 0;
1930 }
1931
1932 static int
1933 fm10k_rss_hash_update(struct rte_eth_dev *dev,
1934         struct rte_eth_rss_conf *rss_conf)
1935 {
1936         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1937         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1938         uint32_t mrqc;
1939         uint64_t hf = rss_conf->rss_hf;
1940         int i;
1941
1942         PMD_INIT_FUNC_TRACE();
1943
1944         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1945                 FM10K_RSSRK_ENTRIES_PER_REG)
1946                 return -EINVAL;
1947
1948         if (hf == 0)
1949                 return -EINVAL;
1950
1951         mrqc = 0;
1952         mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
1953         mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
1954         mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
1955         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
1956         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
1957         mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
1958         mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
1959         mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
1960         mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
1961
1962         /* If the flags don't map to any supported hash type, return an error */
1963         if (mrqc == 0)
1964                 return -EINVAL;
1965
1966         if (key != NULL)
1967                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1968                         FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
1969
1970         FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
1971
1972         return 0;
1973 }
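
/*
 * Application-side sketch (illustrative): enabling IPv4/TCP hashing with
 * a full-length key -- the length check above requires the key to cover
 * all FM10K_RSSRK_SIZE key registers.
 *
 *     uint8_t key[FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG];
 *     struct rte_eth_rss_conf conf = {
 *             .rss_key = key,
 *             .rss_key_len = sizeof(key),
 *             .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *     };
 *     ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 */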
1974
1975 static int
1976 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
1977         struct rte_eth_rss_conf *rss_conf)
1978 {
1979         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1980         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1981         uint32_t mrqc;
1982         uint64_t hf;
1983         int i;
1984
1985         PMD_INIT_FUNC_TRACE();
1986
1987         if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
1988                                 FM10K_RSSRK_ENTRIES_PER_REG)
1989                 return -EINVAL;
1990
1991         if (key != NULL)
1992                 for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
1993                         key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));
1994
1995         mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
1996         hf = 0;
1997         hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4              : 0;
1998         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6              : 0;
1999         hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX           : 0;
2000         hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP  : 0;
2001         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP  : 0;
2002         hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX       : 0;
2003         hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP  : 0;
2004         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP  : 0;
2005         hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX       : 0;
2006
2007         rss_conf->rss_hf = hf;
2008
2009         return 0;
2010 }
2011
2012 static void
2013 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
2014 {
2015         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2016         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2017
2018         /* Bind all local non-queue interrupts to vector 0 */
2019         int_map |= 0;
2020
2021         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2022         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2023         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2024         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2025         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2026         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2027
2028         /* Enable misc causes */
2029         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
2030                                 FM10K_EIMR_ENABLE(THI_FAULT) |
2031                                 FM10K_EIMR_ENABLE(FUM_FAULT) |
2032                                 FM10K_EIMR_ENABLE(MAILBOX) |
2033                                 FM10K_EIMR_ENABLE(SWITCHREADY) |
2034                                 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
2035                                 FM10K_EIMR_ENABLE(SRAMERROR) |
2036                                 FM10K_EIMR_ENABLE(VFLR));
2037
2038         /* Enable ITR 0 */
2039         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2040                                         FM10K_ITR_MASK_CLEAR);
2041         FM10K_WRITE_FLUSH(hw);
2042 }
2043
2044 static void
2045 fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
2046 {
2047         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2048         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2049
2050         int_map |= 0;
2051
2052         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map);
2053         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map);
2054         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map);
2055         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map);
2056         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map);
2057         FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map);
2058
2059         /* Disable misc causes */
2060         FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
2061                                 FM10K_EIMR_DISABLE(THI_FAULT) |
2062                                 FM10K_EIMR_DISABLE(FUM_FAULT) |
2063                                 FM10K_EIMR_DISABLE(MAILBOX) |
2064                                 FM10K_EIMR_DISABLE(SWITCHREADY) |
2065                                 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
2066                                 FM10K_EIMR_DISABLE(SRAMERROR) |
2067                                 FM10K_EIMR_DISABLE(VFLR));
2068
2069         /* Disable ITR 0 */
2070         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
2071         FM10K_WRITE_FLUSH(hw);
2072 }
2073
2074 static void
2075 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
2076 {
2077         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2078         uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;
2079
2080         /* Bind all local non-queue interrupts to vector 0 */
2081         int_map |= 0;
2082
2083         /* Only INT 0 available, other 15 are reserved. */
2084         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2085
2086         /* Enable ITR 0 */
2087         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2088                                         FM10K_ITR_MASK_CLEAR);
2089         FM10K_WRITE_FLUSH(hw);
2090 }
2091
2092 static void
2093 fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
2094 {
2095         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2096         uint32_t int_map = FM10K_INT_MAP_DISABLE;
2097
2098         int_map |= 0;
2099
2100         /* Only INT 0 available, other 15 are reserved. */
2101         FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);
2102
2103         /* Disable ITR 0 */
2104         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
2105         FM10K_WRITE_FLUSH(hw);
2106 }
2107
2108 static int
2109 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
2110 {
2111         struct fm10k_fault fault;
2112         int err;
2113         const char *estr = "Unknown error";
2114
2115         /* Process PCA fault */
2116         if (eicr & FM10K_EICR_PCA_FAULT) {
2117                 err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
2118                 if (err)
2119                         goto error;
2120                 switch (fault.type) {
2121                 case PCA_NO_FAULT:
2122                         estr = "PCA_NO_FAULT"; break;
2123                 case PCA_UNMAPPED_ADDR:
2124                         estr = "PCA_UNMAPPED_ADDR"; break;
2125                 case PCA_BAD_QACCESS_PF:
2126                         estr = "PCA_BAD_QACCESS_PF"; break;
2127                 case PCA_BAD_QACCESS_VF:
2128                         estr = "PCA_BAD_QACCESS_VF"; break;
2129                 case PCA_MALICIOUS_REQ:
2130                         estr = "PCA_MALICIOUS_REQ"; break;
2131                 case PCA_POISONED_TLP:
2132                         estr = "PCA_POISONED_TLP"; break;
2133                 case PCA_TLP_ABORT:
2134                         estr = "PCA_TLP_ABORT"; break;
2135                 default:
2136                         goto error;
2137                 }
2138                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2139                         estr, fault.func ? "VF" : "PF", fault.func,
2140                         fault.address, fault.specinfo);
2141         }
2142
2143         /* Process THI fault */
2144         if (eicr & FM10K_EICR_THI_FAULT) {
2145                 err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
2146                 if (err)
2147                         goto error;
2148                 switch (fault.type) {
2149                 case THI_NO_FAULT:
2150                         estr = "THI_NO_FAULT"; break;
2151                 case THI_MAL_DIS_Q_FAULT:
2152                         estr = "THI_MAL_DIS_Q_FAULT"; break;
2153                 default:
2154                         goto error;
2155                 }
2156                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2157                         estr, fault.func ? "VF" : "PF", fault.func,
2158                         fault.address, fault.specinfo);
2159         }
2160
2161         /* Process FUM fault */
2162         if (eicr & FM10K_EICR_FUM_FAULT) {
2163                 err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
2164                 if (err)
2165                         goto error;
2166                 switch (fault.type) {
2167                 case FUM_NO_FAULT:
2168                         estr = "FUM_NO_FAULT"; break;
2169                 case FUM_UNMAPPED_ADDR:
2170                         estr = "FUM_UNMAPPED_ADDR"; break;
2171                 case FUM_POISONED_TLP:
2172                         estr = "FUM_POISONED_TLP"; break;
2173                 case FUM_BAD_VF_QACCESS:
2174                         estr = "FUM_BAD_VF_QACCESS"; break;
2175                 case FUM_ADD_DECODE_ERR:
2176                         estr = "FUM_ADD_DECODE_ERR"; break;
2177                 case FUM_RO_ERROR:
2178                         estr = "FUM_RO_ERROR"; break;
2179                 case FUM_QPRC_CRC_ERROR:
2180                         estr = "FUM_QPRC_CRC_ERROR"; break;
2181                 case FUM_CSR_TIMEOUT:
2182                         estr = "FUM_CSR_TIMEOUT"; break;
2183                 case FUM_INVALID_TYPE:
2184                         estr = "FUM_INVALID_TYPE"; break;
2185                 case FUM_INVALID_LENGTH:
2186                         estr = "FUM_INVALID_LENGTH"; break;
2187                 case FUM_INVALID_BE:
2188                         estr = "FUM_INVALID_BE"; break;
2189                 case FUM_INVALID_ALIGN:
2190                         estr = "FUM_INVALID_ALIGN"; break;
2191                 default:
2192                         goto error;
2193                 }
2194                 PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
2195                         estr, fault.func ? "VF" : "PF", fault.func,
2196                         fault.address, fault.specinfo);
2197         }
2198
2199         return 0;
2200 error:
2201         PMD_INIT_LOG(ERR, "Failed to handle fault event.");
2202         return err;
2203 }
2204
2205 /**
2206  * PF interrupt handler triggered by NIC for handling specific interrupt.
2207  *
2208  * @param handle
2209  *  Pointer to interrupt handle.
2210  * @param param
2211  *  The address of parameter (struct rte_eth_dev *) registered before.
2212  *
2213  * @return
2214  *  void
2215  */
2216 static void
2217 fm10k_dev_interrupt_handler_pf(
2218                         __rte_unused struct rte_intr_handle *handle,
2219                         void *param)
2220 {
2221         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2222         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2223         uint32_t cause, status;
2224
2225         if (hw->mac.type != fm10k_mac_pf)
2226                 return;
2227
2228         cause = FM10K_READ_REG(hw, FM10K_EICR);
2229
2230         /* Handle PCI fault cases */
2231         if (cause & FM10K_EICR_FAULT_MASK) {
2232                 PMD_INIT_LOG(ERR, "INT: fault detected!");
2233                 fm10k_dev_handle_fault(hw, cause);
2234         }
2235
2236         /* Handle switch up/down */
2237         if (cause & FM10K_EICR_SWITCHNOTREADY)
2238                 PMD_INIT_LOG(ERR, "INT: Switch is not ready");
2239
2240         if (cause & FM10K_EICR_SWITCHREADY)
2241                 PMD_INIT_LOG(INFO, "INT: Switch is ready");
2242
2243         /* Handle mailbox message */
2244         fm10k_mbx_lock(hw);
2245         hw->mbx.ops.process(hw, &hw->mbx);
2246         fm10k_mbx_unlock(hw);
2247
2248         /* Handle SRAM error */
2249         if (cause & FM10K_EICR_SRAMERROR) {
2250                 PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
2251
2252                 status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
2253                 /* Write to clear pending bits */
2254                 FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);
2255
2256                 /* TODO: print out the error message after shared code updates */
2257         }
2258
2259         /* Clear these three events if any are pending */
2260         cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
2261                  FM10K_EICR_SWITCHREADY;
2262         if (cause)
2263                 FM10K_WRITE_REG(hw, FM10K_EICR, cause);
2264
2265         /* Re-enable interrupt from device side */
2266         FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
2267                                         FM10K_ITR_MASK_CLEAR);
2268         /* Re-enable interrupt from host side */
2269         rte_intr_enable(&(dev->pci_dev->intr_handle));
2270 }
2271
2272 /**
2273  * VF interrupt handler triggered by NIC for handling specific interrupt.
2274  *
2275  * @param handle
2276  *  Pointer to interrupt handle.
2277  * @param param
2278  *  The address of parameter (struct rte_eth_dev *) registered before.
2279  *
2280  * @return
2281  *  void
2282  */
2283 static void
2284 fm10k_dev_interrupt_handler_vf(
2285                         __rte_unused struct rte_intr_handle *handle,
2286                         void *param)
2287 {
2288         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2289         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2290
2291         if (hw->mac.type != fm10k_mac_vf)
2292                 return;
2293
2294         /* Handle mailbox message if lock is acquired */
2295         fm10k_mbx_lock(hw);
2296         hw->mbx.ops.process(hw, &hw->mbx);
2297         fm10k_mbx_unlock(hw);
2298
2299         /* Re-enable interrupt from device side */
2300         FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
2301                                         FM10K_ITR_MASK_CLEAR);
2302         /* Re-enable interrupt from host side */
2303         rte_intr_enable(&(dev->pci_dev->intr_handle));
2304 }
2305
2306 /* Mailbox message handler in VF */
2307 static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
2308         FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
2309         FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
2310         FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
2311         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2312 };
2313
2314 /* Mailbox message handler in PF */
2315 static const struct fm10k_msg_data fm10k_msgdata_pf[] = {
2316         FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
2317         FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
2318         FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
2319         FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
2320         FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
2321         FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
2322         FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
2323 };
2324
2325 static int
2326 fm10k_setup_mbx_service(struct fm10k_hw *hw)
2327 {
2328         int err;
2329
2330         /* Initialize mailbox lock */
2331         fm10k_mbx_initlock(hw);
2332
2333         /* Replace default message handler with new ones */
2334         if (hw->mac.type == fm10k_mac_pf)
2335                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf);
2336         else
2337                 err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);
2338
2339         if (err) {
2340                 PMD_INIT_LOG(ERR, "Failed to register mailbox handlers, err: %d",
2341                                 err);
2342                 return err;
2343         }
2344         /* Connect to SM for PF device or PF for VF device */
2345         return hw->mbx.ops.connect(hw, &hw->mbx);
2346 }
2347
2348 static void
2349 fm10k_close_mbx_service(struct fm10k_hw *hw)
2350 {
2351         /* Disconnect from SM for PF device or PF for VF device */
2352         hw->mbx.ops.disconnect(hw, &hw->mbx);
2353 }
2354
2355 static const struct eth_dev_ops fm10k_eth_dev_ops = {
2356         .dev_configure          = fm10k_dev_configure,
2357         .dev_start              = fm10k_dev_start,
2358         .dev_stop               = fm10k_dev_stop,
2359         .dev_close              = fm10k_dev_close,
2360         .promiscuous_enable     = fm10k_dev_promiscuous_enable,
2361         .promiscuous_disable    = fm10k_dev_promiscuous_disable,
2362         .allmulticast_enable    = fm10k_dev_allmulticast_enable,
2363         .allmulticast_disable   = fm10k_dev_allmulticast_disable,
2364         .stats_get              = fm10k_stats_get,
2365         .xstats_get             = fm10k_xstats_get,
2366         .stats_reset            = fm10k_stats_reset,
2367         .xstats_reset           = fm10k_stats_reset,
2368         .link_update            = fm10k_link_update,
2369         .dev_infos_get          = fm10k_dev_infos_get,
2370         .vlan_filter_set        = fm10k_vlan_filter_set,
2371         .vlan_offload_set       = fm10k_vlan_offload_set,
2372         .mac_addr_add           = fm10k_macaddr_add,
2373         .mac_addr_remove        = fm10k_macaddr_remove,
2374         .rx_queue_start         = fm10k_dev_rx_queue_start,
2375         .rx_queue_stop          = fm10k_dev_rx_queue_stop,
2376         .tx_queue_start         = fm10k_dev_tx_queue_start,
2377         .tx_queue_stop          = fm10k_dev_tx_queue_stop,
2378         .rx_queue_setup         = fm10k_rx_queue_setup,
2379         .rx_queue_release       = fm10k_rx_queue_release,
2380         .tx_queue_setup         = fm10k_tx_queue_setup,
2381         .tx_queue_release       = fm10k_tx_queue_release,
2382         .reta_update            = fm10k_reta_update,
2383         .reta_query             = fm10k_reta_query,
2384         .rss_hash_update        = fm10k_rss_hash_update,
2385         .rss_hash_conf_get      = fm10k_rss_hash_conf_get,
2386 };
2387
2388 static void __attribute__((cold))
2389 fm10k_set_tx_function(struct rte_eth_dev *dev)
2390 {
2391         struct fm10k_tx_queue *txq;
2392         int i;
2393         int use_sse = 1;
2394
2395         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2396                 txq = dev->data->tx_queues[i];
2397                 if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) !=
2398                         FM10K_SIMPLE_TX_FLAG) {
2399                         use_sse = 0;
2400                         break;
2401                 }
2402         }
2403
2404         if (use_sse) {
2405                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2406                         txq = dev->data->tx_queues[i];
2407                         fm10k_txq_vec_setup(txq);
2408                 }
2409                 dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
2410         } else
2411                 dev->tx_pkt_burst = fm10k_xmit_pkts;
2412 }
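
/*
 * Illustrative note: the vector Tx path above is kept only when every Tx
 * queue was configured with txq_flags covering FM10K_SIMPLE_TX_FLAG,
 * i.e. no multi-segment packets and no Tx offloads. A hypothetical
 * application opting in would pass, via rte_eth_tx_queue_setup():
 *
 *     struct rte_eth_txconf txconf = {
 *             .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
 *                          ETH_TXQ_FLAGS_NOOFFLOADS,
 *     };
 */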
2413
2414 static void __attribute__((cold))
2415 fm10k_set_rx_function(struct rte_eth_dev *dev)
2416 {
2417         struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2418         uint16_t i, rx_using_sse;
2419
2420         /* In order to allow Vector Rx there are a few configuration
2421          * conditions to be met.
2422          */
2423         if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) {
2424                 if (dev->data->scattered_rx)
2425                         dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
2426                 else
2427                         dev->rx_pkt_burst = fm10k_recv_pkts_vec;
2428         } else if (dev->data->scattered_rx)
2429                 dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
2430
2431         rx_using_sse =
2432                 (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
2433                 dev->rx_pkt_burst == fm10k_recv_pkts_vec);
2434
2435         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2436                 struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
2437
2438                 rxq->rx_using_sse = rx_using_sse;
2439         }
2440 }
2441
2442 static void
2443 fm10k_params_init(struct rte_eth_dev *dev)
2444 {
2445         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2446         struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
2447
2448         /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
2449          * there is no way to get link status without reading BAR4. Until this
2450          * works, assume we have maximum bandwidth.
2451          * @todo - fix bus info
2452          */
2453         hw->bus_caps.speed = fm10k_bus_speed_8000;
2454         hw->bus_caps.width = fm10k_bus_width_pcie_x8;
2455         hw->bus_caps.payload = fm10k_bus_payload_512;
2456         hw->bus.speed = fm10k_bus_speed_8000;
2457         hw->bus.width = fm10k_bus_width_pcie_x8;
2458         hw->bus.payload = fm10k_bus_payload_256;
2459
2460         info->rx_vec_allowed = true;
2461 }
2462
2463 static int
2464 eth_fm10k_dev_init(struct rte_eth_dev *dev)
2465 {
2466         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2467         int diag;
2468         struct fm10k_macvlan_filter_info *macvlan;
2469
2470         PMD_INIT_FUNC_TRACE();
2471
2472         dev->dev_ops = &fm10k_eth_dev_ops;
2473         dev->rx_pkt_burst = &fm10k_recv_pkts;
2474         dev->tx_pkt_burst = &fm10k_xmit_pkts;
2475
2476         /* only initialize in the primary process */
2477         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2478                 return 0;
2479
2480         rte_eth_copy_pci_info(dev, dev->pci_dev);
2481
2482         macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
2483         memset(macvlan, 0, sizeof(*macvlan));
2484         /* Vendor and Device ID need to be set before init of shared code */
2485         memset(hw, 0, sizeof(*hw));
2486         hw->device_id = dev->pci_dev->id.device_id;
2487         hw->vendor_id = dev->pci_dev->id.vendor_id;
2488         hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
2489         hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
2490         hw->revision_id = 0;
2491         hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
2492         if (hw->hw_addr == NULL) {
2493                 PMD_INIT_LOG(ERR, "Bad mem resource."
2494                         " Try to blacklist unused devices.");
2495                 return -EIO;
2496         }
2497
2498         /* Store fm10k_adapter pointer */
2499         hw->back = dev->data->dev_private;
2500
2501         /* Initialize the shared code */
2502         diag = fm10k_init_shared_code(hw);
2503         if (diag != FM10K_SUCCESS) {
2504                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
2505                 return -EIO;
2506         }
2507
2508         /* Initialize parameters */
2509         fm10k_params_init(dev);
2510
2511         /* Initialize the hw */
2512         diag = fm10k_init_hw(hw);
2513         if (diag != FM10K_SUCCESS) {
2514                 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
2515                 return -EIO;
2516         }
2517
2518         /* Initialize MAC address(es) */
2519         dev->data->mac_addrs = rte_zmalloc("fm10k",
2520                         ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
2521         if (dev->data->mac_addrs == NULL) {
2522                 PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
2523                 return -ENOMEM;
2524         }
2525
2526         diag = fm10k_read_mac_addr(hw);
2527
2528         ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2529                         &dev->data->mac_addrs[0]);
2530
2531         if (diag != FM10K_SUCCESS ||
2532                 !is_valid_assigned_ether_addr(dev->data->mac_addrs)) {
2533
2534                 /* Generate a random addr */
2535                 eth_random_addr(hw->mac.addr);
2536                 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
2537                 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
2538                 &dev->data->mac_addrs[0]);
2539         }
2540
2541         /* Reset the hw statistics */
2542         fm10k_stats_reset(dev);
2543
2544         /* Reset the hw */
2545         diag = fm10k_reset_hw(hw);
2546         if (diag != FM10K_SUCCESS) {
2547                 PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
2548                 return -EIO;
2549         }
2550
2551         /* Setup mailbox service */
2552         diag = fm10k_setup_mbx_service(hw);
2553         if (diag != FM10K_SUCCESS) {
2554                 PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
2555                 return -EIO;
2556         }
2557
2558         /* PF and VF have different interrupt handling mechanisms */
2559         if (hw->mac.type == fm10k_mac_pf) {
2560                 /* register callback func to eal lib */
2561                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2562                         fm10k_dev_interrupt_handler_pf, (void *)dev);
2563
2564                 /* enable MISC interrupt */
2565                 fm10k_dev_enable_intr_pf(dev);
2566         } else { /* VF */
2567                 rte_intr_callback_register(&(dev->pci_dev->intr_handle),
2568                         fm10k_dev_interrupt_handler_vf, (void *)dev);
2569
2570                 fm10k_dev_enable_intr_vf(dev);
2571         }
2572
2573         /* Enable uio intr after callback registered */
2574         rte_intr_enable(&(dev->pci_dev->intr_handle));
2575
2576         hw->mac.ops.update_int_moderator(hw);
2577
2578         /* Make sure Switch Manager is ready before going forward. */
2579         if (hw->mac.type == fm10k_mac_pf) {
2580                 int switch_ready = 0;
2581                 int i;
2582
2583                 for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
2584                         fm10k_mbx_lock(hw);
2585                         hw->mac.ops.get_host_state(hw, &switch_ready);
2586                         fm10k_mbx_unlock(hw);
2587                         if (switch_ready)
2588                                 break;
2589                         /* Delay some time to acquire async LPORT_MAP info. */
2590                         rte_delay_us(WAIT_SWITCH_MSG_US);
2591                 }
2592
2593                 if (switch_ready == 0) {
2594                         PMD_INIT_LOG(ERR, "switch is not ready");
2595                         return -1;
2596                 }
2597         }
2598
2599         /*
2600          * The calls below trigger mailbox operations, so acquire the lock to
2601          * avoid racing with the interrupt handler. Operations on the mailbox
2602          * FIFO raise an interrupt to the PF/SM, whose handler in turn
2603          * generates an interrupt back to our side, after which the mailbox
2604          * FIFO is touched again.
2605          */
2606         fm10k_mbx_lock(hw);
2607         /* Enable port first */
2608         hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1);
2609
2610         /* Set unicast mode by default. The application can switch to another
2611          * mode through other API functions.
2612          */
2613         hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
2614                                         FM10K_XCAST_MODE_NONE);
2615
2616         fm10k_mbx_unlock(hw);
2617
2618         /* Add default mac address */
2619         fm10k_MAC_filter_set(dev, hw->mac.addr, true,
2620                 MAIN_VSI_POOL_NUMBER);
2621
2622         return 0;
2623 }
2624
2625 static int
2626 eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
2627 {
2628         struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2629
2630         PMD_INIT_FUNC_TRACE();
2631
2632         /* only uninitialize in the primary process */
2633         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2634                 return 0;
2635
2636         /* safe to close dev here */
2637         fm10k_dev_close(dev);
2638
2639         dev->dev_ops = NULL;
2640         dev->rx_pkt_burst = NULL;
2641         dev->tx_pkt_burst = NULL;
2642
2643         /* disable uio/vfio intr */
2644         rte_intr_disable(&(dev->pci_dev->intr_handle));
2645
2646         /* PF and VF have different interrupt handling mechanisms */
2647         if (hw->mac.type == fm10k_mac_pf) {
2648                 /* disable interrupt */
2649                 fm10k_dev_disable_intr_pf(dev);
2650
2651                 /* unregister callback func to eal lib */
2652                 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2653                         fm10k_dev_interrupt_handler_pf, (void *)dev);
2654         } else {
2655                 /* disable interrupt */
2656                 fm10k_dev_disable_intr_vf(dev);
2657
2658                 rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
2659                         fm10k_dev_interrupt_handler_vf, (void *)dev);
2660         }
2661
2662         /* free mac memory */
2663         if (dev->data->mac_addrs) {
2664                 rte_free(dev->data->mac_addrs);
2665                 dev->data->mac_addrs = NULL;
2666         }
2667
2668         memset(hw, 0, sizeof(*hw));
2669
2670         return 0;
2671 }
2672
2673 /*
2674  * The set of PCI devices this driver supports. This driver will enable both PF
2675  * and SR-IOV VF devices.
2676  */
2677 static const struct rte_pci_id pci_id_fm10k_map[] = {
2678 #define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2679 #define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) },
2680 #include "rte_pci_dev_ids.h"
2681         { .vendor_id = 0, /* sentinel */ },
2682 };
2683
2684 static struct eth_driver rte_pmd_fm10k = {
2685         .pci_drv = {
2686                 .name = "rte_pmd_fm10k",
2687                 .id_table = pci_id_fm10k_map,
2688                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
2689         },
2690         .eth_dev_init = eth_fm10k_dev_init,
2691         .eth_dev_uninit = eth_fm10k_dev_uninit,
2692         .dev_private_size = sizeof(struct fm10k_adapter),
2693 };
2694
2695 /*
2696  * Driver initialization routine.
2697  * Invoked once at EAL init time.
2698  * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
2699  */
2700 static int
2701 rte_pmd_fm10k_init(__rte_unused const char *name,
2702         __rte_unused const char *params)
2703 {
2704         PMD_INIT_FUNC_TRACE();
2705         rte_eth_driver_register(&rte_pmd_fm10k);
2706         return 0;
2707 }
2708
2709 static struct rte_driver rte_fm10k_driver = {
2710         .type = PMD_PDEV,
2711         .init = rte_pmd_fm10k_init,
2712 };
2713
2714 PMD_REGISTER_DRIVER(rte_fm10k_driver);