[dpdk.git] lib/librte_pmd_ixgbe/ixgbe_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
45
46 #include <rte_interrupts.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_tailq.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63
64 #include "ixgbe_logs.h"
65 #include "ixgbe/ixgbe_api.h"
66 #include "ixgbe/ixgbe_vf.h"
67 #include "ixgbe/ixgbe_common.h"
68 #include "ixgbe_ethdev.h"
69 #include "ixgbe_bypass.h"
70
71 /*
72  * High threshold controlling when to start sending XOFF frames. Must be at
73  * least 8 bytes less than receive packet buffer size. This value is in units
74  * of 1024 bytes.
75  */
76 #define IXGBE_FC_HI    0x80
77
78 /*
79  * Low threshold controlling when to start sending XON frames. This value is
80  * in units of 1024 bytes.
81  */
82 #define IXGBE_FC_LO    0x40
83
84 /* Timer value included in XOFF frames. */
85 #define IXGBE_FC_PAUSE 0x680
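/*
 * For reference: in 1024-byte units, 0x80 and 0x40 correspond to 128 KB and
 * 64 KB watermarks. The pause value is carried in the XOFF frames' timer
 * field, which 802.3x expresses in pause quanta of 512 bit times each.
 */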
86
87 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
88 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
89 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
90
91
92 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
93
94 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
95
96 static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
97                 struct rte_eth_dev *eth_dev);
98 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
99 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
100 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
101 static void ixgbe_dev_close(struct rte_eth_dev *dev);
102 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
103 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
104 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
105 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
106 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
107                                 int wait_to_complete);
108 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
109                                 struct rte_eth_stats *stats);
110 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
111 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
112                                              uint16_t queue_id,
113                                              uint8_t stat_idx,
114                                              uint8_t is_rx);
115 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
116                                 struct rte_eth_dev_info *dev_info);
117 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
118                 uint16_t vlan_id, int on);
119 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
120 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
121                 uint16_t queue, bool on);
122 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
123                 int on);
124 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
125 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
126 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
127 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
128 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
129
130 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
131 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
132 static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
133                 struct rte_eth_fc_conf *fc_conf);
134 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
135                 struct rte_eth_pfc_conf *pfc_conf);
136 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
137                 struct rte_eth_rss_reta *reta_conf);
138 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
139                 struct rte_eth_rss_reta *reta_conf);
140 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
141 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
142 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
143 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
144 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
145                 void *param);
146 static void ixgbe_dev_interrupt_delayed_handler(void *param);
147 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
148                 uint32_t index, uint32_t pool);
149 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
150 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
151
152 /* For Virtual Function support */
153 static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
154                 struct rte_eth_dev *eth_dev);
155 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
156 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
157 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
158 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
159 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
160 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
161                 struct rte_eth_stats *stats);
162 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
163 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
164                 uint16_t vlan_id, int on);
165 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
166                 uint16_t queue, int on);
167 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
168 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
169
170 /* For Eth VMDQ APIs support */
171 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
172                 struct ether_addr *mac_addr, uint8_t on);
173 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
174 static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
175                 uint16_t rx_mask, uint8_t on);
176 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
177 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
178 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
179                 uint64_t pool_mask, uint8_t vlan_on);
180 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
181                 struct rte_eth_vmdq_mirror_conf *mirror_conf,
182                 uint8_t rule_id, uint8_t on);
183 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
184                 uint8_t rule_id);
185
186 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
187                                  struct ether_addr *mac_addr,
188                                  uint32_t index, uint32_t pool);
189 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
190
191 /*
192  * Define VF stats macros for registers that are not "cleared on read"
193  */
194 #define UPDATE_VF_STAT(reg, last, cur)                          \
195 {                                                               \
196         u32 latest = IXGBE_READ_REG(hw, reg);                   \
197         cur += latest - last;                                   \
198         last = latest;                                          \
199 }
200
201 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
202 {                                                                \
203         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
204         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
205         u64 latest = ((new_msb << 32) | new_lsb);                \
206         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
207         last = latest;                                           \
208 }
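/*
 * The masked addition above implements modulo-2^36 subtraction so the
 * accumulated value survives a counter wrap. For example, if last is
 * 0xFFFFFFFF0 and the hardware counter wraps around to 0x010, then
 * (0x1000000000 + 0x010 - 0xFFFFFFFF0) & 0xFFFFFFFFF = 0x20, i.e. the
 * 32 units counted across the wrap are credited correctly.
 */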
209
210 #define IXGBE_SET_HWSTRIP(h, q) do { \
211                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
212                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
213                 (h)->bitmap[idx] |= 1 << bit; \
214         } while (0)
215
216 #define IXGBE_CLEAR_HWSTRIP(h, q) do { \
217                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
218                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
219                 (h)->bitmap[idx] &= ~(1 << bit); \
220         } while (0)
221
222 #define IXGBE_GET_HWSTRIP(h, q, r) do { \
223                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
224                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
225                 (r) = (h)->bitmap[idx] >> bit & 1; \
226         } while (0)
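/*
 * Indexing example for the bitmap macros above: with 32-bit bitmap words
 * (sizeof(bitmap[0]) * NBBY == 32), queue 35 maps to idx = 35 / 32 = 1 and
 * bit = 35 % 32 = 3, i.e. bit 3 of bitmap[1].
 */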
227
228 /*
229  * The set of PCI devices this driver supports
230  */
231 static struct rte_pci_id pci_id_ixgbe_map[] = {
232
233 #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
234 #include "rte_pci_dev_ids.h"
235
236 { .vendor_id = 0, /* sentinel */ },
237 };
238
239
240 /*
241  * The set of PCI devices this driver supports (for 82599 VF)
242  */
243 static struct rte_pci_id pci_id_ixgbevf_map[] = {
244
245 #define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
246 #include "rte_pci_dev_ids.h"
247 { .vendor_id = 0, /* sentinel */ },
248
249 };
250
251 static struct eth_dev_ops ixgbe_eth_dev_ops = {
252         .dev_configure        = ixgbe_dev_configure,
253         .dev_start            = ixgbe_dev_start,
254         .dev_stop             = ixgbe_dev_stop,
255         .dev_close            = ixgbe_dev_close,
256         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
257         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
258         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
259         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
260         .link_update          = ixgbe_dev_link_update,
261         .stats_get            = ixgbe_dev_stats_get,
262         .stats_reset          = ixgbe_dev_stats_reset,
263         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
264         .dev_infos_get        = ixgbe_dev_info_get,
265         .vlan_filter_set      = ixgbe_vlan_filter_set,
266         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
267         .vlan_offload_set     = ixgbe_vlan_offload_set,
268         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
269         .rx_queue_start       = ixgbe_dev_rx_queue_start,
270         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
271         .tx_queue_start       = ixgbe_dev_tx_queue_start,
272         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
273         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
274         .rx_queue_release     = ixgbe_dev_rx_queue_release,
275         .rx_queue_count       = ixgbe_dev_rx_queue_count,
276         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
277         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
278         .tx_queue_release     = ixgbe_dev_tx_queue_release,
279         .dev_led_on           = ixgbe_dev_led_on,
280         .dev_led_off          = ixgbe_dev_led_off,
281         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
282         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
283         .mac_addr_add         = ixgbe_add_rar,
284         .mac_addr_remove      = ixgbe_remove_rar,
285         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
286         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
287         .mirror_rule_set      = ixgbe_mirror_rule_set,
288         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
289         .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
290         .set_vf_rx            = ixgbe_set_pool_rx,
291         .set_vf_tx            = ixgbe_set_pool_tx,
292         .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
293         .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
294         .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
295         .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
296         .fdir_infos_get               = ixgbe_fdir_info_get,
297         .fdir_add_perfect_filter      = ixgbe_fdir_add_perfect_filter,
298         .fdir_update_perfect_filter   = ixgbe_fdir_update_perfect_filter,
299         .fdir_remove_perfect_filter   = ixgbe_fdir_remove_perfect_filter,
300         .fdir_set_masks               = ixgbe_fdir_set_masks,
301         .reta_update          = ixgbe_dev_rss_reta_update,
302         .reta_query           = ixgbe_dev_rss_reta_query,
303 #ifdef RTE_NIC_BYPASS
304         .bypass_init          = ixgbe_bypass_init,
305         .bypass_state_set     = ixgbe_bypass_state_store,
306         .bypass_state_show    = ixgbe_bypass_state_show,
307         .bypass_event_set     = ixgbe_bypass_event_store,
308         .bypass_event_show    = ixgbe_bypass_event_show,
309         .bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
310         .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
311         .bypass_ver_show      = ixgbe_bypass_ver_show,
312         .bypass_wd_reset      = ixgbe_bypass_wd_reset,
313 #endif /* RTE_NIC_BYPASS */
314         .rss_hash_update      = ixgbe_dev_rss_hash_update,
315         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
316 };
317
318 /*
319  * dev_ops for virtual function: only the bare necessities for basic VF
320  * operation have been implemented
321  */
322 static struct eth_dev_ops ixgbevf_eth_dev_ops = {
323
324         .dev_configure        = ixgbevf_dev_configure,
325         .dev_start            = ixgbevf_dev_start,
326         .dev_stop             = ixgbevf_dev_stop,
327         .link_update          = ixgbe_dev_link_update,
328         .stats_get            = ixgbevf_dev_stats_get,
329         .stats_reset          = ixgbevf_dev_stats_reset,
330         .dev_close            = ixgbevf_dev_close,
331         .dev_infos_get        = ixgbe_dev_info_get,
332         .vlan_filter_set      = ixgbevf_vlan_filter_set,
333         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
334         .vlan_offload_set     = ixgbevf_vlan_offload_set,
335         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
336         .rx_queue_release     = ixgbe_dev_rx_queue_release,
337         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
338         .tx_queue_release     = ixgbe_dev_tx_queue_release,
339         .mac_addr_add         = ixgbevf_add_mac_addr,
340         .mac_addr_remove      = ixgbevf_remove_mac_addr,
341 };
342
343 /**
344  * Atomically reads the link status information from global
345  * structure rte_eth_dev.
346  *
347  * @param dev
348  *   Pointer to the structure rte_eth_dev to read from.
349  * @param link
350  *   Pointer to the buffer to be filled with the link status.
351  * @return
352  *   - On success, zero.
353  *   - On failure, negative value.
354  */
355 static inline int
356 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
357                                 struct rte_eth_link *link)
358 {
359         struct rte_eth_link *dst = link;
360         struct rte_eth_link *src = &(dev->data->dev_link);
361
362         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
363                                         *(uint64_t *)src) == 0)
364                 return -1;
365
366         return 0;
367 }
368
369 /**
370  * Atomically writes the link status information into global
371  * structure rte_eth_dev.
372  *
373  * @param dev
374  *   Pointer to the structure rte_eth_dev to write to.
375  * @param link
376  *   Pointer to the buffer holding the link status to be written.
377  * @return
378  *   - On success, zero.
379  *   - On failure, negative value.
380  */
381 static inline int
382 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
383                                 struct rte_eth_link *link)
384 {
385         struct rte_eth_link *dst = &(dev->data->dev_link);
386         struct rte_eth_link *src = link;
387
388         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
389                                         *(uint64_t *)src) == 0)
390                 return -1;
391
392         return 0;
393 }
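/*
 * Note on the two helpers above: struct rte_eth_link is aligned so that it
 * fits in 64 bits, which lets rte_atomic64_cmpset() double as an atomic
 * 64-bit copy. The "expected" argument is re-read from the destination, so
 * the operation only fails if the destination changes between that read and
 * the compare-and-set, signalling a concurrent link status update.
 */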
394
395 /*
396  * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
397  */
398 static inline int
399 ixgbe_is_sfp(struct ixgbe_hw *hw)
400 {
401         switch (hw->phy.type) {
402         case ixgbe_phy_sfp_avago:
403         case ixgbe_phy_sfp_ftl:
404         case ixgbe_phy_sfp_intel:
405         case ixgbe_phy_sfp_unknown:
406         case ixgbe_phy_sfp_passive_tyco:
407         case ixgbe_phy_sfp_passive_unknown:
408                 return 1;
409         default:
410                 return 0;
411         }
412 }
413
414 static inline int32_t
415 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
416 {
417         uint32_t ctrl_ext;
418         int32_t status;
419
420         status = ixgbe_reset_hw(hw);
421
422         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
423         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
424         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
425         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
426         IXGBE_WRITE_FLUSH(hw);
427
428         return status;
429 }
430
431 static inline void
432 ixgbe_enable_intr(struct rte_eth_dev *dev)
433 {
434         struct ixgbe_interrupt *intr =
435                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
436         struct ixgbe_hw *hw =
437                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
438
439         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
440         IXGBE_WRITE_FLUSH(hw);
441 }
442
443 /*
444  * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
445  */
446 static void
447 ixgbe_disable_intr(struct ixgbe_hw *hw)
448 {
449         PMD_INIT_FUNC_TRACE();
450
451         if (hw->mac.type == ixgbe_mac_82598EB) {
452                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
453         } else {
454                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
455                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
456                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
457         }
458         IXGBE_WRITE_FLUSH(hw);
459 }
460
461 /*
462  * This function resets queue statistics mapping registers.
463  * From Niantic datasheet, Initialization of Statistics section:
464  * "...if software requires the queue counters, the RQSMR and TQSM registers
465  * must be re-programmed following a device reset."
466  */
467 static void
468 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
469 {
470         uint32_t i;
471
472         for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
473                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
474                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
475         }
476 }
477
478
479 static int
480 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
481                                   uint16_t queue_id,
482                                   uint8_t stat_idx,
483                                   uint8_t is_rx)
484 {
485 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
486 #define NB_QMAP_FIELDS_PER_QSM_REG 4
487 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
488
489         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
490         struct ixgbe_stat_mapping_registers *stat_mappings =
491                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
492         uint32_t qsmr_mask = 0;
493         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
494         uint32_t q_map;
495         uint8_t n, offset;
496
497         if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
498                 return -ENOSYS;
499
500         PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
501                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
502
503         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
504         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
505                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
506                 return -EIO;
507         }
508         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
509
510         /* Now clear any previous stat_idx set */
511         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
512         if (!is_rx)
513                 stat_mappings->tqsm[n] &= ~clearing_mask;
514         else
515                 stat_mappings->rqsmr[n] &= ~clearing_mask;
516
517         q_map = (uint32_t)stat_idx;
518         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
519         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
520         if (!is_rx)
521                 stat_mappings->tqsm[n] |= qsmr_mask;
522         else
523                 stat_mappings->rqsmr[n] |= qsmr_mask;
524
525         PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
526                      "%s[%d] = 0x%08x\n",
527                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
528                      is_rx ? "RQSMR" : "TQSM", n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
529
530         /* Now write the mapping in the appropriate register */
531         if (is_rx) {
532                 PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
533                              stat_mappings->rqsmr[n], n);
534                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
535         }
536         else {
537                 PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
538                              stat_mappings->tqsm[n], n);
539                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
540         }
541         return 0;
542 }
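/*
 * Mapping example for the function above: each 32-bit RQSMR/TQSM register
 * packs four 8-bit queue-to-counter fields, so queue_id 5 with stat_idx 2
 * selects register n = 5 / 4 = 1, field offset 5 % 4 = 1, and ORs
 * (2 & 0x0f) << 8 into RQSMR[1] (or TQSM[1] for TX).
 */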
543
544 static void
545 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
546 {
547         struct ixgbe_stat_mapping_registers *stat_mappings =
548                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
549         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
550         int i;
551
552         /* write whatever was in stat mapping table to the NIC */
553         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
554                 /* rx */
555                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
556
557                 /* tx */
558                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
559         }
560 }
561
562 static void
563 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
564 {
565         uint8_t i;
566         struct ixgbe_dcb_tc_config *tc;
567         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
568
569         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
570         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
571         for (i = 0; i < dcb_max_tc; i++) {
572                 tc = &dcb_config->tc_config[i];
573                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
574                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
575                                  (uint8_t)(100/dcb_max_tc + (i & 1));
576                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
577                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
578                                  (uint8_t)(100/dcb_max_tc + (i & 1));
579                 tc->pfc = ixgbe_dcb_pfc_disabled;
580         }
581
582         /* Initialize default user to priority mapping, UPx->TC0 */
583         tc = &dcb_config->tc_config[0];
584         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
585         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
586         for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
587                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
588                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
589         }
590         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
591         dcb_config->pfc_mode_enable = false;
592         dcb_config->vt_mode = true;
593         dcb_config->round_robin_enable = false;
594         /* support all DCB capabilities in 82599 */
595         dcb_config->support.capabilities = 0xFF;
596
597         /* we only support 4 TCs for X540 */
598         if (hw->mac.type == ixgbe_mac_X540) {
599                 dcb_config->num_tcs.pg_tcs = 4;
600                 dcb_config->num_tcs.pfc_tcs = 4;
601         }
602 }
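/*
 * The bwg_percent arithmetic above splits 100% as evenly as integer math
 * allows: with 8 traffic classes, 100 / 8 = 12 plus the (i & 1) correction
 * yields 12, 13, 12, 13, ..., which sums to exactly 100 across the 8 TCs.
 */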
603
604 /*
605  * Ensure that all locks are released before first NVM or PHY access
606  */
607 static void
608 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
609 {
610         uint16_t mask;
611
612         /*
613          * The PHY lock should not fail at this early stage. If it does,
614          * it is due to an improper exit of the application, so force the
615          * release of the faulty lock. Release of the common lock is done
616          * automatically by the swfw_sync function.
617          */
618         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
619         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
620                    DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
621         }
622         ixgbe_release_swfw_semaphore(hw, mask);
623
624         /*
625          * These locks are trickier since they are common to all ports; but
626          * swfw_sync retries for long enough (1s) to be almost sure that if
627          * the lock cannot be taken, it is due to an improper lock of the
628          * semaphore.
629          */
630         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
631         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
632                    DEBUGOUT("SWFW common locks released");
633         }
634         ixgbe_release_swfw_semaphore(hw, mask);
635 }
636
637 /*
638  * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
639  * It returns 0 on success.
640  */
641 static int
642 eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
643                      struct rte_eth_dev *eth_dev)
644 {
645         struct rte_pci_device *pci_dev;
646         struct ixgbe_hw *hw =
647                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
648         struct ixgbe_vfta * shadow_vfta =
649                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
650         struct ixgbe_hwstrip *hwstrip =
651                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
652         struct ixgbe_dcb_config *dcb_config =
653                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
654         uint32_t ctrl_ext;
655         uint16_t csum;
656         int diag, i;
657
658         PMD_INIT_FUNC_TRACE();
659
660         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
661         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
662         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
663
664         /* For secondary processes, we don't initialise any further, as the
665          * primary process has already done this work. Only check whether we
666          * need a different RX function. */
667         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
668                 if (eth_dev->data->scattered_rx)
669                         eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
670                 return 0;
671         }
672         pci_dev = eth_dev->pci_dev;
673
674         /* Vendor and Device ID need to be set before init of shared code */
675         hw->device_id = pci_dev->id.device_id;
676         hw->vendor_id = pci_dev->id.vendor_id;
677         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
678 #ifdef RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP
679         hw->allow_unsupported_sfp = 1;
680 #endif
681
682         /* Initialize the shared code */
683 #ifdef RTE_NIC_BYPASS
684         diag = ixgbe_bypass_init_shared_code(hw);
685 #else
686         diag = ixgbe_init_shared_code(hw);
687 #endif /* RTE_NIC_BYPASS */
688
689         if (diag != IXGBE_SUCCESS) {
690                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
691                 return -EIO;
692         }
693
694         /* pick up the PCI bus settings for reporting later */
695         ixgbe_get_bus_info(hw);
696
697         /* Unlock any pending hardware semaphore */
698         ixgbe_swfw_lock_reset(hw);
699
700         /* Initialize DCB configuration */
701         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
702         ixgbe_dcb_init(hw, dcb_config);
703         /* Set default hardware flow control settings */
704         hw->fc.requested_mode = ixgbe_fc_full;
705         hw->fc.current_mode = ixgbe_fc_full;
706         hw->fc.pause_time = IXGBE_FC_PAUSE;
707         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
708                 hw->fc.low_water[i] = IXGBE_FC_LO;
709                 hw->fc.high_water[i] = IXGBE_FC_HI;
710         }
711         hw->fc.send_xon = 1;
712
713         /* Make sure we have a good EEPROM before we read from it */
714         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
715         if (diag != IXGBE_SUCCESS) {
716                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
717                 return -EIO;
718         }
719
720 #ifdef RTE_NIC_BYPASS
721         diag = ixgbe_bypass_init_hw(hw);
722 #else
723         diag = ixgbe_init_hw(hw);
724 #endif /* RTE_NIC_BYPASS */
725
726         /*
727          * Devices with copper PHYs will fail to initialise if ixgbe_init_hw()
728          * is called too soon after the kernel driver unbinding/binding occurs.
729          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
730          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
731          * also called. See ixgbe_identify_phy_82599(). The reason for the
732          * failure is not known, and it only occurs when virtualisation features
733          * are disabled in the BIOS. A delay of 100ms was found to be enough by
734          * trial-and-error, and is doubled to be safe.
735          */
736         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
737                 rte_delay_ms(200);
738                 diag = ixgbe_init_hw(hw);
739         }
740
741         if (diag == IXGBE_ERR_EEPROM_VERSION) {
742                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
743                     "LOM.  Please be aware there may be issues associated "
744                     "with your hardware.\n If you are experiencing problems "
745                     "please contact your Intel or hardware representative "
746                     "who provided you with this hardware.\n");
747         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
748                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
749         if (diag) {
750                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
751                 return -EIO;
752         }
753
754         /* disable interrupt */
755         ixgbe_disable_intr(hw);
756
757         /* reset mappings for queue statistics hw counters */
758         ixgbe_reset_qstat_mappings(hw);
759
760         /* Allocate memory for storing MAC addresses */
761         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
762                         hw->mac.num_rar_entries, 0);
763         if (eth_dev->data->mac_addrs == NULL) {
764                 PMD_INIT_LOG(ERR,
765                         "Failed to allocate %u bytes needed to store "
766                         "MAC addresses",
767                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
768                 return -ENOMEM;
769         }
770         /* Copy the permanent MAC address */
771         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
772                         &eth_dev->data->mac_addrs[0]);
773
774         /* Allocate memory for storing hash filter MAC addresses */
775         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
776                         IXGBE_VMDQ_NUM_UC_MAC, 0);
777         if (eth_dev->data->hash_mac_addrs == NULL) {
778                 PMD_INIT_LOG(ERR,
779                         "Failed to allocate %d bytes needed to store MAC addresses",
780                         ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
781                 return -ENOMEM;
782         }
783
784         /* initialize the vfta */
785         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
786
787         /* initialize the hw strip bitmap */
788         memset(hwstrip, 0, sizeof(*hwstrip));
789
790         /* initialize PF if max_vfs not zero */
791         ixgbe_pf_host_init(eth_dev);
792
793         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
794         /* let hardware know driver is loaded */
795         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
796         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
797         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
798         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
799         IXGBE_WRITE_FLUSH(hw);
800
801         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
802                 PMD_INIT_LOG(DEBUG,
803                              "MAC: %d, PHY: %d, SFP+: %d\n",
804                              (int) hw->mac.type, (int) hw->phy.type,
805                              (int) hw->phy.sfp_type);
806         else
807                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
808                              (int) hw->mac.type, (int) hw->phy.type);
809
810         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
811                         eth_dev->data->port_id, pci_dev->id.vendor_id,
812                         pci_dev->id.device_id);
813
814         rte_intr_callback_register(&(pci_dev->intr_handle),
815                 ixgbe_dev_interrupt_handler, (void *)eth_dev);
816
817         /* enable uio intr after callback register */
818         rte_intr_enable(&(pci_dev->intr_handle));
819
820         /* enable support intr */
821         ixgbe_enable_intr(eth_dev);
822
823         return 0;
824 }
825
826
827 /*
828  * Negotiate mailbox API version with the PF.
829  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
830  * Then we try to negotiate starting with the most recent one.
831  * If all negotiation attempts fail, then we will proceed with
832  * the default one (ixgbe_mbox_api_10).
833  */
834 static void
835 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
836 {
837         int32_t i;
838
839         /* start with highest supported, proceed down */
840         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
841                 ixgbe_mbox_api_11,
842                 ixgbe_mbox_api_10,
843         };
844
845         for (i = 0;
846                         i != RTE_DIM(sup_ver) &&
847                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
848                         i++)
849                 ;
850 }
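/*
 * The loop above stops at the first version the PF acknowledges; e.g. a PF
 * running an older host driver NACKs ixgbe_mbox_api_11, and the VF then
 * falls back to ixgbe_mbox_api_10.
 */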
851
852 static void
853 generate_random_mac_addr(struct ether_addr *mac_addr)
854 {
855         uint64_t random;
856
857         /* Set Organizationally Unique Identifier (OUI) prefix. */
858         mac_addr->addr_bytes[0] = 0x00;
859         mac_addr->addr_bytes[1] = 0x09;
860         mac_addr->addr_bytes[2] = 0xC0;
861         /* Force indication of locally assigned MAC address. */
862         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
863         /* Generate the last 3 bytes of the MAC address with a random number. */
864         random = rte_rand();
865         memcpy(&mac_addr->addr_bytes[3], &random, 3);
866 }
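/*
 * Since ETHER_LOCAL_ADMIN_ADDR is 0x02, the OUI written above becomes
 * 02:09:C0, so generated addresses have the form 02:09:C0:xx:xx:xx and
 * are flagged as locally administered rather than vendor-assigned.
 */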
867
868 /*
869  * Virtual Function device init
870  */
871 static int
872 eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
873                      struct rte_eth_dev *eth_dev)
874 {
875         int diag;
876         uint32_t tc, tcs;
877         struct rte_pci_device *pci_dev;
878         struct ixgbe_hw *hw =
879                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
880         struct ixgbe_vfta * shadow_vfta =
881                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
882         struct ixgbe_hwstrip *hwstrip =
883                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
884         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
885
886         PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
887
888         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
889         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
890         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
891
892         /* For secondary processes, we don't initialise any further, as the
893          * primary process has already done this work. Only check whether we
894          * need a different RX function. */
895         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
896                 if (eth_dev->data->scattered_rx)
897                         eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
898                 return 0;
899         }
900
901         pci_dev = eth_dev->pci_dev;
902
903         hw->device_id = pci_dev->id.device_id;
904         hw->vendor_id = pci_dev->id.vendor_id;
905         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
906
907         /* initialize the vfta */
908         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
909
910         /* initialize the hw strip bitmap*/
911         /* initialize the hw strip bitmap */
912
913         /* Initialize the shared code */
914         diag = ixgbe_init_shared_code(hw);
915         if (diag != IXGBE_SUCCESS) {
916                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
917                 return -EIO;
918         }
919
920         /* init_mailbox_params */
921         hw->mbx.ops.init_params(hw);
922
923         /* Disable the interrupts for VF */
924         ixgbevf_intr_disable(hw);
925
926         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
927         diag = hw->mac.ops.reset_hw(hw);
928
929         /*
930          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
931          * the underlying PF driver has not assigned a MAC address to the VF.
932          * In this case, assign a random MAC address.
933          */
934         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
935                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
936                 return diag;
937         }
938
939         /* negotiate mailbox API version to use with the PF. */
940         ixgbevf_negotiate_api(hw);
941
942         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
943         ixgbevf_get_queues(hw, &tcs, &tc);
944
945         /* Allocate memory for storing MAC addresses */
946         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
947                         hw->mac.num_rar_entries, 0);
948         if (eth_dev->data->mac_addrs == NULL) {
949                 PMD_INIT_LOG(ERR,
950                         "Failed to allocate %u bytes needed to store "
951                         "MAC addresses",
952                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
953                 return -ENOMEM;
954         }
955
956         /* Generate a random MAC address, if none was assigned by PF. */
957         if (is_zero_ether_addr(perm_addr)) {
958                 generate_random_mac_addr(perm_addr);
959                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
960                 if (diag) {
961                         rte_free(eth_dev->data->mac_addrs);
962                         eth_dev->data->mac_addrs = NULL;
963                         return diag;
964                 }
965                 RTE_LOG(INFO, PMD,
966                         "\tVF MAC address not assigned by Host PF\n"
967                         "\tAssign randomly generated MAC address "
968                         "%02x:%02x:%02x:%02x:%02x:%02x\n",
969                         perm_addr->addr_bytes[0],
970                         perm_addr->addr_bytes[1],
971                         perm_addr->addr_bytes[2],
972                         perm_addr->addr_bytes[3],
973                         perm_addr->addr_bytes[4],
974                         perm_addr->addr_bytes[5]);
975         }
976
977         /* Copy the permanent MAC address */
978         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
979
980         /* reset the hardware with the new settings */
981         diag = hw->mac.ops.start_hw(hw);
982         switch (diag) {
983         case 0:
984                 break;
985
986         default:
987                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
988                 return -EIO;
989         }
990
991         PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
992                          eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
993                          "ixgbe_mac_82599_vf");
994
995         return 0;
996 }
997
998 static struct eth_driver rte_ixgbe_pmd = {
999         {
1000                 .name = "rte_ixgbe_pmd",
1001                 .id_table = pci_id_ixgbe_map,
1002                 .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
1003         },
1004         .eth_dev_init = eth_ixgbe_dev_init,
1005         .dev_private_size = sizeof(struct ixgbe_adapter),
1006 };
1007
1008 /*
1009  * virtual function driver struct
1010  */
1011 static struct eth_driver rte_ixgbevf_pmd = {
1012         {
1013                 .name = "rte_ixgbevf_pmd",
1014                 .id_table = pci_id_ixgbevf_map,
1015                 .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
1016         },
1017         .eth_dev_init = eth_ixgbevf_dev_init,
1018         .dev_private_size = sizeof(struct ixgbe_adapter),
1019 };
1020
1021 /*
1022  * Driver initialization routine.
1023  * Invoked once at EAL init time.
1024  * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
1025  */
1026 static int
1027 rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
1028 {
1029         PMD_INIT_FUNC_TRACE();
1030
1031         rte_eth_driver_register(&rte_ixgbe_pmd);
1032         return 0;
1033 }
1034
1035 /*
1036  * VF Driver initialization routine.
1037  * Invoked once at EAL init time.
1038  * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
1039  */
1040 static int
1041 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
1042 {
1043         DEBUGFUNC("rte_ixgbevf_pmd_init");
1044
1045         rte_eth_driver_register(&rte_ixgbevf_pmd);
1046         return 0;
1047 }
1048
1049 static int
1050 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1051 {
1052         struct ixgbe_hw *hw =
1053                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1054         struct ixgbe_vfta * shadow_vfta =
1055                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1056         uint32_t vfta;
1057         uint32_t vid_idx;
1058         uint32_t vid_bit;
1059
1060         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1061         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1062         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1063         if (on)
1064                 vfta |= vid_bit;
1065         else
1066                 vfta &= ~vid_bit;
1067         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1068
1069         /* update local VFTA copy */
1070         shadow_vfta->vfta[vid_idx] = vfta;
1071
1072         return 0;
1073 }
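/*
 * VFTA indexing example for the function above: vlan_id 100 gives
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * i.e. bit 4 of VFTA[3]; the 4096 possible VLAN IDs span 128 32-bit
 * registers.
 */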
1074
1075 static void
1076 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1077 {
1078         if (on)
1079                 ixgbe_vlan_hw_strip_enable(dev, queue);
1080         else
1081                 ixgbe_vlan_hw_strip_disable(dev, queue);
1082 }
1083
1084 static void
1085 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
1086 {
1087         struct ixgbe_hw *hw =
1088                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1089
1090         /* Only the high 16 bits are valid */
1091         IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
1092 }
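/*
 * Example for the write above: setting tpid to 0x88A8 (the 802.1ad S-tag
 * ethertype) stores 0x88A80000 in EXVET, since the ethertype occupies the
 * upper 16 bits of the register.
 */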
1093
1094 void
1095 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1096 {
1097         struct ixgbe_hw *hw =
1098                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1099         uint32_t vlnctrl;
1100
1101         PMD_INIT_FUNC_TRACE();
1102
1103         /* Filter Table Disable */
1104         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1105         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1106
1107         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1108 }
1109
1110 void
1111 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1112 {
1113         struct ixgbe_hw *hw =
1114                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1115         struct ixgbe_vfta * shadow_vfta =
1116                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1117         uint32_t vlnctrl;
1118         uint16_t i;
1119
1120         PMD_INIT_FUNC_TRACE();
1121
1122         /* Filter Table Enable */
1123         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1124         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1125         vlnctrl |= IXGBE_VLNCTRL_VFE;
1126
1127         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1128
1129         /* write whatever is in local vfta copy */
1130         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1131                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1132 }
1133
1134 static void
1135 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1136 {
1137         struct ixgbe_hwstrip *hwstrip =
1138                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1139
1140         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1141                 return;
1142
1143         if (on)
1144                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1145         else
1146                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1147 }
1148
1149 static void
1150 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1151 {
1152         struct ixgbe_hw *hw =
1153                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1154         uint32_t ctrl;
1155
1156         PMD_INIT_FUNC_TRACE();
1157
1158         if (hw->mac.type == ixgbe_mac_82598EB) {
1159                 /* No queue-level support */
1160                 PMD_INIT_LOG(INFO, "82598EB does not support queue-level HW VLAN stripping");
1161                 return;
1162         }
1163         else {
1164                 /* Other 10G NICs: VLAN stripping can be set up per queue in RXDCTL */
1165                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1166                 ctrl &= ~IXGBE_RXDCTL_VME;
1167                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1168         }
1169         /* record this setting in the per-queue HW strip bitmap */
1170         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1171 }
1172
1173 static void
1174 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1175 {
1176         struct ixgbe_hw *hw =
1177                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1178         uint32_t ctrl;
1179
1180         PMD_INIT_FUNC_TRACE();
1181
1182         if (hw->mac.type == ixgbe_mac_82598EB) {
1183                 /* No queue-level support */
1184                 PMD_INIT_LOG(INFO, "82598EB does not support queue-level HW VLAN stripping");
1185                 return;
1186         }
1187         else {
1188                 /* Other 10G NICs: VLAN stripping can be set up per queue in RXDCTL */
1189                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1190                 ctrl |= IXGBE_RXDCTL_VME;
1191                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1192         }
1193         /* record this setting in the per-queue HW strip bitmap */
1194         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1195 }
1196
1197 void
1198 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
1199 {
1200         struct ixgbe_hw *hw =
1201                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1202         uint32_t ctrl;
1203         uint16_t i;
1204
1205         PMD_INIT_FUNC_TRACE();
1206
1207         if (hw->mac.type == ixgbe_mac_82598EB) {
1208                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1209                 ctrl &= ~IXGBE_VLNCTRL_VME;
1210                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1211         }
1212         else {
1213                 /* Other 10G NICs: VLAN stripping can be set up per queue in RXDCTL */
1214                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1215                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1216                         ctrl &= ~IXGBE_RXDCTL_VME;
1217                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1218
1219                         /* record this setting in the per-queue HW strip bitmap */
1220                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
1221                 }
1222         }
1223 }
1224
1225 void
1226 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
1227 {
1228         struct ixgbe_hw *hw =
1229                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1230         uint32_t ctrl;
1231         uint16_t i;
1232
1233         PMD_INIT_FUNC_TRACE();
1234
1235         if (hw->mac.type == ixgbe_mac_82598EB) {
1236                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1237                 ctrl |= IXGBE_VLNCTRL_VME;
1238                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1239         }
1240         else {
1241                 /* Other 10G NICs: VLAN stripping can be set up per queue in RXDCTL */
1242                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1243                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1244                         ctrl |= IXGBE_RXDCTL_VME;
1245                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1246
1247                         /* record this setting in the per-queue HW strip bitmap */
1248                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
1249                 }
1250         }
1251 }
1252
1253 static void
1254 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1255 {
1256         struct ixgbe_hw *hw =
1257                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1258         uint32_t ctrl;
1259
1260         PMD_INIT_FUNC_TRACE();
1261
1262         /* DMATXCTL: Generic Double VLAN Disable */
1263         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1264         ctrl &= ~IXGBE_DMATXCTL_GDV;
1265         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1266
1267         /* CTRL_EXT: Global Double VLAN Disable */
1268         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1269         ctrl &= ~IXGBE_EXTENDED_VLAN;
1270         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1271
1272 }
1273
1274 static void
1275 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1276 {
1277         struct ixgbe_hw *hw =
1278                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1279         uint32_t ctrl;
1280
1281         PMD_INIT_FUNC_TRACE();
1282
1283         /* DMATXCTL: Generic Double VLAN Enable */
1284         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1285         ctrl |= IXGBE_DMATXCTL_GDV;
1286         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1287
1288         /* CTRL_EXT: Global Double VLAN Enable */
1289         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1290         ctrl |= IXGBE_EXTENDED_VLAN;
1291         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1292
1293         /*
1294          * The VET EXT field in the EXVET register is 0x8100 by default, so
1295          * no change is needed. The same applies to the VT field of DMATXCTL.
1296          */
1297 }
1298
1299 static void
1300 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1301 {
1302         if (mask & ETH_VLAN_STRIP_MASK) {
1303                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1304                         ixgbe_vlan_hw_strip_enable_all(dev);
1305                 else
1306                         ixgbe_vlan_hw_strip_disable_all(dev);
1307         }
1308
1309         if (mask & ETH_VLAN_FILTER_MASK) {
1310                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1311                         ixgbe_vlan_hw_filter_enable(dev);
1312                 else
1313                         ixgbe_vlan_hw_filter_disable(dev);
1314         }
1315
1316         if (mask & ETH_VLAN_EXTEND_MASK) {
1317                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1318                         ixgbe_vlan_hw_extend_enable(dev);
1319                 else
1320                         ixgbe_vlan_hw_extend_disable(dev);
1321         }
1322 }
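/*
 * Callers build the mask argument from the ETH_VLAN_*_MASK bits; for
 * instance, ixgbe_dev_start() below passes STRIP | FILTER | EXTEND so all
 * three VLAN settings are re-applied after a hardware reset.
 */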
1323
1324 static void
1325 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1326 {
1327         struct ixgbe_hw *hw =
1328                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1329         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
1330         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1331         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
1332         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
1333 }
1334
1335 static int
1336 ixgbe_dev_configure(struct rte_eth_dev *dev)
1337 {
1338         struct ixgbe_interrupt *intr =
1339                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1340
1341         PMD_INIT_FUNC_TRACE();
1342
1343         /* set flag to update link status after init */
1344         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1345
1346         return 0;
1347 }
1348
1349 /*
1350  * Configure device link speed and setup link.
1351  * It returns 0 on success.
1352  */
1353 static int
1354 ixgbe_dev_start(struct rte_eth_dev *dev)
1355 {
1356         struct ixgbe_hw *hw =
1357                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1358         int err, link_up = 0, negotiate = 0;
1359         uint32_t speed = 0;
1360         int mask = 0;
1361         int status;
1362
1363         PMD_INIT_FUNC_TRACE();
1364
1365         /* IXGBE devices don't support half duplex */
1366         if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
1367                         (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
1368                 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
1369                                 dev->data->dev_conf.link_duplex,
1370                                 dev->data->port_id);
1371                 return -EINVAL;
1372         }
1373
1374         /* stop adapter */
1375         hw->adapter_stopped = FALSE;
1376         ixgbe_stop_adapter(hw);
1377
1378         /* reinitialize adapter
1379          * this calls reset and start */
1380         status = ixgbe_pf_reset_hw(hw);
1381         if (status != 0)
1382                 return -1;
1383         hw->mac.ops.start_hw(hw);
1384
1385         /* configure PF module if SRIOV enabled */
1386         ixgbe_pf_host_configure(dev);
1387
1388         /* initialize transmission unit */
1389         ixgbe_dev_tx_init(dev);
1390
1391         /* This can fail when allocating mbufs for descriptor rings */
1392         err = ixgbe_dev_rx_init(dev);
1393         if (err) {
1394                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
1395                 goto error;
1396         }
1397
1398         ixgbe_dev_rxtx_start(dev);
1399
1400         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1401                 err = hw->mac.ops.setup_sfp(hw);
1402                 if (err)
1403                         goto error;
1404         }
1405
1406         /* Turn on the laser */
1407         ixgbe_enable_tx_laser(hw);
1408
1409         /* Skip link setup if loopback mode is enabled for 82599. */
1410         if (hw->mac.type == ixgbe_mac_82599EB &&
1411                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
1412                 goto skip_link_setup;
1413
1414         err = ixgbe_check_link(hw, &speed, &link_up, 0);
1415         if (err)
1416                 goto error;
1417         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
1418         if (err)
1419                 goto error;
1420
1421         switch (dev->data->dev_conf.link_speed) {
1422         case ETH_LINK_SPEED_AUTONEG:
1423                 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
1424                                 IXGBE_LINK_SPEED_82599_AUTONEG :
1425                                 IXGBE_LINK_SPEED_82598_AUTONEG;
1426                 break;
1427         case ETH_LINK_SPEED_100:
1428                 /*
1429                  * Invalid for 82598 but error will be detected by
1430                  * ixgbe_setup_link()
1431                  */
1432                 speed = IXGBE_LINK_SPEED_100_FULL;
1433                 break;
1434         case ETH_LINK_SPEED_1000:
1435                 speed = IXGBE_LINK_SPEED_1GB_FULL;
1436                 break;
1437         case ETH_LINK_SPEED_10000:
1438                 speed = IXGBE_LINK_SPEED_10GB_FULL;
1439                 break;
1440         default:
1441                 PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
1442                                 dev->data->dev_conf.link_speed,
1443                                 dev->data->port_id);
1444                 goto error;
1445         }
1446
1447         err = ixgbe_setup_link(hw, speed, negotiate, link_up);
1448         if (err)
1449                 goto error;
1450
1451 skip_link_setup:
1452
1453         /* check if lsc interrupt is enabled */
1454         if (dev->data->dev_conf.intr_conf.lsc != 0)
1455                 ixgbe_dev_lsc_interrupt_setup(dev);
1456
1457         /* resume enabled intr since hw reset */
1458         ixgbe_enable_intr(dev);
1459
1460         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1461                 ETH_VLAN_EXTEND_MASK;
1462         ixgbe_vlan_offload_set(dev, mask);
1463
1464         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1465                 /* Enable vlan filtering for VMDq */
1466                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
1467         }
1468
1469         /* Configure DCB hw */
1470         ixgbe_configure_dcb(dev);
1471
1472         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1473                 err = ixgbe_fdir_configure(dev);
1474                 if (err)
1475                         goto error;
1476         }
1477
1478         ixgbe_restore_statistics_mapping(dev);
1479
1480         return (0);
1481
1482 error:
1483         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
1484         ixgbe_dev_clear_queues(dev);
1485         return -EIO;
1486 }
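/*
 * Usage sketch (illustration only): the application-side bring-up that ends
 * in ixgbe_dev_start() above. The descriptor counts and the zeroed queue
 * configurations are illustrative assumptions, not recommended values.
 */
#if 0
static int
example_port_start(uint8_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_txconf tx_conf;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));
	memset(&rx_conf, 0, sizeof(rx_conf)); /* zeroed: let the PMD pick defaults */
	memset(&tx_conf, 0, sizeof(tx_conf));

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 128, 0, &rx_conf, mb_pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512, 0, &tx_conf);
	if (ret < 0)
		return ret;
	/* ends up in ixgbe_dev_start() above */
	return rte_eth_dev_start(port_id);
}
#endif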
1487
1488 /*
1489  * Stop device: disable rx and tx functions to allow for reconfiguring.
1490  */
1491 static void
1492 ixgbe_dev_stop(struct rte_eth_dev *dev)
1493 {
1494         struct rte_eth_link link;
1495         struct ixgbe_hw *hw =
1496                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1497         struct ixgbe_vf_info *vfinfo =
1498                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
1499         int vf;
1500
1501         PMD_INIT_FUNC_TRACE();
1502
1503         /* disable interrupts */
1504         ixgbe_disable_intr(hw);
1505
1506         /* reset the NIC */
1507         ixgbe_pf_reset_hw(hw);
1508         hw->adapter_stopped = FALSE;
1509
1510         /* stop adapter */
1511         ixgbe_stop_adapter(hw);
1512
1513         for (vf = 0; vfinfo != NULL &&
1514                      vf < dev->pci_dev->max_vfs; vf++)
1515                 vfinfo[vf].clear_to_send = false;
1516
1517         /* Turn off the laser */
1518         ixgbe_disable_tx_laser(hw);
1519
1520         ixgbe_dev_clear_queues(dev);
1521
1522         /* Clear recorded link status */
1523         memset(&link, 0, sizeof(link));
1524         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
1525 }
1526
1527 /*
1528  * Reset and stop device.
1529  */
1530 static void
1531 ixgbe_dev_close(struct rte_eth_dev *dev)
1532 {
1533         struct ixgbe_hw *hw =
1534                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1535
1536         PMD_INIT_FUNC_TRACE();
1537
1538         ixgbe_pf_reset_hw(hw);
1539
1540         ixgbe_dev_stop(dev);
1541         hw->adapter_stopped = 1;
1542
1543         ixgbe_disable_pcie_master(hw);
1544
1545         /* reprogram the RAR[0] in case user changed it. */
1546         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1547 }
1548
1549 /*
1550  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
1551  */
1552 static void
1553 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1554 {
1555         struct ixgbe_hw *hw =
1556                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1557         struct ixgbe_hw_stats *hw_stats =
1558                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1559         uint32_t bprc, lxon, lxoff, total;
1560         uint64_t total_missed_rx, total_qbrc, total_qprc;
1561         unsigned i;
1562
1563         total_missed_rx = 0;
1564         total_qbrc = 0;
1565         total_qprc = 0;
1566
1567         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1568         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1569         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1570         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1571
1572         for (i = 0; i < 8; i++) {
1573                 uint32_t mp;
1574                 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1575                 /* global total per queue */
1576                 hw_stats->mpc[i] += mp;
1577                 /* Running comprehensive total for stats display */
1578                 total_missed_rx += hw_stats->mpc[i];
1579                 if (hw->mac.type == ixgbe_mac_82598EB)
1580                         hw_stats->rnbc[i] +=
1581                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1582                 hw_stats->pxontxc[i] +=
1583                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1584                 hw_stats->pxonrxc[i] +=
1585                     IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1586                 hw_stats->pxofftxc[i] +=
1587                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1588                 hw_stats->pxoffrxc[i] +=
1589                     IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1590                 hw_stats->pxon2offc[i] +=
1591                     IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1592         }
1593         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
1594                 hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1595                 hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1596                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
1597                 hw_stats->qbrc[i] +=
1598                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
1599                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
1600                 hw_stats->qbtc[i] +=
1601                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
1602                 hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1603
1604                 total_qprc += hw_stats->qprc[i];
1605                 total_qbrc += hw_stats->qbrc[i];
1606         }
1607         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1608         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1609         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1610
1611         /* Note that gprc counts missed packets */
1612         hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1613
1614         if (hw->mac.type != ixgbe_mac_82598EB) {
1615                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
1616                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1617                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
1618                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1619                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
1620                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1621                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1622                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1623         } else {
1624                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1625                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1626                 /* 82598 only has a counter in the high register */
1627                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1628                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1629                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1630         }
1631
1632         /*
1633          * Workaround: mprc hardware is incorrectly counting
1634          * broadcasts, so for now we subtract those.
1635          */
1636         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1637         hw_stats->bprc += bprc;
1638         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1639         if (hw->mac.type == ixgbe_mac_82598EB)
1640                 hw_stats->mprc -= bprc;
1641
1642         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1643         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1644         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1645         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1646         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1647         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1648
1649         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1650         hw_stats->lxontxc += lxon;
1651         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1652         hw_stats->lxofftxc += lxoff;
1653         total = lxon + lxoff;
1654
1655         hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1656         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1657         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1658         hw_stats->gptc -= total;
1659         hw_stats->mptc -= total;
1660         hw_stats->ptc64 -= total;
1661         hw_stats->gotc -= total * ETHER_MIN_LEN;
1662
1663         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1664         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1665         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1666         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1667         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1668         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1669         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1670         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1671         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1672         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1673         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1674         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1675         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1676         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1677         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1678         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1679         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1680         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1681         /* Only read FCOE on 82599 */
1682         if (hw->mac.type != ixgbe_mac_82598EB) {
1683                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1684                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1685                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1686                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1687                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1688         }
1689
1690         if (stats == NULL)
1691                 return;
1692
1693         /* Fill out the rte_eth_stats statistics structure */
1694         stats->ipackets = total_qprc;
1695         stats->ibytes = total_qbrc;
1696         stats->opackets = hw_stats->gptc;
1697         stats->obytes = hw_stats->gotc;
1698         stats->imcasts = hw_stats->mprc;
1699
1700         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
1701                 stats->q_ipackets[i] = hw_stats->qprc[i];
1702                 stats->q_opackets[i] = hw_stats->qptc[i];
1703                 stats->q_ibytes[i] = hw_stats->qbrc[i];
1704                 stats->q_obytes[i] = hw_stats->qbtc[i];
1705                 stats->q_errors[i] = hw_stats->qprdc[i];
1706         }
1707
1708         /* Rx Errors */
1709         stats->ierrors = total_missed_rx + hw_stats->crcerrs +
1710                 hw_stats->rlec;
1711
1712         stats->oerrors  = 0;
1713
1714         /* XON/XOFF pause frames */
1715         stats->tx_pause_xon  = hw_stats->lxontxc;
1716         stats->rx_pause_xon  = hw_stats->lxonrxc;
1717         stats->tx_pause_xoff = hw_stats->lxofftxc;
1718         stats->rx_pause_xoff = hw_stats->lxoffrxc;
1719
1720         /* Flow Director Stats registers */
1721         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1722         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1723         stats->fdirmatch = hw_stats->fdirmatch;
1724         stats->fdirmiss = hw_stats->fdirmiss;
1725 }
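/*
 * Usage note (illustration only): the QBRC/QBTC/GORC/GOTC updates above all
 * follow the same idiom: a wide byte counter is exposed as a 32-bit low/high
 * register pair, and each poll folds both halves into the 64-bit software
 * total. A generic sketch of that idiom:
 */
#if 0
static inline void
accumulate_split_counter(uint64_t *total, uint32_t lo, uint32_t hi)
{
	/* these registers are read-to-clear, so each poll adds only the
	 * delta accumulated since the previous read */
	*total += (uint64_t)lo | ((uint64_t)hi << 32);
}
#endif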
1726
1727 static void
1728 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
1729 {
1730         struct ixgbe_hw_stats *stats =
1731                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1732
1733         /* HW registers are cleared on read */
1734         ixgbe_dev_stats_get(dev, NULL);
1735
1736         /* Reset software totals */
1737         memset(stats, 0, sizeof(*stats));
1738 }
1739
1740 static void
1741 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1742 {
1743         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1744         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
1745                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1746
1747         /* Good Rx packet, include VF loopback */
1748         UPDATE_VF_STAT(IXGBE_VFGPRC,
1749             hw_stats->last_vfgprc, hw_stats->vfgprc);
1750
1751         /* Good Rx octets, include VF loopback */
1752         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1753             hw_stats->last_vfgorc, hw_stats->vfgorc);
1754
1755         /* Good Tx packet, include VF loopback */
1756         UPDATE_VF_STAT(IXGBE_VFGPTC,
1757             hw_stats->last_vfgptc, hw_stats->vfgptc);
1758
1759         /* Good Tx octets, include VF loopback */
1760         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1761             hw_stats->last_vfgotc, hw_stats->vfgotc);
1762
1763         /* Rx Multicast Packets */
1764         UPDATE_VF_STAT(IXGBE_VFMPRC,
1765             hw_stats->last_vfmprc, hw_stats->vfmprc);
1766
1767         if (stats == NULL)
1768                 return;
1769
1770         memset(stats, 0, sizeof(*stats));
1771         stats->ipackets = hw_stats->vfgprc;
1772         stats->ibytes = hw_stats->vfgorc;
1773         stats->opackets = hw_stats->vfgptc;
1774         stats->obytes = hw_stats->vfgotc;
1775         stats->imcasts = hw_stats->vfmprc;
1776 }
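/*
 * Note (illustration only): unlike the read-to-clear PF counters, the VF
 * registers keep running values, so UPDATE_VF_STAT/UPDATE_VF_STAT_36BIT
 * (defined in the driver headers) must diff each sample against the stored
 * last_* value. A hedged sketch of what the 32-bit variant amounts to; the
 * real macro may differ in detail:
 */
#if 0
#define EXAMPLE_UPDATE_VF_STAT(hw, reg, last, cur) do {                   \
	uint32_t latest = IXGBE_READ_REG((hw), (reg));                    \
	(cur) += (uint32_t)(latest - (last)); /* unsigned math handles wrap */ \
	(last) = latest;                                                  \
} while (0)
#endif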
1777
1778 static void
1779 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
1780 {
1781         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
1782                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1783
1784         /* Sync HW register to the last stats */
1785         ixgbevf_dev_stats_get(dev, NULL);
1786
1787         /* reset HW current stats */
1788         hw_stats->vfgprc = 0;
1789         hw_stats->vfgorc = 0;
1790         hw_stats->vfgptc = 0;
1791         hw_stats->vfgotc = 0;
1792         hw_stats->vfmprc = 0;
1793
1794 }
1795
1796 static void
1797 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1798 {
1799         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1800
1801         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
1802         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
1803         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
1804         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
1805         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
1806         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
1807         dev_info->max_vfs = dev->pci_dev->max_vfs;
1808         if (hw->mac.type == ixgbe_mac_82598EB)
1809                 dev_info->max_vmdq_pools = ETH_16_POOLS;
1810         else
1811                 dev_info->max_vmdq_pools = ETH_64_POOLS;
1812         dev_info->rx_offload_capa =
1813                 DEV_RX_OFFLOAD_VLAN_STRIP |
1814                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1815                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1816                 DEV_RX_OFFLOAD_TCP_CKSUM;
1817         dev_info->tx_offload_capa =
1818                 DEV_TX_OFFLOAD_VLAN_INSERT |
1819                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1820                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1821                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1822                 DEV_TX_OFFLOAD_SCTP_CKSUM;
1823 }
1824
1825 /* return 0 means link status changed, -1 means not changed */
1826 static int
1827 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1828 {
1829         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1830         struct rte_eth_link link, old;
1831         ixgbe_link_speed link_speed;
1832         int link_up;
1833         int diag;
1834
1835         link.link_status = 0;
1836         link.link_speed = 0;
1837         link.link_duplex = 0;
1838         memset(&old, 0, sizeof(old));
1839         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
1840
1841         /* do not wait for completion if the caller asked not to, or if the LSC interrupt is enabled */
1842         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
1843                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
1844         else
1845                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
1846         if (diag != 0) {
1847                 link.link_speed = ETH_LINK_SPEED_100;
1848                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1849                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
1850                 if (link.link_status == old.link_status)
1851                         return -1;
1852                 return 0;
1853         }
1854
1855         if (link_up == 0) {
1856                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
1857                 if (link.link_status == old.link_status)
1858                         return -1;
1859                 return 0;
1860         }
1861         link.link_status = 1;
1862         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1863
1864         switch (link_speed) {
1865         default:
1866         case IXGBE_LINK_SPEED_UNKNOWN:
1867                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1868                 link.link_speed = ETH_LINK_SPEED_100;
1869                 break;
1870
1871         case IXGBE_LINK_SPEED_100_FULL:
1872                 link.link_speed = ETH_LINK_SPEED_100;
1873                 break;
1874
1875         case IXGBE_LINK_SPEED_1GB_FULL:
1876                 link.link_speed = ETH_LINK_SPEED_1000;
1877                 break;
1878
1879         case IXGBE_LINK_SPEED_10GB_FULL:
1880                 link.link_speed = ETH_LINK_SPEED_10000;
1881                 break;
1882         }
1883         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
1884
1885         if (link.link_status == old.link_status)
1886                 return -1;
1887
1888         return 0;
1889 }
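/*
 * Usage sketch (illustration only): applications reach the updater above
 * through rte_eth_link_get() (may wait) or rte_eth_link_get_nowait(). The
 * 100 ms polling step below is an arbitrary example value.
 */
#if 0
static void
example_wait_for_link(uint8_t port_id)
{
	struct rte_eth_link link;
	int i;

	for (i = 0; i < 90; i++) {
		rte_eth_link_get_nowait(port_id, &link);
		if (link.link_status)
			return;
		rte_delay_ms(100);
	}
}
#endif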
1890
1891 static void
1892 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
1893 {
1894         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1895         uint32_t fctrl;
1896
1897         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1898         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1899         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1900 }
1901
1902 static void
1903 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
1904 {
1905         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1906         uint32_t fctrl;
1907
1908         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1909         fctrl &= (~IXGBE_FCTRL_UPE);
1910         if (dev->data->all_multicast == 1)
1911                 fctrl |= IXGBE_FCTRL_MPE;
1912         else
1913                 fctrl &= (~IXGBE_FCTRL_MPE);
1914         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1915 }
1916
1917 static void
1918 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
1919 {
1920         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1921         uint32_t fctrl;
1922
1923         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1924         fctrl |= IXGBE_FCTRL_MPE;
1925         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1926 }
1927
1928 static void
1929 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
1930 {
1931         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1932         uint32_t fctrl;
1933
1934         if (dev->data->promiscuous == 1)
1935                 return; /* must remain in all_multicast mode */
1936
1937         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1938         fctrl &= (~IXGBE_FCTRL_MPE);
1939         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1940 }
1941
1942 /**
1943  * It clears the interrupt causes and enables the interrupt.
1944  * It is called only once during NIC initialization.
1945  *
1946  * @param dev
1947  *  Pointer to struct rte_eth_dev.
1948  *
1949  * @return
1950  *  - On success, zero.
1951  *  - On failure, a negative value.
1952  */
1953 static int
1954 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
1955 {
1956         struct ixgbe_interrupt *intr =
1957                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1958
1959         ixgbe_dev_link_status_print(dev);
1960         intr->mask |= IXGBE_EICR_LSC;
1961
1962         return 0;
1963 }
1964
1965 /*
1966  * It reads the EICR register and sets the flag (IXGBE_EICR_LSC) for the link_update.
1967  *
1968  * @param dev
1969  *  Pointer to struct rte_eth_dev.
1970  *
1971  * @return
1972  *  - On success, zero.
1973  *  - On failure, a negative value.
1974  */
1975 static int
1976 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
1977 {
1978         uint32_t eicr;
1979         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1980         struct ixgbe_interrupt *intr =
1981                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1982
1983         /* clear all cause mask */
1984         ixgbe_disable_intr(hw);
1985
1986         /* read-on-clear nic registers here */
1987         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1988         PMD_DRV_LOG(INFO, "eicr %x", eicr);
1989
1990         intr->flags = 0;
1991         if (eicr & IXGBE_EICR_LSC) {
1992                 /* set flag for async link update */
1993                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1994         }
1995
1996         if (eicr & IXGBE_EICR_MAILBOX)
1997                 intr->flags |= IXGBE_FLAG_MAILBOX;
1998
1999         return 0;
2000 }
2001
2002 /**
2003  * It gets and then prints the link status.
2004  *
2005  * @param dev
2006  *  Pointer to struct rte_eth_dev.
2007  *
2008  * @return
2009  *  void
2010  *  (the link status is only printed to the log)
2011  */
2012 static void
2013 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
2014 {
2015         struct rte_eth_link link;
2016
2017         memset(&link, 0, sizeof(link));
2018         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
2019         if (link.link_status) {
2020                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2021                                         (int)(dev->data->port_id),
2022                                         (unsigned)link.link_speed,
2023                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2024                                         "full-duplex" : "half-duplex");
2025         } else {
2026                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2027                                 (int)(dev->data->port_id));
2028         }
2029         PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
2030                                 dev->pci_dev->addr.domain,
2031                                 dev->pci_dev->addr.bus,
2032                                 dev->pci_dev->addr.devid,
2033                                 dev->pci_dev->addr.function);
2034 }
2035
2036 /*
2037  * It executes link_update after knowing an interrupt occurred.
2038  *
2039  * @param dev
2040  *  Pointer to struct rte_eth_dev.
2041  *
2042  * @return
2043  *  - On success, zero.
2044  *  - On failure, a negative value.
2045  */
2046 static int
2047 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
2048 {
2049         struct ixgbe_interrupt *intr =
2050                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2051         int64_t timeout;
2052         struct rte_eth_link link;
2053         int intr_enable_delay = false;
2054
2055         PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
2056
2057         if (intr->flags & IXGBE_FLAG_MAILBOX) {
2058                 ixgbe_pf_mbx_process(dev);
2059                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
2060         }
2061
2062         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
2063                 /* get the link status before the update, for comparison later */
2064                 memset(&link, 0, sizeof(link));
2065                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
2066
2067                 ixgbe_dev_link_update(dev, 0);
2068
2069                 /* link was down: likely coming up */
2070                 if (!link.link_status)
2071                         /* handle it 1 sec later, waiting for the link to stabilize */
2072                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
2073                 /* link was up: likely going down */
2074                 else
2075                         /* handle it 4 sec later, waiting for the link to stabilize */
2076                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
2077
2078                 ixgbe_dev_link_status_print(dev);
2079
2080                 intr_enable_delay = true;
2081         }
2082
2083         if (intr_enable_delay) {
2084                 if (rte_eal_alarm_set(timeout * 1000,
2085                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
2086                         PMD_DRV_LOG(ERR, "Error setting alarm");
2087         } else {
2088                 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2089                 ixgbe_enable_intr(dev);
2090                 rte_intr_enable(&(dev->pci_dev->intr_handle));
2091         }
2092
2093
2094         return 0;
2095 }
2096
2097 /**
2098  * Interrupt handler registered as an alarm callback for delayed handling of a
2099  * specific interrupt, waiting for the NIC state to become stable. As the
2100  * ixgbe interrupt state is not stable right after the link goes down,
2101  * it needs to wait 4 seconds before reading a stable status.
2102  *
2103  * @param handle
2104  *  Pointer to interrupt handle.
2105  * @param param
2106  *  The address of the parameter (struct rte_eth_dev *) registered before.
2107  *
2108  * @return
2109  *  void
2110  */
2111 static void
2112 ixgbe_dev_interrupt_delayed_handler(void *param)
2113 {
2114         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2115         struct ixgbe_interrupt *intr =
2116                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2117         struct ixgbe_hw *hw =
2118                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2119         uint32_t eicr;
2120
2121         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2122         if (eicr & IXGBE_EICR_MAILBOX)
2123                 ixgbe_pf_mbx_process(dev);
2124
2125         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
2126                 ixgbe_dev_link_update(dev, 0);
2127                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
2128                 ixgbe_dev_link_status_print(dev);
2129                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
2130         }
2131
2132         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
2133         ixgbe_enable_intr(dev);
2134         rte_intr_enable(&(dev->pci_dev->intr_handle));
2135 }
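/*
 * Usage sketch (illustration only): the
 * _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC) call above
 * invokes callbacks that applications register through the public API;
 * this requires dev_conf.intr_conf.lsc = 1 at configure time.
 */
#if 0
static void
example_lsc_callback(uint8_t port_id, enum rte_eth_event_type type, void *arg)
{
	(void)arg;
	if (type == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link status changed\n", port_id);
}

/* during init:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                               example_lsc_callback, NULL);
 */
#endif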
2136
2137 /**
2138  * Interrupt handler triggered by the NIC to handle a
2139  * specific interrupt.
2140  *
2141  * @param handle
2142  *  Pointer to interrupt handle.
2143  * @param param
2144  *  The address of the parameter (struct rte_eth_dev *) registered before.
2145  *
2146  * @return
2147  *  void
2148  */
2149 static void
2150 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2151                                                         void *param)
2152 {
2153         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2154         ixgbe_dev_interrupt_get_status(dev);
2155         ixgbe_dev_interrupt_action(dev);
2156 }
2157
2158 static int
2159 ixgbe_dev_led_on(struct rte_eth_dev *dev)
2160 {
2161         struct ixgbe_hw *hw;
2162
2163         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2164         return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
2165 }
2166
2167 static int
2168 ixgbe_dev_led_off(struct rte_eth_dev *dev)
2169 {
2170         struct ixgbe_hw *hw;
2171
2172         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2173         return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
2174 }
2175
2176 static int
2177 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2178 {
2179         struct ixgbe_hw *hw;
2180         int err;
2181         uint32_t rx_buf_size;
2182         uint32_t max_high_water;
2183         uint32_t mflcn;
2184         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
2185                 ixgbe_fc_none,
2186                 ixgbe_fc_rx_pause,
2187                 ixgbe_fc_tx_pause,
2188                 ixgbe_fc_full
2189         };
2190
2191         PMD_INIT_FUNC_TRACE();
2192
2193         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2194         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
2195         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x\n", rx_buf_size);
2196
2197         /*
2198          * Reserve at least one Ethernet frame for the watermark;
2199          * high_water/low_water are in kilobytes for ixgbe.
2200          */
2201         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
2202         if ((fc_conf->high_water > max_high_water) ||
2203                 (fc_conf->high_water < fc_conf->low_water)) {
2204                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
2205                 PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
2206                 return (-EINVAL);
2207         }
2208
2209         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
2210         hw->fc.pause_time     = fc_conf->pause_time;
2211         hw->fc.high_water[0]  = fc_conf->high_water;
2212         hw->fc.low_water[0]   = fc_conf->low_water;
2213         hw->fc.send_xon       = fc_conf->send_xon;
2214
2215         err = ixgbe_fc_enable(hw);
2216
2217         /* Not negotiated is not an error case */
2218         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
2219
2220                 /* check if we want to forward MAC frames - driver doesn't have native
2221                  * capability to do that, so we'll write the registers ourselves */
2222
2223                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2224
2225                 /* set or clear MFLCN.PMCF bit depending on configuration */
2226                 if (fc_conf->mac_ctrl_frame_fwd != 0)
2227                         mflcn |= IXGBE_MFLCN_PMCF;
2228                 else
2229                         mflcn &= ~IXGBE_MFLCN_PMCF;
2230
2231                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
2232                 IXGBE_WRITE_FLUSH(hw);
2233
2234                 return 0;
2235         }
2236
2237         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
2238         return -EIO;
2239 }
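/*
 * Usage sketch (illustration only): the matching public call for the
 * function above. The threshold values are arbitrary examples; per the
 * checks above they are interpreted in KB and high_water must stay within
 * the Rx packet buffer.
 */
#if 0
static int
example_enable_link_flow_ctrl(uint8_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	fc_conf.mode = RTE_FC_FULL;   /* maps to ixgbe_fc_full above */
	fc_conf.high_water = 96;      /* KB, example value */
	fc_conf.low_water = 32;       /* KB, must be < high_water */
	fc_conf.pause_time = 512;     /* XOFF timer value, example */
	fc_conf.send_xon = 1;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif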
2240
2241 /**
2242  *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
2243  *  @hw: pointer to hardware structure
2244  *  @tc_num: traffic class number
2245  *  Enable flow control according to the current settings.
2246  */
2247 static int
2248 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
2249 {
2250         int ret_val = 0;
2251         uint32_t mflcn_reg, fccfg_reg;
2252         uint32_t reg;
2253         uint32_t fcrtl, fcrth;
2254         uint8_t i;
2255         uint8_t nb_rx_en;
2256
2257         /* Validate the water mark configuration */
2258         if (!hw->fc.pause_time) {
2259                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2260                 goto out;
2261         }
2262
2263         /* Low water mark of zero causes XOFF floods */
2264         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
2265                 /* High/Low water cannot be 0 */
2266                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
2267                         PMD_INIT_LOG(ERR, "Invalid water mark configuration\n");
2268                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2269                         goto out;
2270                 }
2271
2272                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
2273                         PMD_INIT_LOG(ERR, "Invalid water mark configuration\n");
2274                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2275                         goto out;
2276                 }
2277         }
2278         /* Negotiate the fc mode to use */
2279         ixgbe_fc_autoneg(hw);
2280
2281         /* Disable any previous flow control settings */
2282         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2283         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
2284
2285         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2286         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2287
2288         switch (hw->fc.current_mode) {
2289         case ixgbe_fc_none:
2290                 /*
2291                  * If more than one RX priority flow control is enabled,
2292                  * then TX pause cannot be disabled.
2293                  */
2294                 nb_rx_en = 0;
2295                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2296                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
2297                         if (reg & IXGBE_FCRTH_FCEN)
2298                                 nb_rx_en++;
2299                 }
2300                 if (nb_rx_en > 1)
2301                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2302                 break;
2303         case ixgbe_fc_rx_pause:
2304                 /*
2305                  * Rx Flow control is enabled and Tx Flow control is
2306                  * disabled by software override. Since there really
2307                  * isn't a way to advertise that we are capable of RX
2308                  * Pause ONLY, we will advertise that we support both
2309                  * symmetric and asymmetric Rx PAUSE.  Later, we will
2310                  * disable the adapter's ability to send PAUSE frames.
2311                  */
2312                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
2313                 /*
2314                  * If more than one RX priority flow control is enabled,
2315                  * then TX pause cannot be disabled.
2316                  */
2317                 nb_rx_en = 0;
2318                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2319                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
2320                         if (reg & IXGBE_FCRTH_FCEN)
2321                                 nb_rx_en++;
2322                 }
2323                 if (nb_rx_en > 1)
2324                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2325                 break;
2326         case ixgbe_fc_tx_pause:
2327                 /*
2328                  * Tx Flow control is enabled, and Rx Flow control is
2329                  * disabled by software override.
2330                  */
2331                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2332                 break;
2333         case ixgbe_fc_full:
2334                 /* Flow control (both Rx and Tx) is enabled by SW override. */
2335                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
2336                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2337                 break;
2338         default:
2339                 DEBUGOUT("Flow control param set incorrectly\n");
2340                 ret_val = IXGBE_ERR_CONFIG;
2341                 goto out;
2342                 break;
2343         }
2344
2345         /* Set 802.3x based flow control settings. */
2346         mflcn_reg |= IXGBE_MFLCN_DPF;
2347         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2348         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2349
2350         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2351         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2352                 hw->fc.high_water[tc_num]) {
2353                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
2354                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
2355                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
2356         } else {
2357                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
2358                 /*
2359                  * In order to prevent Tx hangs when the internal Tx
2360                  * switch is enabled we must set the high water mark
2361                  * to the maximum FCRTH value.  This allows the Tx
2362                  * switch to function even under heavy Rx workloads.
2363                  */
2364                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
2365         }
2366         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
2367
2368         /* Configure pause time (2 TCs per register; 0x00010001 replicates the 16-bit value into both halves) */
2369         reg = hw->fc.pause_time * 0x00010001;
2370         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2371                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2372
2373         /* Configure flow control refresh threshold value */
2374         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2375
2376 out:
2377         return ret_val;
2378 }
2379
2380 static int
2381 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
2382 {
2383         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2384         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
2385
2386         if(hw->mac.type != ixgbe_mac_82598EB) {
2387                 ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
2388         }
2389         return ret_val;
2390 }
2391
2392 static int
2393 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
2394 {
2395         int err;
2396         uint32_t rx_buf_size;
2397         uint32_t max_high_water;
2398         uint8_t tc_num;
2399         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
2400         struct ixgbe_hw *hw =
2401                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2402         struct ixgbe_dcb_config *dcb_config =
2403                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
2404
2405         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
2406                 ixgbe_fc_none,
2407                 ixgbe_fc_rx_pause,
2408                 ixgbe_fc_tx_pause,
2409                 ixgbe_fc_full
2410         };
2411
2412         PMD_INIT_FUNC_TRACE();
2413
2414         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
2415         tc_num = map[pfc_conf->priority];
2416         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
2417         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x\n", rx_buf_size);
2418         /*
2419          * Reserve at least one Ethernet frame for the watermark;
2420          * high_water/low_water are in kilobytes for ixgbe.
2421          */
2422         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
2423         if ((pfc_conf->fc.high_water > max_high_water) ||
2424                 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
2425                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
2426                 PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
2427                 return (-EINVAL);
2428         }
2429
2430         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
2431         hw->fc.pause_time = pfc_conf->fc.pause_time;
2432         hw->fc.send_xon = pfc_conf->fc.send_xon;
2433         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
2434         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
2435
2436         err = ixgbe_dcb_pfc_enable(dev, tc_num);
2437
2438         /* Not negotiated is not an error case */
2439         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
2440                 return 0;
2441
2442         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err);
2443         return -EIO;
2444 }
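/*
 * Usage sketch (illustration only): the per-priority counterpart of the
 * previous flow-control sketch. The priority is mapped to a traffic class
 * via the DCB map in the function above; the values are examples.
 */
#if 0
static int
example_enable_pfc(uint8_t port_id, uint8_t priority)
{
	struct rte_eth_pfc_conf pfc_conf;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.priority = priority;
	pfc_conf.fc.mode = RTE_FC_FULL;
	pfc_conf.fc.high_water = 96; /* KB, example value */
	pfc_conf.fc.low_water = 32;  /* KB, must be < high_water */
	pfc_conf.fc.pause_time = 512;
	pfc_conf.fc.send_xon = 1;
	return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
}
#endif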
2445
2446 static int
2447 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2448                                 struct rte_eth_rss_reta *reta_conf)
2449 {
2450         uint8_t i, j, mask;
2451         uint32_t reta;
2452         struct ixgbe_hw *hw =
2453                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2454
2455         PMD_INIT_FUNC_TRACE();
2456         /*
2457          * Update Redirection Table RETA[n], n = 0..31. The redirection
2458          * table has 128 entries spread across 32 registers.
2459          */
2460         for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
2461                 if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
2462                         mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
2463                 else
2464                         mask = (uint8_t)((reta_conf->mask_hi >>
2465                                 (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
2466                 if (mask != 0) {
2467                         reta = 0;
2468                         if (mask != 0xF)
2469                                 reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
2470
2471                         for (j = 0; j < 4; j++) {
2472                                 if (mask & (0x1 << j)) {
2473                                         if (mask != 0xF)
2474                                                 reta &= ~(0xFF << 8 * j);
2475                                         reta |= reta_conf->reta[i + j] << 8 * j;
2476                                 }
2477                         }
2478                         IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2479                 }
2480         }
2481
2482         return 0;
2483 }
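/*
 * Usage sketch (illustration only): populating the 128-entry table written
 * above via rte_eth_dev_rss_reta_update(); mask_lo/mask_hi select which
 * entries are updated (64 bits each). Spreading entries round-robin over
 * the configured queues is just one example policy.
 */
#if 0
static int
example_rss_spread(uint8_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta reta_conf;
	unsigned i;

	memset(&reta_conf, 0, sizeof(reta_conf));
	reta_conf.mask_lo = ~0ULL; /* update entries 0..63 */
	reta_conf.mask_hi = ~0ULL; /* update entries 64..127 */
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i++)
		reta_conf.reta[i] = (uint8_t)(i % nb_queues);
	return rte_eth_dev_rss_reta_update(port_id, &reta_conf);
}
#endif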
2484
2485 static int
2486 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2487                                 struct rte_eth_rss_reta *reta_conf)
2488 {
2489         uint8_t i, j, mask;
2490         uint32_t reta;
2491         struct ixgbe_hw *hw =
2492                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2493
2494         PMD_INIT_FUNC_TRACE();
2495         /*
2496          * Read Redirection Table RETA[n], n = 0..31. The redirection
2497          * table has 128 entries spread across 32 registers.
2498          */
2499         for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
2500                 if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
2501                         mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
2502                 else
2503                         mask = (uint8_t)((reta_conf->mask_hi >>
2504                                 (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
2505
2506                 if (mask != 0) {
2507                         reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
2508                         for (j = 0; j < 4; j++) {
2509                                 if (mask & (0x1 << j))
2510                                         reta_conf->reta[i + j] =
2511                                                 (uint8_t)((reta >> 8 * j) & 0xFF);
2512                         }
2513                 }
2514         }
2515
2516         return 0;
2517 }
2518
2519 static void
2520 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2521                                 uint32_t index, uint32_t pool)
2522 {
2523         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2524         uint32_t enable_addr = 1;
2525
2526         ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
2527 }
2528
2529 static void
2530 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2531 {
2532         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2533
2534         ixgbe_clear_rar(hw, index);
2535 }
2536
2537 /*
2538  * Virtual Function operations
2539  */
2540 static void
2541 ixgbevf_intr_disable(struct ixgbe_hw *hw)
2542 {
2543         PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");
2544
2545         /* Clear interrupt mask to stop from interrupts being generated */
2546         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
2547
2548         IXGBE_WRITE_FLUSH(hw);
2549 }
2550
2551 static int
2552 ixgbevf_dev_configure(struct rte_eth_dev *dev)
2553 {
2554         struct rte_eth_conf *conf = &dev->data->dev_conf;
2555
2556         PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
2557                 dev->data->port_id);
2558
2559         /*
2560          * VF has no ability to enable/disable HW CRC
2561          * Keep the persistent behavior the same as Host PF
2562          */
2563 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
2564         if (!conf->rxmode.hw_strip_crc) {
2565                 PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
2566                 conf->rxmode.hw_strip_crc = 1;
2567         }
2568 #else
2569         if (conf->rxmode.hw_strip_crc) {
2570                 PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
2571                 conf->rxmode.hw_strip_crc = 0;
2572         }
2573 #endif
2574
2575         return 0;
2576 }
2577
2578 static int
2579 ixgbevf_dev_start(struct rte_eth_dev *dev)
2580 {
2581         struct ixgbe_hw *hw =
2582                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2583         int err, mask = 0;
2584
2585         PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
2586
2587         hw->mac.ops.reset_hw(hw);
2588
2589         /* negotiate mailbox API version to use with the PF. */
2590         ixgbevf_negotiate_api(hw);
2591
2592         ixgbevf_dev_tx_init(dev);
2593
2594         /* This can fail when allocating mbufs for descriptor rings */
2595         err = ixgbevf_dev_rx_init(dev);
2596         if (err) {
2597                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
2598                 ixgbe_dev_clear_queues(dev);
2599                 return err;
2600         }
2601
2602         /* Set vfta */
2603         ixgbevf_set_vfta_all(dev, 1);
2604
2605         /* Set HW strip */
2606         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2607                 ETH_VLAN_EXTEND_MASK;
2608         ixgbevf_vlan_offload_set(dev, mask);
2609
2610         ixgbevf_dev_rxtx_start(dev);
2611
2612         return 0;
2613 }
2614
2615 static void
2616 ixgbevf_dev_stop(struct rte_eth_dev *dev)
2617 {
2618         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2619
2620         PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
2621
2622         hw->adapter_stopped = TRUE;
2623         ixgbe_stop_adapter(hw);
2624
2625         /*
2626          * Clear what we set, but keep shadow_vfta so it can be
2627          * restored after the device starts again.
2628          */
2629         ixgbevf_set_vfta_all(dev, 0);
2630
2631         ixgbe_dev_clear_queues(dev);
2632 }
2633
2634 static void
2635 ixgbevf_dev_close(struct rte_eth_dev *dev)
2636 {
2637         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2638
2639         PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
2640
2641         ixgbe_reset_hw(hw);
2642
2643         ixgbevf_dev_stop(dev);
2644
2645         /* reprogram the RAR[0] in case user changed it. */
2646         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2647 }
2648
2649 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
2650 {
2651         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2652         struct ixgbe_vfta *shadow_vfta =
2653                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2654         int i = 0, j = 0, vfta = 0, mask = 1;
2655
2656         for (i = 0; i < IXGBE_VFTA_SIZE; i++){
2657                 vfta = shadow_vfta->vfta[i];
2658                 if(vfta){
2659                         mask = 1;
2660                         for (j = 0; j < 32; j++){
2661                                 if(vfta & mask)
2662                                         ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
2663                                 mask<<=1;
2664                         }
2665                 }
2666         }
2667
2668 }
2669
2670 static int
2671 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2672 {
2673         struct ixgbe_hw *hw =
2674                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2675         struct ixgbe_vfta *shadow_vfta =
2676                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2677         uint32_t vid_idx = 0;
2678         uint32_t vid_bit = 0;
2679         int ret = 0;
2680
2681         PMD_INIT_FUNC_TRACE();
2682
2683         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
2684         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
2685         if (ret) {
2686                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
2687                 return ret;
2688         }
2689         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
2690         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
2691
2692         /* Save what we set and restore it after device reset */
2693         if (on)
2694                 shadow_vfta->vfta[vid_idx] |= vid_bit;
2695         else
2696                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
2697
2698         return 0;
2699 }
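/*
 * Worked example (editorial illustration) of the shadow-VFTA indexing
 * above. For vlan_id = 2389: vid_idx = (2389 >> 5) & 0x7F = 74 selects the
 * 32-bit table word, and vid_bit = 1 << (2389 & 0x1F) = 1 << 21 selects
 * the bit within that word.
 */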
2700
2701 static void
2702 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
2703 {
2704         struct ixgbe_hw *hw =
2705                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2706         uint32_t ctrl;
2707
2708         PMD_INIT_FUNC_TRACE();
2709
2710         if (queue >= hw->mac.max_rx_queues)
2711                 return;
2712
2713         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2714         if (on)
2715                 ctrl |= IXGBE_RXDCTL_VME;
2716         else
2717                 ctrl &= ~IXGBE_RXDCTL_VME;
2718         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2719
2720         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
2721 }
2722
2723 static void
2724 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2725 {
2726         struct ixgbe_hw *hw =
2727                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2728         uint16_t i;
2729         int on = 0;
2730
2731         /* The VF only supports the HW VLAN strip feature; other offloads are not supported */
2732         if (mask & ETH_VLAN_STRIP_MASK) {
2733                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
2734
2735                 for (i = 0; i < hw->mac.max_rx_queues; i++)
2736                         ixgbevf_vlan_strip_queue_set(dev, i, on);
2737         }
2738 }
2739
2740 static int
2741 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
2742 {
2743         uint32_t reg_val;
2744
2745         /* we only need to do this if VMDq is enabled */
2746         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2747         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
2748                 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
2749                 return (-1);
2750         }
2751
2752         return 0;
2753 }
2754
2755 static uint32_t
2756 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
2757 {
2758         uint32_t vector = 0;
2759         switch (hw->mac.mc_filter_type) {
2760         case 0:   /* use bits [47:36] of the address */
2761                 vector = ((uc_addr->addr_bytes[4] >> 4) |
2762                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
2763                 break;
2764         case 1:   /* use bits [46:35] of the address */
2765                 vector = ((uc_addr->addr_bytes[4] >> 3) |
2766                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
2767                 break;
2768         case 2:   /* use bits [45:34] of the address */
2769                 vector = ((uc_addr->addr_bytes[4] >> 2) |
2770                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
2771                 break;
2772         case 3:   /* use bits [43:32] of the address */
2773                 vector = ((uc_addr->addr_bytes[4]) |
2774                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
2775                 break;
2776         default:  /* Invalid mc_filter_type */
2777                 break;
2778         }
2779
2780         /* the vector is only 12 bits wide; mask it so the table boundary is not exceeded */
2781         vector &= 0xFFF;
2782         return vector;
2783 }
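/*
 * Worked example (editorial illustration) for mc_filter_type 0 with the
 * sample MAC 00:11:22:33:44:55: addr_bytes[4] = 0x44 and
 * addr_bytes[5] = 0x55, so vector = (0x44 >> 4) | (0x55 << 4)
 * = 0x004 | 0x550 = 0x554, well within the 12-bit (0xFFF) range enforced
 * above.
 */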
2784
2785 static int
2786 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2787                                uint8_t on)
2788 {
2789         uint32_t vector;
2790         uint32_t uta_idx;
2791         uint32_t reg_val;
2792         uint32_t uta_shift;
2793         uint32_t rc;
2794         const uint32_t ixgbe_uta_idx_mask = 0x7F;
2795         const uint32_t ixgbe_uta_bit_shift = 5;
2796         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
2797         const uint32_t bit1 = 0x1;
2798
2799         struct ixgbe_hw *hw =
2800                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2801         struct ixgbe_uta_info *uta_info =
2802                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
2803
2804         /* The UTA table only exists on 82599 hardware and newer */
2805         if (hw->mac.type < ixgbe_mac_82599EB)
2806                 return (-ENOTSUP);
2807
2808         vector = ixgbe_uta_vector(hw, mac_addr);
2809         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
2810         uta_shift = vector & ixgbe_uta_bit_mask;
2811
2812         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
2813         if (rc == on)
2814                 return 0;
2815
2816         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
2817         if (on) {
2818                 uta_info->uta_in_use++;
2819                 reg_val |= (bit1 << uta_shift);
2820                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
2821         } else {
2822                 uta_info->uta_in_use--;
2823                 reg_val &= ~(bit1 << uta_shift);
2824                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
2825         }
2826
2827         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
2828
2829         if (uta_info->uta_in_use > 0)
2830                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2831                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2832         else
2833                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
2834
2835         return 0;
2836 }
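
/*
 * Worked example (illustrative only): continuing with vector 0x2a4 from
 * above,
 *
 *      uta_idx   = (0x2a4 >> 5) & 0x7F = 21   (UTA register 21)
 *      uta_shift =  0x2a4 & 0x1F       = 4    (bit 4 of that register)
 *
 * so the address is represented by bit 4 of IXGBE_UTA(21), and the shadow
 * copy lets the driver skip redundant register writes.
 */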

static int
ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
        int i;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_uta_info *uta_info =
                IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);

        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
                return (-ENOTSUP);

        if (on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
                }
        } else {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = 0;
                        IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
                }
        }

        return 0;
}
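
/*
 * Illustrative usage (a sketch assuming the generic ethdev API of this
 * release): the two handlers above back rte_eth_dev_uc_hash_table_set()
 * and rte_eth_dev_uc_all_hash_table_set(), e.g.
 *
 *      rte_eth_dev_uc_hash_table_set(port_id, &mac_addr, 1);
 *      rte_eth_dev_uc_all_hash_table_set(port_id, 0);
 */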

static int
ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
                               uint16_t rx_mask, uint8_t on)
{
        int val = 0;

        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));

        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode should be done"
                        " on 82599 hardware and newer\n");
                return (-ENOTSUP);
        }
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);

        if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
                val |= IXGBE_VMOLR_AUPE;
        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
                val |= IXGBE_VMOLR_ROMPE;
        if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
                val |= IXGBE_VMOLR_ROPE;
        if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
                val |= IXGBE_VMOLR_BAM;
        if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
                val |= IXGBE_VMOLR_MPE;

        if (on)
                vmolr |= val;
        else
                vmolr &= ~val;

        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);

        return 0;
}
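
/*
 * Illustrative example (not part of the driver): with
 * rx_mask = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST and on = 1,
 * the pool's VMOLR gains IXGBE_VMOLR_AUPE | IXGBE_VMOLR_BAM, i.e. the pool
 * accepts untagged frames and broadcasts while the remaining accept bits
 * keep their previous values.
 */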

static int
ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
        uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;

        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);

        addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
        /* Mask the shift count: each VFRE register holds 32 pool bits. */
        val = bit1 << (pool & 0x1F);

        if (on)
                reg |= val;
        else
                reg &= ~val;

        IXGBE_WRITE_REG(hw, addr, reg);

        return 0;
}

static int
ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
        uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;

        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);

        addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
        /* Mask the shift count: each VFTE register holds 32 pool bits. */
        val = bit1 << (pool & 0x1F);

        if (on)
                reg |= val;
        else
                reg &= ~val;

        IXGBE_WRITE_REG(hw, addr, reg);

        return 0;
}
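
/*
 * Illustrative example (not part of the driver): for pool = 40,
 * pool >= ETH_64_POOLS/2 selects VFRE(1)/VFTE(1), and the masked shift
 * 40 & 0x1F = 8 picks bit 8 of that second register. Shifting a 32-bit
 * value by the unmasked pool number would be undefined for pools 32-63,
 * which is why the shift count is masked above.
 */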

static int
ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                        uint64_t pool_mask, uint8_t vlan_on)
{
        int ret = 0;
        uint16_t pool_idx;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
        for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
                if (pool_mask & (1ULL << pool_idx)) {
                        ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
                                        vlan_on);
                        if (ret < 0)
                                return ret;
                }
        }

        return ret;
}
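
/*
 * Illustrative example (not part of the driver): to make VLAN 100 visible
 * to pools 0 and 3 only, a caller would pass
 *
 *      vlan = 100, pool_mask = (1ULL << 0) | (1ULL << 3), vlan_on = 1
 *
 * and set_vfta() is invoked once per pool bit set in the mask.
 */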

static int
ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_vmdq_mirror_conf *mirror_conf,
                        uint8_t rule_id, uint8_t on)
{
        uint32_t mr_ctl, vlvf;
        uint32_t mp_lsb = 0;
        uint32_t mv_msb = 0;
        uint32_t mv_lsb = 0;
        uint32_t mp_msb = 0;
        uint8_t i = 0;
        int reg_index = 0;
        uint64_t vlan_mask = 0;

        const uint8_t pool_mask_offset = 32;
        const uint8_t vlan_mask_offset = 32;
        const uint8_t dst_pool_offset = 8;
        const uint8_t rule_mr_offset = 4;
        const uint8_t mirror_rule_mask = 0x0F;

        struct ixgbe_mirror_info *mr_info =
                        (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);

        /* Check if the VLAN mask is valid */
        if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
                if (mirror_conf->vlan.vlan_mask == 0)
                        return (-EINVAL);
        }

        /* Check if each VLAN id is valid and find the corresponding VLAN ID
         * index in VLVF.
         */
        if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
                for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
                        if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
                                /* search the pool VLAN filter index related
                                 * to this VLAN id
                                 */
                                reg_index = ixgbe_find_vlvf_slot(hw,
                                                mirror_conf->vlan.vlan_id[i]);
                                if (reg_index < 0)
                                        return (-EINVAL);
                                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
                                if ((vlvf & IXGBE_VLVF_VIEN) &&
                                        ((vlvf & IXGBE_VLVF_VLANID_MASK)
                                                == mirror_conf->vlan.vlan_id[i]))
                                        vlan_mask |= (1ULL << reg_index);
                                else
                                        return (-EINVAL);
                        }
                }

                if (on) {
                        mv_lsb = vlan_mask & 0xFFFFFFFF;
                        mv_msb = vlan_mask >> vlan_mask_offset;

                        mr_info->mr_conf[rule_id].vlan.vlan_mask =
                                                mirror_conf->vlan.vlan_mask;
                        for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
                                if (mirror_conf->vlan.vlan_mask & (1ULL << i))
                                        mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
                                                mirror_conf->vlan.vlan_id[i];
                        }
                } else {
                        mv_lsb = 0;
                        mv_msb = 0;
                        mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
                        for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
                                mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
                }
        }

        /*
         * If pool mirroring is enabled, write the related pool mask
         * register; if it is disabled, clear the PFMRVM register.
         */
        if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
                if (on) {
                        mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
                        mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
                        mr_info->mr_conf[rule_id].pool_mask =
                                        mirror_conf->pool_mask;

                } else {
                        mp_lsb = 0;
                        mp_msb = 0;
                        mr_info->mr_conf[rule_id].pool_mask = 0;
                }
        }

        /* read the mirror control register and recalculate it */
        mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));

        if (on) {
                mr_ctl |= mirror_conf->rule_type_mask;
                mr_ctl &= mirror_rule_mask;
                mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
        } else
                mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);

        mr_info->mr_conf[rule_id].rule_type_mask =
                (uint8_t)(mr_ctl & mirror_rule_mask);
        mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;

        /* write the mirror control register */
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);

        /* write the pool mirror control register */
        if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
                                mp_msb);
        }
        /* write the VLAN mirror control register */
        if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
                                mv_msb);
        }

        return 0;
}
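
/*
 * Illustrative composition (an assumption derived from the code above, not
 * from the datasheet): in the MRCTL value written here, the low nibble
 * (mirror_rule_mask) carries the rule type bits and bits 15:8 carry the
 * destination pool, so a pool-mirror rule towards pool 5 yields
 *
 *      mr_ctl = ETH_VMDQ_POOL_MIRROR | (5 << dst_pool_offset);
 */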

static int
ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
{
        int mr_ctl = 0;
        uint32_t lsb_val = 0;
        uint32_t msb_val = 0;
        const uint8_t rule_mr_offset = 4;

        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_mirror_info *mr_info =
                (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);

        memset(&mr_info->mr_conf[rule_id], 0,
                sizeof(struct rte_eth_vmdq_mirror_conf));

        /* clear the mirror rule control register (MRCTL) */
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);

        /* clear the pool mask register */
        IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
        IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);

        /* clear the VLAN mask register */
        IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
        IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);

        return 0;
}

static void
ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                     __attribute__((unused)) uint32_t index,
                     __attribute__((unused)) uint32_t pool)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int diag;

        /*
         * On a 82599 VF, adding again the same MAC addr is not an idempotent
         * operation. Trap this case to avoid exhausting the [very limited]
         * set of PF resources used to store VF MAC addresses.
         */
        if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
                return;
        diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
        if (diag == 0)
                return;
        PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
}

static void
ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
        struct ether_addr *mac_addr;
        uint32_t i;
        int diag;

        /*
         * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
         * not support the deletion of a given MAC address.
         * Instead, it requires deleting all MAC addresses and then adding
         * back all MAC addresses except the one to be deleted.
         */
        (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);

        /*
         * Add back all MAC addresses, except the deleted one and the
         * permanent MAC address.
         */
        for (i = 0, mac_addr = dev->data->mac_addrs;
             i < hw->mac.num_rar_entries; i++, mac_addr++) {
                /* Skip the deleted MAC address */
                if (i == index)
                        continue;
                /* Skip NULL MAC addresses */
                if (is_zero_ether_addr(mac_addr))
                        continue;
                /* Skip the permanent MAC address */
                if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
                        continue;
                diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
                if (diag != 0)
                        PMD_DRV_LOG(ERR,
                                    "Re-adding MAC address "
                                    "%02x:%02x:%02x:%02x:%02x:%02x failed: "
                                    "diag=%d",
                                    mac_addr->addr_bytes[0],
                                    mac_addr->addr_bytes[1],
                                    mac_addr->addr_bytes[2],
                                    mac_addr->addr_bytes[3],
                                    mac_addr->addr_bytes[4],
                                    mac_addr->addr_bytes[5],
                                    diag);
        }
}
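
/*
 * Illustrative usage (a sketch assuming the generic ethdev MAC API):
 *
 *      rte_eth_dev_mac_addr_remove(port_id, &mac_addr);
 *
 * lands in the handler above on a VF port; the full clear-and-re-add cycle
 * is forced by the PF mailbox semantics described in the first comment.
 */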

static struct rte_driver rte_ixgbe_driver = {
        .type = PMD_PDEV,
        .init = rte_ixgbe_pmd_init,
};

static struct rte_driver rte_ixgbevf_driver = {
        .type = PMD_PDEV,
        .init = rte_ixgbevf_pmd_init,
};

PMD_REGISTER_DRIVER(rte_ixgbe_driver);
PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
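
/*
 * Note (an assumption about the registration mechanics, based on rte_dev.h
 * of this release): PMD_REGISTER_DRIVER() hooks each rte_driver into the
 * EAL driver list from a constructor, so rte_eal_init() can later invoke
 * the .init callbacks and probe matching PCI devices for both the PF and
 * VF drivers.
 */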