ixgbe: add access to specific device info
[dpdk.git] / drivers / net / ixgbe / ixgbe_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63
64 #include "ixgbe_logs.h"
65 #include "base/ixgbe_api.h"
66 #include "base/ixgbe_vf.h"
67 #include "base/ixgbe_common.h"
68 #include "ixgbe_ethdev.h"
69 #include "ixgbe_bypass.h"
70 #include "ixgbe_rxtx.h"
71 #include "base/ixgbe_type.h"
72 #include "base/ixgbe_phy.h"
73 #include "ixgbe_regs.h"
74
75 /*
76  * High threshold controlling when to start sending XOFF frames. Must be at
77  * least 8 bytes less than receive packet buffer size. This value is in units
78  * of 1024 bytes.
79  */
80 #define IXGBE_FC_HI    0x80
81
82 /*
83  * Low threshold controlling when to start sending XON frames. This value is
84  * in units of 1024 bytes.
85  */
86 #define IXGBE_FC_LO    0x40
87
88 /* Timer value included in XOFF frames. */
89 #define IXGBE_FC_PAUSE 0x680
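/*
 * Illustrative note (editor's sketch, not from the datasheet): with the
 * defaults above, the XOFF threshold is 0x80 * 1024 = 128 KB and the XON
 * threshold is 0x40 * 1024 = 64 KB of used receive packet buffer, so XOFF
 * frames start once roughly 128 KB are queued and stop again below 64 KB.
 * IXGBE_FC_PAUSE (0x680) is the pause timer carried in the XOFF frame,
 * expressed in 512-bit-time quanta as defined by IEEE 802.3x.
 */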
90
91 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
92 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
93 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
94
95 #define IXGBE_MMW_SIZE_DEFAULT        0x4
96 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
97 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
98
99 /*
100  *  Default values for RX/TX configuration
101  */
102 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
103 #define IXGBE_DEFAULT_RX_PTHRESH      8
104 #define IXGBE_DEFAULT_RX_HTHRESH      8
105 #define IXGBE_DEFAULT_RX_WTHRESH      0
106
107 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
108 #define IXGBE_DEFAULT_TX_PTHRESH      32
109 #define IXGBE_DEFAULT_TX_HTHRESH      0
110 #define IXGBE_DEFAULT_TX_WTHRESH      0
111 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
112
113 /* Bit shift and mask */
114 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
115 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
116 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
117 #define IXGBE_8_BIT_MASK   UINT8_MAX
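/*
 * Illustrative note (editor's sketch, assuming the usual RTE_LEN2MASK()
 * definition from rte_common.h): IXGBE_4_BIT_WIDTH is CHAR_BIT / 2 = 4, so
 * IXGBE_4_BIT_MASK evaluates to 0x0F, while IXGBE_8_BIT_MASK is UINT8_MAX,
 * i.e. 0xFF. They are used to extract 4-bit and 8-bit fields from registers.
 */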
118
119 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
120
121 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
122
123 #define IXGBE_HKEY_MAX_INDEX 10
124
125 /* Additional timesync values. */
126 #define IXGBE_TIMINCA_16NS_SHIFT 24
127 #define IXGBE_TIMINCA_INCVALUE   16000000
128 #define IXGBE_TIMINCA_INIT       ((0x02 << IXGBE_TIMINCA_16NS_SHIFT) \
129                                   | IXGBE_TIMINCA_INCVALUE)
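/*
 * Worked example (editor's note): IXGBE_TIMINCA_INIT expands to
 * (0x02 << 24) | 16000000 = 0x02000000 | 0x00F42400 = 0x02F42400, i.e. an
 * increment period of 2 in the top byte and an increment value of 16000000
 * in the low 24 bits of the TIMINCA register.
 */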
130
131 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
132 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
133 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
134 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
135 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
136 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
137 static void ixgbe_dev_close(struct rte_eth_dev *dev);
138 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
139 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
140 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
141 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
142 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
143                                 int wait_to_complete);
144 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
145                                 struct rte_eth_stats *stats);
146 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
147 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
148                                              uint16_t queue_id,
149                                              uint8_t stat_idx,
150                                              uint8_t is_rx);
151 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
152                                struct rte_eth_dev_info *dev_info);
153 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
154                                  struct rte_eth_dev_info *dev_info);
155 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
156
157 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
158                 uint16_t vlan_id, int on);
159 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
160 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
161                 uint16_t queue, bool on);
162 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
163                 int on);
164 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
165 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
166 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
167 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
168 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
169
170 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
171 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
172 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
173                                struct rte_eth_fc_conf *fc_conf);
174 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
175                                struct rte_eth_fc_conf *fc_conf);
176 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
177                 struct rte_eth_pfc_conf *pfc_conf);
178 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
179                         struct rte_eth_rss_reta_entry64 *reta_conf,
180                         uint16_t reta_size);
181 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
182                         struct rte_eth_rss_reta_entry64 *reta_conf,
183                         uint16_t reta_size);
184 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
185 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
186 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
187 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
188 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
189                 void *param);
190 static void ixgbe_dev_interrupt_delayed_handler(void *param);
191 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
192                 uint32_t index, uint32_t pool);
193 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
194 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
195                                            struct ether_addr *mac_addr);
196 static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
197
198 /* For Virtual Function support */
199 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
200 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
201 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
202 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
203 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
204 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
205 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
206                 struct rte_eth_stats *stats);
207 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
208 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
209                 uint16_t vlan_id, int on);
210 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
211                 uint16_t queue, int on);
212 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
213 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
214
215 /* For Eth VMDQ APIs support */
216 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
217                 ether_addr* mac_addr,uint8_t on);
218 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
219 static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool,
220                 uint16_t rx_mask, uint8_t on);
221 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
222 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
223 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
224                 uint64_t pool_mask,uint8_t vlan_on);
225 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
226                 struct rte_eth_mirror_conf *mirror_conf,
227                 uint8_t rule_id, uint8_t on);
228 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
229                 uint8_t rule_id);
230
231 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
232                 uint16_t queue_idx, uint16_t tx_rate);
233 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
234                 uint16_t tx_rate, uint64_t q_msk);
235
236 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
237                                  struct ether_addr *mac_addr,
238                                  uint32_t index, uint32_t pool);
239 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
240 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
241                                              struct ether_addr *mac_addr);
242 static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
243                         struct rte_eth_syn_filter *filter,
244                         bool add);
245 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
246                         struct rte_eth_syn_filter *filter);
247 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
248                         enum rte_filter_op filter_op,
249                         void *arg);
250 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
251                         struct ixgbe_5tuple_filter *filter);
252 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
253                         struct ixgbe_5tuple_filter *filter);
254 static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
255                         struct rte_eth_ntuple_filter *filter,
256                         bool add);
257 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
258                                 enum rte_filter_op filter_op,
259                                 void *arg);
260 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
261                         struct rte_eth_ntuple_filter *filter);
262 static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
263                         struct rte_eth_ethertype_filter *filter,
264                         bool add);
265 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
266                                 enum rte_filter_op filter_op,
267                                 void *arg);
268 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
269                         struct rte_eth_ethertype_filter *filter);
270 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
271                      enum rte_filter_type filter_type,
272                      enum rte_filter_op filter_op,
273                      void *arg);
274 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
275
276 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
277                                       struct ether_addr *mc_addr_set,
278                                       uint32_t nb_mc_addr);
279
280 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
281 static int ixgbe_get_regs(struct rte_eth_dev *dev,
282                             struct rte_dev_reg_info *regs);
283 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
284 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
285                                 struct rte_dev_eeprom_info *eeprom);
286 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
287                                 struct rte_dev_eeprom_info *eeprom);
288
289 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
290 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
291                                 struct rte_dev_reg_info *regs);
292
293 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
294 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
295 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
296                                             struct timespec *timestamp,
297                                             uint32_t flags);
298 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
299                                             struct timespec *timestamp);
300
301 /*
302  * Define VF stats macros for registers that are not "cleared on read"
303  */
304 #define UPDATE_VF_STAT(reg, last, cur)                          \
305 {                                                               \
306         u32 latest = IXGBE_READ_REG(hw, reg);                   \
307         cur += latest - last;                                   \
308         last = latest;                                          \
309 }
310
311 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
312 {                                                                \
313         u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
314         u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
315         u64 latest = ((new_msb << 32) | new_lsb);                \
316         cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
317         last = latest;                                           \
318 }
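/*
 * Worked example (editor's note) of the 36-bit rollover handling above: if
 * the previous sample was last = 0xFFFFFFFF0 and the counter has since
 * wrapped to latest = 0x000000010, then
 *   (0x1000000000LL + 0x10 - 0xFFFFFFFF0) & 0xFFFFFFFFFLL = 0x20,
 * so "cur" advances by the 0x20 units actually counted across the wrap
 * instead of by a huge negative delta.
 */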
319
320 #define IXGBE_SET_HWSTRIP(h, q) do{\
321                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
322                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
323                 (h)->bitmap[idx] |= 1 << bit;\
324         }while(0)
325
326 #define IXGBE_CLEAR_HWSTRIP(h, q) do{\
327                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
328                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
329                 (h)->bitmap[idx] &= ~(1 << bit);\
330         }while(0)
331
332 #define IXGBE_GET_HWSTRIP(h, q, r) do{\
333                 uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
334                 uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
335                 (r) = (h)->bitmap[idx] >> bit & 1;\
336         }while(0)
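/*
 * Illustrative sketch (editor's note, assuming 32-bit bitmap words so that
 * sizeof((h)->bitmap[0]) * NBBY == 32): for queue 37 the macros above select
 * idx = 37 / 32 = 1 and bit = 37 % 32 = 5, so IXGBE_SET_HWSTRIP(hwstrip, 37)
 * sets bit 5 of bitmap[1] and IXGBE_GET_HWSTRIP(hwstrip, 37, r) reads that
 * bit back into r as 0 or 1.
 */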
337
338 /*
339  * The set of PCI devices this driver supports
340  */
341 static const struct rte_pci_id pci_id_ixgbe_map[] = {
342
343 #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
344 #include "rte_pci_dev_ids.h"
345
346 { .vendor_id = 0, /* sentinel */ },
347 };
348
349
350 /*
351  * The set of PCI devices this driver supports (for 82599 VF)
352  */
353 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
354
355 #define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
356 #include "rte_pci_dev_ids.h"
357 { .vendor_id = 0, /* sentinel */ },
358
359 };
360
361 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
362         .dev_configure        = ixgbe_dev_configure,
363         .dev_start            = ixgbe_dev_start,
364         .dev_stop             = ixgbe_dev_stop,
365         .dev_set_link_up    = ixgbe_dev_set_link_up,
366         .dev_set_link_down  = ixgbe_dev_set_link_down,
367         .dev_close            = ixgbe_dev_close,
368         .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
369         .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
370         .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
371         .allmulticast_disable = ixgbe_dev_allmulticast_disable,
372         .link_update          = ixgbe_dev_link_update,
373         .stats_get            = ixgbe_dev_stats_get,
374         .stats_reset          = ixgbe_dev_stats_reset,
375         .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
376         .dev_infos_get        = ixgbe_dev_info_get,
377         .mtu_set              = ixgbe_dev_mtu_set,
378         .vlan_filter_set      = ixgbe_vlan_filter_set,
379         .vlan_tpid_set        = ixgbe_vlan_tpid_set,
380         .vlan_offload_set     = ixgbe_vlan_offload_set,
381         .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
382         .rx_queue_start       = ixgbe_dev_rx_queue_start,
383         .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
384         .tx_queue_start       = ixgbe_dev_tx_queue_start,
385         .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
386         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
387         .rx_queue_release     = ixgbe_dev_rx_queue_release,
388         .rx_queue_count       = ixgbe_dev_rx_queue_count,
389         .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
390         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
391         .tx_queue_release     = ixgbe_dev_tx_queue_release,
392         .dev_led_on           = ixgbe_dev_led_on,
393         .dev_led_off          = ixgbe_dev_led_off,
394         .flow_ctrl_get        = ixgbe_flow_ctrl_get,
395         .flow_ctrl_set        = ixgbe_flow_ctrl_set,
396         .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
397         .mac_addr_add         = ixgbe_add_rar,
398         .mac_addr_remove      = ixgbe_remove_rar,
399         .mac_addr_set         = ixgbe_set_default_mac_addr,
400         .uc_hash_table_set    = ixgbe_uc_hash_table_set,
401         .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
402         .mirror_rule_set      = ixgbe_mirror_rule_set,
403         .mirror_rule_reset    = ixgbe_mirror_rule_reset,
404         .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
405         .set_vf_rx            = ixgbe_set_pool_rx,
406         .set_vf_tx            = ixgbe_set_pool_tx,
407         .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
408         .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
409         .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
410         .reta_update          = ixgbe_dev_rss_reta_update,
411         .reta_query           = ixgbe_dev_rss_reta_query,
412 #ifdef RTE_NIC_BYPASS
413         .bypass_init          = ixgbe_bypass_init,
414         .bypass_state_set     = ixgbe_bypass_state_store,
415         .bypass_state_show    = ixgbe_bypass_state_show,
416         .bypass_event_set     = ixgbe_bypass_event_store,
417         .bypass_event_show    = ixgbe_bypass_event_show,
418         .bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
419         .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
420         .bypass_ver_show      = ixgbe_bypass_ver_show,
421         .bypass_wd_reset      = ixgbe_bypass_wd_reset,
422 #endif /* RTE_NIC_BYPASS */
423         .rss_hash_update      = ixgbe_dev_rss_hash_update,
424         .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
425         .filter_ctrl          = ixgbe_dev_filter_ctrl,
426         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
427         .timesync_enable      = ixgbe_timesync_enable,
428         .timesync_disable     = ixgbe_timesync_disable,
429         .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
430         .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
431         .get_reg_length       = ixgbe_get_reg_length,
432         .get_reg              = ixgbe_get_regs,
433         .get_eeprom_length    = ixgbe_get_eeprom_length,
434         .get_eeprom           = ixgbe_get_eeprom,
435         .set_eeprom           = ixgbe_set_eeprom,
436 };
437
438 /*
439  * dev_ops for virtual function; only the bare necessities for basic VF
440  * operation have been implemented
441  */
442 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
443         .dev_configure        = ixgbevf_dev_configure,
444         .dev_start            = ixgbevf_dev_start,
445         .dev_stop             = ixgbevf_dev_stop,
446         .link_update          = ixgbe_dev_link_update,
447         .stats_get            = ixgbevf_dev_stats_get,
448         .stats_reset          = ixgbevf_dev_stats_reset,
449         .dev_close            = ixgbevf_dev_close,
450         .dev_infos_get        = ixgbevf_dev_info_get,
451         .mtu_set              = ixgbevf_dev_set_mtu,
452         .vlan_filter_set      = ixgbevf_vlan_filter_set,
453         .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
454         .vlan_offload_set     = ixgbevf_vlan_offload_set,
455         .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
456         .rx_queue_release     = ixgbe_dev_rx_queue_release,
457         .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
458         .tx_queue_release     = ixgbe_dev_tx_queue_release,
459         .mac_addr_add         = ixgbevf_add_mac_addr,
460         .mac_addr_remove      = ixgbevf_remove_mac_addr,
461         .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
462         .mac_addr_set         = ixgbevf_set_default_mac_addr,
463         .get_reg_length       = ixgbevf_get_reg_length,
464         .get_reg              = ixgbevf_get_regs,
465 };
466
467 /**
468  * Atomically reads the link status information from global
469  * structure rte_eth_dev.
470  *
471  * @param dev
472  *   - Pointer to the structure rte_eth_dev to read from.
473  *   - Pointer to the buffer to be saved with the link status.
474  *
475  * @return
476  *   - On success, zero.
477  *   - On failure, negative value.
478  */
479 static inline int
480 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
481                                 struct rte_eth_link *link)
482 {
483         struct rte_eth_link *dst = link;
484         struct rte_eth_link *src = &(dev->data->dev_link);
485
486         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
487                                         *(uint64_t *)src) == 0)
488                 return -1;
489
490         return 0;
491 }
492
493 /**
494  * Atomically writes the link status information into global
495  * structure rte_eth_dev.
496  *
497  * @param dev
498  *   - Pointer to the structure rte_eth_dev to write to.
499  *   - Pointer to the link status to be written.
500  *
501  * @return
502  *   - On success, zero.
503  *   - On failure, negative value.
504  */
505 static inline int
506 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
507                                 struct rte_eth_link *link)
508 {
509         struct rte_eth_link *dst = &(dev->data->dev_link);
510         struct rte_eth_link *src = link;
511
512         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
513                                         *(uint64_t *)src) == 0)
514                 return -1;
515
516         return 0;
517 }
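/*
 * Illustrative usage sketch (editor's note, not part of the driver): a caller
 * that needs a consistent snapshot of the link can do, for example,
 *
 *     struct rte_eth_link link;
 *     if (rte_ixgbe_dev_atomic_read_link_status(dev, &link) == 0 &&
 *                     link.link_status)
 *             printf("link up at %d Mbps\n", (int)link.link_speed);
 *
 * The 64-bit compare-and-set copies the whole rte_eth_link structure in one
 * atomic operation, so readers never see a half-updated speed/status pair;
 * a return of -1 means the copy could not be made atomically and the caller
 * may simply retry.
 */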
518
519 /*
520  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
521  */
522 static inline int
523 ixgbe_is_sfp(struct ixgbe_hw *hw)
524 {
525         switch (hw->phy.type) {
526         case ixgbe_phy_sfp_avago:
527         case ixgbe_phy_sfp_ftl:
528         case ixgbe_phy_sfp_intel:
529         case ixgbe_phy_sfp_unknown:
530         case ixgbe_phy_sfp_passive_tyco:
531         case ixgbe_phy_sfp_passive_unknown:
532                 return 1;
533         default:
534                 return 0;
535         }
536 }
537
538 static inline int32_t
539 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
540 {
541         uint32_t ctrl_ext;
542         int32_t status;
543
544         status = ixgbe_reset_hw(hw);
545
546         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
547         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
548         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
549         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
550         IXGBE_WRITE_FLUSH(hw);
551
552         return status;
553 }
554
555 static inline void
556 ixgbe_enable_intr(struct rte_eth_dev *dev)
557 {
558         struct ixgbe_interrupt *intr =
559                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
560         struct ixgbe_hw *hw =
561                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
562
563         IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
564         IXGBE_WRITE_FLUSH(hw);
565 }
566
567 /*
568  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
569  */
570 static void
571 ixgbe_disable_intr(struct ixgbe_hw *hw)
572 {
573         PMD_INIT_FUNC_TRACE();
574
575         if (hw->mac.type == ixgbe_mac_82598EB) {
576                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
577         } else {
578                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
579                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
580                 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
581         }
582         IXGBE_WRITE_FLUSH(hw);
583 }
584
585 /*
586  * This function resets queue statistics mapping registers.
587  * From Niantic datasheet, Initialization of Statistics section:
588  * "...if software requires the queue counters, the RQSMR and TQSM registers
589  * must be re-programmed following a device reset."
590  */
591 static void
592 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
593 {
594         uint32_t i;
595
596         for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
597                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
598                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
599         }
600 }
601
602
603 static int
604 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
605                                   uint16_t queue_id,
606                                   uint8_t stat_idx,
607                                   uint8_t is_rx)
608 {
609 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
610 #define NB_QMAP_FIELDS_PER_QSM_REG 4
611 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
612
613         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
614         struct ixgbe_stat_mapping_registers *stat_mappings =
615                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
616         uint32_t qsmr_mask = 0;
617         uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
618         uint32_t q_map;
619         uint8_t n, offset;
620
621         if ((hw->mac.type != ixgbe_mac_82599EB) &&
622                 (hw->mac.type != ixgbe_mac_X540) &&
623                 (hw->mac.type != ixgbe_mac_X550) &&
624                 (hw->mac.type != ixgbe_mac_X550EM_x))
625                 return -ENOSYS;
626
627         PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
628                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
629                      queue_id, stat_idx);
630
631         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
632         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
633                 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
634                 return -EIO;
635         }
636         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
637
638         /* Now clear any previous stat_idx set */
639         clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
640         if (!is_rx)
641                 stat_mappings->tqsm[n] &= ~clearing_mask;
642         else
643                 stat_mappings->rqsmr[n] &= ~clearing_mask;
644
645         q_map = (uint32_t)stat_idx;
646         q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
647         qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
648         if (!is_rx)
649                 stat_mappings->tqsm[n] |= qsmr_mask;
650         else
651                 stat_mappings->rqsmr[n] |= qsmr_mask;
652
653         PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
654                      (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
655                      queue_id, stat_idx);
656         PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
657                      is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
658
659         /* Now write the mapping in the appropriate register */
660         if (is_rx) {
661                 PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
662                              stat_mappings->rqsmr[n], n);
663                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
664         }
665         else {
666                 PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
667                              stat_mappings->tqsm[n], n);
668                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
669         }
670         return 0;
671 }
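/*
 * Worked example (editor's note) for the register layout handled above: each
 * RQSMR/TQSM register holds 4 queue-map fields of 8 bits each, so for
 * queue_id = 5 and stat_idx = 3 the function computes n = 5 / 4 = 1 and
 * offset = 5 % 4 = 1, clears bits 11:8 of register 1 (mask 0x0f << 8) and
 * then ORs in (3 & 0x0f) << 8 = 0x300, mapping queue 5 onto stat counter 3.
 */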
672
673 static void
674 ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
675 {
676         struct ixgbe_stat_mapping_registers *stat_mappings =
677                 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
678         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
679         int i;
680
681         /* write whatever was in stat mapping table to the NIC */
682         for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
683                 /* rx */
684                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
685
686                 /* tx */
687                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
688         }
689 }
690
691 static void
692 ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
693 {
694         uint8_t i;
695         struct ixgbe_dcb_tc_config *tc;
696         uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
697
698         dcb_config->num_tcs.pg_tcs = dcb_max_tc;
699         dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
700         for (i = 0; i < dcb_max_tc; i++) {
701                 tc = &dcb_config->tc_config[i];
702                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
703                 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
704                                  (uint8_t)(100/dcb_max_tc + (i & 1));
705                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
706                 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
707                                  (uint8_t)(100/dcb_max_tc + (i & 1));
708                 tc->pfc = ixgbe_dcb_pfc_disabled;
709         }
710
711         /* Initialize default user to priority mapping, UPx->TC0 */
712         tc = &dcb_config->tc_config[0];
713         tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
714         tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
715         for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
716                 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
717                 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
718         }
719         dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
720         dcb_config->pfc_mode_enable = false;
721         dcb_config->vt_mode = true;
722         dcb_config->round_robin_enable = false;
723         /* support all DCB capabilities in 82599 */
724         dcb_config->support.capabilities = 0xFF;
725
726         /* we only support 4 TCs for X540, X550 */
727         if (hw->mac.type == ixgbe_mac_X540 ||
728                 hw->mac.type == ixgbe_mac_X550 ||
729                 hw->mac.type == ixgbe_mac_X550EM_x) {
730                 dcb_config->num_tcs.pg_tcs = 4;
731                 dcb_config->num_tcs.pfc_tcs = 4;
732         }
733 }
734
735 /*
736  * Ensure that all locks are released before first NVM or PHY access
737  */
738 static void
739 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
740 {
741         uint16_t mask;
742
743         /*
744          * The PHY lock should not fail at this early stage. If it does,
745          * it is due to an improper exit of the application.
746          * So force the release of the faulty lock. Release of the common
747          * lock is done automatically by the swfw_sync function.
748          */
749         mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
750         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
751                 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
752         }
753         ixgbe_release_swfw_semaphore(hw, mask);
754
755         /*
756          * These are trickier since they are common to all ports; but
757          * swfw_sync retries for long enough (1s) to be almost sure that if
758          * the lock cannot be taken, it is due to an improper hold of the
759          * semaphore.
760          */
761         mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
762         if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
763                 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
764         }
765         ixgbe_release_swfw_semaphore(hw, mask);
766 }
767
768 /*
769  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
770  * It returns 0 on success.
771  */
772 static int
773 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
774 {
775         struct rte_pci_device *pci_dev;
776         struct ixgbe_hw *hw =
777                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
778         struct ixgbe_vfta * shadow_vfta =
779                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
780         struct ixgbe_hwstrip *hwstrip =
781                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
782         struct ixgbe_dcb_config *dcb_config =
783                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
784         struct ixgbe_filter_info *filter_info =
785                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
786         uint32_t ctrl_ext;
787         uint16_t csum;
788         int diag, i;
789
790         PMD_INIT_FUNC_TRACE();
791
792         eth_dev->dev_ops = &ixgbe_eth_dev_ops;
793         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
794         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
795
796         /*
797          * For secondary processes, we don't initialise any further as primary
798          * has already done this work. Only check we don't need a different
799          * RX and TX function.
800          */
801         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
802                 struct ixgbe_tx_queue *txq;
803                 /* TX queue function in primary, set by the last initialized
804                  * queue; TX queues may not be initialized by the primary process */
805                 if (eth_dev->data->tx_queues) {
806                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
807                         ixgbe_set_tx_function(eth_dev, txq);
808                 } else {
809                         /* Use default TX function if we get here */
810                         PMD_INIT_LOG(INFO, "No TX queues configured yet. "
811                                            "Using default TX function.");
812                 }
813
814                 ixgbe_set_rx_function(eth_dev);
815
816                 return 0;
817         }
818         pci_dev = eth_dev->pci_dev;
819
820         /* Vendor and Device ID need to be set before init of shared code */
821         hw->device_id = pci_dev->id.device_id;
822         hw->vendor_id = pci_dev->id.vendor_id;
823         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
824         hw->allow_unsupported_sfp = 1;
825
826         /* Initialize the shared code (base driver) */
827 #ifdef RTE_NIC_BYPASS
828         diag = ixgbe_bypass_init_shared_code(hw);
829 #else
830         diag = ixgbe_init_shared_code(hw);
831 #endif /* RTE_NIC_BYPASS */
832
833         if (diag != IXGBE_SUCCESS) {
834                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
835                 return -EIO;
836         }
837
838         /* pick up the PCI bus settings for reporting later */
839         ixgbe_get_bus_info(hw);
840
841         /* Unlock any pending hardware semaphore */
842         ixgbe_swfw_lock_reset(hw);
843
844         /* Initialize DCB configuration*/
845         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
846         ixgbe_dcb_init(hw,dcb_config);
847         /* Set default Hardware Flow Control settings */
848         hw->fc.requested_mode = ixgbe_fc_full;
849         hw->fc.current_mode = ixgbe_fc_full;
850         hw->fc.pause_time = IXGBE_FC_PAUSE;
851         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
852                 hw->fc.low_water[i] = IXGBE_FC_LO;
853                 hw->fc.high_water[i] = IXGBE_FC_HI;
854         }
855         hw->fc.send_xon = 1;
856
857         /* Make sure we have a good EEPROM before we read from it */
858         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
859         if (diag != IXGBE_SUCCESS) {
860                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
861                 return -EIO;
862         }
863
864 #ifdef RTE_NIC_BYPASS
865         diag = ixgbe_bypass_init_hw(hw);
866 #else
867         diag = ixgbe_init_hw(hw);
868 #endif /* RTE_NIC_BYPASS */
869
870         /*
871          * Devices with copper phys will fail to initialise if ixgbe_init_hw()
872          * is called too soon after the kernel driver unbinding/binding occurs.
873          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
874          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
875          * also called. See ixgbe_identify_phy_82599(). The reason for the
876          * failure is not known, and it only occurs when virtualisation features
877          * are disabled in the BIOS. A delay of 100ms was found to be enough by
878          * trial-and-error, and is doubled to be safe.
879          */
880         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
881                 rte_delay_ms(200);
882                 diag = ixgbe_init_hw(hw);
883         }
884
885         if (diag == IXGBE_ERR_EEPROM_VERSION) {
886                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
887                     "LOM.  Please be aware there may be issues associated "
888                     "with your hardware.");
889                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
890                     "please contact your Intel or hardware representative "
891                     "who provided you with this hardware.");
892         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
893                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
894         if (diag) {
895                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
896                 return -EIO;
897         }
898
899         /* Reset the hw statistics */
900         ixgbe_dev_stats_reset(eth_dev);
901
902         /* disable interrupt */
903         ixgbe_disable_intr(hw);
904
905         /* reset mappings for queue statistics hw counters*/
906         ixgbe_reset_qstat_mappings(hw);
907
908         /* Allocate memory for storing MAC addresses */
909         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
910                         hw->mac.num_rar_entries, 0);
911         if (eth_dev->data->mac_addrs == NULL) {
912                 PMD_INIT_LOG(ERR,
913                         "Failed to allocate %u bytes needed to store "
914                         "MAC addresses",
915                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
916                 return -ENOMEM;
917         }
918         /* Copy the permanent MAC address */
919         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
920                         &eth_dev->data->mac_addrs[0]);
921
922         /* Allocate memory for storing hash filter MAC addresses */
923         eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
924                         IXGBE_VMDQ_NUM_UC_MAC, 0);
925         if (eth_dev->data->hash_mac_addrs == NULL) {
926                 PMD_INIT_LOG(ERR,
927                         "Failed to allocate %d bytes needed to store MAC addresses",
928                         ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
929                 return -ENOMEM;
930         }
931
932         /* initialize the vfta */
933         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
934
935         /* initialize the hw strip bitmap*/
936         memset(hwstrip, 0, sizeof(*hwstrip));
937
938         /* initialize PF if max_vfs not zero */
939         ixgbe_pf_host_init(eth_dev);
940
941         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
942         /* let hardware know driver is loaded */
943         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
944         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
945         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
946         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
947         IXGBE_WRITE_FLUSH(hw);
948
949         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
950                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
951                              (int) hw->mac.type, (int) hw->phy.type,
952                              (int) hw->phy.sfp_type);
953         else
954                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
955                              (int) hw->mac.type, (int) hw->phy.type);
956
957         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
958                         eth_dev->data->port_id, pci_dev->id.vendor_id,
959                         pci_dev->id.device_id);
960
961         rte_intr_callback_register(&(pci_dev->intr_handle),
962                 ixgbe_dev_interrupt_handler, (void *)eth_dev);
963
964         /* enable uio intr after callback register */
965         rte_intr_enable(&(pci_dev->intr_handle));
966
967         /* enable support intr */
968         ixgbe_enable_intr(eth_dev);
969
970         /* initialize 5tuple filter list */
971         TAILQ_INIT(&filter_info->fivetuple_list);
972         memset(filter_info->fivetuple_mask, 0,
973                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
974
975         return 0;
976 }
977
978
979 /*
980  * Negotiate mailbox API version with the PF.
981  * After reset, the API version is always set to the basic one (ixgbe_mbox_api_10).
982  * Then we try to negotiate starting with the most recent one.
983  * If all negotiation attempts fail, then we will proceed with
984  * the default one (ixgbe_mbox_api_10).
985  */
986 static void
987 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
988 {
989         int32_t i;
990
991         /* start with highest supported, proceed down */
992         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
993                 ixgbe_mbox_api_11,
994                 ixgbe_mbox_api_10,
995         };
996
997         for (i = 0;
998                         i != RTE_DIM(sup_ver) &&
999                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1000                         i++)
1001                 ;
1002 }
1003
1004 static void
1005 generate_random_mac_addr(struct ether_addr *mac_addr)
1006 {
1007         uint64_t random;
1008
1009         /* Set Organizationally Unique Identifier (OUI) prefix. */
1010         mac_addr->addr_bytes[0] = 0x00;
1011         mac_addr->addr_bytes[1] = 0x09;
1012         mac_addr->addr_bytes[2] = 0xC0;
1013         /* Force indication of locally assigned MAC address. */
1014         mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1015         /* Generate the last 3 bytes of the MAC address with a random number. */
1016         random = rte_rand();
1017         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1018 }
1019
1020 /*
1021  * Virtual Function device init
1022  */
1023 static int
1024 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1025 {
1026         int diag;
1027         uint32_t tc, tcs;
1028         struct rte_pci_device *pci_dev;
1029         struct ixgbe_hw *hw =
1030                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1031         struct ixgbe_vfta * shadow_vfta =
1032                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1033         struct ixgbe_hwstrip *hwstrip =
1034                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1035         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1036
1037         PMD_INIT_FUNC_TRACE();
1038
1039         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1040         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1041         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1042
1043         /* for secondary processes, we don't initialise any further as primary
1044          * has already done this work. Only check we don't need a different
1045          * RX function */
1046         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1047                 if (eth_dev->data->scattered_rx)
1048                         eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
1049                 return 0;
1050         }
1051
1052         pci_dev = eth_dev->pci_dev;
1053
1054         hw->device_id = pci_dev->id.device_id;
1055         hw->vendor_id = pci_dev->id.vendor_id;
1056         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1057
1058         /* initialize the vfta */
1059         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1060
1061         /* initialize the hw strip bitmap*/
1062         memset(hwstrip, 0, sizeof(*hwstrip));
1063
1064         /* Initialize the shared code (base driver) */
1065         diag = ixgbe_init_shared_code(hw);
1066         if (diag != IXGBE_SUCCESS) {
1067                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1068                 return -EIO;
1069         }
1070
1071         /* init_mailbox_params */
1072         hw->mbx.ops.init_params(hw);
1073
1074         /* Reset the hw statistics */
1075         ixgbevf_dev_stats_reset(eth_dev);
1076
1077         /* Disable the interrupts for VF */
1078         ixgbevf_intr_disable(hw);
1079
1080         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1081         diag = hw->mac.ops.reset_hw(hw);
1082
1083         /*
1084          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1085          * the underlying PF driver has not assigned a MAC address to the VF.
1086          * In this case, assign a random MAC address.
1087          */
1088         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1089                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1090                 return (diag);
1091         }
1092
1093         /* negotiate mailbox API version to use with the PF. */
1094         ixgbevf_negotiate_api(hw);
1095
1096         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1097         ixgbevf_get_queues(hw, &tcs, &tc);
1098
1099         /* Allocate memory for storing MAC addresses */
1100         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1101                         hw->mac.num_rar_entries, 0);
1102         if (eth_dev->data->mac_addrs == NULL) {
1103                 PMD_INIT_LOG(ERR,
1104                         "Failed to allocate %u bytes needed to store "
1105                         "MAC addresses",
1106                         ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1107                 return -ENOMEM;
1108         }
1109
1110         /* Generate a random MAC address, if none was assigned by PF. */
1111         if (is_zero_ether_addr(perm_addr)) {
1112                 generate_random_mac_addr(perm_addr);
1113                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1114                 if (diag) {
1115                         rte_free(eth_dev->data->mac_addrs);
1116                         eth_dev->data->mac_addrs = NULL;
1117                         return diag;
1118                 }
1119                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1120                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1121                              "%02x:%02x:%02x:%02x:%02x:%02x",
1122                              perm_addr->addr_bytes[0],
1123                              perm_addr->addr_bytes[1],
1124                              perm_addr->addr_bytes[2],
1125                              perm_addr->addr_bytes[3],
1126                              perm_addr->addr_bytes[4],
1127                              perm_addr->addr_bytes[5]);
1128         }
1129
1130         /* Copy the permanent MAC address */
1131         ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1132
1133         /* reset the hardware with the new settings */
1134         diag = hw->mac.ops.start_hw(hw);
1135         switch (diag) {
1136                 case  0:
1137                         break;
1138
1139                 default:
1140                         PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1141                         return (-EIO);
1142         }
1143
1144         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1145                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1146                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1147
1148         return 0;
1149 }
1150
1151 static struct eth_driver rte_ixgbe_pmd = {
1152         .pci_drv = {
1153                 .name = "rte_ixgbe_pmd",
1154                 .id_table = pci_id_ixgbe_map,
1155                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1156         },
1157         .eth_dev_init = eth_ixgbe_dev_init,
1158         .dev_private_size = sizeof(struct ixgbe_adapter),
1159 };
1160
1161 /*
1162  * virtual function driver struct
1163  */
1164 static struct eth_driver rte_ixgbevf_pmd = {
1165         .pci_drv = {
1166                 .name = "rte_ixgbevf_pmd",
1167                 .id_table = pci_id_ixgbevf_map,
1168                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1169         },
1170         .eth_dev_init = eth_ixgbevf_dev_init,
1171         .dev_private_size = sizeof(struct ixgbe_adapter),
1172 };
1173
1174 /*
1175  * Driver initialization routine.
1176  * Invoked once at EAL init time.
1177  * Registers itself as the [Poll Mode] Driver of PCI IXGBE devices.
1178  */
1179 static int
1180 rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
1181 {
1182         PMD_INIT_FUNC_TRACE();
1183
1184         rte_eth_driver_register(&rte_ixgbe_pmd);
1185         return 0;
1186 }
1187
1188 /*
1189  * VF Driver initialization routine.
1190  * Invoked once at EAL init time.
1191  * Registers itself as the [Virtual Poll Mode] Driver of PCI Niantic devices.
1192  */
1193 static int
1194 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
1195 {
1196         PMD_INIT_FUNC_TRACE();
1197
1198         rte_eth_driver_register(&rte_ixgbevf_pmd);
1199         return (0);
1200 }
1201
1202 static int
1203 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1204 {
1205         struct ixgbe_hw *hw =
1206                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1207         struct ixgbe_vfta * shadow_vfta =
1208                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1209         uint32_t vfta;
1210         uint32_t vid_idx;
1211         uint32_t vid_bit;
1212
1213         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1214         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1215         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1216         if (on)
1217                 vfta |= vid_bit;
1218         else
1219                 vfta &= ~vid_bit;
1220         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1221
1222         /* update local VFTA copy */
1223         shadow_vfta->vfta[vid_idx] = vfta;
1224
1225         return 0;
1226 }
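/*
 * Worked example (editor's note) of the VFTA indexing above: for
 * vlan_id = 100, vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4 = 0x10, so enabling the filter sets
 * bit 4 of VFTA register 3 (each of the 128 VFTA registers covers 32 VLAN
 * IDs).
 */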
1227
1228 static void
1229 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1230 {
1231         if (on)
1232                 ixgbe_vlan_hw_strip_enable(dev, queue);
1233         else
1234                 ixgbe_vlan_hw_strip_disable(dev, queue);
1235 }
1236
1237 static void
1238 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
1239 {
1240         struct ixgbe_hw *hw =
1241                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1242
1243         /* Only the high 16 bits are valid */
1244         IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
1245 }
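/*
 * Illustrative note (editor's sketch): since only the upper 16 bits of EXVET
 * carry the extended VLAN ethertype, setting e.g. tpid = 0x88A8 (the 802.1ad
 * S-TAG ethertype) results in IXGBE_EXVET being written with 0x88A80000.
 */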
1246
1247 void
1248 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1249 {
1250         struct ixgbe_hw *hw =
1251                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1252         uint32_t vlnctrl;
1253
1254         PMD_INIT_FUNC_TRACE();
1255
1256         /* Filter Table Disable */
1257         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1258         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1259
1260         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1261 }
1262
1263 void
1264 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1265 {
1266         struct ixgbe_hw *hw =
1267                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1268         struct ixgbe_vfta * shadow_vfta =
1269                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1270         uint32_t vlnctrl;
1271         uint16_t i;
1272
1273         PMD_INIT_FUNC_TRACE();
1274
1275         /* Filter Table Enable */
1276         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1277         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1278         vlnctrl |= IXGBE_VLNCTRL_VFE;
1279
1280         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1281
1282         /* write whatever is in local vfta copy */
1283         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1284                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1285 }
1286
1287 static void
1288 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1289 {
1290         struct ixgbe_hwstrip *hwstrip =
1291                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1292
1293         if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
1294                 return;
1295
1296         if (on)
1297                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1298         else
1299                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1300 }
1301
1302 static void
1303 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1304 {
1305         struct ixgbe_hw *hw =
1306                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1307         uint32_t ctrl;
1308
1309         PMD_INIT_FUNC_TRACE();
1310
1311         if (hw->mac.type == ixgbe_mac_82598EB) {
1312                 /* No queue-level support */
1313                 PMD_INIT_LOG(INFO, "82598EB does not support queue-level hw strip");
1314                 return;
1315         }
1316         else {
1317                 /* On other 10G NICs, VLAN strip can be set up per queue via RXDCTL */
1318                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1319                 ctrl &= ~IXGBE_RXDCTL_VME;
1320                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1321         }
1322         /* record this setting in the per-queue HW strip bitmap */
1323         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1324 }
1325
1326 static void
1327 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1328 {
1329         struct ixgbe_hw *hw =
1330                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1331         uint32_t ctrl;
1332
1333         PMD_INIT_FUNC_TRACE();
1334
1335         if (hw->mac.type == ixgbe_mac_82598EB) {
1336                 /* No queue-level support */
1337                 PMD_INIT_LOG(INFO, "82598EB does not support queue-level hw strip");
1338                 return;
1339         }
1340         else {
1341                 /* On other 10G NICs, VLAN strip can be set up per queue via RXDCTL */
1342                 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1343                 ctrl |= IXGBE_RXDCTL_VME;
1344                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1345         }
1346         /* record this setting in the per-queue HW strip bitmap */
1347         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1348 }
1349
1350 void
1351 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
1352 {
1353         struct ixgbe_hw *hw =
1354                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1355         uint32_t ctrl;
1356         uint16_t i;
1357
1358         PMD_INIT_FUNC_TRACE();
1359
1360         if (hw->mac.type == ixgbe_mac_82598EB) {
1361                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1362                 ctrl &= ~IXGBE_VLNCTRL_VME;
1363                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1364         }
1365         else {
1366                 /* On other 10G NICs, VLAN strip can be set up per queue via RXDCTL */
1367                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1368                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1369                         ctrl &= ~IXGBE_RXDCTL_VME;
1370                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1371
1372                         /* record this setting in the per-queue HW strip bitmap */
1373                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
1374                 }
1375         }
1376 }
1377
1378 void
1379 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
1380 {
1381         struct ixgbe_hw *hw =
1382                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1383         uint32_t ctrl;
1384         uint16_t i;
1385
1386         PMD_INIT_FUNC_TRACE();
1387
1388         if (hw->mac.type == ixgbe_mac_82598EB) {
1389                 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1390                 ctrl |= IXGBE_VLNCTRL_VME;
1391                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1392         }
1393         else {
1394                 /* On other 10G NICs, VLAN strip can be set up per queue via RXDCTL */
1395                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1396                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1397                         ctrl |= IXGBE_RXDCTL_VME;
1398                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
1399
1400                         /* record this setting in the per-queue HW strip bitmap */
1401                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
1402                 }
1403         }
1404 }
1405
1406 static void
1407 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1408 {
1409         struct ixgbe_hw *hw =
1410                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1411         uint32_t ctrl;
1412
1413         PMD_INIT_FUNC_TRACE();
1414
1415         /* DMATXCTL: Generic Double VLAN Disable */
1416         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1417         ctrl &= ~IXGBE_DMATXCTL_GDV;
1418         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1419
1420         /* CTRL_EXT: Global Double VLAN Disable */
1421         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1422         ctrl &= ~IXGBE_EXTENDED_VLAN;
1423         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1424
1425 }
1426
1427 static void
1428 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1429 {
1430         struct ixgbe_hw *hw =
1431                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1432         uint32_t ctrl;
1433
1434         PMD_INIT_FUNC_TRACE();
1435
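             /*
              * Double VLAN (QinQ) is switched on in two places: the GDV bit in
              * DMATXCTL and the EXTENDED_VLAN bit in CTRL_EXT.  Both are set
              * below; the outer tag EtherType itself comes from the EXVET
              * register.
              */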
1436         /* DMATXCTL: Generic Double VLAN Enable */
1437         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1438         ctrl |= IXGBE_DMATXCTL_GDV;
1439         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
1440
1441         /* CTRL_EXT: Global Double VLAN Enable */
1442         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1443         ctrl |= IXGBE_EXTENDED_VLAN;
1444         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
1445
1446         /*
1447          * The VET EXT field in EXVET defaults to 0x8100, so it does not need
1448          * to be changed. The same applies to the VT field of DMATXCTL.
1449          */
1450 }
1451
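     /*
      * Apply the VLAN offload settings selected in dev_conf.rxmode.  'mask'
      * chooses which of the strip/filter/extend settings to (re)apply; it is
      * built from the ETH_VLAN_*_MASK flags, either by ixgbe_dev_start() below
      * or, presumably, by the ethdev layer (rte_eth_dev_set_vlan_offload()).
      */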
1452 static void
1453 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1454 {
1455         if (mask & ETH_VLAN_STRIP_MASK) {
1456                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1457                         ixgbe_vlan_hw_strip_enable_all(dev);
1458                 else
1459                         ixgbe_vlan_hw_strip_disable_all(dev);
1460         }
1461
1462         if (mask & ETH_VLAN_FILTER_MASK) {
1463                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1464                         ixgbe_vlan_hw_filter_enable(dev);
1465                 else
1466                         ixgbe_vlan_hw_filter_disable(dev);
1467         }
1468
1469         if (mask & ETH_VLAN_EXTEND_MASK) {
1470                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1471                         ixgbe_vlan_hw_extend_enable(dev);
1472                 else
1473                         ixgbe_vlan_hw_extend_disable(dev);
1474         }
1475 }
1476
1477 static void
1478 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1479 {
1480         struct ixgbe_hw *hw =
1481                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1482         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
1483         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1484         vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
1485         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
1486 }
1487
1488 static int
1489 ixgbe_dev_configure(struct rte_eth_dev *dev)
1490 {
1491         struct ixgbe_interrupt *intr =
1492                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1493         struct ixgbe_adapter *adapter =
1494                 (struct ixgbe_adapter *)dev->data->dev_private;
1495
1496         PMD_INIT_FUNC_TRACE();
1497
1498         /* set flag to update link status after init */
1499         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1500
1501         /*
1502          * Initialize to TRUE. If any Rx queue fails to meet the bulk
1503          * allocation or vector Rx preconditions, these flags will be reset.
1504          */
1505         adapter->rx_bulk_alloc_allowed = true;
1506         adapter->rx_vec_allowed = true;
1507
1508         return 0;
1509 }
1510
1511 /*
1512  * Configure device link speed and setup link.
1513  * It returns 0 on success.
1514  */
1515 static int
1516 ixgbe_dev_start(struct rte_eth_dev *dev)
1517 {
1518         struct ixgbe_hw *hw =
1519                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1520         struct ixgbe_vf_info *vfinfo =
1521                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
1522         int err, link_up = 0, negotiate = 0;
1523         uint32_t speed = 0;
1524         int mask = 0;
1525         int status;
1526         uint16_t vf, idx;
1527
1528         PMD_INIT_FUNC_TRACE();
1529
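             /*
              * Start sequence: validate the requested link configuration, stop
              * and reset the adapter, configure the PF/SR-IOV module,
              * initialize the Tx and Rx units, bring up the link, then
              * re-enable interrupts and restore VLAN, DCB, flow director and
              * per-VF rate limit settings.
              */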
1530         /* IXGBE devices don't support half duplex */
1531         if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
1532                         (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
1533                 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
1534                              dev->data->dev_conf.link_duplex,
1535                              dev->data->port_id);
1536                 return -EINVAL;
1537         }
1538
1539         /* stop adapter */
1540         hw->adapter_stopped = FALSE;
1541         ixgbe_stop_adapter(hw);
1542
1543         /* reinitialize the adapter;
1544          * this resets and restarts the hardware */
1545         status = ixgbe_pf_reset_hw(hw);
1546         if (status != 0)
1547                 return -1;
1548         hw->mac.ops.start_hw(hw);
1549         hw->mac.get_link_status = true;
1550
1551         /* configure PF module if SRIOV enabled */
1552         ixgbe_pf_host_configure(dev);
1553
1554         /* initialize transmission unit */
1555         ixgbe_dev_tx_init(dev);
1556
1557         /* This can fail when allocating mbufs for descriptor rings */
1558         err = ixgbe_dev_rx_init(dev);
1559         if (err) {
1560                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1561                 goto error;
1562         }
1563
1564         err = ixgbe_dev_rxtx_start(dev);
1565         if (err < 0) {
1566                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1567                 goto error;
1568         }
1569
1570         /* Skip link setup if loopback mode is enabled for 82599. */
1571         if (hw->mac.type == ixgbe_mac_82599EB &&
1572                         dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
1573                 goto skip_link_setup;
1574
1575         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1576                 err = hw->mac.ops.setup_sfp(hw);
1577                 if (err)
1578                         goto error;
1579         }
1580
1581         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
1582                 /* Turn on the copper */
1583                 ixgbe_set_phy_power(hw, true);
1584         } else {
1585                 /* Turn on the laser */
1586                 ixgbe_enable_tx_laser(hw);
1587         }
1588
1589         err = ixgbe_check_link(hw, &speed, &link_up, 0);
1590         if (err)
1591                 goto error;
1592         dev->data->dev_link.link_status = link_up;
1593
1594         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
1595         if (err)
1596                 goto error;
1597
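             /*
              * Translate the requested ethdev link speed into the MAC's link
              * speed mask; the AUTONEG case picks the per-generation
              * advertisement mask (82599-style vs. 82598).
              */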
1598         switch (dev->data->dev_conf.link_speed) {
1599         case ETH_LINK_SPEED_AUTONEG:
1600                 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
1601                                 IXGBE_LINK_SPEED_82599_AUTONEG :
1602                                 IXGBE_LINK_SPEED_82598_AUTONEG;
1603                 break;
1604         case ETH_LINK_SPEED_100:
1605                 /*
1606                  * Invalid for 82598 but error will be detected by
1607                  * ixgbe_setup_link()
1608                  */
1609                 speed = IXGBE_LINK_SPEED_100_FULL;
1610                 break;
1611         case ETH_LINK_SPEED_1000:
1612                 speed = IXGBE_LINK_SPEED_1GB_FULL;
1613                 break;
1614         case ETH_LINK_SPEED_10000:
1615                 speed = IXGBE_LINK_SPEED_10GB_FULL;
1616                 break;
1617         default:
1618                 PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
1619                              dev->data->dev_conf.link_speed,
1620                              dev->data->port_id);
1621                 goto error;
1622         }
1623
1624         err = ixgbe_setup_link(hw, speed, link_up);
1625         if (err)
1626                 goto error;
1627
1628 skip_link_setup:
1629
1630         /* check if lsc interrupt is enabled */
1631         if (dev->data->dev_conf.intr_conf.lsc != 0)
1632                 ixgbe_dev_lsc_interrupt_setup(dev);
1633
1634         /* resume enabled intr since hw reset */
1635         ixgbe_enable_intr(dev);
1636
1637         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
1638                 ETH_VLAN_EXTEND_MASK;
1639         ixgbe_vlan_offload_set(dev, mask);
1640
1641         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1642                 /* Enable vlan filtering for VMDq */
1643                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
1644         }
1645
1646         /* Configure DCB hw */
1647         ixgbe_configure_dcb(dev);
1648
1649         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1650                 err = ixgbe_fdir_configure(dev);
1651                 if (err)
1652                         goto error;
1653         }
1654
1655         /* Restore vf rate limit */
1656         if (vfinfo != NULL) {
1657                 for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
1658                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1659                                 if (vfinfo[vf].tx_rate[idx] != 0)
1660                                         ixgbe_set_vf_rate_limit(dev, vf,
1661                                                 vfinfo[vf].tx_rate[idx],
1662                                                 1 << idx);
1663         }
1664
1665         ixgbe_restore_statistics_mapping(dev);
1666
1667         return 0;
1668
1669 error:
1670         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
1671         ixgbe_dev_clear_queues(dev);
1672         return -EIO;
1673 }
1674
1675 /*
1676  * Stop device: disable rx and tx functions to allow for reconfiguring.
1677  */
1678 static void
1679 ixgbe_dev_stop(struct rte_eth_dev *dev)
1680 {
1681         struct rte_eth_link link;
1682         struct ixgbe_hw *hw =
1683                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1684         struct ixgbe_vf_info *vfinfo =
1685                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
1686         struct ixgbe_filter_info *filter_info =
1687                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1688         struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
1689         int vf;
1690
1691         PMD_INIT_FUNC_TRACE();
1692
1693         /* disable interrupts */
1694         ixgbe_disable_intr(hw);
1695
1696         /* reset the NIC */
1697         ixgbe_pf_reset_hw(hw);
1698         hw->adapter_stopped = FALSE;
1699
1700         /* stop adapter */
1701         ixgbe_stop_adapter(hw);
1702
1703         for (vf = 0; vfinfo != NULL &&
1704                      vf < dev->pci_dev->max_vfs; vf++)
1705                 vfinfo[vf].clear_to_send = false;
1706
1707         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
1708                 /* Turn off the copper */
1709                 ixgbe_set_phy_power(hw, false);
1710         } else {
1711                 /* Turn off the laser */
1712                 ixgbe_disable_tx_laser(hw);
1713         }
1714
1715         ixgbe_dev_clear_queues(dev);
1716
1717         /* Clear stored conf */
1718         dev->data->scattered_rx = 0;
1719         dev->data->lro = 0;
1720
1721         /* Clear recorded link status */
1722         memset(&link, 0, sizeof(link));
1723         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
1724
1725         /* Remove all ntuple filters of the device */
1726         for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
1727              p_5tuple != NULL; p_5tuple = p_5tuple_next) {
1728                 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
1729                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1730                              p_5tuple, entries);
1731                 rte_free(p_5tuple);
1732         }
1733         memset(filter_info->fivetuple_mask, 0,
1734                 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1735
1736 }
1737
1738 /*
1739  * Set device link up: enable tx.
1740  */
1741 static int
1742 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
1743 {
1744         struct ixgbe_hw *hw =
1745                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1746         if (hw->mac.type == ixgbe_mac_82599EB) {
1747 #ifdef RTE_NIC_BYPASS
1748                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
1749                         /* Not supported in bypass mode */
1750                         PMD_INIT_LOG(ERR, "Set link up is not supported "
1751                                      "by device id 0x%x", hw->device_id);
1752                         return -ENOTSUP;
1753                 }
1754 #endif
1755         }
1756
1757         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
1758                 /* Turn on the copper */
1759                 ixgbe_set_phy_power(hw, true);
1760         } else {
1761                 /* Turn on the laser */
1762                 ixgbe_enable_tx_laser(hw);
1763         }
1764
1765         return 0;
1766 }
1767
1768 /*
1769  * Set device link down: disable tx.
1770  */
1771 static int
1772 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
1773 {
1774         struct ixgbe_hw *hw =
1775                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1776         if (hw->mac.type == ixgbe_mac_82599EB) {
1777 #ifdef RTE_NIC_BYPASS
1778                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
1779                         /* Not supported in bypass mode */
1780                         PMD_INIT_LOG(ERR, "Set link down is not supported "
1781                                      "by device id 0x%x", hw->device_id);
1782                         return -ENOTSUP;
1783                 }
1784 #endif
1785         }
1786
1787         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
1788                 /* Turn off the copper */
1789                 ixgbe_set_phy_power(hw, false);
1790         } else {
1791                 /* Turn off the laser */
1792                 ixgbe_disable_tx_laser(hw);
1793         }
1794
1795         return 0;
1796 }
1797
1798 /*
1799  * Reset and stop the device.
1800  */
1801 static void
1802 ixgbe_dev_close(struct rte_eth_dev *dev)
1803 {
1804         struct ixgbe_hw *hw =
1805                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1806
1807         PMD_INIT_FUNC_TRACE();
1808
1809         ixgbe_pf_reset_hw(hw);
1810
1811         ixgbe_dev_stop(dev);
1812         hw->adapter_stopped = 1;
1813
1814         ixgbe_disable_pcie_master(hw);
1815
1816         /* reprogram the RAR[0] in case user changed it. */
1817         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1818 }
1819
1820 /*
1821  * This function is based on ixgbe_update_stats_counters() in base/ixgbe.c
1822  */
1823 static void
1824 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1825 {
1826         struct ixgbe_hw *hw =
1827                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1828         struct ixgbe_hw_stats *hw_stats =
1829                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1830         uint32_t bprc, lxon, lxoff, total;
1831         uint64_t total_missed_rx, total_qbrc, total_qprc;
1832         unsigned i;
1833
1834         total_missed_rx = 0;
1835         total_qbrc = 0;
1836         total_qprc = 0;
1837
1838         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1839         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1840         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1841         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1842
1843         for (i = 0; i < 8; i++) {
1844                 uint32_t mp;
1845                 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1846                 /* global total per queue */
1847                 hw_stats->mpc[i] += mp;
1848                 /* Running comprehensive total for stats display */
1849                 total_missed_rx += hw_stats->mpc[i];
1850                 if (hw->mac.type == ixgbe_mac_82598EB)
1851                         hw_stats->rnbc[i] +=
1852                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1853                 hw_stats->pxontxc[i] +=
1854                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1855                 hw_stats->pxonrxc[i] +=
1856                     IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1857                 hw_stats->pxofftxc[i] +=
1858                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1859                 hw_stats->pxoffrxc[i] +=
1860                     IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1861                 hw_stats->pxon2offc[i] +=
1862                     IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1863         }
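             /*
              * Per-queue byte counters are split across _L/_H register pairs;
              * both halves are folded into the 64-bit software totals, which
              * also feed the aggregate ipackets/ibytes figures reported below.
              */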
1864         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
1865                 hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1866                 hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1867                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
1868                 hw_stats->qbrc[i] +=
1869                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
1870                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
1871                 hw_stats->qbtc[i] +=
1872                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
1873                 hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1874
1875                 total_qprc += hw_stats->qprc[i];
1876                 total_qbrc += hw_stats->qbrc[i];
1877         }
1878         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1879         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1880         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1881
1882         /* Note that gprc counts missed packets */
1883         hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1884
1885         if (hw->mac.type != ixgbe_mac_82598EB) {
1886                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
1887                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1888                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
1889                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1890                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
1891                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1892                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1893                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1894         } else {
1895                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1896                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1897                 /* 82598 only has a counter in the high register */
1898                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1899                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1900                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1901         }
1902
1903         /*
1904          * Workaround: mprc hardware is incorrectly counting
1905          * broadcasts, so for now we subtract those.
1906          */
1907         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1908         hw_stats->bprc += bprc;
1909         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1910         if (hw->mac.type == ixgbe_mac_82598EB)
1911                 hw_stats->mprc -= bprc;
1912
1913         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1914         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1915         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1916         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1917         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1918         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1919
1920         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1921         hw_stats->lxontxc += lxon;
1922         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1923         hw_stats->lxofftxc += lxoff;
1924         total = lxon + lxoff;
1925
1926         hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1927         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1928         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1929         hw_stats->gptc -= total;
1930         hw_stats->mptc -= total;
1931         hw_stats->ptc64 -= total;
1932         hw_stats->gotc -= total * ETHER_MIN_LEN;
1933
1934         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1935         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1936         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1937         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1938         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1939         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1940         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1941         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1942         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1943         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1944         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1945         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1946         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1947         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1948         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1949         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1950         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1951         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1952         /* Only read FCoE counters on non-82598 devices */
1953         if (hw->mac.type != ixgbe_mac_82598EB) {
1954                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1955                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1956                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1957                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1958                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1959         }
1960
1961         if (stats == NULL)
1962                 return;
1963
1964         /* Fill out the rte_eth_stats statistics structure */
1965         stats->ipackets = total_qprc;
1966         stats->ibytes = total_qbrc;
1967         stats->opackets = hw_stats->gptc;
1968         stats->obytes = hw_stats->gotc;
1969         stats->imcasts = hw_stats->mprc;
1970
1971         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
1972                 stats->q_ipackets[i] = hw_stats->qprc[i];
1973                 stats->q_opackets[i] = hw_stats->qptc[i];
1974                 stats->q_ibytes[i] = hw_stats->qbrc[i];
1975                 stats->q_obytes[i] = hw_stats->qbtc[i];
1976                 stats->q_errors[i] = hw_stats->qprdc[i];
1977         }
1978
1979         /* Rx Errors */
1980         stats->ibadcrc  = hw_stats->crcerrs;
1981         stats->ibadlen  = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
1982         stats->imissed  = total_missed_rx;
1983         stats->ierrors  = stats->ibadcrc +
1984                           stats->ibadlen +
1985                           stats->imissed +
1986                           hw_stats->illerrc + hw_stats->errbc;
1987
1988         /* Tx Errors */
1989         stats->oerrors  = 0;
1990
1991         /* XON/XOFF pause frames */
1992         stats->tx_pause_xon  = hw_stats->lxontxc;
1993         stats->rx_pause_xon  = hw_stats->lxonrxc;
1994         stats->tx_pause_xoff = hw_stats->lxofftxc;
1995         stats->rx_pause_xoff = hw_stats->lxoffrxc;
1996
1997         /* Flow Director Stats registers */
1998         hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1999         hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
2000         stats->fdirmatch = hw_stats->fdirmatch;
2001         stats->fdirmiss = hw_stats->fdirmiss;
2002 }
2003
2004 static void
2005 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
2006 {
2007         struct ixgbe_hw_stats *stats =
2008                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2009
2010         /* HW registers are cleared on read */
2011         ixgbe_dev_stats_get(dev, NULL);
2012
2013         /* Reset software totals */
2014         memset(stats, 0, sizeof(*stats));
2015 }
2016
2017 static void
2018 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2019 {
2020         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2021         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2022                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2023
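             /*
              * VF statistics are read through the UPDATE_VF_STAT*() helpers,
              * which keep the last raw register value so the running software
              * totals continue to accumulate across hardware counter wrap
              * (the *_36BIT variant handles the split LSB/MSB octet counters).
              */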
2024         /* Good Rx packet, include VF loopback */
2025         UPDATE_VF_STAT(IXGBE_VFGPRC,
2026             hw_stats->last_vfgprc, hw_stats->vfgprc);
2027
2028         /* Good Rx octets, include VF loopback */
2029         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2030             hw_stats->last_vfgorc, hw_stats->vfgorc);
2031
2032         /* Good Tx packet, include VF loopback */
2033         UPDATE_VF_STAT(IXGBE_VFGPTC,
2034             hw_stats->last_vfgptc, hw_stats->vfgptc);
2035
2036         /* Good Tx octets, include VF loopback */
2037         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2038             hw_stats->last_vfgotc, hw_stats->vfgotc);
2039
2040         /* Rx Multicast packets */
2041         UPDATE_VF_STAT(IXGBE_VFMPRC,
2042             hw_stats->last_vfmprc, hw_stats->vfmprc);
2043
2044         if (stats == NULL)
2045                 return;
2046
2047         stats->ipackets = hw_stats->vfgprc;
2048         stats->ibytes = hw_stats->vfgorc;
2049         stats->opackets = hw_stats->vfgptc;
2050         stats->obytes = hw_stats->vfgotc;
2051         stats->imcasts = hw_stats->vfmprc;
2052 }
2053
2054 static void
2055 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
2056 {
2057         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
2058                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2059
2060         /* Sync HW register to the last stats */
2061         ixgbevf_dev_stats_get(dev, NULL);
2062
2063         /* reset current HW stats */
2064         hw_stats->vfgprc = 0;
2065         hw_stats->vfgorc = 0;
2066         hw_stats->vfgptc = 0;
2067         hw_stats->vfgotc = 0;
2068         hw_stats->vfmprc = 0;
2069
2070 }
2071
2072 static void
2073 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2074 {
2075         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2076
2077         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2078         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2079         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
2080         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
2081         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2082         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2083         dev_info->max_vfs = dev->pci_dev->max_vfs;
2084         if (hw->mac.type == ixgbe_mac_82598EB)
2085                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2086         else
2087                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2088         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2089         dev_info->rx_offload_capa =
2090                 DEV_RX_OFFLOAD_VLAN_STRIP |
2091                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2092                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2093                 DEV_RX_OFFLOAD_TCP_CKSUM;
2094
2095         /*
2096          * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2097          * mode.
2098          */
2099         if ((hw->mac.type == ixgbe_mac_82599EB ||
2100              hw->mac.type == ixgbe_mac_X540) &&
2101             !RTE_ETH_DEV_SRIOV(dev).active)
2102                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
2103
2104         dev_info->tx_offload_capa =
2105                 DEV_TX_OFFLOAD_VLAN_INSERT |
2106                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2107                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2108                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2109                 DEV_TX_OFFLOAD_SCTP_CKSUM  |
2110                 DEV_TX_OFFLOAD_TCP_TSO;
2111
2112         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2113                 .rx_thresh = {
2114                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2115                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2116                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2117                 },
2118                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2119                 .rx_drop_en = 0,
2120         };
2121
2122         dev_info->default_txconf = (struct rte_eth_txconf) {
2123                 .tx_thresh = {
2124                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2125                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2126                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2127                 },
2128                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2129                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2130                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2131                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2132         };
2133         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2134         dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2135         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
2136 }
2137
2138 static void
2139 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
2140                      struct rte_eth_dev_info *dev_info)
2141 {
2142         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2143
2144         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2145         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2146         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
2147         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
2148         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2149         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
2150         dev_info->max_vfs = dev->pci_dev->max_vfs;
2151         if (hw->mac.type == ixgbe_mac_82598EB)
2152                 dev_info->max_vmdq_pools = ETH_16_POOLS;
2153         else
2154                 dev_info->max_vmdq_pools = ETH_64_POOLS;
2155         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
2156                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2157                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
2158                                 DEV_RX_OFFLOAD_TCP_CKSUM;
2159         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2160                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
2161                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
2162                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
2163                                 DEV_TX_OFFLOAD_SCTP_CKSUM;
2164
2165         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2166                 .rx_thresh = {
2167                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
2168                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
2169                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
2170                 },
2171                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
2172                 .rx_drop_en = 0,
2173         };
2174
2175         dev_info->default_txconf = (struct rte_eth_txconf) {
2176                 .tx_thresh = {
2177                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
2178                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
2179                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
2180                 },
2181                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
2182                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
2183                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2184                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2185         };
2186 }
2187
2188 /* return 0 means link status changed, -1 means not changed */
2189 static int
2190 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2191 {
2192         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2193         struct rte_eth_link link, old;
2194         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
2195         int link_up;
2196         int diag;
2197
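             /*
              * Build the new link state from a link-down default and compare
              * it against the previously published state; the
              * rte_ixgbe_dev_atomic_*_link_status() helpers are used so the
              * link word is read and written atomically.
              */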
2198         link.link_status = 0;
2199         link.link_speed = 0;
2200         link.link_duplex = 0;
2201         memset(&old, 0, sizeof(old));
2202         rte_ixgbe_dev_atomic_read_link_status(dev, &old);
2203
2204         hw->mac.get_link_status = true;
2205
2206         /* do not wait to complete if not requested or if the LSC interrupt is enabled */
2207         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2208                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
2209         else
2210                 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
2211
2212         if (diag != 0) {
2213                 link.link_speed = ETH_LINK_SPEED_100;
2214                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2215                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2216                 if (link.link_status == old.link_status)
2217                         return -1;
2218                 return 0;
2219         }
2220
2221         if (link_up == 0) {
2222                 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2223                 if (link.link_status == old.link_status)
2224                         return -1;
2225                 return 0;
2226         }
2227         link.link_status = 1;
2228         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2229
2230         switch (link_speed) {
2231         default:
2232         case IXGBE_LINK_SPEED_UNKNOWN:
2233                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2234                 link.link_speed = ETH_LINK_SPEED_100;
2235                 break;
2236
2237         case IXGBE_LINK_SPEED_100_FULL:
2238                 link.link_speed = ETH_LINK_SPEED_100;
2239                 break;
2240
2241         case IXGBE_LINK_SPEED_1GB_FULL:
2242                 link.link_speed = ETH_LINK_SPEED_1000;
2243                 break;
2244
2245         case IXGBE_LINK_SPEED_10GB_FULL:
2246                 link.link_speed = ETH_LINK_SPEED_10000;
2247                 break;
2248         }
2249         rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2250
2251         if (link.link_status == old.link_status)
2252                 return -1;
2253
2254         return 0;
2255 }
2256
2257 static void
2258 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2259 {
2260         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2261         uint32_t fctrl;
2262
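             /*
              * UPE and MPE in FCTRL are the unicast and multicast promiscuous
              * enables; setting both makes the port accept all unicast and
              * multicast frames.
              */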
2263         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2264         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2265         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2266 }
2267
2268 static void
2269 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2270 {
2271         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2272         uint32_t fctrl;
2273
2274         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2275         fctrl &= (~IXGBE_FCTRL_UPE);
2276         if (dev->data->all_multicast == 1)
2277                 fctrl |= IXGBE_FCTRL_MPE;
2278         else
2279                 fctrl &= (~IXGBE_FCTRL_MPE);
2280         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2281 }
2282
2283 static void
2284 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2285 {
2286         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2287         uint32_t fctrl;
2288
2289         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2290         fctrl |= IXGBE_FCTRL_MPE;
2291         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2292 }
2293
2294 static void
2295 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2296 {
2297         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2298         uint32_t fctrl;
2299
2300         if (dev->data->promiscuous == 1)
2301                 return; /* must remain in all_multicast mode */
2302
2303         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2304         fctrl &= (~IXGBE_FCTRL_MPE);
2305         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2306 }
2307
2308 /**
2309  * It clears the interrupt causes and enables the interrupt.
2310  * It is called only once during NIC initialization.
2311  *
2312  * @param dev
2313  *  Pointer to struct rte_eth_dev.
2314  *
2315  * @return
2316  *  - On success, zero.
2317  *  - On failure, a negative value.
2318  */
2319 static int
2320 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
2321 {
2322         struct ixgbe_interrupt *intr =
2323                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2324
2325         ixgbe_dev_link_status_print(dev);
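             /*
              * Adding IXGBE_EICR_LSC to the software mask arms the Link Status
              * Change cause; the mask is presumably applied to EIMS when
              * ixgbe_enable_intr() re-enables interrupts in ixgbe_dev_start().
              */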
2326         intr->mask |= IXGBE_EICR_LSC;
2327
2328         return 0;
2329 }
2330
2331 /*
2332  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
2333  *
2334  * @param dev
2335  *  Pointer to struct rte_eth_dev.
2336  *
2337  * @return
2338  *  - On success, zero.
2339  *  - On failure, a negative value.
2340  */
2341 static int
2342 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2343 {
2344         uint32_t eicr;
2345         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2346         struct ixgbe_interrupt *intr =
2347                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2348
2349         /* clear all cause mask */
2350         ixgbe_disable_intr(hw);
2351
2352         /* read-on-clear nic registers here */
2353         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2354         PMD_DRV_LOG(INFO, "eicr %x", eicr);
2355
2356         intr->flags = 0;
2357         if (eicr & IXGBE_EICR_LSC) {
2358                 /* set flag for async link update */
2359                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2360         }
2361
2362         if (eicr & IXGBE_EICR_MAILBOX)
2363                 intr->flags |= IXGBE_FLAG_MAILBOX;
2364
2365         return 0;
2366 }
2367
2368 /**
2369  * It gets and then prints the link status.
2370  *
2371  * @param dev
2372  *  Pointer to struct rte_eth_dev.
2373  *
2374  * @return
2375  *  void
2377  */
2378 static void
2379 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
2380 {
2381         struct rte_eth_link link;
2382
2383         memset(&link, 0, sizeof(link));
2384         rte_ixgbe_dev_atomic_read_link_status(dev, &link);
2385         if (link.link_status) {
2386                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2387                                         (int)(dev->data->port_id),
2388                                         (unsigned)link.link_speed,
2389                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2390                                         "full-duplex" : "half-duplex");
2391         } else {
2392                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2393                                 (int)(dev->data->port_id));
2394         }
2395         PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
2396                                 dev->pci_dev->addr.domain,
2397                                 dev->pci_dev->addr.bus,
2398                                 dev->pci_dev->addr.devid,
2399                                 dev->pci_dev->addr.function);
2400 }
2401
2402 /*
2403  * It executes link_update after knowing an interrupt occurred.
2404  *
2405  * @param dev
2406  *  Pointer to struct rte_eth_dev.
2407  *
2408  * @return
2409  *  - On success, zero.
2410  *  - On failure, a negative value.
2411  */
2412 static int
2413 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
2414 {
2415         struct ixgbe_interrupt *intr =
2416                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2417         int64_t timeout;
2418         struct rte_eth_link link;
2419         int intr_enable_delay = false;
2420
2421         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2422
2423         if (intr->flags & IXGBE_FLAG_MAILBOX) {
2424                 ixgbe_pf_mbx_process(dev);
2425                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
2426         }
2427
2428         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
2429                 /* read the current link status first, to predict the new state below */
2430                 memset(&link, 0, sizeof(link));
2431                 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
2432
2433                 ixgbe_dev_link_update(dev, 0);
2434
2435                 /* link is likely coming up */
2436                 if (!link.link_status)
2437                         /* handle it 1 second later, to wait for it to stabilize */
2438                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
2439                 /* link is likely going down */
2440                 else
2441                         /* handle it 4 seconds later, to wait for it to stabilize */
2442                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
2443
2444                 ixgbe_dev_link_status_print(dev);
2445
2446                 intr_enable_delay = true;
2447         }
2448
2449         if (intr_enable_delay) {
2450                 if (rte_eal_alarm_set(timeout * 1000,
2451                                       ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
2452                         PMD_DRV_LOG(ERR, "Error setting alarm");
2453         } else {
2454                 PMD_DRV_LOG(DEBUG, "enable intr immediately");
2455                 ixgbe_enable_intr(dev);
2456                 rte_intr_enable(&(dev->pci_dev->intr_handle));
2457         }
2458
2460         return 0;
2461 }
2462
2463 /**
2464  * Interrupt handler registered as an alarm callback for delayed handling of
2465  * a specific interrupt, to wait for a stable NIC state. Since the ixgbe
2466  * interrupt state is not stable right after the link goes down, the handler
2467  * waits 4 seconds before reading the stable status.
2468  *
2469  * @param handle
2470  *  Pointer to interrupt handle.
2471  * @param param
2472  *  The address of parameter (struct rte_eth_dev *) registered before.
2473  *
2474  * @return
2475  *  void
2476  */
2477 static void
2478 ixgbe_dev_interrupt_delayed_handler(void *param)
2479 {
2480         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2481         struct ixgbe_interrupt *intr =
2482                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2483         struct ixgbe_hw *hw =
2484                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2485         uint32_t eicr;
2486
2487         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2488         if (eicr & IXGBE_EICR_MAILBOX)
2489                 ixgbe_pf_mbx_process(dev);
2490
2491         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
2492                 ixgbe_dev_link_update(dev, 0);
2493                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
2494                 ixgbe_dev_link_status_print(dev);
2495                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
2496         }
2497
2498         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2499         ixgbe_enable_intr(dev);
2500         rte_intr_enable(&(dev->pci_dev->intr_handle));
2501 }
2502
2503 /**
2504  * Interrupt handler triggered by the NIC for handling
2505  * a specific interrupt.
2506  *
2507  * @param handle
2508  *  Pointer to interrupt handle.
2509  * @param param
2510  *  The address of parameter (struct rte_eth_dev *) registered before.
2511  *
2512  * @return
2513  *  void
2514  */
2515 static void
2516 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2517                                                         void *param)
2518 {
2519         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2520         ixgbe_dev_interrupt_get_status(dev);
2521         ixgbe_dev_interrupt_action(dev);
2522 }
2523
2524 static int
2525 ixgbe_dev_led_on(struct rte_eth_dev *dev)
2526 {
2527         struct ixgbe_hw *hw;
2528
2529         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2530         return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
2531 }
2532
2533 static int
2534 ixgbe_dev_led_off(struct rte_eth_dev *dev)
2535 {
2536         struct ixgbe_hw *hw;
2537
2538         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2539         return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
2540 }
2541
2542 static int
2543 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2544 {
2545         struct ixgbe_hw *hw;
2546         uint32_t mflcn_reg;
2547         uint32_t fccfg_reg;
2548         int rx_pause;
2549         int tx_pause;
2550
2551         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2552
2553         fc_conf->pause_time = hw->fc.pause_time;
2554         fc_conf->high_water = hw->fc.high_water[0];
2555         fc_conf->low_water = hw->fc.low_water[0];
2556         fc_conf->send_xon = hw->fc.send_xon;
2557         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2558
2559         /*
2560          * Return rx_pause status according to actual setting of
2561          * MFLCN register.
2562          */
2563         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2564         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
2565                 rx_pause = 1;
2566         else
2567                 rx_pause = 0;
2568
2569         /*
2570          * Return tx_pause status according to actual setting of
2571          * FCCFG register.
2572          */
2573         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2574         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
2575                 tx_pause = 1;
2576         else
2577                 tx_pause = 0;
2578
2579         if (rx_pause && tx_pause)
2580                 fc_conf->mode = RTE_FC_FULL;
2581         else if (rx_pause)
2582                 fc_conf->mode = RTE_FC_RX_PAUSE;
2583         else if (tx_pause)
2584                 fc_conf->mode = RTE_FC_TX_PAUSE;
2585         else
2586                 fc_conf->mode = RTE_FC_NONE;
2587
2588         return 0;
2589 }
2590
2591 static int
2592 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2593 {
2594         struct ixgbe_hw *hw;
2595         int err;
2596         uint32_t rx_buf_size;
2597         uint32_t max_high_water;
2598         uint32_t mflcn;
2599         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
2600                 ixgbe_fc_none,
2601                 ixgbe_fc_rx_pause,
2602                 ixgbe_fc_tx_pause,
2603                 ixgbe_fc_full
2604         };
2605
2606         PMD_INIT_FUNC_TRACE();
2607
2608         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2609         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
2610         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2611
2612         /*
2613          * Reserve at least one Ethernet frame for the watermark;
2614          * high_water/low_water are in kilobytes for ixgbe.
2615          */
2616         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
2617         if ((fc_conf->high_water > max_high_water) ||
2618                 (fc_conf->high_water < fc_conf->low_water)) {
2619                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2620                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2621                 return -EINVAL;
2622         }
2623
2624         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
2625         hw->fc.pause_time     = fc_conf->pause_time;
2626         hw->fc.high_water[0]  = fc_conf->high_water;
2627         hw->fc.low_water[0]   = fc_conf->low_water;
2628         hw->fc.send_xon       = fc_conf->send_xon;
2629         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2630
2631         err = ixgbe_fc_enable(hw);
2632
2633         /* Not negotiated is not an error case */
2634         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
2635
2636                 /* check if we want to forward MAC control frames; the driver has no
2637                  * native capability to do that, so write the registers directly */
2638
2639                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2640
2641                 /* set or clear MFLCN.PMCF bit depending on configuration */
2642                 if (fc_conf->mac_ctrl_frame_fwd != 0)
2643                         mflcn |= IXGBE_MFLCN_PMCF;
2644                 else
2645                         mflcn &= ~IXGBE_MFLCN_PMCF;
2646
2647                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
2648                 IXGBE_WRITE_FLUSH(hw);
2649
2650                 return 0;
2651         }
2652
2653         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
2654         return -EIO;
2655 }
2656
2657 /**
2658  *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
2659  *  @hw: pointer to hardware structure
2660  *  @tc_num: traffic class number
2661  *  Enable flow control according to the current settings.
2662  */
2663 static int
2664 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
2665 {
2666         int ret_val = 0;
2667         uint32_t mflcn_reg, fccfg_reg;
2668         uint32_t reg;
2669         uint32_t fcrtl, fcrth;
2670         uint8_t i;
2671         uint8_t nb_rx_en;
2672
2673         /* Validate the water mark configuration */
2674         if (!hw->fc.pause_time) {
2675                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2676                 goto out;
2677         }
2678
2679         /* Low water mark of zero causes XOFF floods */
2680         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
2681                 /* High/Low water cannot be 0 */
2682                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
2683                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
2684                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2685                         goto out;
2686                 }
2687
2688                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
2689                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
2690                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2691                         goto out;
2692                 }
2693         }
2694         /* Negotiate the fc mode to use */
2695         ixgbe_fc_autoneg(hw);
2696
2697         /* Disable any previous flow control settings */
2698         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2699         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
2700
2701         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2702         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2703
2704         switch (hw->fc.current_mode) {
2705         case ixgbe_fc_none:
2706                 /*
2707                  * If more than one RX Priority Flow Control is enabled,
2708                  * TX pause cannot be disabled.
2709                  */
2710                 nb_rx_en = 0;
2711                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2712                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
2713                         if (reg & IXGBE_FCRTH_FCEN)
2714                                 nb_rx_en++;
2715                 }
2716                 if (nb_rx_en > 1)
2717                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2718                 break;
2719         case ixgbe_fc_rx_pause:
2720                 /*
2721                  * Rx Flow control is enabled and Tx Flow control is
2722                  * disabled by software override. Since there really
2723                  * isn't a way to advertise that we are capable of RX
2724                  * Pause ONLY, we will advertise that we support both
2725                  * symmetric and asymmetric Rx PAUSE.  Later, we will
2726                  * disable the adapter's ability to send PAUSE frames.
2727                  */
2728                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
2729                 /*
2730                  * If more than one RX Priority Flow Control is enabled,
2731                  * TX pause cannot be disabled.
2732                  */
2733                 nb_rx_en = 0;
2734                 for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2735                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
2736                         if (reg & IXGBE_FCRTH_FCEN)
2737                                 nb_rx_en++;
2738                 }
2739                 if (nb_rx_en > 1)
2740                         fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
2741                 break;
2742         case ixgbe_fc_tx_pause:
2743                 /*
2744                  * Tx Flow control is enabled, and Rx Flow control is
2745                  * disabled by software override.
2746                  */
2747                 fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
2748                 break;
2749         case ixgbe_fc_full:
2750                 /* Flow control (both Rx and Tx) is enabled by SW override. */
2751                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
2752                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
2753                 break;
2754         default:
2755                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
2756                 ret_val = IXGBE_ERR_CONFIG;
2757                 goto out;
2758                 break;
2759         }
2760
2761         /* Set 802.3x based flow control settings. */
2762         mflcn_reg |= IXGBE_MFLCN_DPF;
2763         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2764         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2765
2766         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
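        /*
         * Note: high_water/low_water are expressed in units of 1 KB; the
         * shift by 10 below converts them to the byte values expected by
         * the FCRTL/FCRTH registers.
         */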
2767         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2768                 hw->fc.high_water[tc_num]) {
2769                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
2770                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
2771                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
2772         } else {
2773                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
2774                 /*
2775                  * In order to prevent Tx hangs when the internal Tx
2776                  * switch is enabled we must set the high water mark
2777                  * to the maximum FCRTH value.  This allows the Tx
2778                  * switch to function even under heavy Rx workloads.
2779                  */
2780                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
2781         }
2782         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
2783
2784         /* Configure pause time (2 TCs per register) */
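        /*
         * Multiplying by 0x00010001 replicates the 16-bit pause time into
         * both halves of each 32-bit FCTTV register, e.g. a pause time of
         * 0x1234 is programmed as 0x12341234 (one value per traffic class).
         */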
2785         reg = hw->fc.pause_time * 0x00010001;
2786         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2787                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2788
2789         /* Configure flow control refresh threshold value */
2790         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2791
2792 out:
2793         return ret_val;
2794 }
2795
2796 static int
2797 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
2798 {
2799         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2800         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
2801
2802         if (hw->mac.type != ixgbe_mac_82598EB) {
2803                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
2804         }
2805         return ret_val;
2806 }
2807
2808 static int
2809 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
2810 {
2811         int err;
2812         uint32_t rx_buf_size;
2813         uint32_t max_high_water;
2814         uint8_t tc_num;
2815         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
2816         struct ixgbe_hw *hw =
2817                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2818         struct ixgbe_dcb_config *dcb_config =
2819                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
2820
2821         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
2822                 ixgbe_fc_none,
2823                 ixgbe_fc_rx_pause,
2824                 ixgbe_fc_tx_pause,
2825                 ixgbe_fc_full
2826         };
2827
2828         PMD_INIT_FUNC_TRACE();
2829
2830         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
2831         tc_num = map[pfc_conf->priority];
2832         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
2833         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2834         /*
2835          * high_water/low_water are expressed in kilobytes; reserve at least
2836          * one maximum-size Ethernet frame of headroom in the Rx packet buffer.
2837          */
2838         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
2839         if ((pfc_conf->fc.high_water > max_high_water) ||
2840             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
2841                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2842                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
2843                 return (-EINVAL);
2844         }
2845
2846         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
2847         hw->fc.pause_time = pfc_conf->fc.pause_time;
2848         hw->fc.send_xon = pfc_conf->fc.send_xon;
2849         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
2850         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
2851
2852         err = ixgbe_dcb_pfc_enable(dev,tc_num);
2853
2854         /* Not negotiated is not an error case */
2855         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
2856                 return 0;
2857
2858         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
2859         return -EIO;
2860 }
2861
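/*
 * Update the RSS redirection table (RETA). The 128-entry table is packed
 * four 8-bit entries per 32-bit IXGBE_RETA register; reta_conf[].mask
 * selects which entries are actually rewritten.
 *
 * Illustrative application-side sketch (not part of the driver): spreading
 * the whole table over two Rx queues could look roughly like
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2];
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % 2;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, ETH_RSS_RETA_SIZE_128);
 */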
2862 static int
2863 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
2864                           struct rte_eth_rss_reta_entry64 *reta_conf,
2865                           uint16_t reta_size)
2866 {
2867         uint8_t i, j, mask;
2868         uint32_t reta, r;
2869         uint16_t idx, shift;
2870         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2871
2872         PMD_INIT_FUNC_TRACE();
2873         if (reta_size != ETH_RSS_RETA_SIZE_128) {
2874                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2875                         "(%d) doesn't match the number supported by hardware "
2876                         "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2877                 return -EINVAL;
2878         }
2879
2880         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
2881                 idx = i / RTE_RETA_GROUP_SIZE;
2882                 shift = i % RTE_RETA_GROUP_SIZE;
2883                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2884                                                 IXGBE_4_BIT_MASK);
2885                 if (!mask)
2886                         continue;
2887                 if (mask == IXGBE_4_BIT_MASK)
2888                         r = 0;
2889                 else
2890                         r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
2891                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
2892                         if (mask & (0x1 << j))
2893                                 reta |= reta_conf[idx].reta[shift + j] <<
2894                                                         (CHAR_BIT * j);
2895                         else
2896                                 reta |= r & (IXGBE_8_BIT_MASK <<
2897                                                 (CHAR_BIT * j));
2898                 }
2899                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2900         }
2901
2902         return 0;
2903 }
2904
2905 static int
2906 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
2907                          struct rte_eth_rss_reta_entry64 *reta_conf,
2908                          uint16_t reta_size)
2909 {
2910         uint8_t i, j, mask;
2911         uint32_t reta;
2912         uint16_t idx, shift;
2913         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2914
2915         PMD_INIT_FUNC_TRACE();
2916         if (reta_size != ETH_RSS_RETA_SIZE_128) {
2917                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2918                         "(%d) doesn't match the number supported by hardware "
2919                         "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2920                 return -EINVAL;
2921         }
2922
2923         for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
2924                 idx = i / RTE_RETA_GROUP_SIZE;
2925                 shift = i % RTE_RETA_GROUP_SIZE;
2926                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2927                                                 IXGBE_4_BIT_MASK);
2928                 if (!mask)
2929                         continue;
2930
2931                 reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
2932                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
2933                         if (mask & (0x1 << j))
2934                                 reta_conf[idx].reta[shift + j] =
2935                                         ((reta >> (CHAR_BIT * j)) &
2936                                                 IXGBE_8_BIT_MASK);
2937                 }
2938         }
2939
2940         return 0;
2941 }
2942
2943 static void
2944 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2945                                 uint32_t index, uint32_t pool)
2946 {
2947         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2948         uint32_t enable_addr = 1;
2949
2950         ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
2951 }
2952
2953 static void
2954 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
2955 {
2956         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2957
2958         ixgbe_clear_rar(hw, index);
2959 }
2960
2961 static void
2962 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
2963 {
2964         ixgbe_remove_rar(dev, 0);
2965
2966         ixgbe_add_rar(dev, addr, 0, 0);
2967 }
2968
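/*
 * Set a new MTU on the PF port. The wire frame size is mtu + Ethernet
 * header + CRC, so for example an MTU of 1500 gives a 1518-byte frame
 * (standard) while an MTU of 9000 gives 9018 bytes, which requires jumbo
 * mode and an updated MAXFRS register.
 */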
2969 static int
2970 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2971 {
2972         uint32_t hlreg0;
2973         uint32_t maxfrs;
2974         struct ixgbe_hw *hw;
2975         struct rte_eth_dev_info dev_info;
2976         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2977
2978         ixgbe_dev_info_get(dev, &dev_info);
2979
2980         /* check that mtu is within the allowed range */
2981         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
2982                 return -EINVAL;
2983
2984         /* refuse an mtu that requires scattered Rx support when that
2985          * feature has not been enabled before. */
2986         if (!dev->data->scattered_rx &&
2987             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
2988              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
2989                 return -EINVAL;
2990
2991         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2992         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2993
2994         /* switch to jumbo mode if needed */
2995         if (frame_size > ETHER_MAX_LEN) {
2996                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
2997                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2998         } else {
2999                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3000                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
3001         }
3002         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3003
3004         /* update max frame size */
3005         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3006
3007         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
3008         maxfrs &= 0x0000FFFF;
3009         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
3010         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
3011
3012         return 0;
3013 }
3014
3015 /*
3016  * Virtual Function operations
3017  */
3018 static void
3019 ixgbevf_intr_disable(struct ixgbe_hw *hw)
3020 {
3021         PMD_INIT_FUNC_TRACE();
3022
3023         /* Clear interrupt mask to stop from interrupts being generated */
3024         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
3025
3026         IXGBE_WRITE_FLUSH(hw);
3027 }
3028
3029 static int
3030 ixgbevf_dev_configure(struct rte_eth_dev *dev)
3031 {
3032         struct rte_eth_conf* conf = &dev->data->dev_conf;
3033         struct ixgbe_adapter *adapter =
3034                         (struct ixgbe_adapter *)dev->data->dev_private;
3035
3036         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3037                      dev->data->port_id);
3038
3039         /*
3040          * The VF has no ability to enable/disable HW CRC stripping;
3041          * keep the behavior consistent with the host PF.
3042          */
3043 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
3044         if (!conf->rxmode.hw_strip_crc) {
3045                 PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
3046                 conf->rxmode.hw_strip_crc = 1;
3047         }
3048 #else
3049         if (conf->rxmode.hw_strip_crc) {
3050                 PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
3051                 conf->rxmode.hw_strip_crc = 0;
3052         }
3053 #endif
3054
3055         /*
3056          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
3057          * allocation or vector Rx preconditions, we will reset it.
3058          */
3059         adapter->rx_bulk_alloc_allowed = true;
3060         adapter->rx_vec_allowed = true;
3061
3062         return 0;
3063 }
3064
3065 static int
3066 ixgbevf_dev_start(struct rte_eth_dev *dev)
3067 {
3068         struct ixgbe_hw *hw =
3069                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3070         int err, mask = 0;
3071
3072         PMD_INIT_FUNC_TRACE();
3073
3074         hw->mac.ops.reset_hw(hw);
3075         hw->mac.get_link_status = true;
3076
3077         /* negotiate mailbox API version to use with the PF. */
3078         ixgbevf_negotiate_api(hw);
3079
3080         ixgbevf_dev_tx_init(dev);
3081
3082         /* This can fail when allocating mbufs for descriptor rings */
3083         err = ixgbevf_dev_rx_init(dev);
3084         if (err) {
3085                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
3086                 ixgbe_dev_clear_queues(dev);
3087                 return err;
3088         }
3089
3090         /* Set vfta */
3091         ixgbevf_set_vfta_all(dev,1);
3092
3093         /* Set HW strip */
3094         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
3095                 ETH_VLAN_EXTEND_MASK;
3096         ixgbevf_vlan_offload_set(dev, mask);
3097
3098         ixgbevf_dev_rxtx_start(dev);
3099
3100         return 0;
3101 }
3102
3103 static void
3104 ixgbevf_dev_stop(struct rte_eth_dev *dev)
3105 {
3106         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3107
3108         PMD_INIT_FUNC_TRACE();
3109
3110         hw->adapter_stopped = TRUE;
3111         ixgbe_stop_adapter(hw);
3112
3113         /*
3114          * Clear what we set, but keep shadow_vfta so it can be
3115          * restored when the device is started again.
3116          */
3117         ixgbevf_set_vfta_all(dev,0);
3118
3119         /* Clear stored conf */
3120         dev->data->scattered_rx = 0;
3121
3122         ixgbe_dev_clear_queues(dev);
3123 }
3124
3125 static void
3126 ixgbevf_dev_close(struct rte_eth_dev *dev)
3127 {
3128         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3129
3130         PMD_INIT_FUNC_TRACE();
3131
3132         ixgbe_reset_hw(hw);
3133
3134         ixgbevf_dev_stop(dev);
3135
3136         /* reprogram the RAR[0] in case user changed it. */
3137         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3138 }
3139
3140 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3141 {
3142         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3143         struct ixgbe_vfta * shadow_vfta =
3144                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3145         int i = 0, j = 0, vfta = 0, mask = 1;
3146
3147         for (i = 0; i < IXGBE_VFTA_SIZE; i++){
3148                 vfta = shadow_vfta->vfta[i];
3149                 if(vfta){
3150                         mask = 1;
3151                         for (j = 0; j < 32; j++){
3152                                 if(vfta & mask)
3153                                         ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
3154                                 mask<<=1;
3155                         }
3156                 }
3157         }
3158
3159 }
3160
3161 static int
3162 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3163 {
3164         struct ixgbe_hw *hw =
3165                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3166         struct ixgbe_vfta * shadow_vfta =
3167                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3168         uint32_t vid_idx = 0;
3169         uint32_t vid_bit = 0;
3170         int ret = 0;
3171
3172         PMD_INIT_FUNC_TRACE();
3173
3174         /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
3175         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
3176         if(ret){
3177                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3178                 return ret;
3179         }
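        /*
         * The shadow VFTA is a 128 x 32-bit bitmap: bits [11:5] of the VLAN
         * id select the word, bits [4:0] the bit within it. For example,
         * VLAN 100 maps to word 3, bit 4.
         */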
3180         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3181         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3182
3183         /* Save what we set and restore it after device reset */
3184         if (on)
3185                 shadow_vfta->vfta[vid_idx] |= vid_bit;
3186         else
3187                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3188
3189         return 0;
3190 }
3191
3192 static void
3193 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
3194 {
3195         struct ixgbe_hw *hw =
3196                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3197         uint32_t ctrl;
3198
3199         PMD_INIT_FUNC_TRACE();
3200
3201         if(queue >= hw->mac.max_rx_queues)
3202                 return;
3203
3204         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
3205         if(on)
3206                 ctrl |= IXGBE_RXDCTL_VME;
3207         else
3208                 ctrl &= ~IXGBE_RXDCTL_VME;
3209         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
3210
3211         ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
3212 }
3213
3214 static void
3215 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3216 {
3217         struct ixgbe_hw *hw =
3218                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3219         uint16_t i;
3220         int on = 0;
3221
3222         /* The VF only supports HW VLAN stripping; other offloads are not supported */
3223         if(mask & ETH_VLAN_STRIP_MASK){
3224                 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
3225
3226                 for(i=0; i < hw->mac.max_rx_queues; i++)
3227                         ixgbevf_vlan_strip_queue_set(dev,i,on);
3228         }
3229 }
3230
3231 static int
3232 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
3233 {
3234         uint32_t reg_val;
3235
3236         /* we only need to do this if VMDq is enabled */
3237         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3238         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
3239                 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
3240                 return (-1);
3241         }
3242
3243         return 0;
3244 }
3245
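/*
 * Hash a unicast MAC address into a 12-bit UTA vector. Which address bits
 * feed the hash depends on hw->mac.mc_filter_type (see the cases below).
 * The caller splits the vector into a register index (upper 7 bits, one of
 * 128 IXGBE_UTA registers) and a bit position (lower 5 bits).
 */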
3246 static uint32_t
3247 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
3248 {
3249         uint32_t vector = 0;
3250         switch (hw->mac.mc_filter_type) {
3251         case 0:   /* use bits [47:36] of the address */
3252                 vector = ((uc_addr->addr_bytes[4] >> 4) |
3253                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
3254                 break;
3255         case 1:   /* use bits [46:35] of the address */
3256                 vector = ((uc_addr->addr_bytes[4] >> 3) |
3257                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
3258                 break;
3259         case 2:   /* use bits [45:34] of the address */
3260                 vector = ((uc_addr->addr_bytes[4] >> 2) |
3261                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
3262                 break;
3263         case 3:   /* use bits [43:32] of the address */
3264                 vector = ((uc_addr->addr_bytes[4]) |
3265                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
3266                 break;
3267         default:  /* Invalid mc_filter_type */
3268                 break;
3269         }
3270
3271         /* vector is limited to 12 bits so the table boundary is not exceeded */
3272         vector &= 0xFFF;
3273         return vector;
3274 }
3275
3276 static int
3277 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
3278                                uint8_t on)
3279 {
3280         uint32_t vector;
3281         uint32_t uta_idx;
3282         uint32_t reg_val;
3283         uint32_t uta_shift;
3284         uint32_t rc;
3285         const uint32_t ixgbe_uta_idx_mask = 0x7F;
3286         const uint32_t ixgbe_uta_bit_shift = 5;
3287         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
3288         const uint32_t bit1 = 0x1;
3289
3290         struct ixgbe_hw *hw =
3291                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3292         struct ixgbe_uta_info *uta_info =
3293                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
3294
3295         /* The UTA table only exists on 82599 hardware and newer */
3296         if (hw->mac.type < ixgbe_mac_82599EB)
3297                 return (-ENOTSUP);
3298
3299         vector = ixgbe_uta_vector(hw,mac_addr);
3300         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
3301         uta_shift = vector & ixgbe_uta_bit_mask;
3302
3303         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
3304         if(rc == on)
3305                 return 0;
3306
3307         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
3308         if (on) {
3309                 uta_info->uta_in_use++;
3310                 reg_val |= (bit1 << uta_shift);
3311                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
3312         } else {
3313                 uta_info->uta_in_use--;
3314                 reg_val &= ~(bit1 << uta_shift);
3315                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
3316         }
3317
3318         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
3319
3320         if (uta_info->uta_in_use > 0)
3321                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
3322                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
3323         else
3324                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
3325
3326         return 0;
3327 }
3328
3329 static int
3330 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3331 {
3332         int i;
3333         struct ixgbe_hw *hw =
3334                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3335         struct ixgbe_uta_info *uta_info =
3336                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
3337
3338         /* The UTA table only exists on 82599 hardware and newer */
3339         if (hw->mac.type < ixgbe_mac_82599EB)
3340                 return (-ENOTSUP);
3341
3342         if(on) {
3343                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3344                         uta_info->uta_shadow[i] = ~0;
3345                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
3346                 }
3347         } else {
3348                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3349                         uta_info->uta_shadow[i] = 0;
3350                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3351                 }
3352         }
3353         return 0;
3354
3355 }
3356
3357 uint32_t
3358 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3359 {
3360         uint32_t new_val = orig_val;
3361
3362         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3363                 new_val |= IXGBE_VMOLR_AUPE;
3364         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3365                 new_val |= IXGBE_VMOLR_ROMPE;
3366         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3367                 new_val |= IXGBE_VMOLR_ROPE;
3368         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3369                 new_val |= IXGBE_VMOLR_BAM;
3370         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3371                 new_val |= IXGBE_VMOLR_MPE;
3372
3373         return new_val;
3374 }
3375
3376 static int
3377 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
3378                                uint16_t rx_mask, uint8_t on)
3379 {
3380         int val = 0;
3381
3382         struct ixgbe_hw *hw =
3383                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3384         uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
3385
3386         if (hw->mac.type == ixgbe_mac_82598EB) {
3387                 PMD_INIT_LOG(ERR, "setting VF receive mode should be done"
3388                              " on 82599 hardware and newer");
3389                 return (-ENOTSUP);
3390         }
3391         if (ixgbe_vmdq_mode_check(hw) < 0)
3392                 return (-ENOTSUP);
3393
3394         val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
3395
3396         if (on)
3397                 vmolr |= val;
3398         else
3399                 vmolr &= ~val;
3400
3401         IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
3402
3403         return 0;
3404 }
3405
3406 static int
3407 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
3408 {
3409         uint32_t reg,addr;
3410         uint32_t val;
3411         const uint8_t bit1 = 0x1;
3412
3413         struct ixgbe_hw *hw =
3414                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3415
3416         if (ixgbe_vmdq_mode_check(hw) < 0)
3417                 return (-ENOTSUP);
3418
3419         addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
3420         reg = IXGBE_READ_REG(hw, addr);
3421         val = bit1 << pool;
3422
3423         if (on)
3424                 reg |= val;
3425         else
3426                 reg &= ~val;
3427
3428         IXGBE_WRITE_REG(hw, addr,reg);
3429
3430         return 0;
3431 }
3432
3433 static int
3434 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
3435 {
3436         uint32_t reg,addr;
3437         uint32_t val;
3438         const uint8_t bit1 = 0x1;
3439
3440         struct ixgbe_hw *hw =
3441                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3442
3443         if (ixgbe_vmdq_mode_check(hw) < 0)
3444                 return (-ENOTSUP);
3445
3446         addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
3447         reg = IXGBE_READ_REG(hw, addr);
3448         val = bit1 << pool;
3449
3450         if (on)
3451                 reg |= val;
3452         else
3453                 reg &= ~val;
3454
3455         IXGBE_WRITE_REG(hw, addr,reg);
3456
3457         return 0;
3458 }
3459
3460 static int
3461 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
3462                         uint64_t pool_mask, uint8_t vlan_on)
3463 {
3464         int ret = 0;
3465         uint16_t pool_idx;
3466         struct ixgbe_hw *hw =
3467                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3468
3469         if (ixgbe_vmdq_mode_check(hw) < 0)
3470                 return (-ENOTSUP);
3471         for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
3472                 if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
3473                         ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
3474                         if (ret < 0)
3475                                 return ret;
                        }
3476         }
3477
3478         return ret;
3479 }
3480
3481 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
3482 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
3483 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
3484 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
3485 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
3486         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
3487         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
3488
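/*
 * Program one Rx mirroring rule. The 64-bit pool and VLAN masks are split
 * into two 32-bit halves and written to paired IXGBE_VMRVM/IXGBE_VMRVLAN
 * registers (rule_id and rule_id + 4); IXGBE_MRCTL holds the rule type and
 * destination pool.
 */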
3489 static int
3490 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
3491                         struct rte_eth_mirror_conf *mirror_conf,
3492                         uint8_t rule_id, uint8_t on)
3493 {
3494         uint32_t mr_ctl,vlvf;
3495         uint32_t mp_lsb = 0;
3496         uint32_t mv_msb = 0;
3497         uint32_t mv_lsb = 0;
3498         uint32_t mp_msb = 0;
3499         uint8_t i = 0;
3500         int reg_index = 0;
3501         uint64_t vlan_mask = 0;
3502
3503         const uint8_t pool_mask_offset = 32;
3504         const uint8_t vlan_mask_offset = 32;
3505         const uint8_t dst_pool_offset = 8;
3506         const uint8_t rule_mr_offset  = 4;
3507         const uint8_t mirror_rule_mask= 0x0F;
3508
3509         struct ixgbe_mirror_info *mr_info =
3510                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
3511         struct ixgbe_hw *hw =
3512                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3513         uint8_t mirror_type = 0;
3514
3515         if (ixgbe_vmdq_mode_check(hw) < 0)
3516                 return -ENOTSUP;
3517
3518         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
3519                 return -EINVAL;
3520
3521         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
3522                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
3523                         mirror_conf->rule_type);
3524                 return -EINVAL;
3525         }
3526
3527         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
3528                 mirror_type |= IXGBE_MRCTL_VLME;
3529                 /* Check if the vlan id is valid and find the corresponding VLAN ID index in VLVF */
3530                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
3531                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
3532                                 /* search for the pool vlan filter index related to this vlan id */
3533                                 reg_index = ixgbe_find_vlvf_slot(hw,
3534                                                 mirror_conf->vlan.vlan_id[i]);
3535                                 if(reg_index < 0)
3536                                         return -EINVAL;
3537                                 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
3538                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
3539                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
3540                                       mirror_conf->vlan.vlan_id[i]))
3541                                         vlan_mask |= (1ULL << reg_index);
3542                                 else
3543                                         return -EINVAL;
3544                         }
3545                 }
3546
3547                 if (on) {
3548                         mv_lsb = vlan_mask & 0xFFFFFFFF;
3549                         mv_msb = vlan_mask >> vlan_mask_offset;
3550
3551                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
3552                                                 mirror_conf->vlan.vlan_mask;
3553                         for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
3554                                 if(mirror_conf->vlan.vlan_mask & (1ULL << i))
3555                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
3556                                                 mirror_conf->vlan.vlan_id[i];
3557                         }
3558                 } else {
3559                         mv_lsb = 0;
3560                         mv_msb = 0;
3561                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
3562                         for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
3563                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
3564                 }
3565         }
3566
3567         /*
3568          * If pool mirroring is enabled, write the related pool mask registers;
3569          * if it is disabled, clear the PFMRVM registers.
3570          */
3571         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
3572                 mirror_type |= IXGBE_MRCTL_VPME;
3573                 if (on) {
3574                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
3575                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
3576                         mr_info->mr_conf[rule_id].pool_mask =
3577                                         mirror_conf->pool_mask;
3578
3579                 } else {
3580                         mp_lsb = 0;
3581                         mp_msb = 0;
3582                         mr_info->mr_conf[rule_id].pool_mask = 0;
3583                 }
3584         }
3585         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
3586                 mirror_type |= IXGBE_MRCTL_UPME;
3587         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
3588                 mirror_type |= IXGBE_MRCTL_DPME;
3589
3590         /* read the mirror control register and recalculate it */
3591         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
3592
3593         if (on) {
3594                 mr_ctl |= mirror_type;
3595                 mr_ctl &= mirror_rule_mask;
3596                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
3597         } else
3598                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
3599
3600         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
3601         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
3602
3603         /* write mirror control register */
3604         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
3605
3606         /* write pool mirror control register */
3607         if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
3608                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
3609                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
3610                                 mp_msb);
3611         }
3612         /* write VLAN mirror control register */
3613         if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
3614                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
3615                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
3616                                 mv_msb);
3617         }
3618
3619         return 0;
3620 }
3621
3622 static int
3623 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
3624 {
3625         int mr_ctl = 0;
3626         uint32_t lsb_val = 0;
3627         uint32_t msb_val = 0;
3628         const uint8_t rule_mr_offset = 4;
3629
3630         struct ixgbe_hw *hw =
3631                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3632         struct ixgbe_mirror_info *mr_info =
3633                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
3634
3635         if (ixgbe_vmdq_mode_check(hw) < 0)
3636                 return (-ENOTSUP);
3637
3638         memset(&mr_info->mr_conf[rule_id], 0,
3639                 sizeof(struct rte_eth_mirror_conf));
3640
3641         /* clear PFVMCTL register */
3642         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
3643
3644         /* clear pool mask register */
3645         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
3646         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
3647
3648         /* clear vlan mask register */
3649         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
3650         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
3651
3652         return 0;
3653 }
3654
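/*
 * Program a per-queue Tx rate limit through the RTTBCNRC register. The
 * limit is expressed as a rate factor: an integer part (link_speed /
 * tx_rate) plus a fractional part scaled by 2^RF_INT_SHIFT. Worked example
 * (assuming the usual shift of 14): link_speed = 10000 Mbps and tx_rate =
 * 4000 Mbps give rf_int = 2 and rf_dec = (2000 << 14) / 4000 = 8192,
 * i.e. a rate factor of 2.5.
 */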
3655 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3656         uint16_t queue_idx, uint16_t tx_rate)
3657 {
3658         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3659         uint32_t rf_dec, rf_int;
3660         uint32_t bcnrc_val;
3661         uint16_t link_speed = dev->data->dev_link.link_speed;
3662
3663         if (queue_idx >= hw->mac.max_tx_queues)
3664                 return -EINVAL;
3665
3666         if (tx_rate != 0) {
3667                 /* Calculate the rate factor values to set */
3668                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
3669                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
3670                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
3671
3672                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
3673                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
3674                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
3675                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
3676         } else {
3677                 bcnrc_val = 0;
3678         }
3679
3680         /*
3681          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
3682          * register. MMW_SIZE=0x014 if 9728-byte jumbo frames are supported,
3683          * otherwise set it to 0x4.
3684          */
3685         if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
3686                 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
3687                                 IXGBE_MAX_JUMBO_FRAME_SIZE))
3688                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
3689                         IXGBE_MMW_SIZE_JUMBO_FRAME);
3690         else
3691                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
3692                         IXGBE_MMW_SIZE_DEFAULT);
3693
3694         /* Set RTTBCNRC of queue X */
3695         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
3696         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
3697         IXGBE_WRITE_FLUSH(hw);
3698
3699         return 0;
3700 }
3701
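/*
 * Apply a Tx rate limit to every queue of a given VF selected in q_msk,
 * after checking that the sum of all per-VF rates stays within the current
 * link speed.
 */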
3702 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
3703         uint16_t tx_rate, uint64_t q_msk)
3704 {
3705         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3706         struct ixgbe_vf_info *vfinfo =
3707                 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
3708         uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
3709         uint32_t queue_stride =
3710                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
3711         uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
3712         uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
3713         uint16_t total_rate = 0;
3714
3715         if (queue_end >= hw->mac.max_tx_queues)
3716                 return -EINVAL;
3717
3718         if (vfinfo != NULL) {
3719                 for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
3720                         if (vf_idx == vf)
3721                                 continue;
3722                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
3723                                 idx++)
3724                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
3725                 }
3726         } else
3727                 return -EINVAL;
3728
3729         /* Store tx_rate for this vf. */
3730         for (idx = 0; idx < nb_q_per_pool; idx++) {
3731                 if (((uint64_t)0x1 << idx) & q_msk) {
3732                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
3733                                 vfinfo[vf].tx_rate[idx] = tx_rate;
3734                         total_rate += tx_rate;
3735                 }
3736         }
3737
3738         if (total_rate > dev->data->dev_link.link_speed) {
3739                 /*
3740                  * Reset the stored Tx rates of this VF if the total would
3741                  * exceed the link speed.
3742                  */
3743                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
3744                 return -EINVAL;
3745         }
3746
3747         /* Set RTTBCNRC of each queue/pool for vf X  */
3748         for (; queue_idx <= queue_end; queue_idx++) {
3749                 if (0x1 & q_msk)
3750                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
3751                 q_msk = q_msk >> 1;
3752         }
3753
3754         return 0;
3755 }
3756
3757 static void
3758 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
3759                      __attribute__((unused)) uint32_t index,
3760                      __attribute__((unused)) uint32_t pool)
3761 {
3762         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3763         int diag;
3764
3765         /*
3766          * On a 82599 VF, adding again the same MAC addr is not an idempotent
3767          * operation. Trap this case to avoid exhausting the [very limited]
3768          * set of PF resources used to store VF MAC addresses.
3769          */
3770         if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
3771                 return;
3772         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
3773         if (diag == 0)
3774                 return;
3775         PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
3776 }
3777
3778 static void
3779 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
3780 {
3781         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3782         struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
3783         struct ether_addr *mac_addr;
3784         uint32_t i;
3785         int diag;
3786
3787         /*
3788          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
3789          * not support the deletion of a given MAC address.
3790          * Instead, it imposes to delete all MAC addresses, then to add again
3791          * all MAC addresses with the exception of the one to be deleted.
3792          */
3793         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
3794
3795         /*
3796          * Add again all MAC addresses, with the exception of the deleted one
3797          * and of the permanent MAC address.
3798          */
3799         for (i = 0, mac_addr = dev->data->mac_addrs;
3800              i < hw->mac.num_rar_entries; i++, mac_addr++) {
3801                 /* Skip the deleted MAC address */
3802                 if (i == index)
3803                         continue;
3804                 /* Skip NULL MAC addresses */
3805                 if (is_zero_ether_addr(mac_addr))
3806                         continue;
3807                 /* Skip the permanent MAC address */
3808                 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
3809                         continue;
3810                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
3811                 if (diag != 0)
3812                         PMD_DRV_LOG(ERR,
3813                                     "Adding again MAC address "
3814                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
3815                                     "diag=%d",
3816                                     mac_addr->addr_bytes[0],
3817                                     mac_addr->addr_bytes[1],
3818                                     mac_addr->addr_bytes[2],
3819                                     mac_addr->addr_bytes[3],
3820                                     mac_addr->addr_bytes[4],
3821                                     mac_addr->addr_bytes[5],
3822                                     diag);
3823         }
3824 }
3825
3826 static void
3827 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
3828 {
3829         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3830
3831         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
3832 }
3833
3834 #define MAC_TYPE_FILTER_SUP(type)    do {\
3835         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
3836                 (type) != ixgbe_mac_X550)\
3837                 return -ENOTSUP;\
3838 } while (0)
3839
3840 static int
3841 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
3842                         struct rte_eth_syn_filter *filter,
3843                         bool add)
3844 {
3845         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3846         uint32_t synqf;
3847
3848         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
3849                 return -EINVAL;
3850
3851         synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
3852
3853         if (add) {
3854                 if (synqf & IXGBE_SYN_FILTER_ENABLE)
3855                         return -EINVAL;
3856                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
3857                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
3858
3859                 if (filter->hig_pri)
3860                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
3861                 else
3862                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
3863         } else {
3864                 if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
3865                         return -ENOENT;
3866                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
3867         }
3868         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
3869         IXGBE_WRITE_FLUSH(hw);
3870         return 0;
3871 }
3872
3873 static int
3874 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
3875                         struct rte_eth_syn_filter *filter)
3876 {
3877         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3878         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
3879
3880         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
3881                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
3882                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
3883                 return 0;
3884         }
3885         return -ENOENT;
3886 }
3887
3888 static int
3889 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
3890                         enum rte_filter_op filter_op,
3891                         void *arg)
3892 {
3893         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3894         int ret;
3895
3896         MAC_TYPE_FILTER_SUP(hw->mac.type);
3897
3898         if (filter_op == RTE_ETH_FILTER_NOP)
3899                 return 0;
3900
3901         if (arg == NULL) {
3902                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3903                             filter_op);
3904                 return -EINVAL;
3905         }
3906
3907         switch (filter_op) {
3908         case RTE_ETH_FILTER_ADD:
3909                 ret = ixgbe_syn_filter_set(dev,
3910                                 (struct rte_eth_syn_filter *)arg,
3911                                 TRUE);
3912                 break;
3913         case RTE_ETH_FILTER_DELETE:
3914                 ret = ixgbe_syn_filter_set(dev,
3915                                 (struct rte_eth_syn_filter *)arg,
3916                                 FALSE);
3917                 break;
3918         case RTE_ETH_FILTER_GET:
3919                 ret = ixgbe_syn_filter_get(dev,
3920                                 (struct rte_eth_syn_filter *)arg);
3921                 break;
3922         default:
3923                 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
3924                 ret = -EINVAL;
3925                 break;
3926         }
3927
3928         return ret;
3929 }
3930
3931
3932 static inline enum ixgbe_5tuple_protocol
3933 convert_protocol_type(uint8_t protocol_value)
3934 {
3935         if (protocol_value == IPPROTO_TCP)
3936                 return IXGBE_FILTER_PROTOCOL_TCP;
3937         else if (protocol_value == IPPROTO_UDP)
3938                 return IXGBE_FILTER_PROTOCOL_UDP;
3939         else if (protocol_value == IPPROTO_SCTP)
3940                 return IXGBE_FILTER_PROTOCOL_SCTP;
3941         else
3942                 return IXGBE_FILTER_PROTOCOL_NONE;
3943 }
3944
3945 /*
3946  * add a 5tuple filter
3947  *
3948  * @param
3949  * dev: Pointer to struct rte_eth_dev.
3950  * filter: pointer to the filter that will be added; an unused filter
3951  *         index is allocated and stored in filter->index.
3953  *
3954  * @return
3955  *    - On success, zero.
3956  *    - On failure, a negative value.
3957  */
3958 static int
3959 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3960                         struct ixgbe_5tuple_filter *filter)
3961 {
3962         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3963         struct ixgbe_filter_info *filter_info =
3964                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3965         int i, idx, shift;
3966         uint32_t ftqf, sdpqf;
3967         uint32_t l34timir = 0;
3968         uint8_t mask = 0xff;
3969
3970         /*
3971          * look for an unused 5tuple filter index,
3972          * and insert the filter to list.
3973          */
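        /*
         * fivetuple_mask is a bitmap of used filter slots: for example,
         * filter index 37 lives in 32-bit word 1, bit 5.
         */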
3974         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
3975                 idx = i / (sizeof(uint32_t) * NBBY);
3976                 shift = i % (sizeof(uint32_t) * NBBY);
3977                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3978                         filter_info->fivetuple_mask[idx] |= 1 << shift;
3979                         filter->index = i;
3980                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3981                                           filter,
3982                                           entries);
3983                         break;
3984                 }
3985         }
3986         if (i >= IXGBE_MAX_FTQF_FILTERS) {
3987                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3988                 return -ENOSYS;
3989         }
3990
3991         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
3992                                 IXGBE_SDPQF_DSTPORT_SHIFT);
3993         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
3994
3995         ftqf = (uint32_t)(filter->filter_info.proto &
3996                 IXGBE_FTQF_PROTOCOL_MASK);
3997         ftqf |= (uint32_t)((filter->filter_info.priority &
3998                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
3999         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
4000                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
4001         if (filter->filter_info.dst_ip_mask == 0)
4002                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
4003         if (filter->filter_info.src_port_mask == 0)
4004                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
4005         if (filter->filter_info.dst_port_mask == 0)
4006                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
4007         if (filter->filter_info.proto_mask == 0)
4008                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
4009         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
4010         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
4011         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
4012
4013         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
4014         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
4015         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
4016         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
4017
4018         l34timir |= IXGBE_L34T_IMIR_RESERVE;
4019         l34timir |= (uint32_t)(filter->queue <<
4020                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
4021         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
4022         return 0;
4023 }
4024
4025 /*
4026  * remove a 5tuple filter
4027  *
4028  * @param
4029  * dev: Pointer to struct rte_eth_dev.
4030  * filter: pointer to the filter that will be removed.
4031  */
4032 static void
4033 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
4034                         struct ixgbe_5tuple_filter *filter)
4035 {
4036         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4037         struct ixgbe_filter_info *filter_info =
4038                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4039         uint16_t index = filter->index;
4040
4041         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
4042                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
4043         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
4044         rte_free(filter);
4045
4046         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
4047         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
4048         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
4049         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
4050         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
4051 }
4052
4053 static int
4054 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
4055 {
4056         struct ixgbe_hw *hw;
4057         uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4058
4059         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4060
4061         if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
4062                 return -EINVAL;
4063
4064         /* refuse mtu that requires the support of scattered packets when this
4065          * feature has not been enabled before. */
4066         if (!dev->data->scattered_rx &&
4067             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
4068              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
4069                 return -EINVAL;
4070
4071         /*
4072          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
4073          * request of the version 2.0 of the mailbox API.
4074          * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
4075          * of the mailbox API.
4076          * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
4077          * prior to 3.11.33 which contains the following change:
4078          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
4079          */
4080         ixgbevf_rlpml_set_vf(hw, max_frame);
4081
4082         /* update max frame size */
4083         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
4084         return 0;
4085 }
4086
4087 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
4088         if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
4089                 return -ENOTSUP;\
4090 } while (0)
4091
4092 static inline struct ixgbe_5tuple_filter *
4093 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
4094                         struct ixgbe_5tuple_filter_info *key)
4095 {
4096         struct ixgbe_5tuple_filter *it;
4097
4098         TAILQ_FOREACH(it, filter_list, entries) {
4099                 if (memcmp(key, &it->filter_info,
4100                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
4101                         return it;
4102                 }
4103         }
4104         return NULL;
4105 }
4106
4107 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
4108 static inline int
4109 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
4110                         struct ixgbe_5tuple_filter_info *filter_info)
4111 {
4112         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
4113                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
4114                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
4115                 return -EINVAL;
4116
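        /*
         * Note the inverted mask convention below: a generic-filter mask of
         * all ones (the field must match exactly) becomes a hardware mask of
         * 0 ("compare this field"), while a mask of 0 (ignore the field)
         * becomes 1.
         */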
4117         switch (filter->dst_ip_mask) {
4118         case UINT32_MAX:
4119                 filter_info->dst_ip_mask = 0;
4120                 filter_info->dst_ip = filter->dst_ip;
4121                 break;
4122         case 0:
4123                 filter_info->dst_ip_mask = 1;
4124                 break;
4125         default:
4126                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
4127                 return -EINVAL;
4128         }
4129
4130         switch (filter->src_ip_mask) {
4131         case UINT32_MAX:
4132                 filter_info->src_ip_mask = 0;
4133                 filter_info->src_ip = filter->src_ip;
4134                 break;
4135         case 0:
4136                 filter_info->src_ip_mask = 1;
4137                 break;
4138         default:
4139                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
4140                 return -EINVAL;
4141         }
4142
4143         switch (filter->dst_port_mask) {
4144         case UINT16_MAX:
4145                 filter_info->dst_port_mask = 0;
4146                 filter_info->dst_port = filter->dst_port;
4147                 break;
4148         case 0:
4149                 filter_info->dst_port_mask = 1;
4150                 break;
4151         default:
4152                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4153                 return -EINVAL;
4154         }
4155
4156         switch (filter->src_port_mask) {
4157         case UINT16_MAX:
4158                 filter_info->src_port_mask = 0;
4159                 filter_info->src_port = filter->src_port;
4160                 break;
4161         case 0:
4162                 filter_info->src_port_mask = 1;
4163                 break;
4164         default:
4165                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
4166                 return -EINVAL;
4167         }
4168
4169         switch (filter->proto_mask) {
4170         case UINT8_MAX:
4171                 filter_info->proto_mask = 0;
4172                 filter_info->proto =
4173                         convert_protocol_type(filter->proto);
4174                 break;
4175         case 0:
4176                 filter_info->proto_mask = 1;
4177                 break;
4178         default:
4179                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
4180                 return -EINVAL;
4181         }
4182
4183         filter_info->priority = (uint8_t)filter->priority;
4184         return 0;
4185 }
4186
4187 /*
4188  * add or delete an ntuple filter
4189  *
4190  * @param
4191  * dev: Pointer to struct rte_eth_dev.
4192  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4193  * add: if true, add the filter; if false, remove it
4194  *
4195  * @return
4196  *    - On success, zero.
4197  *    - On failure, a negative value.
4198  */
4199 static int
4200 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
4201                         struct rte_eth_ntuple_filter *ntuple_filter,
4202                         bool add)
4203 {
4204         struct ixgbe_filter_info *filter_info =
4205                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4206         struct ixgbe_5tuple_filter_info filter_5tuple;
4207         struct ixgbe_5tuple_filter *filter;
4208         int ret;
4209
4210         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
4211                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
4212                 return -EINVAL;
4213         }
4214
4215         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
4216         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
4217         if (ret < 0)
4218                 return ret;
4219
4220         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
4221                                          &filter_5tuple);
4222         if (filter != NULL && add) {
4223                 PMD_DRV_LOG(ERR, "filter exists.");
4224                 return -EEXIST;
4225         }
4226         if (filter == NULL && !add) {
4227                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4228                 return -ENOENT;
4229         }
4230
4231         if (add) {
4232                 filter = rte_zmalloc("ixgbe_5tuple_filter",
4233                                 sizeof(struct ixgbe_5tuple_filter), 0);
4234                 if (filter == NULL)
4235                         return -ENOMEM;
4236                 (void)rte_memcpy(&filter->filter_info,
4237                                  &filter_5tuple,
4238                                  sizeof(struct ixgbe_5tuple_filter_info));
4239                 filter->queue = ntuple_filter->queue;
4240                 ret = ixgbe_add_5tuple_filter(dev, filter);
4241                 if (ret < 0) {
4242                         rte_free(filter);
4243                         return ret;
4244                 }
4245         } else
4246                 ixgbe_remove_5tuple_filter(dev, filter);
4247
4248         return 0;
4249 }
4250
4251 /*
4252  * get an ntuple filter
4253  *
4254  * @param
4255  * dev: Pointer to struct rte_eth_dev.
4256  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4257  *
4258  * @return
4259  *    - On success, zero.
4260  *    - On failure, a negative value.
4261  */
4262 static int
4263 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
4264                         struct rte_eth_ntuple_filter *ntuple_filter)
4265 {
4266         struct ixgbe_filter_info *filter_info =
4267                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4268         struct ixgbe_5tuple_filter_info filter_5tuple;
4269         struct ixgbe_5tuple_filter *filter;
4270         int ret;
4271
4272         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
4273                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
4274                 return -EINVAL;
4275         }
4276
4277         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
4278         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
4279         if (ret < 0)
4280                 return ret;
4281
4282         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
4283                                          &filter_5tuple);
4284         if (filter == NULL) {
4285                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4286                 return -ENOENT;
4287         }
4288         ntuple_filter->queue = filter->queue;
4289         return 0;
4290 }
4291
4292 /*
4293  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
4294  * @dev: pointer to rte_eth_dev structure
4295  * @filter_op: the operation to be taken.
4296  * @arg: a pointer to the specific structure corresponding to the filter_op
4297  *
4298  * @return
4299  *    - On success, zero.
4300  *    - On failure, a negative value.
4301  */
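/*
 * Illustrative sketch only (not part of the driver): an application would
 * normally reach this handler through the generic filter-control API, e.g.
 *
 *     struct rte_eth_ntuple_filter f = {
 *             .flags = RTE_5TUPLE_FLAGS,
 *             .proto = IPPROTO_UDP,
 *             .proto_mask = UINT8_MAX,   (match the protocol exactly;
 *                                         other fields left as wildcards)
 *             .priority = 1,
 *             .queue = rx_queue_id,      (assumed RX queue variable)
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                             RTE_ETH_FILTER_ADD, &f);
 */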
4302 static int
4303 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
4304                                 enum rte_filter_op filter_op,
4305                                 void *arg)
4306 {
4307         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4308         int ret;
4309
4310         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
4311
4312         if (filter_op == RTE_ETH_FILTER_NOP)
4313                 return 0;
4314
4315         if (arg == NULL) {
4316                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
4317                             filter_op);
4318                 return -EINVAL;
4319         }
4320
4321         switch (filter_op) {
4322         case RTE_ETH_FILTER_ADD:
4323                 ret = ixgbe_add_del_ntuple_filter(dev,
4324                         (struct rte_eth_ntuple_filter *)arg,
4325                         TRUE);
4326                 break;
4327         case RTE_ETH_FILTER_DELETE:
4328                 ret = ixgbe_add_del_ntuple_filter(dev,
4329                         (struct rte_eth_ntuple_filter *)arg,
4330                         FALSE);
4331                 break;
4332         case RTE_ETH_FILTER_GET:
4333                 ret = ixgbe_get_ntuple_filter(dev,
4334                         (struct rte_eth_ntuple_filter *)arg);
4335                 break;
4336         default:
4337                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
4338                 ret = -EINVAL;
4339                 break;
4340         }
4341         return ret;
4342 }
4343
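/*
 * Small helpers managing the software shadow of the ETQF filter slots:
 * ethertype_mask tracks which of the IXGBE_MAX_ETQF_FILTERS entries are in
 * use and ethertype_filters[] remembers the EtherType programmed in each.
 * lookup() returns the slot index for an EtherType (or -1), insert() claims
 * a free slot and remove() releases one.
 */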
4344 static inline int
4345 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
4346                         uint16_t ethertype)
4347 {
4348         int i;
4349
4350         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
4351                 if (filter_info->ethertype_filters[i] == ethertype &&
4352                     (filter_info->ethertype_mask & (1 << i)))
4353                         return i;
4354         }
4355         return -1;
4356 }
4357
4358 static inline int
4359 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
4360                         uint16_t ethertype)
4361 {
4362         int i;
4363
4364         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
4365                 if (!(filter_info->ethertype_mask & (1 << i))) {
4366                         filter_info->ethertype_mask |= 1 << i;
4367                         filter_info->ethertype_filters[i] = ethertype;
4368                         return i;
4369                 }
4370         }
4371         return -1;
4372 }
4373
4374 static inline int
4375 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
4376                         uint8_t idx)
4377 {
4378         if (idx >= IXGBE_MAX_ETQF_FILTERS)
4379                 return -1;
4380         filter_info->ethertype_mask &= ~(1 << idx);
4381         filter_info->ethertype_filters[idx] = 0;
4382         return idx;
4383 }
4384
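/*
 * Program (add) or clear (delete) an ETQF/ETQS register pair so that frames
 * with the given EtherType are steered to the requested RX queue. IPv4 and
 * IPv6 EtherTypes, MAC address compare and the drop action are rejected here.
 */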
4385 static int
4386 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
4387                         struct rte_eth_ethertype_filter *filter,
4388                         bool add)
4389 {
4390         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4391         struct ixgbe_filter_info *filter_info =
4392                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4393         uint32_t etqf = 0;
4394         uint32_t etqs = 0;
4395         int ret;
4396
4397         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
4398                 return -EINVAL;
4399
4400         if (filter->ether_type == ETHER_TYPE_IPv4 ||
4401                 filter->ether_type == ETHER_TYPE_IPv6) {
4402                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4403                         " ethertype filter.", filter->ether_type);
4404                 return -EINVAL;
4405         }
4406
4407         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4408                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4409                 return -EINVAL;
4410         }
4411         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4412                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4413                 return -EINVAL;
4414         }
4415
4416         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4417         if (ret >= 0 && add) {
4418                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4419                             filter->ether_type);
4420                 return -EEXIST;
4421         }
4422         if (ret < 0 && !add) {
4423                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4424                             filter->ether_type);
4425                 return -ENOENT;
4426         }
4427
4428         if (add) {
4429                 ret = ixgbe_ethertype_filter_insert(filter_info,
4430                         filter->ether_type);
4431                 if (ret < 0) {
4432                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
4433                         return -ENOSYS;
4434                 }
4435                 etqf = IXGBE_ETQF_FILTER_EN;
4436                 etqf |= (uint32_t)filter->ether_type;
4437                 etqs |= (uint32_t)((filter->queue <<
4438                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
4439                                     IXGBE_ETQS_RX_QUEUE);
4440                 etqs |= IXGBE_ETQS_QUEUE_EN;
4441         } else {
4442                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4443                 if (ret < 0)
4444                         return -ENOSYS;
4445         }
4446         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
4447         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
4448         IXGBE_WRITE_FLUSH(hw);
4449
4450         return 0;
4451 }
4452
4453 static int
4454 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
4455                         struct rte_eth_ethertype_filter *filter)
4456 {
4457         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4458         struct ixgbe_filter_info *filter_info =
4459                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4460         uint32_t etqf, etqs;
4461         int ret;
4462
4463         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4464         if (ret < 0) {
4465                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4466                             filter->ether_type);
4467                 return -ENOENT;
4468         }
4469
4470         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
4471         if (etqf & IXGBE_ETQF_FILTER_EN) {
4472                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
4473                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
4474                 filter->flags = 0;
4475                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
4476                                IXGBE_ETQS_RX_QUEUE_SHIFT;
4477                 return 0;
4478         }
4479         return -ENOENT;
4480 }
4481
4482 /*
4483  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
4484  * @dev: pointer to rte_eth_dev structure
4485  * @filter_op: the operation to be taken.
4486  * @arg: a pointer to the specific structure corresponding to the filter_op
4487  */
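/*
 * Illustrative sketch only (not part of the driver): steering a non-IP
 * EtherType, e.g. ARP (0x0806), to a given RX queue through the generic
 * filter-control API:
 *
 *     struct rte_eth_ethertype_filter ef = {
 *             .ether_type = 0x0806,      (must not be IPv4/IPv6)
 *             .queue = rx_queue_id,      (assumed RX queue variable)
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                             RTE_ETH_FILTER_ADD, &ef);
 */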
4488 static int
4489 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
4490                                 enum rte_filter_op filter_op,
4491                                 void *arg)
4492 {
4493         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4494         int ret;
4495
4496         MAC_TYPE_FILTER_SUP(hw->mac.type);
4497
4498         if (filter_op == RTE_ETH_FILTER_NOP)
4499                 return 0;
4500
4501         if (arg == NULL) {
4502                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
4503                             filter_op);
4504                 return -EINVAL;
4505         }
4506
4507         switch (filter_op) {
4508         case RTE_ETH_FILTER_ADD:
4509                 ret = ixgbe_add_del_ethertype_filter(dev,
4510                         (struct rte_eth_ethertype_filter *)arg,
4511                         TRUE);
4512                 break;
4513         case RTE_ETH_FILTER_DELETE:
4514                 ret = ixgbe_add_del_ethertype_filter(dev,
4515                         (struct rte_eth_ethertype_filter *)arg,
4516                         FALSE);
4517                 break;
4518         case RTE_ETH_FILTER_GET:
4519                 ret = ixgbe_get_ethertype_filter(dev,
4520                         (struct rte_eth_ethertype_filter *)arg);
4521                 break;
4522         default:
4523                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
4524                 ret = -EINVAL;
4525                 break;
4526         }
4527         return ret;
4528 }
4529
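/*
 * Generic filter control entry point (the .filter_ctrl dev op): dispatches
 * the requested operation to the ntuple, ethertype, SYN or flow-director
 * handler according to filter_type.
 */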
4530 static int
4531 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
4532                      enum rte_filter_type filter_type,
4533                      enum rte_filter_op filter_op,
4534                      void *arg)
4535 {
4536         int ret = -EINVAL;
4537
4538         switch (filter_type) {
4539         case RTE_ETH_FILTER_NTUPLE:
4540                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
4541                 break;
4542         case RTE_ETH_FILTER_ETHERTYPE:
4543                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
4544                 break;
4545         case RTE_ETH_FILTER_SYN:
4546                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
4547                 break;
4548         case RTE_ETH_FILTER_FDIR:
4549                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
4550                 break;
4551         default:
4552                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4553                                                         filter_type);
4554                 break;
4555         }
4556
4557         return ret;
4558 }
4559
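/*
 * Iterator callback handed to the shared ixgbe_update_mc_addr_list() code:
 * it walks the flat array of ether_addr entries passed in by
 * ixgbe_dev_set_mc_addr_list() and always reports VMDq pool 0.
 */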
4560 static u8 *
4561 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
4562                         u8 **mc_addr_ptr, u32 *vmdq)
4563 {
4564         u8 *mc_addr;
4565
4566         *vmdq = 0;
4567         mc_addr = *mc_addr_ptr;
4568         *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
4569         return mc_addr;
4570 }
4571
4572 static int
4573 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4574                           struct ether_addr *mc_addr_set,
4575                           uint32_t nb_mc_addr)
4576 {
4577         struct ixgbe_hw *hw;
4578         u8 *mc_addr_list;
4579
4580         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4581         mc_addr_list = (u8 *)mc_addr_set;
4582         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4583                                          ixgbe_dev_addr_list_itr, TRUE);
4584 }
4585
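/*
 * IEEE 1588/802.1AS (PTP) support: timesync_enable starts the free-running
 * time counter (TIMINCA), installs the L2 EtherType filter for PTP frames
 * and turns on RX/TX timestamping; timesync_disable undoes all of that.
 */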
4586 static int
4587 ixgbe_timesync_enable(struct rte_eth_dev *dev)
4588 {
4589         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4590         uint32_t tsync_ctl;
4591         uint32_t tsauxc;
4592
4593         /* Enable system time for platforms where it isn't on by default. */
4594         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
4595         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
4596         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
4597
4598         /* Start incrementing the register used to timestamp PTP packets. */
4599         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, IXGBE_TIMINCA_INIT);
4600
4601         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4602         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
4603                         (ETHER_TYPE_1588 |
4604                          IXGBE_ETQF_FILTER_EN |
4605                          IXGBE_ETQF_1588));
4606
4607         /* Enable timestamping of received PTP packets. */
4608         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
4609         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
4610         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
4611
4612         /* Enable timestamping of transmitted PTP packets. */
4613         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
4614         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
4615         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
4616
4617         return 0;
4618 }
4619
4620 static int
4621 ixgbe_timesync_disable(struct rte_eth_dev *dev)
4622 {
4623         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4624         uint32_t tsync_ctl;
4625
4626         /* Disable timestamping of transmitted PTP packets. */
4627         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
4628         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
4629         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
4630
4631         /* Disable timestamping of received PTP packets. */
4632         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
4633         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
4634         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
4635
4636         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4637         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
4638
4639         /* Stop incrementing the System Time registers. */
4640         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
4641
4642         return 0;
4643 }
4644
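/*
 * Read back the latched RX/TX timestamps. Both helpers return -EINVAL when
 * the corresponding VALID bit is not set, i.e. no new timestamp has been
 * captured since the last read.
 */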
4645 static int
4646 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4647                                  struct timespec *timestamp,
4648                                  uint32_t flags __rte_unused)
4649 {
4650         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4651         uint32_t tsync_rxctl;
4652         uint32_t rx_stmpl;
4653         uint32_t rx_stmph;
4654
4655         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
4656         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
4657                 return -EINVAL;
4658
4659         rx_stmpl = IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
4660         rx_stmph = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
4661
4662         timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
4663         timestamp->tv_nsec = 0;
4664
4665         return  0;
4666 }
4667
4668 static int
4669 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4670                                  struct timespec *timestamp)
4671 {
4672         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4673         uint32_t tsync_txctl;
4674         uint32_t tx_stmpl;
4675         uint32_t tx_stmph;
4676
4677         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
4678         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
4679                 return -EINVAL;
4680
4681         tx_stmpl = IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
4682         tx_stmph = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
4683
4684         timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
4685         timestamp->tv_nsec = 0;
4686
4687         return  0;
4688 }
4689
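/*
 * Register dump support: ixgbe_get_reg_length() reports how many 32-bit
 * registers a full dump contains and ixgbe_get_regs() fills the caller's
 * buffer, choosing the 82598 or the common register table from the MAC type.
 *
 * Illustrative sketch only (assumes the matching ethdev wrappers of this
 * DPDK version, rte_eth_dev_get_reg_length()/rte_eth_dev_get_reg_info()):
 *
 *     int len = rte_eth_dev_get_reg_length(port_id);
 *     struct rte_dev_reg_info info = { .length = 0 };   (0 = full dump)
 *     info.data = malloc(len * sizeof(uint32_t));
 *     rte_eth_dev_get_reg_info(port_id, &info);
 */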
4690 static int
4691 ixgbe_get_reg_length(struct rte_eth_dev *dev)
4692 {
4693         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4694         int count = 0;
4695         int g_ind = 0;
4696         const struct reg_info *reg_group;
4697         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
4698                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
4699
4700         while ((reg_group = reg_set[g_ind++]))
4701                 count += ixgbe_regs_group_count(reg_group);
4702
4703         return count;
4704 }
4705
4706 static int
4707 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4708 {
4709         int count = 0;
4710         int g_ind = 0;
4711         const struct reg_info *reg_group;
4712
4713         while ((reg_group = ixgbevf_regs[g_ind++]))
4714                 count += ixgbe_regs_group_count(reg_group);
4715
4716         return count;
4717 }
4718
4719 static int
4720 ixgbe_get_regs(struct rte_eth_dev *dev,
4721               struct rte_dev_reg_info *regs)
4722 {
4723         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4724         uint32_t *data = regs->data;
4725         int g_ind = 0;
4726         int count = 0;
4727         const struct reg_info *reg_group;
4728         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
4729                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
4730
4731         /* Support only full register dump */
4732         if ((regs->length == 0) ||
4733             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
4734                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4735                         hw->device_id;
4736                 while ((reg_group = reg_set[g_ind++]))
4737                         count += ixgbe_read_regs_group(dev, &data[count],
4738                                 reg_group);
4739                 return 0;
4740         }
4741
4742         return -ENOTSUP;
4743 }
4744
4745 static int
4746 ixgbevf_get_regs(struct rte_eth_dev *dev,
4747                 struct rte_dev_reg_info *regs)
4748 {
4749         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4750         uint32_t *data = regs->data;
4751         int g_ind = 0;
4752         int count = 0;
4753         const struct reg_info *reg_group;
4754
4755         /* Support only full register dump */
4756         if ((regs->length == 0) ||
4757             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
4758                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4759                         hw->device_id;
4760                 while ((reg_group = ixgbevf_regs[g_ind++]))
4761                         count += ixgbe_read_regs_group(dev, &data[count],
4762                                                       reg_group);
4763                 return 0;
4764         }
4765
4766         return -ENOTSUP;
4767 }
4768
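/*
 * EEPROM access: the device EEPROM is addressed in 16-bit words, while the
 * rte_dev_eeprom_info offset and length are given in bytes, hence the ">> 1"
 * conversions below; odd byte offsets or lengths are truncated by the shift.
 */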
4769 static int
4770 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
4771 {
4772         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4773
4774         /* word_size counts 16-bit words, so return the EEPROM size in bytes. */
4775         return hw->eeprom.word_size * 2;
4776 }
4777
4778 static int
4779 ixgbe_get_eeprom(struct rte_eth_dev *dev,
4780                 struct rte_dev_eeprom_info *in_eeprom)
4781 {
4782         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4783         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
4784         uint16_t *data = in_eeprom->data;
4785         int first, length;
4786
4787         first = in_eeprom->offset >> 1;
4788         length = in_eeprom->length >> 1;
4789         if ((first >= hw->eeprom.word_size) ||
4790             ((first + length) >= hw->eeprom.word_size))
4791                 return -EINVAL;
4792
4793         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4794
4795         return eeprom->ops.read_buffer(hw, first, length, data);
4796 }
4797
4798 static int
4799 ixgbe_set_eeprom(struct rte_eth_dev *dev,
4800                 struct rte_dev_eeprom_info *in_eeprom)
4801 {
4802         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4803         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
4804         uint16_t *data = in_eeprom->data;
4805         int first, length;
4806
4807         first = in_eeprom->offset >> 1;
4808         length = in_eeprom->length >> 1;
4809         if ((first >= hw->eeprom.word_size) ||
4810             ((first + length) >= hw->eeprom.word_size))
4811                 return -EINVAL;
4812
4813         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4814
4815         return eeprom->ops.write_buffer(hw, first, length, data);
4816 }
4817
4818 static struct rte_driver rte_ixgbe_driver = {
4819         .type = PMD_PDEV,
4820         .init = rte_ixgbe_pmd_init,
4821 };
4822
4823 static struct rte_driver rte_ixgbevf_driver = {
4824         .type = PMD_PDEV,
4825         .init = rte_ixgbevf_pmd_init,
4826 };
4827
4828 PMD_REGISTER_DRIVER(rte_ixgbe_driver);
4829 PMD_REGISTER_DRIVER(rte_ixgbevf_driver);