igb: add access to specific device info
[dpdk.git] drivers/net/e1000/igb_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#include "igb_regs.h"

/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH  32
#define IGB_DEFAULT_RX_PTHRESH      8
#define IGB_DEFAULT_RX_HTHRESH      8
#define IGB_DEFAULT_RX_WTHRESH      0

#define IGB_DEFAULT_TX_PTHRESH      32
#define IGB_DEFAULT_TX_HTHRESH      0
#define IGB_DEFAULT_TX_WTHRESH      0

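/* RSS random key size, counted in 32-bit registers (10 * 4 = 40 bytes). */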
#define IGB_HKEY_MAX_INDEX 10

/* Bit shift and mask */
#define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH  CHAR_BIT
#define IGB_8_BIT_MASK   UINT8_MAX

/* Additional timesync values. */
#define E1000_ETQF_FILTER_1588 3
#define E1000_TIMINCA_INCVALUE 16000000
#define E1000_TIMINCA_INIT     ((0x02 << E1000_TIMINCA_16NS_SHIFT) \
                                | E1000_TIMINCA_INCVALUE)

static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *rte_stats);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
                                                        void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
                struct ether_addr *mac_addr,
                uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
                struct ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
                struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
                struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
                                  struct rte_eth_rss_reta_entry64 *reta_conf,
                                  uint16_t reta_size);

static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter,
                        bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
                        struct rte_eth_flex_filter *filter,
                        bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
                        struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *filter,
                        bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter,
                        bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
                struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
                struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
                struct rte_dev_eeprom_info *eeprom);

static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
                                    struct ether_addr *mc_addr_set,
                                    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                          struct timespec *timestamp,
                                          uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                          struct timespec *timestamp);

/*
 * Define a VF stats macro for registers that are not cleared on read;
 * it accumulates the delta between consecutive reads into 'cur'.
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
        u32 latest = E1000_READ_REG(hw, reg);     \
        cur += latest - last;                     \
        last = latest;                            \
}
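/*
 * Usage sketch (see eth_igbvf_stats_get() below):
 *   UPDATE_VF_STAT(E1000_VFGPRC, hw_stats->last_gprc, hw_stats->gprc);
 * Unsigned 32-bit arithmetic keeps the computed delta correct even when
 * the hardware counter wraps around.
 */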


#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {
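/* rte_pci_dev_ids.h expands RTE_PCI_DEV_ID_DECL_IGB once per supported device ID. */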
#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};

static const struct eth_dev_ops eth_igb_ops = {
        .dev_configure        = eth_igb_configure,
        .dev_start            = eth_igb_start,
        .dev_stop             = eth_igb_stop,
        .dev_close            = eth_igb_close,
        .promiscuous_enable   = eth_igb_promiscuous_enable,
        .promiscuous_disable  = eth_igb_promiscuous_disable,
        .allmulticast_enable  = eth_igb_allmulticast_enable,
        .allmulticast_disable = eth_igb_allmulticast_disable,
        .link_update          = eth_igb_link_update,
        .stats_get            = eth_igb_stats_get,
        .stats_reset          = eth_igb_stats_reset,
        .dev_infos_get        = eth_igb_infos_get,
        .mtu_set              = eth_igb_mtu_set,
        .vlan_filter_set      = eth_igb_vlan_filter_set,
        .vlan_tpid_set        = eth_igb_vlan_tpid_set,
        .vlan_offload_set     = eth_igb_vlan_offload_set,
        .rx_queue_setup       = eth_igb_rx_queue_setup,
        .rx_queue_release     = eth_igb_rx_queue_release,
        .rx_queue_count       = eth_igb_rx_queue_count,
        .rx_descriptor_done   = eth_igb_rx_descriptor_done,
        .tx_queue_setup       = eth_igb_tx_queue_setup,
        .tx_queue_release     = eth_igb_tx_queue_release,
        .dev_led_on           = eth_igb_led_on,
        .dev_led_off          = eth_igb_led_off,
        .flow_ctrl_get        = eth_igb_flow_ctrl_get,
        .flow_ctrl_set        = eth_igb_flow_ctrl_set,
        .mac_addr_add         = eth_igb_rar_set,
        .mac_addr_remove      = eth_igb_rar_clear,
        .mac_addr_set         = eth_igb_default_mac_addr_set,
        .reta_update          = eth_igb_rss_reta_update,
        .reta_query           = eth_igb_rss_reta_query,
        .rss_hash_update      = eth_igb_rss_hash_update,
        .rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
        .filter_ctrl          = eth_igb_filter_ctrl,
        .set_mc_addr_list     = eth_igb_set_mc_addr_list,
        .timesync_enable      = igb_timesync_enable,
        .timesync_disable     = igb_timesync_disable,
        .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
        .get_reg_length       = eth_igb_get_reg_length,
        .get_reg              = eth_igb_get_regs,
        .get_eeprom_length    = eth_igb_get_eeprom_length,
        .get_eeprom           = eth_igb_get_eeprom,
        .set_eeprom           = eth_igb_set_eeprom,
};

/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation are implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
        .dev_configure        = igbvf_dev_configure,
        .dev_start            = igbvf_dev_start,
        .dev_stop             = igbvf_dev_stop,
        .dev_close            = igbvf_dev_close,
        .link_update          = eth_igb_link_update,
        .stats_get            = eth_igbvf_stats_get,
        .stats_reset          = eth_igbvf_stats_reset,
        .vlan_filter_set      = igbvf_vlan_filter_set,
        .dev_infos_get        = eth_igbvf_infos_get,
        .rx_queue_setup       = eth_igb_rx_queue_setup,
        .rx_queue_release     = eth_igb_rx_queue_release,
        .tx_queue_setup       = eth_igb_tx_queue_setup,
        .tx_queue_release     = eth_igb_tx_queue_release,
        .set_mc_addr_list     = eth_igb_set_mc_addr_list,
        .mac_addr_set         = igbvf_default_mac_addr_set,
        .get_reg_length       = igbvf_get_reg_length,
        .get_reg              = igbvf_get_regs,
};

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer where the link status is saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
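        /*
         * The link status is copied with a single 64-bit compare-and-set so
         * readers never observe a torn value; this assumes struct
         * rte_eth_link fits in 64 bits.
         */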
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
        E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
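        /* Writing all ones to the Interrupt Mask Clear register masks every cause. */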
        E1000_WRITE_REG(hw, E1000_IMC, ~0);
        E1000_WRITE_FLUSH(hw);
}

static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = e1000_reset_hw(hw);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
        E1000_WRITE_FLUSH(hw);

        return status;
}

static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        hw->vendor_id = dev->pci_dev->id.vendor_id;
        hw->device_id = dev->pci_dev->id.device_id;
        hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;

        e1000_set_mac_type(hw);

        /* need to check if it is a vf device below */
}

static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
        int ret_val;

        /*
         * Do mac ops initialization manually here, since we will need
         * some function pointers set by this call.
         */
        ret_val = e1000_init_mac_params(hw);
        if (ret_val)
                return ret_val;

        /*
         * The SMBI lock should not fail at this early stage. If it does, it
         * is due to an improper exit of the application, so force the
         * release of the faulty lock.
         */
        if (e1000_get_hw_semaphore_generic(hw) < 0) {
                PMD_DRV_LOG(DEBUG, "SMBI lock released");
        }
        e1000_put_hw_semaphore_generic(hw);

        if (hw->mac.ops.acquire_swfw_sync != NULL) {
                uint16_t mask;

                /*
                 * The PHY lock should not fail at this early stage. If it
                 * does, it is due to an improper exit of the application,
                 * so force the release of the faulty lock.
                 */
                mask = E1000_SWFW_PHY0_SM << hw->bus.func;
                if (hw->bus.func > E1000_FUNC_1)
                        mask <<= 2;
                if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
                        PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
                                    hw->bus.func);
                }
                hw->mac.ops.release_swfw_sync(hw, mask);

                /*
                 * This one is trickier since it is common to all ports; but
                 * the swfw_sync retries last long enough (1 s) to be almost
                 * sure that, if the lock cannot be taken, it is due to an
                 * improper hold of the semaphore.
                 */
                mask = E1000_SWFW_EEP_SM;
                if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
                        PMD_DRV_LOG(DEBUG, "SWFW common locks released");
                }
                hw->mac.ops.release_swfw_sync(hw, mask);
        }

        return E1000_SUCCESS;
}

static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev;
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct e1000_vfta *shadow_vfta =
                        E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        uint32_t ctrl_ext;

        pci_dev = eth_dev->pci_dev;
        eth_dev->dev_ops = &eth_igb_ops;
        eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
        eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
                return 0;
        }

        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        igb_identify_hardware(eth_dev);
        if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
        }

        e1000_get_bus_info(hw);

        /* Reset any pending lock */
        if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
        }

        /* Finish initialization */
        if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
        }

        hw->mac.autoneg = 1;
        hw->phy.autoneg_wait_to_complete = 0;
        hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
                hw->phy.mdix = 0; /* AUTO_ALL_MODES */
                hw->phy.disable_polarity_correction = 0;
                hw->phy.ms_type = e1000_ms_hw_default;
        }

        /*
         * Start from a known state; this is important for reading the NVM
         * and the MAC address from it.
         */
        igb_pf_reset_hw(hw);

        /* Make sure we have a good EEPROM before we read from it */
        if (e1000_validate_nvm_checksum(hw) < 0) {
                /*
                 * Some PCI-E parts fail the first check due to the link
                 * being in a sleep state; call it again, and if it fails a
                 * second time it is a real issue.
                 */
                if (e1000_validate_nvm_checksum(hw) < 0) {
                        PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
                        error = -EIO;
                        goto err_late;
                }
        }

        /* Read the permanent MAC address out of the EEPROM */
        if (e1000_read_mac_addr(hw) != 0) {
                PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
                error = -EIO;
                goto err_late;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("e1000",
                ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                                                "store MAC addresses",
                                ETHER_ADDR_LEN * hw->mac.rar_entry_count);
                error = -ENOMEM;
                goto err_late;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* Now initialize the hardware */
        if (igb_hardware_init(hw) != 0) {
                PMD_INIT_LOG(ERR, "Hardware initialization failed");
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                error = -ENODEV;
                goto err_late;
        }
        hw->mac.get_link_status = 1;

        /* Indicate SOL/IDER usage */
        if (e1000_check_reset_block(hw) < 0) {
                PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
                                        "SOL/IDER session");
        }

        /* initialize PF if max_vfs not zero */
        igb_pf_host_init(eth_dev);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
        E1000_WRITE_FLUSH(hw);

        PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(&(pci_dev->intr_handle),
                eth_igb_interrupt_handler, (void *)eth_dev);

        /* enable uio intr after callback register */
        rte_intr_enable(&(pci_dev->intr_handle));

        /* enable support intr */
        igb_intr_enable(eth_dev);

        TAILQ_INIT(&filter_info->flex_list);
        filter_info->flex_mask = 0;
        TAILQ_INIT(&filter_info->twotuple_list);
        filter_info->twotuple_mask = 0;
        TAILQ_INIT(&filter_info->fivetuple_list);
        filter_info->fivetuple_mask = 0;

        return 0;

err_late:
        igb_hw_control_release(hw);

        return (error);
}

/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        int diag;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &igbvf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
        eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
                return 0;
        }

        pci_dev = eth_dev->pci_dev;

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Initialize the shared code (base driver) */
        diag = e1000_setup_init_funcs(hw, TRUE);
        if (diag != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
                        diag);
                return -EIO;
        }

        /* init_mailbox_params */
        hw->mbx.ops.init_params(hw);

        /* Disable the interrupts for VF */
        igbvf_intr_disable(hw);

        diag = hw->mac.ops.reset_hw(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
                hw->mac.rar_entry_count, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC "
                        "addresses",
                        ETHER_ADDR_LEN * hw->mac.rar_entry_count);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
                     "mac.type=%s",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id, "igb_mac_82576_vf");

        return 0;
}

static struct eth_driver rte_igb_pmd = {
        .pci_drv = {
                .name = "rte_igb_pmd",
                .id_table = pci_id_igb_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
        .eth_dev_init = eth_igb_dev_init,
        .dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
        .pci_drv = {
                .name = "rte_igbvf_pmd",
                .id_table = pci_id_igbvf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_igbvf_dev_init,
        .dev_private_size = sizeof(struct e1000_adapter),
};

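/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI IGB devices.
 */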
static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
        rte_eth_driver_register(&rte_igb_pmd);
        return 0;
}

static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* RCTL: enable the VLAN filter since VMDq always uses VLAN filtering */
        uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl |= E1000_RCTL_VFE;
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        rte_eth_driver_register(&rte_igbvf_pmd);
        return (0);
}

static int
eth_igb_configure(struct rte_eth_dev *dev)
{
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;

        return (0);
}

static int
eth_igb_start(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret, i, mask;
        uint32_t ctrl_ext;

        PMD_INIT_FUNC_TRACE();

        /* Power up the phy. Needed to make the link go Up */
        e1000_power_up_phy(hw);

        /*
         * Packet Buffer Allocation (PBA)
         * Writing PBA sets the receive portion of the buffer
         * the remainder is used for the transmit buffer.
         */
        if (hw->mac.type == e1000_82575) {
                uint32_t pba;

                pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
                E1000_WRITE_REG(hw, E1000_PBA, pba);
        }

        /* Put the address into the Receive Address Array */
        e1000_rar_set(hw, hw->mac.addr, 0);

        /* Initialize the hardware */
        if (igb_hardware_init(hw)) {
                PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
                return (-EIO);
        }

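        /* Program 0x8100 as the VLAN Ether Type, in both halves of the VET register. */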
        E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
        E1000_WRITE_FLUSH(hw);

        /* configure PF module if SRIOV enabled */
        igb_pf_host_configure(dev);

        /* Configure for OS presence */
        igb_init_manageability(hw);

        eth_igb_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        ret = eth_igb_rx_init(dev);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
                igb_dev_clear_queues(dev);
                return ret;
        }

        e1000_clear_hw_cntrs_base_generic(hw);

        /*
         * VLAN Offload Settings
         */
        mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                        ETH_VLAN_EXTEND_MASK;
        eth_igb_vlan_offload_set(dev, mask);

        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable the VLAN filter since VMDq always uses VLAN filtering */
                igb_vmdq_vlan_hw_filter_enable(dev);
        }

        /*
         * Configure the Interrupt Moderation register (EITR) with the maximum
         * possible value (0xFFFF) to minimize "System Partial Write" issued by
         * spurious [DMA] memory updates of RX and TX ring descriptors.
         *
         * With an EITR granularity of 2 microseconds in the 82576, only 7 to 8
         * spurious memory updates per second should be expected
         * ((65535 * 2) / 1000000 ~= 0.131 second between updates).
         *
         * Because interrupts are not used at all, the MSI-X is not activated
         * and interrupt moderation is controlled by EITR[0].
         *
         * Note that having [almost] disabled memory updates of RX and TX ring
         * descriptors through the Interrupt Moderation mechanism, memory
         * updates of ring descriptors are now moderated by the configurable
         * value of Write-Back Threshold registers.
         */
        if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
                (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
                (hw->mac.type == e1000_i211)) {
                uint32_t ivar;

                /* Enable all RX & TX queues in the IVAR registers */
                ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
                for (i = 0; i < 8; i++)
                        E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);

                /* Configure EITR with the maximum possible value (0xFFFF) */
                E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
        }

        /* Setup link speed and duplex */
        switch (dev->data->dev_conf.link_speed) {
        case ETH_LINK_SPEED_AUTONEG:
                if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_10:
                if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_100:
                if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
                else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_1000:
                if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
                                (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
                        hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_10000:
        default:
                goto error_invalid_config;
        }
        e1000_setup_link(hw);

        /* check if lsc interrupt feature is enabled */
        if (dev->data->dev_conf.intr_conf.lsc != 0)
                ret = eth_igb_lsc_interrupt_setup(dev);

        /* resume enabled intr since hw reset */
        igb_intr_enable(dev);

        PMD_INIT_LOG(DEBUG, "<<");

        return (0);

error_invalid_config:
        PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
                     dev->data->dev_conf.link_speed,
                     dev->data->dev_conf.link_duplex, dev->data->port_id);
        igb_dev_clear_queues(dev);
        return (-EINVAL);
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct rte_eth_link link;
        struct e1000_flex_filter *p_flex;
        struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
        struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;

        igb_intr_disable(hw);
        igb_pf_reset_hw(hw);
        E1000_WRITE_REG(hw, E1000_WUC, 0);

        /* Set bit for Go Link disconnect */
        if (hw->mac.type >= e1000_82580) {
                uint32_t phpm_reg;

                phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
                phpm_reg |= E1000_82580_PM_GO_LINKD;
                E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
        }

        /* Power down the phy. Needed to make the link go Down */
        if (hw->phy.media_type == e1000_media_type_copper)
                e1000_power_down_phy(hw);
        else
                e1000_shutdown_fiber_serdes_link(hw);

        igb_dev_clear_queues(dev);

        /* clear the recorded link status */
        memset(&link, 0, sizeof(link));
        rte_igb_dev_atomic_write_link_status(dev, &link);

        /* Remove all flex filters of the device */
        while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
                TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
                rte_free(p_flex);
        }
        filter_info->flex_mask = 0;

        /* Remove all ntuple filters of the device */
        for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
             p_5tuple != NULL; p_5tuple = p_5tuple_next) {
                p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
                TAILQ_REMOVE(&filter_info->fivetuple_list,
                             p_5tuple, entries);
                rte_free(p_5tuple);
        }
        filter_info->fivetuple_mask = 0;
        for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
             p_2tuple != NULL; p_2tuple = p_2tuple_next) {
                p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
                TAILQ_REMOVE(&filter_info->twotuple_list,
                             p_2tuple, entries);
                rte_free(p_2tuple);
        }
        filter_info->twotuple_mask = 0;
}

static void
eth_igb_close(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link;

        eth_igb_stop(dev);
        e1000_phy_hw_reset(hw);
        igb_release_manageability(hw);
        igb_hw_control_release(hw);

        /* Clear bit for Go Link disconnect */
        if (hw->mac.type >= e1000_82580) {
                uint32_t phpm_reg;

                phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
                phpm_reg &= ~E1000_82580_PM_GO_LINKD;
                E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
        }

        igb_dev_clear_queues(dev);

        memset(&link, 0, sizeof(link));
        rte_igb_dev_atomic_write_link_status(dev, &link);
}

static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
        uint32_t rx_buf_size;
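        /* These registers report the packet buffer size in KB; shift left by 10 to convert to bytes. */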
        if (hw->mac.type == e1000_82576) {
                rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
        } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
                /* PBS needs to be translated according to a lookup table */
                rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
                rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
                rx_buf_size = (rx_buf_size << 10);
        } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
                rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
        } else {
                rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
        }

        return rx_buf_size;
}

/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
        uint32_t rx_buf_size;
        int diag;

        /* Let the firmware know the OS is in control */
        igb_hw_control_acquire(hw);

        /*
         * These parameters control the automatic generation (Tx) and
         * response (Rx) to Ethernet PAUSE frames.
         * - High water mark should allow for at least two standard size (1518)
         *   frames to be received after sending an XOFF.
         * - Low water mark works best when it is very near the high water mark.
         *   This allows the receiver to restart by sending XON when it has
         *   drained a bit. Here we use an arbitrary value of 1500 which will
         *   restart after one full frame is pulled from the buffer. There
         *   could be several smaller frames in the buffer and if so they will
         *   not trigger the XON until their total number reduces the buffer
         *   by 1500.
         * - The pause time is fairly large: 0x680 (1664) quanta of 512 ns
         *   each, roughly 852 usec at 1 Gb/s.
         */
        rx_buf_size = igb_get_rx_buffer_size(hw);

        hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
        hw->fc.low_water = hw->fc.high_water - 1500;
        hw->fc.pause_time = IGB_FC_PAUSE_TIME;
        hw->fc.send_xon = 1;

        /* Set flow control; honor the tunable only if it is a sane mode (rx_pause, tx_pause or full) */
        if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
                hw->fc.requested_mode = igb_fc_setting;
        else
                hw->fc.requested_mode = e1000_fc_none;

        /* Issue a global reset */
        igb_pf_reset_hw(hw);
        E1000_WRITE_REG(hw, E1000_WUC, 0);

        diag = e1000_init_hw(hw);
        if (diag < 0)
                return (diag);

        E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
        e1000_get_phy_info(hw);
        e1000_check_for_link(hw);

        return (0);
}

/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_hw_stats *stats =
                        E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        int pause_frames;

        if (hw->phy.media_type == e1000_media_type_copper ||
            (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
                stats->symerrs +=
                    E1000_READ_REG(hw, E1000_SYMERRS);
                stats->sec += E1000_READ_REG(hw, E1000_SEC);
        }

        stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
        stats->mpc += E1000_READ_REG(hw, E1000_MPC);
        stats->scc += E1000_READ_REG(hw, E1000_SCC);
        stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

        stats->mcc += E1000_READ_REG(hw, E1000_MCC);
        stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
        stats->colc += E1000_READ_REG(hw, E1000_COLC);
        stats->dc += E1000_READ_REG(hw, E1000_DC);
        stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
        stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
        stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
        /*
        ** For watchdog management we need to know if we have been
        ** paused during the last interval, so capture that here.
        */
        pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
        stats->xoffrxc += pause_frames;
        stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
        stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
        stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
        stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
        stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
        stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
        stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
        stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
        stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
        stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
        stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
        stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

        /* For the 64-bit byte counters the low dword must be read first. */
        /* Both registers clear on the read of the high dword */

        stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
        stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
        stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
        stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

        stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
        stats->ruc += E1000_READ_REG(hw, E1000_RUC);
        stats->rfc += E1000_READ_REG(hw, E1000_RFC);
        stats->roc += E1000_READ_REG(hw, E1000_ROC);
        stats->rjc += E1000_READ_REG(hw, E1000_RJC);

        stats->tor += E1000_READ_REG(hw, E1000_TORH);
        stats->tot += E1000_READ_REG(hw, E1000_TOTH);

        stats->tpr += E1000_READ_REG(hw, E1000_TPR);
        stats->tpt += E1000_READ_REG(hw, E1000_TPT);
        stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
        stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
        stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
        stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
        stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
        stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
        stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
        stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

        /* Interrupt Counts */

        stats->iac += E1000_READ_REG(hw, E1000_IAC);
        stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
        stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
        stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
        stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
        stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
        stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
        stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
        stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);

        /* Host to Card Statistics */

        stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
        stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
        stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
        stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
        stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
        stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
        stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
        stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
        stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
        stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
        stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
        stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
        stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
        stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);

        stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
        stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
        stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
        stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
        stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
        stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

        if (rte_stats == NULL)
                return;

        /* Rx Errors */
        rte_stats->ibadcrc = stats->crcerrs;
        rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
        rte_stats->imissed = stats->mpc;
        rte_stats->ierrors = rte_stats->ibadcrc +
                             rte_stats->ibadlen +
                             rte_stats->imissed +
                             stats->rxerrc + stats->algnerrc + stats->cexterr;

        /* Tx Errors */
        rte_stats->oerrors = stats->ecol + stats->latecol;

        /* XON/XOFF pause frames */
        rte_stats->tx_pause_xon  = stats->xontxc;
        rte_stats->rx_pause_xon  = stats->xonrxc;
        rte_stats->tx_pause_xoff = stats->xofftxc;
        rte_stats->rx_pause_xoff = stats->xoffrxc;

        rte_stats->ipackets = stats->gprc;
        rte_stats->opackets = stats->gptc;
        rte_stats->ibytes   = stats->gorc;
        rte_stats->obytes   = stats->gotc;
}

static void
eth_igb_stats_reset(struct rte_eth_dev *dev)
{
        struct e1000_hw_stats *hw_stats =
                        E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* HW registers are cleared on read */
        eth_igb_stats_get(dev, NULL);

        /* Reset software totals */
        memset(hw_stats, 0, sizeof(*hw_stats));
}

static void
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
                          E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* Good Rx packets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGPRC,
            hw_stats->last_gprc, hw_stats->gprc);

        /* Good Rx octets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGORC,
            hw_stats->last_gorc, hw_stats->gorc);

        /* Good Tx packets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGPTC,
            hw_stats->last_gptc, hw_stats->gptc);

        /* Good Tx octets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGOTC,
            hw_stats->last_gotc, hw_stats->gotc);

        /* Rx Multicast packets */
1313         UPDATE_VF_STAT(E1000_VFMPRC,
1314             hw_stats->last_mprc, hw_stats->mprc);
1315
1316         /* Good Rx loopback packets */
1317         UPDATE_VF_STAT(E1000_VFGPRLBC,
1318             hw_stats->last_gprlbc, hw_stats->gprlbc);
1319
1320         /* Good Rx loopback octets */
1321         UPDATE_VF_STAT(E1000_VFGORLBC,
1322             hw_stats->last_gorlbc, hw_stats->gorlbc);
1323
1324         /* Good Tx loopback packets */
1325         UPDATE_VF_STAT(E1000_VFGPTLBC,
1326             hw_stats->last_gptlbc, hw_stats->gptlbc);
1327
1328         /* Good Tx loopback octets */
1329         UPDATE_VF_STAT(E1000_VFGOTLBC,
1330             hw_stats->last_gotlbc, hw_stats->gotlbc);
1331
1332         if (rte_stats == NULL)
1333                 return;
1334
1335         rte_stats->ipackets = hw_stats->gprc;
1336         rte_stats->ibytes = hw_stats->gorc;
1337         rte_stats->opackets = hw_stats->gptc;
1338         rte_stats->obytes = hw_stats->gotc;
1339         rte_stats->imcasts = hw_stats->mprc;
1340         rte_stats->ilbpackets = hw_stats->gprlbc;
1341         rte_stats->ilbbytes = hw_stats->gorlbc;
1342         rte_stats->olbpackets = hw_stats->gptlbc;
1343         rte_stats->olbbytes = hw_stats->gotlbc;
}
1346
1347 static void
1348 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
1349 {
1350         struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1351                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1352
1353         /* Sync HW register to the last stats */
1354         eth_igbvf_stats_get(dev, NULL);
1355
1356         /* reset HW current stats*/
1357         memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
1358                offsetof(struct e1000_vf_stats, gprc));
}
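
/*
 * Illustrative sketch, not part of the driver: the accumulation pattern
 * that the UPDATE_VF_STAT macro above is assumed to follow. VF counters
 * are not clear-on-read, so each poll adds the delta since the previous
 * snapshot; unsigned 32-bit arithmetic keeps the subtraction correct
 * across counter wraparound.
 */
static inline void __rte_unused
igb_example_update_vf_counter(uint32_t latest, uint32_t *last, uint64_t *total)
{
        *total += (uint32_t)(latest - *last);   /* delta modulo 2^32 */
        *last = latest;
}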
1361
1362 static void
1363 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1364 {
1365         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1366
1367         dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1368         dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
1369         dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1370         dev_info->rx_offload_capa =
1371                 DEV_RX_OFFLOAD_VLAN_STRIP |
1372                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1373                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1374                 DEV_RX_OFFLOAD_TCP_CKSUM;
1375         dev_info->tx_offload_capa =
1376                 DEV_TX_OFFLOAD_VLAN_INSERT |
1377                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1378                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1379                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1380                 DEV_TX_OFFLOAD_SCTP_CKSUM;
1381
1382         switch (hw->mac.type) {
1383         case e1000_82575:
1384                 dev_info->max_rx_queues = 4;
1385                 dev_info->max_tx_queues = 4;
1386                 dev_info->max_vmdq_pools = 0;
1387                 break;
1388
1389         case e1000_82576:
1390                 dev_info->max_rx_queues = 16;
1391                 dev_info->max_tx_queues = 16;
1392                 dev_info->max_vmdq_pools = ETH_8_POOLS;
1393                 dev_info->vmdq_queue_num = 16;
1394                 break;
1395
1396         case e1000_82580:
1397                 dev_info->max_rx_queues = 8;
1398                 dev_info->max_tx_queues = 8;
1399                 dev_info->max_vmdq_pools = ETH_8_POOLS;
1400                 dev_info->vmdq_queue_num = 8;
1401                 break;
1402
1403         case e1000_i350:
1404                 dev_info->max_rx_queues = 8;
1405                 dev_info->max_tx_queues = 8;
1406                 dev_info->max_vmdq_pools = ETH_8_POOLS;
1407                 dev_info->vmdq_queue_num = 8;
1408                 break;
1409
1410         case e1000_i354:
1411                 dev_info->max_rx_queues = 8;
1412                 dev_info->max_tx_queues = 8;
1413                 break;
1414
1415         case e1000_i210:
1416                 dev_info->max_rx_queues = 4;
1417                 dev_info->max_tx_queues = 4;
1418                 dev_info->max_vmdq_pools = 0;
1419                 break;
1420
1421         case e1000_i211:
1422                 dev_info->max_rx_queues = 2;
1423                 dev_info->max_tx_queues = 2;
1424                 dev_info->max_vmdq_pools = 0;
1425                 break;
1426
1427         default:
1428                 /* Should not happen */
1429                 break;
1430         }
1431         dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
1432         dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1433         dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
1434
1435         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1436                 .rx_thresh = {
1437                         .pthresh = IGB_DEFAULT_RX_PTHRESH,
1438                         .hthresh = IGB_DEFAULT_RX_HTHRESH,
1439                         .wthresh = IGB_DEFAULT_RX_WTHRESH,
1440                 },
1441                 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1442                 .rx_drop_en = 0,
1443         };
1444
1445         dev_info->default_txconf = (struct rte_eth_txconf) {
1446                 .tx_thresh = {
1447                         .pthresh = IGB_DEFAULT_TX_PTHRESH,
1448                         .hthresh = IGB_DEFAULT_TX_HTHRESH,
1449                         .wthresh = IGB_DEFAULT_TX_WTHRESH,
1450                 },
1451                 .txq_flags = 0,
1452         };
1453 }
1454
1455 static void
1456 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1457 {
1458         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1459
1460         dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1461         dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
1462         dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1463         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1464                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1465                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1466                                 DEV_RX_OFFLOAD_TCP_CKSUM;
1467         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1468                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1469                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1470                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1471                                 DEV_TX_OFFLOAD_SCTP_CKSUM;
1472         switch (hw->mac.type) {
1473         case e1000_vfadapt:
1474                 dev_info->max_rx_queues = 2;
1475                 dev_info->max_tx_queues = 2;
1476                 break;
1477         case e1000_vfadapt_i350:
1478                 dev_info->max_rx_queues = 1;
1479                 dev_info->max_tx_queues = 1;
1480                 break;
1481         default:
1482                 /* Should not happen */
1483                 break;
1484         }
1485
1486         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1487                 .rx_thresh = {
1488                         .pthresh = IGB_DEFAULT_RX_PTHRESH,
1489                         .hthresh = IGB_DEFAULT_RX_HTHRESH,
1490                         .wthresh = IGB_DEFAULT_RX_WTHRESH,
1491                 },
1492                 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1493                 .rx_drop_en = 0,
1494         };
1495
1496         dev_info->default_txconf = (struct rte_eth_txconf) {
1497                 .tx_thresh = {
1498                         .pthresh = IGB_DEFAULT_TX_PTHRESH,
1499                         .hthresh = IGB_DEFAULT_TX_HTHRESH,
1500                         .wthresh = IGB_DEFAULT_TX_WTHRESH,
1501                 },
1502                 .txq_flags = 0,
1503         };
1504 }
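
/*
 * Illustrative sketch, not part of the driver: validating requested queue
 * counts against the limits reported by the infos_get callbacks above.
 * The helper name is hypothetical; rte_eth_dev_info_get() is assumed to
 * dispatch to eth_igb_infos_get() or eth_igbvf_infos_get().
 */
static void __rte_unused
igb_example_check_queue_limits(uint8_t port_id, uint16_t nb_rxq,
                               uint16_t nb_txq)
{
        struct rte_eth_dev_info dev_info;

        memset(&dev_info, 0, sizeof(dev_info));
        rte_eth_dev_info_get(port_id, &dev_info);

        if (nb_rxq > dev_info.max_rx_queues || nb_txq > dev_info.max_tx_queues)
                PMD_DRV_LOG(ERR, "port %u supports at most %u rxq and %u txq",
                            port_id, (unsigned)dev_info.max_rx_queues,
                            (unsigned)dev_info.max_tx_queues);
}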
1505
/* Return 0 if the link status changed, or -1 if it did not change. */
1507 static int
1508 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1509 {
1510         struct e1000_hw *hw =
1511                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1512         struct rte_eth_link link, old;
1513         int link_check, count;
1514
1515         link_check = 0;
1516         hw->mac.get_link_status = 1;
1517
1518         /* possible wait-to-complete in up to 9 seconds */
        for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
1520                 /* Read the real link status */
1521                 switch (hw->phy.media_type) {
1522                 case e1000_media_type_copper:
1523                         /* Do the work to read phy */
1524                         e1000_check_for_link(hw);
1525                         link_check = !hw->mac.get_link_status;
1526                         break;
1527
1528                 case e1000_media_type_fiber:
1529                         e1000_check_for_link(hw);
1530                         link_check = (E1000_READ_REG(hw, E1000_STATUS) &
1531                                       E1000_STATUS_LU);
1532                         break;
1533
1534                 case e1000_media_type_internal_serdes:
1535                         e1000_check_for_link(hw);
1536                         link_check = hw->mac.serdes_has_link;
1537                         break;
1538
1539                 /* VF device is type_unknown */
1540                 case e1000_media_type_unknown:
1541                         eth_igbvf_link_update(hw);
1542                         link_check = !hw->mac.get_link_status;
1543                         break;
1544
1545                 default:
1546                         break;
1547                 }
1548                 if (link_check || wait_to_complete == 0)
1549                         break;
1550                 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
1551         }
1552         memset(&link, 0, sizeof(link));
1553         rte_igb_dev_atomic_read_link_status(dev, &link);
1554         old = link;
1555
1556         /* Now we check if a transition has happened */
1557         if (link_check) {
1558                 hw->mac.ops.get_link_up_info(hw, &link.link_speed,
1559                                           &link.link_duplex);
1560                 link.link_status = 1;
        } else {
1562                 link.link_speed = 0;
1563                 link.link_duplex = 0;
1564                 link.link_status = 0;
1565         }
1566         rte_igb_dev_atomic_write_link_status(dev, &link);
1567
1568         /* not changed */
1569         if (old.link_status == link.link_status)
1570                 return -1;
1571
1572         /* changed */
1573         return 0;
1574 }
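
/*
 * Illustrative sketch, not part of the driver: polling the link from an
 * application. rte_eth_link_get_nowait() is assumed to invoke the update
 * callback above with wait_to_complete == 0 and return the cached
 * rte_eth_link contents; the 0/-1 return convention stays internal.
 */
static void __rte_unused
igb_example_poll_link(uint8_t port_id)
{
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        rte_eth_link_get_nowait(port_id, &link);
        if (link.link_status)
                PMD_DRV_LOG(INFO, "port %u up at %u Mbps",
                            port_id, (unsigned)link.link_speed);
        else
                PMD_DRV_LOG(INFO, "port %u down", port_id);
}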
1575
1576 /*
1577  * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
1578  * For ASF and Pass Through versions of f/w this means
1579  * that the driver is loaded.
1580  */
1581 static void
1582 igb_hw_control_acquire(struct e1000_hw *hw)
1583 {
1584         uint32_t ctrl_ext;
1585
1586         /* Let firmware know the driver has taken over */
1587         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1588         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1589 }
1590
1591 /*
1592  * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
1593  * For ASF and Pass Through versions of f/w this means that the
1594  * driver is no longer loaded.
1595  */
1596 static void
1597 igb_hw_control_release(struct e1000_hw *hw)
1598 {
1599         uint32_t ctrl_ext;
1600
        /* Let firmware take over control of h/w */
1602         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1603         E1000_WRITE_REG(hw, E1000_CTRL_EXT,
1604                         ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1605 }
1606
1607 /*
1608  * Bit of a misnomer, what this really means is
1609  * to enable OS management of the system... aka
1610  * to disable special hardware management features.
1611  */
1612 static void
1613 igb_init_manageability(struct e1000_hw *hw)
1614 {
1615         if (e1000_enable_mng_pass_thru(hw)) {
1616                 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
1617                 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1618
1619                 /* disable hardware interception of ARP */
1620                 manc &= ~(E1000_MANC_ARP_EN);
1621
1622                 /* enable receiving management packets to the host */
1623                 manc |= E1000_MANC_EN_MNG2HOST;
1624                 manc2h |= 1 << 5;  /* Mng Port 623 */
1625                 manc2h |= 1 << 6;  /* Mng Port 664 */
1626                 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
1627                 E1000_WRITE_REG(hw, E1000_MANC, manc);
1628         }
1629 }
1630
1631 static void
1632 igb_release_manageability(struct e1000_hw *hw)
1633 {
1634         if (e1000_enable_mng_pass_thru(hw)) {
1635                 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1636
1637                 manc |= E1000_MANC_ARP_EN;
1638                 manc &= ~E1000_MANC_EN_MNG2HOST;
1639
1640                 E1000_WRITE_REG(hw, E1000_MANC, manc);
1641         }
1642 }
1643
1644 static void
1645 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
1646 {
1647         struct e1000_hw *hw =
1648                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1649         uint32_t rctl;
1650
1651         rctl = E1000_READ_REG(hw, E1000_RCTL);
1652         rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1653         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1654 }
1655
1656 static void
1657 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
1658 {
1659         struct e1000_hw *hw =
1660                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1661         uint32_t rctl;
1662
1663         rctl = E1000_READ_REG(hw, E1000_RCTL);
1664         rctl &= (~E1000_RCTL_UPE);
1665         if (dev->data->all_multicast == 1)
1666                 rctl |= E1000_RCTL_MPE;
1667         else
1668                 rctl &= (~E1000_RCTL_MPE);
1669         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1670 }
1671
1672 static void
1673 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
1674 {
1675         struct e1000_hw *hw =
1676                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1677         uint32_t rctl;
1678
1679         rctl = E1000_READ_REG(hw, E1000_RCTL);
1680         rctl |= E1000_RCTL_MPE;
1681         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1682 }
1683
1684 static void
1685 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
1686 {
1687         struct e1000_hw *hw =
1688                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1689         uint32_t rctl;
1690
1691         if (dev->data->promiscuous == 1)
1692                 return; /* must remain in all_multicast mode */
1693         rctl = E1000_READ_REG(hw, E1000_RCTL);
1694         rctl &= (~E1000_RCTL_MPE);
1695         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1696 }
1697
1698 static int
1699 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1700 {
1701         struct e1000_hw *hw =
1702                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1703         struct e1000_vfta * shadow_vfta =
1704                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1705         uint32_t vfta;
1706         uint32_t vid_idx;
1707         uint32_t vid_bit;
1708
1709         vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
1710                               E1000_VFTA_ENTRY_MASK);
1711         vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
1712         vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
1713         if (on)
1714                 vfta |= vid_bit;
1715         else
1716                 vfta &= ~vid_bit;
1717         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
1718
1719         /* update local VFTA copy */
1720         shadow_vfta->vfta[vid_idx] = vfta;
1721
1722         return 0;
1723 }
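
/*
 * Illustrative sketch, not part of the driver: the VFTA addressing used
 * above, written out with literal values. The 4096 VLAN IDs map onto 128
 * 32-bit registers; the upper bits of the ID select the register and the
 * low five bits select the bit within it. For example, VLAN 100 (0x064)
 * lives in register 3, bit 4.
 */
static inline uint32_t __rte_unused
igb_example_vfta_register(uint16_t vlan_id)
{
        return (vlan_id >> 5) & 0x7F;   /* register index, 0..127 */
}

static inline uint32_t __rte_unused
igb_example_vfta_bit(uint16_t vlan_id)
{
        return 1u << (vlan_id & 0x1F);  /* bit within that register */
}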
1724
1725 static void
1726 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
1727 {
1728         struct e1000_hw *hw =
1729                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg = ETHER_TYPE_VLAN;
1731
1732         reg |= (tpid << 16);
1733         E1000_WRITE_REG(hw, E1000_VET, reg);
1734 }
1735
1736 static void
1737 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1738 {
1739         struct e1000_hw *hw =
1740                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1741         uint32_t reg;
1742
1743         /* Filter Table Disable */
1744         reg = E1000_READ_REG(hw, E1000_RCTL);
1745         reg &= ~E1000_RCTL_CFIEN;
1746         reg &= ~E1000_RCTL_VFE;
1747         E1000_WRITE_REG(hw, E1000_RCTL, reg);
1748 }
1749
1750 static void
1751 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1752 {
1753         struct e1000_hw *hw =
1754                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1755         struct e1000_vfta * shadow_vfta =
1756                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1757         uint32_t reg;
1758         int i;
1759
1760         /* Filter Table Enable, CFI not used for packet acceptance */
1761         reg = E1000_READ_REG(hw, E1000_RCTL);
1762         reg &= ~E1000_RCTL_CFIEN;
1763         reg |= E1000_RCTL_VFE;
1764         E1000_WRITE_REG(hw, E1000_RCTL, reg);
1765
1766         /* restore VFTA table */
1767         for (i = 0; i < IGB_VFTA_SIZE; i++)
1768                 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
1769 }
1770
1771 static void
1772 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
1773 {
1774         struct e1000_hw *hw =
1775                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1776         uint32_t reg;
1777
1778         /* VLAN Mode Disable */
1779         reg = E1000_READ_REG(hw, E1000_CTRL);
1780         reg &= ~E1000_CTRL_VME;
1781         E1000_WRITE_REG(hw, E1000_CTRL, reg);
1782 }
1783
1784 static void
1785 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
1786 {
1787         struct e1000_hw *hw =
1788                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1789         uint32_t reg;
1790
1791         /* VLAN Mode Enable */
1792         reg = E1000_READ_REG(hw, E1000_CTRL);
1793         reg |= E1000_CTRL_VME;
1794         E1000_WRITE_REG(hw, E1000_CTRL, reg);
1795 }
1796
1797 static void
1798 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1799 {
1800         struct e1000_hw *hw =
1801                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1802         uint32_t reg;
1803
1804         /* CTRL_EXT: Extended VLAN */
1805         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1806         reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
1807         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1808
1809         /* Update maximum packet length */
1810         if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1811                 E1000_WRITE_REG(hw, E1000_RLPML,
1812                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1813                                                 VLAN_TAG_SIZE);
1814 }
1815
1816 static void
1817 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1818 {
1819         struct e1000_hw *hw =
1820                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1821         uint32_t reg;
1822
1823         /* CTRL_EXT: Extended VLAN */
1824         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1825         reg |= E1000_CTRL_EXT_EXTEND_VLAN;
1826         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1827
1828         /* Update maximum packet length */
1829         if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1830                 E1000_WRITE_REG(hw, E1000_RLPML,
1831                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1832                                                 2 * VLAN_TAG_SIZE);
1833 }
1834
1835 static void
1836 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1837 {
        if (mask & ETH_VLAN_STRIP_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_strip)
                        igb_vlan_hw_strip_enable(dev);
                else
                        igb_vlan_hw_strip_disable(dev);
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_filter)
                        igb_vlan_hw_filter_enable(dev);
                else
                        igb_vlan_hw_filter_disable(dev);
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        igb_vlan_hw_extend_enable(dev);
                else
                        igb_vlan_hw_extend_disable(dev);
        }
1858 }
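
/*
 * Illustrative sketch, not part of the driver: toggling VLAN stripping at
 * runtime from an application. rte_eth_dev_set_vlan_offload() is assumed
 * to update dev_conf.rxmode and then call the callback above with the
 * matching mask bits set. The helper name is hypothetical.
 */
static void __rte_unused
igb_example_enable_vlan_strip(uint8_t port_id)
{
        int offloads = rte_eth_dev_get_vlan_offload(port_id);

        if (offloads < 0)
                return;
        offloads |= ETH_VLAN_STRIP_OFFLOAD;
        if (rte_eth_dev_set_vlan_offload(port_id, offloads) != 0)
                PMD_DRV_LOG(ERR, "failed to set VLAN offload on port %u",
                            port_id);
}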
1859
1860
1861 /**
 * Enable the link status change (LSC) cause in the interrupt mask, so the
 * interrupt fires the next time interrupts are armed.
1863  *
1864  * @param dev
1865  *  Pointer to struct rte_eth_dev.
1866  *
1867  * @return
1868  *  - On success, zero.
1869  *  - On failure, a negative value.
1870  */
1871 static int
1872 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
1873 {
1874         struct e1000_interrupt *intr =
1875                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1876
1877         intr->mask |= E1000_ICR_LSC;
1878
1879         return 0;
1880 }
1881
1882 /*
 * Read the ICR register to get the interrupt causes, check them, and set
 * a flag bit when a link status update is needed.
1885  *
1886  * @param dev
1887  *  Pointer to struct rte_eth_dev.
1888  *
1889  * @return
1890  *  - On success, zero.
1891  *  - On failure, a negative value.
1892  */
1893 static int
1894 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
1895 {
1896         uint32_t icr;
1897         struct e1000_hw *hw =
1898                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1899         struct e1000_interrupt *intr =
1900                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1901
1902         igb_intr_disable(hw);
1903
1904         /* read-on-clear nic registers here */
1905         icr = E1000_READ_REG(hw, E1000_ICR);
1906
1907         intr->flags = 0;
1908         if (icr & E1000_ICR_LSC) {
1909                 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
1910         }
1911
1912         if (icr & E1000_ICR_VMMB)
1913                 intr->flags |= E1000_FLAG_MAILBOX;
1914
1915         return 0;
1916 }
1917
1918 /*
 * Execute link_update once an interrupt is known to be present.
1920  *
1921  * @param dev
1922  *  Pointer to struct rte_eth_dev.
1923  *
1924  * @return
1925  *  - On success, zero.
1926  *  - On failure, a negative value.
1927  */
1928 static int
1929 eth_igb_interrupt_action(struct rte_eth_dev *dev)
1930 {
1931         struct e1000_hw *hw =
1932                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1933         struct e1000_interrupt *intr =
1934                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1935         uint32_t tctl, rctl;
1936         struct rte_eth_link link;
1937         int ret;
1938
1939         if (intr->flags & E1000_FLAG_MAILBOX) {
1940                 igb_pf_mbx_process(dev);
1941                 intr->flags &= ~E1000_FLAG_MAILBOX;
1942         }
1943
1944         igb_intr_enable(dev);
1945         rte_intr_enable(&(dev->pci_dev->intr_handle));
1946
1947         if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
1948                 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
1949
1950                 /* set get_link_status to check register later */
1951                 hw->mac.get_link_status = 1;
1952                 ret = eth_igb_link_update(dev, 0);
1953
1954                 /* check if link has changed */
1955                 if (ret < 0)
1956                         return 0;
1957
1958                 memset(&link, 0, sizeof(link));
1959                 rte_igb_dev_atomic_read_link_status(dev, &link);
1960                 if (link.link_status) {
1961                         PMD_INIT_LOG(INFO,
1962                                      " Port %d: Link Up - speed %u Mbps - %s",
1963                                      dev->data->port_id,
1964                                      (unsigned)link.link_speed,
1965                                      link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1966                                      "full-duplex" : "half-duplex");
1967                 } else {
1968                         PMD_INIT_LOG(INFO, " Port %d: Link Down",
1969                                      dev->data->port_id);
1970                 }
1971                 PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
1972                              dev->pci_dev->addr.domain,
1973                              dev->pci_dev->addr.bus,
1974                              dev->pci_dev->addr.devid,
1975                              dev->pci_dev->addr.function);
1976                 tctl = E1000_READ_REG(hw, E1000_TCTL);
1977                 rctl = E1000_READ_REG(hw, E1000_RCTL);
1978                 if (link.link_status) {
1979                         /* enable Tx/Rx */
1980                         tctl |= E1000_TCTL_EN;
1981                         rctl |= E1000_RCTL_EN;
1982                 } else {
1983                         /* disable Tx/Rx */
1984                         tctl &= ~E1000_TCTL_EN;
1985                         rctl &= ~E1000_RCTL_EN;
1986                 }
1987                 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1988                 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1989                 E1000_WRITE_FLUSH(hw);
1990                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
1991         }
1992
1993         return 0;
1994 }
1995
1996 /**
 * Interrupt handler that is registered at initialization time.
1998  *
1999  * @param handle
2000  *  Pointer to interrupt handle.
2001  * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
2003  *
2004  * @return
2005  *  void
2006  */
2007 static void
2008 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
2009                                                         void *param)
2010 {
2011         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2012
2013         eth_igb_interrupt_get_status(dev);
2014         eth_igb_interrupt_action(dev);
2015 }
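
/*
 * Illustrative sketch, not part of the driver: how the handler above is
 * expected to be hooked up; the actual registration is assumed to happen
 * in the PMD init path. Placement here is for exposition only.
 */
static void __rte_unused
igb_example_register_irq(struct rte_eth_dev *dev)
{
        rte_intr_callback_register(&dev->pci_dev->intr_handle,
                                   eth_igb_interrupt_handler, (void *)dev);
        rte_intr_enable(&dev->pci_dev->intr_handle);
}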
2016
2017 static int
2018 eth_igb_led_on(struct rte_eth_dev *dev)
2019 {
2020         struct e1000_hw *hw;
2021
2022         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2023         return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2024 }
2025
2026 static int
2027 eth_igb_led_off(struct rte_eth_dev *dev)
2028 {
2029         struct e1000_hw *hw;
2030
2031         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2032         return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
2033 }
2034
2035 static int
2036 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2037 {
2038         struct e1000_hw *hw;
2039         uint32_t ctrl;
2040         int tx_pause;
2041         int rx_pause;
2042
2043         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2044         fc_conf->pause_time = hw->fc.pause_time;
2045         fc_conf->high_water = hw->fc.high_water;
2046         fc_conf->low_water = hw->fc.low_water;
2047         fc_conf->send_xon = hw->fc.send_xon;
2048         fc_conf->autoneg = hw->mac.autoneg;
2049
2050         /*
2051          * Return rx_pause and tx_pause status according to actual setting of
2052          * the TFCE and RFCE bits in the CTRL register.
2053          */
2054         ctrl = E1000_READ_REG(hw, E1000_CTRL);
2055         if (ctrl & E1000_CTRL_TFCE)
2056                 tx_pause = 1;
2057         else
2058                 tx_pause = 0;
2059
2060         if (ctrl & E1000_CTRL_RFCE)
2061                 rx_pause = 1;
2062         else
2063                 rx_pause = 0;
2064
2065         if (rx_pause && tx_pause)
2066                 fc_conf->mode = RTE_FC_FULL;
2067         else if (rx_pause)
2068                 fc_conf->mode = RTE_FC_RX_PAUSE;
2069         else if (tx_pause)
2070                 fc_conf->mode = RTE_FC_TX_PAUSE;
2071         else
2072                 fc_conf->mode = RTE_FC_NONE;
2073
2074         return 0;
2075 }
2076
2077 static int
2078 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2079 {
2080         struct e1000_hw *hw;
2081         int err;
2082         enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2083                 e1000_fc_none,
2084                 e1000_fc_rx_pause,
2085                 e1000_fc_tx_pause,
2086                 e1000_fc_full
2087         };
2088         uint32_t rx_buf_size;
2089         uint32_t max_high_water;
2090         uint32_t rctl;
2091
2092         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2093         if (fc_conf->autoneg != hw->mac.autoneg)
2094                 return -ENOTSUP;
2095         rx_buf_size = igb_get_rx_buffer_size(hw);
2096         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2097
2098         /* At least reserve one Ethernet frame for watermark */
2099         max_high_water = rx_buf_size - ETHER_MAX_LEN;
2100         if ((fc_conf->high_water > max_high_water) ||
2101             (fc_conf->high_water < fc_conf->low_water)) {
2102                 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
                PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
2104                 return (-EINVAL);
2105         }
2106
2107         hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2108         hw->fc.pause_time     = fc_conf->pause_time;
2109         hw->fc.high_water     = fc_conf->high_water;
2110         hw->fc.low_water      = fc_conf->low_water;
2111         hw->fc.send_xon       = fc_conf->send_xon;
2112
2113         err = e1000_setup_link_generic(hw);
2114         if (err == E1000_SUCCESS) {
2115
2116                 /* check if we want to forward MAC frames - driver doesn't have native
2117                  * capability to do that, so we'll write the registers ourselves */
2118
2119                 rctl = E1000_READ_REG(hw, E1000_RCTL);
2120
                /* set or clear RCTL.PMCF bit depending on configuration */
2122                 if (fc_conf->mac_ctrl_frame_fwd != 0)
2123                         rctl |= E1000_RCTL_PMCF;
2124                 else
2125                         rctl &= ~E1000_RCTL_PMCF;
2126
2127                 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2128                 E1000_WRITE_FLUSH(hw);
2129
2130                 return 0;
2131         }
2132
2133         PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
2134         return (-EIO);
2135 }
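
/*
 * Illustrative sketch, not part of the driver: requesting full flow
 * control from an application. Reading the current settings first keeps
 * fc_conf.autoneg consistent with the check above. The watermark values
 * are placeholders and must satisfy low_water <= high_water <=
 * rx_buf_size - ETHER_MAX_LEN.
 */
static int __rte_unused
igb_example_enable_flow_ctrl(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        memset(&fc_conf, 0, sizeof(fc_conf));
        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret != 0)
                return ret;

        fc_conf.mode = RTE_FC_FULL;
        fc_conf.pause_time = 0x680;     /* placeholder pause quanta */
        fc_conf.high_water = 0x2820;    /* placeholder watermarks */
        fc_conf.low_water  = 0x1e00;
        fc_conf.send_xon = 1;

        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}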
2136
2137 #define E1000_RAH_POOLSEL_SHIFT      (18)
2138 static void
2139 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                uint32_t index, uint32_t pool)
2141 {
2142         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2143         uint32_t rah;
2144
2145         e1000_rar_set(hw, mac_addr->addr_bytes, index);
2146         rah = E1000_READ_REG(hw, E1000_RAH(index));
2147         rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2148         E1000_WRITE_REG(hw, E1000_RAH(index), rah);
2149 }
2150
2151 static void
2152 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2153 {
2154         uint8_t addr[ETHER_ADDR_LEN];
2155         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2156
2157         memset(addr, 0, sizeof(addr));
2158
2159         e1000_rar_set(hw, addr, index);
2160 }
2161
2162 static void
2163 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
2164                                 struct ether_addr *addr)
2165 {
2166         eth_igb_rar_clear(dev, 0);
2167
2168         eth_igb_rar_set(dev, (void *)addr, 0, 0);
2169 }

/*
2171  * Virtual Function operations
2172  */
2173 static void
2174 igbvf_intr_disable(struct e1000_hw *hw)
2175 {
2176         PMD_INIT_FUNC_TRACE();
2177
2178         /* Clear interrupt mask to stop from interrupts being generated */
2179         E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
2180
2181         E1000_WRITE_FLUSH(hw);
2182 }
2183
2184 static void
2185 igbvf_stop_adapter(struct rte_eth_dev *dev)
2186 {
2187         u32 reg_val;
2188         u16 i;
2189         struct rte_eth_dev_info dev_info;
2190         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2191
2192         memset(&dev_info, 0, sizeof(dev_info));
2193         eth_igbvf_infos_get(dev, &dev_info);
2194
2195         /* Clear interrupt mask to stop from interrupts being generated */
2196         igbvf_intr_disable(hw);
2197
2198         /* Clear any pending interrupts, flush previous writes */
2199         E1000_READ_REG(hw, E1000_EICR);
2200
2201         /* Disable the transmit unit.  Each queue must be disabled. */
2202         for (i = 0; i < dev_info.max_tx_queues; i++)
2203                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
2204
2205         /* Disable the receive unit by stopping each queue */
2206         for (i = 0; i < dev_info.max_rx_queues; i++) {
2207                 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
2208                 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
2209                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
2210                 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
2211                         ;
2212         }
2213
        /* Flush the queue-disable writes above */
2215         E1000_WRITE_FLUSH(hw);
2216         msec_delay(2);
2217 }
2218
2219 static int eth_igbvf_link_update(struct e1000_hw *hw)
2220 {
2221         struct e1000_mbx_info *mbx = &hw->mbx;
2222         struct e1000_mac_info *mac = &hw->mac;
2223         int ret_val = E1000_SUCCESS;
2224
2225         PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
2226
        /*
         * We only want to run this if a reset has been asserted; in that
         * case it could mean a link change, a device reset, or a virtual
         * function reset.
         */
2232
2233         /* If we were hit with a reset or timeout drop the link */
2234         if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
2235                 mac->get_link_status = TRUE;
2236
2237         if (!mac->get_link_status)
2238                 goto out;
2239
        /* if link is down, there is no point in checking whether the PF is up */
2241         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
2242                 goto out;
2243
2244         /* if we passed all the tests above then the link is up and we no
2245          * longer need to check for link */
2246         mac->get_link_status = FALSE;
2247
2248 out:
2249         return ret_val;
2250 }
2251
2252
2253 static int
2254 igbvf_dev_configure(struct rte_eth_dev *dev)
2255 {
        struct rte_eth_conf *conf = &dev->data->dev_conf;
2257
2258         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
2259                      dev->data->port_id);
2260
2261         /*
2262          * VF has no ability to enable/disable HW CRC
2263          * Keep the persistent behavior the same as Host PF
2264          */
2265 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
2266         if (!conf->rxmode.hw_strip_crc) {
2267                 PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
2268                 conf->rxmode.hw_strip_crc = 1;
2269         }
2270 #else
2271         if (conf->rxmode.hw_strip_crc) {
2272                 PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
2273                 conf->rxmode.hw_strip_crc = 0;
2274         }
2275 #endif
2276
2277         return 0;
2278 }
2279
2280 static int
2281 igbvf_dev_start(struct rte_eth_dev *dev)
2282 {
2283         struct e1000_hw *hw =
2284                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2285         int ret;
2286
2287         PMD_INIT_FUNC_TRACE();
2288
2289         hw->mac.ops.reset_hw(hw);
2290
2291         /* Set all vfta */
        igbvf_set_vfta_all(dev, 1);
2293
2294         eth_igbvf_tx_init(dev);
2295
2296         /* This can fail when allocating mbufs for descriptor rings */
2297         ret = eth_igbvf_rx_init(dev);
2298         if (ret) {
2299                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2300                 igb_dev_clear_queues(dev);
2301                 return ret;
2302         }
2303
2304         return 0;
2305 }
2306
2307 static void
2308 igbvf_dev_stop(struct rte_eth_dev *dev)
2309 {
2310         PMD_INIT_FUNC_TRACE();
2311
2312         igbvf_stop_adapter(dev);
2313
        /*
         * Clear what we set, but keep shadow_vfta so it can be
         * restored after the device starts.
         */
        igbvf_set_vfta_all(dev, 0);
2319
2320         igb_dev_clear_queues(dev);
2321 }
2322
2323 static void
2324 igbvf_dev_close(struct rte_eth_dev *dev)
2325 {
2326         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2327
2328         PMD_INIT_FUNC_TRACE();
2329
2330         e1000_reset_hw(hw);
2331
2332         igbvf_dev_stop(dev);
2333 }
2334
2335 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
2336 {
2337         struct e1000_mbx_info *mbx = &hw->mbx;
2338         uint32_t msgbuf[2];
2339
        /* After setting a VLAN, VLAN stripping is also enabled by the igb driver */
2341         msgbuf[0] = E1000_VF_SET_VLAN;
2342         msgbuf[1] = vid;
2343         /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
2344         if (on)
2345                 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
2346
2347         return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
2348 }
2349
2350 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
2351 {
2352         struct e1000_hw *hw =
2353                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2354         struct e1000_vfta * shadow_vfta =
2355                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2356         int i = 0, j = 0, vfta = 0, mask = 1;
2357
        for (i = 0; i < IGB_VFTA_SIZE; i++) {
                vfta = shadow_vfta->vfta[i];
                if (vfta) {
                        mask = 1;
                        for (j = 0; j < 32; j++) {
                                if (vfta & mask)
                                        igbvf_set_vfta(hw,
                                                (uint16_t)((i << 5) + j), on);
                                mask <<= 1;
                        }
                }
        }
}
2372
2373 static int
2374 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2375 {
2376         struct e1000_hw *hw =
2377                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2378         struct e1000_vfta * shadow_vfta =
2379                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2380         uint32_t vid_idx = 0;
2381         uint32_t vid_bit = 0;
2382         int ret = 0;
2383
2384         PMD_INIT_FUNC_TRACE();
2385
        /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
        ret = igbvf_set_vfta(hw, vlan_id, !!on);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to set VF vlan");
                return ret;
        }
2392         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
2393         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
2394
        /* Save what we set so it can be restored after a device reset */
2396         if (on)
2397                 shadow_vfta->vfta[vid_idx] |= vid_bit;
2398         else
2399                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
2400
2401         return 0;
2402 }
2403
2404 static void
2405 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
2406 {
2407         struct e1000_hw *hw =
2408                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2409
2410         /* index is not used by rar_set() */
2411         hw->mac.ops.rar_set(hw, (void *)addr, 0);
2412 }
2413
2414
2415 static int
2416 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
2417                         struct rte_eth_rss_reta_entry64 *reta_conf,
2418                         uint16_t reta_size)
2419 {
2420         uint8_t i, j, mask;
2421         uint32_t reta, r;
2422         uint16_t idx, shift;
2423         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2424
2425         if (reta_size != ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
                        "(%d) doesn't match what the hardware supports "
                        "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2429                 return -EINVAL;
2430         }
2431
2432         for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2433                 idx = i / RTE_RETA_GROUP_SIZE;
2434                 shift = i % RTE_RETA_GROUP_SIZE;
2435                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2436                                                 IGB_4_BIT_MASK);
2437                 if (!mask)
2438                         continue;
2439                 if (mask == IGB_4_BIT_MASK)
2440                         r = 0;
2441                 else
2442                         r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2443                 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
2444                         if (mask & (0x1 << j))
2445                                 reta |= reta_conf[idx].reta[shift + j] <<
2446                                                         (CHAR_BIT * j);
2447                         else
2448                                 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
2449                 }
2450                 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
2451         }
2452
2453         return 0;
2454 }
2455
2456 static int
2457 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
2458                        struct rte_eth_rss_reta_entry64 *reta_conf,
2459                        uint16_t reta_size)
2460 {
2461         uint8_t i, j, mask;
2462         uint32_t reta;
2463         uint16_t idx, shift;
2464         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2465
                PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
                        "(%d) doesn't match what the hardware supports "
                        "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2469                         "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2470                 return -EINVAL;
2471         }
2472
2473         for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2474                 idx = i / RTE_RETA_GROUP_SIZE;
2475                 shift = i % RTE_RETA_GROUP_SIZE;
2476                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2477                                                 IGB_4_BIT_MASK);
2478                 if (!mask)
2479                         continue;
2480                 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2481                 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
2482                         if (mask & (0x1 << j))
2483                                 reta_conf[idx].reta[shift + j] =
2484                                         ((reta >> (CHAR_BIT * j)) &
2485                                                 IGB_8_BIT_MASK);
2486                 }
2487         }
2488
2489         return 0;
2490 }
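
/*
 * Illustrative sketch, not part of the driver: the indexing used by the
 * two RETA routines above. The 128-entry redirection table is packed four
 * 8-bit entries per 32-bit RETA register, while the API groups entries in
 * rte_eth_rss_reta_entry64 blocks of RTE_RETA_GROUP_SIZE (64). Entry 70,
 * for example, is group 1, slot 6, register RETA(17), byte 2.
 */
static inline void __rte_unused
igb_example_reta_position(uint16_t entry, uint16_t *group, uint16_t *slot,
                          uint16_t *reg, uint16_t *byte)
{
        *group = entry / RTE_RETA_GROUP_SIZE;   /* which reta_conf[] element */
        *slot  = entry % RTE_RETA_GROUP_SIZE;   /* which reta[]/mask bit */
        *reg   = entry >> 2;                    /* which RETA register */
        *byte  = entry & 0x3;                   /* which byte of the register */
}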
2491
2492 #define MAC_TYPE_FILTER_SUP(type)    do {\
2493         if ((type) != e1000_82580 && (type) != e1000_i350 &&\
2494                 (type) != e1000_82576)\
2495                 return -ENOTSUP;\
2496 } while (0)
2497
2498 static int
2499 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
2500                         struct rte_eth_syn_filter *filter,
2501                         bool add)
2502 {
2503         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2504         uint32_t synqf, rfctl;
2505
2506         if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2507                 return -EINVAL;
2508
2509         synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2510
2511         if (add) {
2512                 if (synqf & E1000_SYN_FILTER_ENABLE)
2513                         return -EINVAL;
2514
2515                 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
2516                         E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
2517
2518                 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2519                 if (filter->hig_pri)
2520                         rfctl |= E1000_RFCTL_SYNQFP;
2521                 else
2522                         rfctl &= ~E1000_RFCTL_SYNQFP;
2523
2524                 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2525         } else {
2526                 if (!(synqf & E1000_SYN_FILTER_ENABLE))
2527                         return -ENOENT;
2528                 synqf = 0;
2529         }
2530
2531         E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
2532         E1000_WRITE_FLUSH(hw);
2533         return 0;
2534 }
2535
2536 static int
2537 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
2538                         struct rte_eth_syn_filter *filter)
2539 {
2540         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2541         uint32_t synqf, rfctl;
2542
2543         synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2544         if (synqf & E1000_SYN_FILTER_ENABLE) {
2545                 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2546                 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
2547                 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
2548                                 E1000_SYN_FILTER_QUEUE_SHIFT);
2549                 return 0;
2550         }
2551
2552         return -ENOENT;
2553 }
2554
2555 static int
2556 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
2557                         enum rte_filter_op filter_op,
2558                         void *arg)
2559 {
2560         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2561         int ret;
2562
2563         MAC_TYPE_FILTER_SUP(hw->mac.type);
2564
2565         if (filter_op == RTE_ETH_FILTER_NOP)
2566                 return 0;
2567
2568         if (arg == NULL) {
2569                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
2570                             filter_op);
2571                 return -EINVAL;
2572         }
2573
2574         switch (filter_op) {
2575         case RTE_ETH_FILTER_ADD:
2576                 ret = eth_igb_syn_filter_set(dev,
2577                                 (struct rte_eth_syn_filter *)arg,
2578                                 TRUE);
2579                 break;
2580         case RTE_ETH_FILTER_DELETE:
2581                 ret = eth_igb_syn_filter_set(dev,
2582                                 (struct rte_eth_syn_filter *)arg,
2583                                 FALSE);
2584                 break;
2585         case RTE_ETH_FILTER_GET:
2586                 ret = eth_igb_syn_filter_get(dev,
2587                                 (struct rte_eth_syn_filter *)arg);
2588                 break;
2589         default:
2590                 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
2591                 ret = -EINVAL;
2592                 break;
2593         }
2594
2595         return ret;
2596 }
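
/*
 * Illustrative sketch, not part of the driver: adding a SYN filter from an
 * application through the filter_ctrl entry point above. The queue number
 * is a placeholder; the MAC-type check restricts this to 82576, 82580 and
 * i350 devices.
 */
static int __rte_unused
igb_example_add_syn_filter(uint8_t port_id)
{
        struct rte_eth_syn_filter syn;

        memset(&syn, 0, sizeof(syn));
        syn.hig_pri = 1;        /* SYN match wins over other filters */
        syn.queue = 1;          /* placeholder Rx queue for TCP SYN packets */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
                                       RTE_ETH_FILTER_ADD, &syn);
}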
2597
2598 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
2599         if ((type) != e1000_82580 && (type) != e1000_i350)\
2600                 return -ENOSYS; \
2601 } while (0)
2602
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
2604 static inline int
2605 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
2606                         struct e1000_2tuple_filter_info *filter_info)
2607 {
2608         if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2609                 return -EINVAL;
        if (filter->priority > E1000_2TUPLE_MAX_PRI)
                return -EINVAL;  /* priority is out of range. */
        if (filter->tcp_flags > TCP_FLAG_ALL)
                return -EINVAL;  /* flags are invalid. */
2614
2615         switch (filter->dst_port_mask) {
2616         case UINT16_MAX:
2617                 filter_info->dst_port_mask = 0;
2618                 filter_info->dst_port = filter->dst_port;
2619                 break;
2620         case 0:
2621                 filter_info->dst_port_mask = 1;
2622                 break;
2623         default:
2624                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2625                 return -EINVAL;
2626         }
2627
2628         switch (filter->proto_mask) {
2629         case UINT8_MAX:
2630                 filter_info->proto_mask = 0;
2631                 filter_info->proto = filter->proto;
2632                 break;
2633         case 0:
2634                 filter_info->proto_mask = 1;
2635                 break;
2636         default:
2637                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
2638                 return -EINVAL;
2639         }
2640
2641         filter_info->priority = (uint8_t)filter->priority;
2642         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
2643                 filter_info->tcp_flags = filter->tcp_flags;
2644         else
2645                 filter_info->tcp_flags = 0;
2646
2647         return 0;
2648 }
2649
2650 static inline struct e1000_2tuple_filter *
2651 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
2652                         struct e1000_2tuple_filter_info *key)
2653 {
2654         struct e1000_2tuple_filter *it;
2655
2656         TAILQ_FOREACH(it, filter_list, entries) {
2657                 if (memcmp(key, &it->filter_info,
2658                         sizeof(struct e1000_2tuple_filter_info)) == 0) {
2659                         return it;
2660                 }
2661         }
2662         return NULL;
2663 }
2664
2665 /*
2666  * igb_add_2tuple_filter - add a 2tuple filter
2667  *
2668  * @param
2669  * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
2671  *
2672  * @return
2673  *    - On success, zero.
2674  *    - On failure, a negative value.
2675  */
2676 static int
2677 igb_add_2tuple_filter(struct rte_eth_dev *dev,
2678                         struct rte_eth_ntuple_filter *ntuple_filter)
2679 {
2680         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2681         struct e1000_filter_info *filter_info =
2682                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2683         struct e1000_2tuple_filter *filter;
2684         uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
2685         uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
2686         int i, ret;
2687
2688         filter = rte_zmalloc("e1000_2tuple_filter",
2689                         sizeof(struct e1000_2tuple_filter), 0);
2690         if (filter == NULL)
2691                 return -ENOMEM;
2692
2693         ret = ntuple_filter_to_2tuple(ntuple_filter,
2694                                       &filter->filter_info);
2695         if (ret < 0) {
2696                 rte_free(filter);
2697                 return ret;
2698         }
2699         if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2700                                          &filter->filter_info) != NULL) {
2701                 PMD_DRV_LOG(ERR, "filter exists.");
2702                 rte_free(filter);
2703                 return -EEXIST;
2704         }
2705         filter->queue = ntuple_filter->queue;
2706
2707         /*
2708          * look for an unused 2tuple filter index,
2709          * and insert the filter to list.
2710          */
2711         for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
2712                 if (!(filter_info->twotuple_mask & (1 << i))) {
2713                         filter_info->twotuple_mask |= 1 << i;
2714                         filter->index = i;
2715                         TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
2716                                           filter,
2717                                           entries);
2718                         break;
2719                 }
2720         }
2721         if (i >= E1000_MAX_TTQF_FILTERS) {
2722                 PMD_DRV_LOG(ERR, "2tuple filters are full.");
2723                 rte_free(filter);
2724                 return -ENOSYS;
2725         }
2726
2727         imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
        if (filter->filter_info.dst_port_mask == 1) /* 1b means do not compare. */
2729                 imir |= E1000_IMIR_PORT_BP;
2730         else
2731                 imir &= ~E1000_IMIR_PORT_BP;
2732
2733         imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
2734
2735         ttqf |= E1000_TTQF_QUEUE_ENABLE;
2736         ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
2737         ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
2738         if (filter->filter_info.proto_mask == 0)
2739                 ttqf &= ~E1000_TTQF_MASK_ENABLE;
2740
2741         /* tcp flags bits setting. */
2742         if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
2743                 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
2744                         imir_ext |= E1000_IMIREXT_CTRL_URG;
2745                 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
2746                         imir_ext |= E1000_IMIREXT_CTRL_ACK;
2747                 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
2748                         imir_ext |= E1000_IMIREXT_CTRL_PSH;
2749                 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
2750                         imir_ext |= E1000_IMIREXT_CTRL_RST;
2751                 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
2752                         imir_ext |= E1000_IMIREXT_CTRL_SYN;
2753                 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
2754                         imir_ext |= E1000_IMIREXT_CTRL_FIN;
2755         } else
2756                 imir_ext |= E1000_IMIREXT_CTRL_BP;
2757         E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
2758         E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
2759         E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
2760         return 0;
2761 }
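
/*
 * Illustrative sketch, not part of the driver: populating a 2-tuple ntuple
 * filter and submitting it through the generic filter API, assuming
 * rte_eth_dev_filter_ctrl() routes RTE_ETH_FILTER_NTUPLE to the igb ntuple
 * handler. The values are placeholders (steer UDP destination port 319 to
 * Rx queue 2); the big-endian port is an assumption about what the
 * hardware register expects.
 */
static int __rte_unused
igb_example_add_2tuple_filter(uint8_t port_id)
{
        struct rte_eth_ntuple_filter ntuple;

        memset(&ntuple, 0, sizeof(ntuple));
        ntuple.flags = RTE_2TUPLE_FLAGS;
        ntuple.dst_port = rte_cpu_to_be_16(319);
        ntuple.dst_port_mask = UINT16_MAX;      /* compare the whole port */
        ntuple.proto = 17;                      /* UDP */
        ntuple.proto_mask = UINT8_MAX;          /* compare the protocol */
        ntuple.priority = 1;
        ntuple.queue = 2;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
                                       RTE_ETH_FILTER_ADD, &ntuple);
}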
2762
2763 /*
2764  * igb_remove_2tuple_filter - remove a 2tuple filter
2765  *
2766  * @param
2767  * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
2769  *
2770  * @return
2771  *    - On success, zero.
2772  *    - On failure, a negative value.
2773  */
2774 static int
2775 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
2776                         struct rte_eth_ntuple_filter *ntuple_filter)
2777 {
2778         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2779         struct e1000_filter_info *filter_info =
2780                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2781         struct e1000_2tuple_filter_info filter_2tuple;
2782         struct e1000_2tuple_filter *filter;
2783         int ret;
2784
2785         memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
2786         ret = ntuple_filter_to_2tuple(ntuple_filter,
2787                                       &filter_2tuple);
2788         if (ret < 0)
2789                 return ret;
2790
2791         filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2792                                          &filter_2tuple);
2793         if (filter == NULL) {
2794                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
2795                 return -ENOENT;
2796         }
2797
        filter_info->twotuple_mask &= ~(1 << filter->index);
        TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);

        /* Disable the filter in hardware while filter->index is still
         * valid, then free the tracking structure. */
        E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
        E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
        E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
        rte_free(filter);

        return 0;
2806 }
2807
2808 static inline struct e1000_flex_filter *
2809 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
2810                         struct e1000_flex_filter_info *key)
2811 {
2812         struct e1000_flex_filter *it;
2813
2814         TAILQ_FOREACH(it, filter_list, entries) {
2815                 if (memcmp(key, &it->filter_info,
2816                         sizeof(struct e1000_flex_filter_info)) == 0)
2817                         return it;
2818         }
2819
2820         return NULL;
2821 }
2822
2823 static int
2824 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
2825                         struct rte_eth_flex_filter *filter,
2826                         bool add)
2827 {
2828         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2829         struct e1000_filter_info *filter_info =
2830                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2831         struct e1000_flex_filter *flex_filter, *it;
2832         uint32_t wufc, queueing, mask;
2833         uint32_t reg_off;
2834         uint8_t shift, i, j = 0;
2835
2836         flex_filter = rte_zmalloc("e1000_flex_filter",
2837                         sizeof(struct e1000_flex_filter), 0);
2838         if (flex_filter == NULL)
2839                 return -ENOMEM;
2840
2841         flex_filter->filter_info.len = filter->len;
2842         flex_filter->filter_info.priority = filter->priority;
2843         memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
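	/*
	 * The loop below stores each mask byte with its bit order reversed
	 * for the hardware: e.g. an input byte 0x05 (bits 0 and 2 set, i.e.
	 * pattern bytes 0 and 2 significant) is stored as 0xa0 (bits 7 and
	 * 5 set).
	 */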
2844         for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
2845                 mask = 0;
		/* reverse bits in flex filter's mask */
2847                 for (shift = 0; shift < CHAR_BIT; shift++) {
2848                         if (filter->mask[i] & (0x01 << shift))
2849                                 mask |= (0x80 >> shift);
2850                 }
2851                 flex_filter->filter_info.mask[i] = mask;
2852         }
2853
	wufc = E1000_READ_REG(hw, E1000_WUFC);
	/* reg_off is computed below, once the filter index is known */
2859
2860         if (add) {
2861                 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
2862                                 &flex_filter->filter_info) != NULL) {
2863                         PMD_DRV_LOG(ERR, "filter exists.");
2864                         rte_free(flex_filter);
2865                         return -EEXIST;
2866                 }
2867                 flex_filter->queue = filter->queue;
2868                 /*
2869                  * look for an unused flex filter index
2870                  * and insert the filter into the list.
2871                  */
2872                 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
2873                         if (!(filter_info->flex_mask & (1 << i))) {
2874                                 filter_info->flex_mask |= 1 << i;
2875                                 flex_filter->index = i;
2876                                 TAILQ_INSERT_TAIL(&filter_info->flex_list,
2877                                         flex_filter,
2878                                         entries);
2879                                 break;
2880                         }
2881                 }
2882                 if (i >= E1000_MAX_FLEX_FILTERS) {
2883                         PMD_DRV_LOG(ERR, "flex filters are full.");
2884                         rte_free(flex_filter);
2885                         return -ENOSYS;
2886                 }

		if (flex_filter->index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(flex_filter->index);
		else
			reg_off = E1000_FHFT_EXT(flex_filter->index -
						 E1000_MAX_FHFT);

		E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
				(E1000_WUFC_FLX0 << flex_filter->index));
2890                 queueing = filter->len |
2891                         (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
2892                         (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
2893                 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
2894                                 queueing);
2895                 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
2896                         E1000_WRITE_REG(hw, reg_off,
2897                                         flex_filter->filter_info.dwords[j]);
2898                         reg_off += sizeof(uint32_t);
2899                         E1000_WRITE_REG(hw, reg_off,
2900                                         flex_filter->filter_info.dwords[++j]);
2901                         reg_off += sizeof(uint32_t);
2902                         E1000_WRITE_REG(hw, reg_off,
2903                                 (uint32_t)flex_filter->filter_info.mask[i]);
2904                         reg_off += sizeof(uint32_t) * 2;
2905                         ++j;
2906                 }
2907         } else {
2908                 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
2909                                 &flex_filter->filter_info);
2910                 if (it == NULL) {
2911                         PMD_DRV_LOG(ERR, "filter doesn't exist.");
2912                         rte_free(flex_filter);
2913                         return -ENOENT;
2914                 }

		if (it->index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(it->index);
		else
			reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);

		for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
			E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
2918                 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
2919                         (~(E1000_WUFC_FLX0 << it->index)));
2920
2921                 filter_info->flex_mask &= ~(1 << it->index);
2922                 TAILQ_REMOVE(&filter_info->flex_list, it, entries);
2923                 rte_free(it);
2924                 rte_free(flex_filter);
2925         }
2926
2927         return 0;
2928 }
2929
2930 static int
2931 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
2932                         struct rte_eth_flex_filter *filter)
2933 {
2934         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2935         struct e1000_filter_info *filter_info =
2936                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2937         struct e1000_flex_filter flex_filter, *it;
2938         uint32_t wufc, queueing, wufc_en = 0;
2939
2940         memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
2941         flex_filter.filter_info.len = filter->len;
2942         flex_filter.filter_info.priority = filter->priority;
2943         memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
	memcpy(flex_filter.filter_info.mask, filter->mask,
			RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);
2946
2947         it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
2948                                 &flex_filter.filter_info);
2949         if (it == NULL) {
2950                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
2951                 return -ENOENT;
2952         }
2953
2954         wufc = E1000_READ_REG(hw, E1000_WUFC);
2955         wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
2956
2957         if ((wufc & wufc_en) == wufc_en) {
2958                 uint32_t reg_off = 0;
2959                 if (it->index < E1000_MAX_FHFT)
2960                         reg_off = E1000_FHFT(it->index);
2961                 else
2962                         reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
2963
2964                 queueing = E1000_READ_REG(hw,
2965                                 reg_off + E1000_FHFT_QUEUEING_OFFSET);
2966                 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
2967                 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
2968                         E1000_FHFT_QUEUEING_PRIO_SHIFT;
2969                 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
2970                         E1000_FHFT_QUEUEING_QUEUE_SHIFT;
2971                 return 0;
2972         }
2973         return -ENOENT;
2974 }
2975
2976 static int
2977 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
2978                         enum rte_filter_op filter_op,
2979                         void *arg)
2980 {
2981         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2982         struct rte_eth_flex_filter *filter;
2983         int ret = 0;
2984
2985         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
2986
2987         if (filter_op == RTE_ETH_FILTER_NOP)
2988                 return ret;
2989
2990         if (arg == NULL) {
2991                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
2992                             filter_op);
2993                 return -EINVAL;
2994         }
2995
2996         filter = (struct rte_eth_flex_filter *)arg;
2997         if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
2998             || filter->len % sizeof(uint64_t) != 0) {
2999                 PMD_DRV_LOG(ERR, "filter's length is out of range");
3000                 return -EINVAL;
3001         }
3002         if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
3003                 PMD_DRV_LOG(ERR, "filter's priority is out of range");
3004                 return -EINVAL;
3005         }
3006
3007         switch (filter_op) {
3008         case RTE_ETH_FILTER_ADD:
3009                 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
3010                 break;
3011         case RTE_ETH_FILTER_DELETE:
3012                 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
3013                 break;
3014         case RTE_ETH_FILTER_GET:
3015                 ret = eth_igb_get_flex_filter(dev, filter);
3016                 break;
3017         default:
3018                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3019                 ret = -EINVAL;
3020                 break;
3021         }
3022
3023         return ret;
3024 }
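
/*
 * Usage sketch (illustrative): steer frames whose first 8 bytes match a
 * fixed pattern to queue 1. filter->len must be a non-zero multiple of 8
 * and at most E1000_MAX_FLEX_FILTER_LEN, as checked above; the mask has
 * one bit per pattern byte, so 0xff marks all eight bytes significant.
 * port_id and the pattern are assumptions of the example.
 *
 *     struct rte_eth_flex_filter f;
 *     memset(&f, 0, sizeof(f));
 *     f.len = 8;
 *     memcpy(f.bytes, "\x01\x80\xc2\x00\x00\x0e\x88\xf7", 8);
 *     f.mask[0] = 0xff;
 *     f.priority = 1;
 *     f.queue = 1;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FLEXIBLE,
 *                             RTE_ETH_FILTER_ADD, &f);
 */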
3025
/* translate elements in struct rte_eth_ntuple_filter
 * to struct e1000_5tuple_filter_info
 */
3027 static inline int
3028 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
3029                         struct e1000_5tuple_filter_info *filter_info)
3030 {
3031         if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
3032                 return -EINVAL;
	if (filter->priority > E1000_2TUPLE_MAX_PRI)
		return -EINVAL;  /* priority is out of range. */
	if (filter->tcp_flags > TCP_FLAG_ALL)
		return -EINVAL;  /* flags are invalid. */
3037
3038         switch (filter->dst_ip_mask) {
3039         case UINT32_MAX:
3040                 filter_info->dst_ip_mask = 0;
3041                 filter_info->dst_ip = filter->dst_ip;
3042                 break;
3043         case 0:
3044                 filter_info->dst_ip_mask = 1;
3045                 break;
3046         default:
3047                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3048                 return -EINVAL;
3049         }
3050
3051         switch (filter->src_ip_mask) {
3052         case UINT32_MAX:
3053                 filter_info->src_ip_mask = 0;
3054                 filter_info->src_ip = filter->src_ip;
3055                 break;
3056         case 0:
3057                 filter_info->src_ip_mask = 1;
3058                 break;
3059         default:
3060                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3061                 return -EINVAL;
3062         }
3063
3064         switch (filter->dst_port_mask) {
3065         case UINT16_MAX:
3066                 filter_info->dst_port_mask = 0;
3067                 filter_info->dst_port = filter->dst_port;
3068                 break;
3069         case 0:
3070                 filter_info->dst_port_mask = 1;
3071                 break;
3072         default:
3073                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3074                 return -EINVAL;
3075         }
3076
3077         switch (filter->src_port_mask) {
3078         case UINT16_MAX:
3079                 filter_info->src_port_mask = 0;
3080                 filter_info->src_port = filter->src_port;
3081                 break;
3082         case 0:
3083                 filter_info->src_port_mask = 1;
3084                 break;
3085         default:
3086                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3087                 return -EINVAL;
3088         }
3089
3090         switch (filter->proto_mask) {
3091         case UINT8_MAX:
3092                 filter_info->proto_mask = 0;
3093                 filter_info->proto = filter->proto;
3094                 break;
3095         case 0:
3096                 filter_info->proto_mask = 1;
3097                 break;
3098         default:
3099                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3100                 return -EINVAL;
3101         }
3102
3103         filter_info->priority = (uint8_t)filter->priority;
3104         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3105                 filter_info->tcp_flags = filter->tcp_flags;
3106         else
3107                 filter_info->tcp_flags = 0;
3108
3109         return 0;
3110 }
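
/*
 * Note the inverted sense of the masks above: in rte_eth_ntuple_filter a
 * field mask of UINT32_MAX/UINT16_MAX/UINT8_MAX means "compare this
 * field" and 0 means "ignore it", while in e1000_5tuple_filter_info a
 * mask bit of 1 means "don't compare" (it maps directly to the FTQF
 * mask-out bits programmed in igb_add_5tuple_filter_82576()).
 */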
3111
3112 static inline struct e1000_5tuple_filter *
3113 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
3114                         struct e1000_5tuple_filter_info *key)
3115 {
3116         struct e1000_5tuple_filter *it;
3117
3118         TAILQ_FOREACH(it, filter_list, entries) {
3119                 if (memcmp(key, &it->filter_info,
3120                         sizeof(struct e1000_5tuple_filter_info)) == 0) {
3121                         return it;
3122                 }
3123         }
3124         return NULL;
3125 }
3126
3127 /*
3128  * igb_add_5tuple_filter_82576 - add a 5tuple filter
3129  *
3130  * @param
3131  * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
3133  *
3134  * @return
3135  *    - On success, zero.
3136  *    - On failure, a negative value.
3137  */
3138 static int
3139 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
3140                         struct rte_eth_ntuple_filter *ntuple_filter)
3141 {
3142         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3143         struct e1000_filter_info *filter_info =
3144                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3145         struct e1000_5tuple_filter *filter;
3146         uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
3147         uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3148         uint8_t i;
3149         int ret;
3150
3151         filter = rte_zmalloc("e1000_5tuple_filter",
3152                         sizeof(struct e1000_5tuple_filter), 0);
3153         if (filter == NULL)
3154                 return -ENOMEM;
3155
3156         ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3157                                             &filter->filter_info);
3158         if (ret < 0) {
3159                 rte_free(filter);
3160                 return ret;
3161         }
3162
3163         if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3164                                          &filter->filter_info) != NULL) {
3165                 PMD_DRV_LOG(ERR, "filter exists.");
3166                 rte_free(filter);
3167                 return -EEXIST;
3168         }
3169         filter->queue = ntuple_filter->queue;
3170
	/*
	 * look for an unused 5tuple filter index
	 * and insert the filter into the list.
	 */
3175         for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
3176                 if (!(filter_info->fivetuple_mask & (1 << i))) {
3177                         filter_info->fivetuple_mask |= 1 << i;
3178                         filter->index = i;
3179                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3180                                           filter,
3181                                           entries);
3182                         break;
3183                 }
3184         }
3185         if (i >= E1000_MAX_FTQF_FILTERS) {
3186                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
3187                 rte_free(filter);
3188                 return -ENOSYS;
3189         }
3190
3191         ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
3192         if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
3193                 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
3194         if (filter->filter_info.dst_ip_mask == 0)
3195                 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
3196         if (filter->filter_info.src_port_mask == 0)
3197                 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
3198         if (filter->filter_info.proto_mask == 0)
3199                 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
3200         ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
3201                 E1000_FTQF_QUEUE_MASK;
3202         ftqf |= E1000_FTQF_QUEUE_ENABLE;
3203         E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
3204         E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
3205         E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
3206
3207         spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
3208         E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
3209
3210         imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3211         if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
3212                 imir |= E1000_IMIR_PORT_BP;
3213         else
3214                 imir &= ~E1000_IMIR_PORT_BP;
3215         imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3216
3217         /* tcp flags bits setting. */
3218         if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3219                 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3220                         imir_ext |= E1000_IMIREXT_CTRL_URG;
3221                 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3222                         imir_ext |= E1000_IMIREXT_CTRL_ACK;
3223                 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3224                         imir_ext |= E1000_IMIREXT_CTRL_PSH;
3225                 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3226                         imir_ext |= E1000_IMIREXT_CTRL_RST;
3227                 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3228                         imir_ext |= E1000_IMIREXT_CTRL_SYN;
3229                 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3230                         imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else {
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	}
3233         E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3234         E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3235         return 0;
3236 }
3237
3238 /*
3239  * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
3240  *
3241  * @param
3242  * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
3244  *
3245  * @return
3246  *    - On success, zero.
3247  *    - On failure, a negative value.
3248  */
3249 static int
3250 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
3251                                 struct rte_eth_ntuple_filter *ntuple_filter)
3252 {
3253         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3254         struct e1000_filter_info *filter_info =
3255                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3256         struct e1000_5tuple_filter_info filter_5tuple;
3257         struct e1000_5tuple_filter *filter;
3258         int ret;
3259
3260         memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
3261         ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3262                                             &filter_5tuple);
3263         if (ret < 0)
3264                 return ret;
3265
3266         filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
3267                                          &filter_5tuple);
3268         if (filter == NULL) {
3269                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3270                 return -ENOENT;
3271         }
3272
	filter_info->fivetuple_mask &= ~(1 << filter->index);
	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);

	E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
			E1000_FTQF_VF_BP | E1000_FTQF_MASK);
	E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
	/* free the filter only after its index is no longer needed */
	rte_free(filter);
3284         return 0;
3285 }
3286
3287 static int
3288 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3289 {
3290         uint32_t rctl;
3291         struct e1000_hw *hw;
3292         struct rte_eth_dev_info dev_info;
3293         uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
3294                                      VLAN_TAG_SIZE);
3295
3296         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3297
3298 #ifdef RTE_LIBRTE_82571_SUPPORT
3299         /* XXX: not bigger than max_rx_pktlen */
3300         if (hw->mac.type == e1000_82571)
3301                 return -ENOTSUP;
3302 #endif
3303         eth_igb_infos_get(dev, &dev_info);
3304
3305         /* check that mtu is within the allowed range */
3306         if ((mtu < ETHER_MIN_MTU) ||
3307             (frame_size > dev_info.max_rx_pktlen))
3308                 return -EINVAL;
3309
3310         /* refuse mtu that requires the support of scattered packets when this
3311          * feature has not been enabled before. */
3312         if (!dev->data->scattered_rx &&
3313             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
3314                 return -EINVAL;
3315
3316         rctl = E1000_READ_REG(hw, E1000_RCTL);
3317
3318         /* switch to jumbo mode if needed */
3319         if (frame_size > ETHER_MAX_LEN) {
3320                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3321                 rctl |= E1000_RCTL_LPE;
3322         } else {
3323                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3324                 rctl &= ~E1000_RCTL_LPE;
3325         }
3326         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3327
3328         /* update max frame size */
3329         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3330
3331         E1000_WRITE_REG(hw, E1000_RLPML,
3332                         dev->data->dev_conf.rxmode.max_rx_pkt_len);
3333
3334         return 0;
3335 }
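
/*
 * Worked example for the frame-size math above:
 * rte_eth_dev_set_mtu(port_id, 1500) yields 1500 + 14 (Ethernet header)
 * + 4 (CRC) + 4 (VLAN) = 1522 bytes, so with the VLAN allowance even the
 * default MTU exceeds ETHER_MAX_LEN (1518) and enables long-packet
 * reception (E1000_RCTL_LPE). An MTU of 9000 yields 9022 bytes and must
 * also fit within max_rx_pktlen and, unless scattered RX is enabled, the
 * RX buffer size.
 */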
3336
3337 /*
3338  * igb_add_del_ntuple_filter - add or delete a ntuple filter
3339  *
3340  * @param
3341  * dev: Pointer to struct rte_eth_dev.
3342  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3343  * add: if true, add filter, if false, remove filter
3344  *
3345  * @return
3346  *    - On success, zero.
3347  *    - On failure, a negative value.
3348  */
3349 static int
3350 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
3351                         struct rte_eth_ntuple_filter *ntuple_filter,
3352                         bool add)
3353 {
3354         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3355         int ret;
3356
3357         switch (ntuple_filter->flags) {
3358         case RTE_5TUPLE_FLAGS:
3359         case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3360                 if (hw->mac.type != e1000_82576)
3361                         return -ENOTSUP;
3362                 if (add)
3363                         ret = igb_add_5tuple_filter_82576(dev,
3364                                                           ntuple_filter);
3365                 else
3366                         ret = igb_remove_5tuple_filter_82576(dev,
3367                                                              ntuple_filter);
3368                 break;
3369         case RTE_2TUPLE_FLAGS:
3370         case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3371                 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3372                         return -ENOTSUP;
3373                 if (add)
3374                         ret = igb_add_2tuple_filter(dev, ntuple_filter);
3375                 else
3376                         ret = igb_remove_2tuple_filter(dev, ntuple_filter);
3377                 break;
3378         default:
3379                 ret = -EINVAL;
3380                 break;
3381         }
3382
3383         return ret;
3384 }
3385
3386 /*
3387  * igb_get_ntuple_filter - get a ntuple filter
3388  *
3389  * @param
3390  * dev: Pointer to struct rte_eth_dev.
3391  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
3392  *
3393  * @return
3394  *    - On success, zero.
3395  *    - On failure, a negative value.
3396  */
3397 static int
3398 igb_get_ntuple_filter(struct rte_eth_dev *dev,
3399                         struct rte_eth_ntuple_filter *ntuple_filter)
3400 {
3401         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3402         struct e1000_filter_info *filter_info =
3403                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3404         struct e1000_5tuple_filter_info filter_5tuple;
3405         struct e1000_2tuple_filter_info filter_2tuple;
3406         struct e1000_5tuple_filter *p_5tuple_filter;
3407         struct e1000_2tuple_filter *p_2tuple_filter;
3408         int ret;
3409
3410         switch (ntuple_filter->flags) {
3411         case RTE_5TUPLE_FLAGS:
3412         case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3413                 if (hw->mac.type != e1000_82576)
3414                         return -ENOTSUP;
3415                 memset(&filter_5tuple,
3416                         0,
3417                         sizeof(struct e1000_5tuple_filter_info));
3418                 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
3419                                                     &filter_5tuple);
3420                 if (ret < 0)
3421                         return ret;
3422                 p_5tuple_filter = igb_5tuple_filter_lookup_82576(
3423                                         &filter_info->fivetuple_list,
3424                                         &filter_5tuple);
3425                 if (p_5tuple_filter == NULL) {
3426                         PMD_DRV_LOG(ERR, "filter doesn't exist.");
3427                         return -ENOENT;
3428                 }
3429                 ntuple_filter->queue = p_5tuple_filter->queue;
3430                 break;
3431         case RTE_2TUPLE_FLAGS:
3432         case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
3433                 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
3434                         return -ENOTSUP;
3435                 memset(&filter_2tuple,
3436                         0,
3437                         sizeof(struct e1000_2tuple_filter_info));
3438                 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
3439                 if (ret < 0)
3440                         return ret;
3441                 p_2tuple_filter = igb_2tuple_filter_lookup(
3442                                         &filter_info->twotuple_list,
3443                                         &filter_2tuple);
3444                 if (p_2tuple_filter == NULL) {
3445                         PMD_DRV_LOG(ERR, "filter doesn't exist.");
3446                         return -ENOENT;
3447                 }
3448                 ntuple_filter->queue = p_2tuple_filter->queue;
3449                 break;
3450         default:
3451                 ret = -EINVAL;
3452                 break;
3453         }
3454
	return ret;
3456 }
3457
3458 /*
3459  * igb_ntuple_filter_handle - Handle operations for ntuple filter.
3460  * @dev: pointer to rte_eth_dev structure
 * @filter_op: the operation to be performed.
 * @arg: a pointer to the structure specific to filter_op.
3463  */
3464 static int
3465 igb_ntuple_filter_handle(struct rte_eth_dev *dev,
3466                                 enum rte_filter_op filter_op,
3467                                 void *arg)
3468 {
3469         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3470         int ret;
3471
3472         MAC_TYPE_FILTER_SUP(hw->mac.type);
3473
3474         if (filter_op == RTE_ETH_FILTER_NOP)
3475                 return 0;
3476
3477         if (arg == NULL) {
3478                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3479                             filter_op);
3480                 return -EINVAL;
3481         }
3482
3483         switch (filter_op) {
3484         case RTE_ETH_FILTER_ADD:
3485                 ret = igb_add_del_ntuple_filter(dev,
3486                         (struct rte_eth_ntuple_filter *)arg,
3487                         TRUE);
3488                 break;
3489         case RTE_ETH_FILTER_DELETE:
3490                 ret = igb_add_del_ntuple_filter(dev,
3491                         (struct rte_eth_ntuple_filter *)arg,
3492                         FALSE);
3493                 break;
3494         case RTE_ETH_FILTER_GET:
3495                 ret = igb_get_ntuple_filter(dev,
3496                         (struct rte_eth_ntuple_filter *)arg);
3497                 break;
3498         default:
3499                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3500                 ret = -EINVAL;
3501                 break;
3502         }
3503         return ret;
3504 }
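
/*
 * Usage sketch (illustrative): on an 82576, steer TCP SYN segments for
 * 10.0.0.5:80 to queue 2 through the generic filter API. The port id,
 * address, and queue are assumptions of the example, not part of the
 * driver. Fields left zero by memset() have their masks set to 0, which
 * means "ignore this field" at this API level.
 *
 *     struct rte_eth_ntuple_filter f;
 *     memset(&f, 0, sizeof(f));
 *     f.flags = RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG;
 *     f.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 5));
 *     f.dst_ip_mask = UINT32_MAX;
 *     f.dst_port = rte_cpu_to_be_16(80);
 *     f.dst_port_mask = UINT16_MAX;
 *     f.proto = IPPROTO_TCP;              (from <netinet/in.h>)
 *     f.proto_mask = UINT8_MAX;
 *     f.tcp_flags = TCP_SYN_FLAG;
 *     f.priority = 1;
 *     f.queue = 2;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                             RTE_ETH_FILTER_ADD, &f);
 */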
3505
3506 static inline int
3507 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
3508                         uint16_t ethertype)
3509 {
3510         int i;
3511
3512         for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3513                 if (filter_info->ethertype_filters[i] == ethertype &&
3514                     (filter_info->ethertype_mask & (1 << i)))
3515                         return i;
3516         }
3517         return -1;
3518 }
3519
3520 static inline int
3521 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
3522                         uint16_t ethertype)
3523 {
3524         int i;
3525
3526         for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3527                 if (!(filter_info->ethertype_mask & (1 << i))) {
3528                         filter_info->ethertype_mask |= 1 << i;
3529                         filter_info->ethertype_filters[i] = ethertype;
3530                         return i;
3531                 }
3532         }
3533         return -1;
3534 }
3535
3536 static inline int
3537 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
3538                         uint8_t idx)
3539 {
3540         if (idx >= E1000_MAX_ETQF_FILTERS)
3541                 return -1;
3542         filter_info->ethertype_mask &= ~(1 << idx);
3543         filter_info->ethertype_filters[idx] = 0;
3544         return idx;
3545 }
3546
3548 static int
3549 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
3550                         struct rte_eth_ethertype_filter *filter,
3551                         bool add)
3552 {
3553         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3554         struct e1000_filter_info *filter_info =
3555                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3556         uint32_t etqf = 0;
3557         int ret;
3558
3559         if (filter->ether_type == ETHER_TYPE_IPv4 ||
3560                 filter->ether_type == ETHER_TYPE_IPv6) {
3561                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
3562                         " ethertype filter.", filter->ether_type);
3563                 return -EINVAL;
3564         }
3565
3566         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
3567                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
3568                 return -EINVAL;
3569         }
3570         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
3571                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
3572                 return -EINVAL;
3573         }
3574
3575         ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3576         if (ret >= 0 && add) {
3577                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
3578                             filter->ether_type);
3579                 return -EEXIST;
3580         }
3581         if (ret < 0 && !add) {
3582                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3583                             filter->ether_type);
3584                 return -ENOENT;
3585         }
3586
3587         if (add) {
3588                 ret = igb_ethertype_filter_insert(filter_info,
3589                         filter->ether_type);
3590                 if (ret < 0) {
3591                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
3592                         return -ENOSYS;
3593                 }
3594
3595                 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
3596                 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
3597                 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
3598         } else {
3599                 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
3600                 if (ret < 0)
3601                         return -ENOSYS;
3602         }
3603         E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
3604         E1000_WRITE_FLUSH(hw);
3605
3606         return 0;
3607 }
3608
3609 static int
3610 igb_get_ethertype_filter(struct rte_eth_dev *dev,
3611                         struct rte_eth_ethertype_filter *filter)
3612 {
3613         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3614         struct e1000_filter_info *filter_info =
3615                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3616         uint32_t etqf;
3617         int ret;
3618
3619         ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3620         if (ret < 0) {
3621                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3622                             filter->ether_type);
3623                 return -ENOENT;
3624         }
3625
3626         etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
3627         if (etqf & E1000_ETQF_FILTER_ENABLE) {
3628                 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
3629                 filter->flags = 0;
3630                 filter->queue = (etqf & E1000_ETQF_QUEUE) >>
3631                                 E1000_ETQF_QUEUE_SHIFT;
3632                 return 0;
3633         }
3634
3635         return -ENOENT;
3636 }
3637
3638 /*
3639  * igb_ethertype_filter_handle - Handle operations for ethertype filter.
3640  * @dev: pointer to rte_eth_dev structure
 * @filter_op: the operation to be performed.
 * @arg: a pointer to the structure specific to filter_op.
3643  */
3644 static int
3645 igb_ethertype_filter_handle(struct rte_eth_dev *dev,
3646                                 enum rte_filter_op filter_op,
3647                                 void *arg)
3648 {
3649         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3650         int ret;
3651
3652         MAC_TYPE_FILTER_SUP(hw->mac.type);
3653
3654         if (filter_op == RTE_ETH_FILTER_NOP)
3655                 return 0;
3656
3657         if (arg == NULL) {
3658                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3659                             filter_op);
3660                 return -EINVAL;
3661         }
3662
3663         switch (filter_op) {
3664         case RTE_ETH_FILTER_ADD:
3665                 ret = igb_add_del_ethertype_filter(dev,
3666                         (struct rte_eth_ethertype_filter *)arg,
3667                         TRUE);
3668                 break;
3669         case RTE_ETH_FILTER_DELETE:
3670                 ret = igb_add_del_ethertype_filter(dev,
3671                         (struct rte_eth_ethertype_filter *)arg,
3672                         FALSE);
3673                 break;
3674         case RTE_ETH_FILTER_GET:
3675                 ret = igb_get_ethertype_filter(dev,
3676                         (struct rte_eth_ethertype_filter *)arg);
3677                 break;
3678         default:
3679                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3680                 ret = -EINVAL;
3681                 break;
3682         }
3683         return ret;
3684 }
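
/*
 * Usage sketch (illustrative): direct all ARP frames to queue 0. IPv4
 * and IPv6 ethertypes are rejected above, as are the MAC-compare and
 * drop flags; port_id is an assumption of the example.
 *
 *     struct rte_eth_ethertype_filter f;
 *     memset(&f, 0, sizeof(f));
 *     f.ether_type = ETHER_TYPE_ARP;      (0x0806, from rte_ether.h)
 *     f.queue = 0;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                             RTE_ETH_FILTER_ADD, &f);
 */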
3685
3686 static int
3687 eth_igb_filter_ctrl(struct rte_eth_dev *dev,
3688                      enum rte_filter_type filter_type,
3689                      enum rte_filter_op filter_op,
3690                      void *arg)
3691 {
3692         int ret = -EINVAL;
3693
3694         switch (filter_type) {
3695         case RTE_ETH_FILTER_NTUPLE:
3696                 ret = igb_ntuple_filter_handle(dev, filter_op, arg);
3697                 break;
3698         case RTE_ETH_FILTER_ETHERTYPE:
3699                 ret = igb_ethertype_filter_handle(dev, filter_op, arg);
3700                 break;
3701         case RTE_ETH_FILTER_SYN:
3702                 ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
3703                 break;
3704         case RTE_ETH_FILTER_FLEXIBLE:
3705                 ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
3706                 break;
3707         default:
3708                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3709                                                         filter_type);
3710                 break;
3711         }
3712
3713         return ret;
3714 }
3715
3716 static int
3717 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
3718                          struct ether_addr *mc_addr_set,
3719                          uint32_t nb_mc_addr)
3720 {
3721         struct e1000_hw *hw;
3722
3723         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3724         e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
3725         return 0;
3726 }
3727
3728 static int
3729 igb_timesync_enable(struct rte_eth_dev *dev)
3730 {
3731         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3732         uint32_t tsync_ctl;
3733
3734         /* Start incrementing the register used to timestamp PTP packets. */
3735         E1000_WRITE_REG(hw, E1000_TIMINCA, E1000_TIMINCA_INIT);
3736
3737         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3738         E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
3739                         (ETHER_TYPE_1588 |
3740                          E1000_ETQF_FILTER_ENABLE |
3741                          E1000_ETQF_1588));
3742
3743         /* Enable timestamping of received PTP packets. */
3744         tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3745         tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
3746         E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3747
3748         /* Enable Timestamping of transmitted PTP packets. */
3749         tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3750         tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
3751         E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
3752
3753         return 0;
3754 }
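
/*
 * Typical IEEE 1588 flow from an application (sketch; error handling and
 * PTP message parsing are omitted, and port_id is an assumption of the
 * example):
 *
 *     struct timespec ts;
 *     rte_eth_timesync_enable(port_id);
 *     ...
 *     once rte_eth_rx_burst() returns an mbuf with PKT_RX_IEEE1588_PTP
 *     set:
 *     if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *             the latched RX timestamp is valid in ts
 */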
3755
3756 static int
3757 igb_timesync_disable(struct rte_eth_dev *dev)
3758 {
3759         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3760         uint32_t tsync_ctl;
3761
3762         /* Disable timestamping of transmitted PTP packets. */
3763         tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3764         tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
3765         E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
3766
3767         /* Disable timestamping of received PTP packets. */
3768         tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3769         tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
3770         E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
3771
3772         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3773         E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
3774
	/* Stop incrementing the System Time registers. */
3776         E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
3777
3778         return 0;
3779 }
3780
3781 static int
3782 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3783                                struct timespec *timestamp,
3784                                uint32_t flags __rte_unused)
3785 {
3786         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3787         uint32_t tsync_rxctl;
3788         uint32_t rx_stmpl;
3789         uint32_t rx_stmph;
3790
3791         tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
3792         if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
3793                 return -EINVAL;
3794
3795         rx_stmpl = E1000_READ_REG(hw, E1000_RXSTMPL);
3796         rx_stmph = E1000_READ_REG(hw, E1000_RXSTMPH);
3797
3798         timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
3799         timestamp->tv_nsec = 0;
3800
3801         return  0;
3802 }
3803
3804 static int
3805 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3806                                struct timespec *timestamp)
3807 {
3808         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3809         uint32_t tsync_txctl;
3810         uint32_t tx_stmpl;
3811         uint32_t tx_stmph;
3812
3813         tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
3814         if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
3815                 return -EINVAL;
3816
3817         tx_stmpl = E1000_READ_REG(hw, E1000_TXSTMPL);
3818         tx_stmph = E1000_READ_REG(hw, E1000_TXSTMPH);
3819
3820         timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
3821         timestamp->tv_nsec = 0;
3822
3823         return  0;
3824 }
3825
3826 static int
3827 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
3828 {
3829         int count = 0;
3830         int g_ind = 0;
3831         const struct reg_info *reg_group;
3832
3833         while ((reg_group = igb_regs[g_ind++]))
3834                 count += igb_reg_group_count(reg_group);
3835
3836         return count;
3837 }
3838
3839 static int
3840 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
3841 {
3842         int count = 0;
3843         int g_ind = 0;
3844         const struct reg_info *reg_group;
3845
3846         while ((reg_group = igbvf_regs[g_ind++]))
3847                 count += igb_reg_group_count(reg_group);
3848
3849         return count;
3850 }
3851
3852 static int
3853 eth_igb_get_regs(struct rte_eth_dev *dev,
3854         struct rte_dev_reg_info *regs)
3855 {
3856         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3857         uint32_t *data = regs->data;
3858         int g_ind = 0;
3859         int count = 0;
3860         const struct reg_info *reg_group;
3861
3862         /* Support only full register dump */
3863         if ((regs->length == 0) ||
3864             (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
3865                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
3866                         hw->device_id;
3867                 while ((reg_group = igb_regs[g_ind++]))
3868                         count += igb_read_regs_group(dev, &data[count],
3869                                                         reg_group);
3870                 return 0;
3871         }
3872
3873         return -ENOTSUP;
3874 }
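
/*
 * Usage sketch (illustrative): a full register dump through the ethdev
 * API these callbacks implement. Error handling is omitted; note the
 * driver only supports a full dump (length 0 or the exact register
 * count).
 *
 *     int len = rte_eth_dev_get_reg_length(port_id);
 *     struct rte_dev_reg_info info;
 *     memset(&info, 0, sizeof(info));
 *     info.data = calloc(len, sizeof(uint32_t));
 *     info.length = len;
 *     rte_eth_dev_get_reg_info(port_id, &info);
 */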
3875
3876 static int
3877 igbvf_get_regs(struct rte_eth_dev *dev,
3878         struct rte_dev_reg_info *regs)
3879 {
3880         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3881         uint32_t *data = regs->data;
3882         int g_ind = 0;
3883         int count = 0;
3884         const struct reg_info *reg_group;
3885
3886         /* Support only full register dump */
3887         if ((regs->length == 0) ||
3888             (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
3889                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
3890                         hw->device_id;
3891                 while ((reg_group = igbvf_regs[g_ind++]))
3892                         count += igb_read_regs_group(dev, &data[count],
3893                                                         reg_group);
3894                 return 0;
3895         }
3896
3897         return -ENOTSUP;
3898 }
3899
3900 static int
3901 eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
3902 {
3903         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3904
3905         /* Return unit is byte count */
3906         return hw->nvm.word_size * 2;
3907 }
3908
3909 static int
3910 eth_igb_get_eeprom(struct rte_eth_dev *dev,
3911         struct rte_dev_eeprom_info *in_eeprom)
3912 {
3913         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3914         struct e1000_nvm_info *nvm = &hw->nvm;
3915         uint16_t *data = in_eeprom->data;
3916         int first, length;
3917
3918         first = in_eeprom->offset >> 1;
3919         length = in_eeprom->length >> 1;
	if ((first >= hw->nvm.word_size) ||
	    ((first + length) > hw->nvm.word_size))
3922                 return -EINVAL;
3923
3924         in_eeprom->magic = hw->vendor_id |
3925                 ((uint32_t)hw->device_id << 16);
3926
3927         if ((nvm->ops.read) == NULL)
3928                 return -ENOTSUP;
3929
3930         return nvm->ops.read(hw, first, length, data);
3931 }
3932
3933 static int
3934 eth_igb_set_eeprom(struct rte_eth_dev *dev,
3935         struct rte_dev_eeprom_info *in_eeprom)
3936 {
3937         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3938         struct e1000_nvm_info *nvm = &hw->nvm;
3939         uint16_t *data = in_eeprom->data;
3940         int first, length;
3941
3942         first = in_eeprom->offset >> 1;
3943         length = in_eeprom->length >> 1;
	if ((first >= hw->nvm.word_size) ||
	    ((first + length) > hw->nvm.word_size))
3946                 return -EINVAL;
3947
3948         in_eeprom->magic = (uint32_t)hw->vendor_id |
3949                 ((uint32_t)hw->device_id << 16);
3950
3951         if ((nvm->ops.write) == NULL)
3952                 return -ENOTSUP;
	return nvm->ops.write(hw, first, length, data);
3954 }
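
/*
 * Usage sketch (illustrative): read the first two EEPROM words via the
 * ethdev API. offset and length are byte counts and must stay within
 * rte_eth_dev_get_eeprom_length(), i.e. word_size * 2; port_id is an
 * assumption of the example.
 *
 *     uint16_t buf[2];
 *     struct rte_dev_eeprom_info info;
 *     memset(&info, 0, sizeof(info));
 *     info.data = buf;
 *     info.offset = 0;
 *     info.length = sizeof(buf);
 *     rte_eth_dev_get_eeprom(port_id, &info);
 */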
3955
3956 static struct rte_driver pmd_igb_drv = {
3957         .type = PMD_PDEV,
3958         .init = rte_igb_pmd_init,
3959 };
3960
3961 static struct rte_driver pmd_igbvf_drv = {
3962         .type = PMD_PDEV,
3963         .init = rte_igbvf_pmd_init,
3964 };
3965
3966 PMD_REGISTER_DRIVER(pmd_igb_drv);
3967 PMD_REGISTER_DRIVER(pmd_igbvf_drv);