igb: migrate flex filter to new API
[dpdk.git] / lib / librte_pmd_e1000 / igb_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <stdarg.h>
39
40 #include <rte_common.h>
41 #include <rte_interrupts.h>
42 #include <rte_byteorder.h>
43 #include <rte_log.h>
44 #include <rte_debug.h>
45 #include <rte_pci.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_memory.h>
49 #include <rte_memzone.h>
50 #include <rte_tailq.h>
51 #include <rte_eal.h>
52 #include <rte_atomic.h>
53 #include <rte_malloc.h>
54 #include <rte_dev.h>
55
56 #include "e1000_logs.h"
57 #include "e1000/e1000_api.h"
58 #include "e1000_ethdev.h"
59
60 /*
61  * Default values for port configuration
62  */
63 #define IGB_DEFAULT_RX_FREE_THRESH  32
64 #define IGB_DEFAULT_RX_PTHRESH      8
65 #define IGB_DEFAULT_RX_HTHRESH      8
66 #define IGB_DEFAULT_RX_WTHRESH      0
67
68 #define IGB_DEFAULT_TX_PTHRESH      32
69 #define IGB_DEFAULT_TX_HTHRESH      0
70 #define IGB_DEFAULT_TX_WTHRESH      0
71
72 /* Bit shift and mask */
73 #define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
74 #define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
75 #define IGB_8_BIT_WIDTH  CHAR_BIT
76 #define IGB_8_BIT_MASK   UINT8_MAX
77
78 static int  eth_igb_configure(struct rte_eth_dev *dev);
79 static int  eth_igb_start(struct rte_eth_dev *dev);
80 static void eth_igb_stop(struct rte_eth_dev *dev);
81 static void eth_igb_close(struct rte_eth_dev *dev);
82 static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
83 static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
84 static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
85 static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
86 static int  eth_igb_link_update(struct rte_eth_dev *dev,
87                                 int wait_to_complete);
88 static void eth_igb_stats_get(struct rte_eth_dev *dev,
89                                 struct rte_eth_stats *rte_stats);
90 static void eth_igb_stats_reset(struct rte_eth_dev *dev);
91 static void eth_igb_infos_get(struct rte_eth_dev *dev,
92                               struct rte_eth_dev_info *dev_info);
93 static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
94                                 struct rte_eth_dev_info *dev_info);
95 static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
96                                 struct rte_eth_fc_conf *fc_conf);
97 static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
98                                 struct rte_eth_fc_conf *fc_conf);
99 static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
100 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
101 static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
102 static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
103                                                         void *param);
104 static int  igb_hardware_init(struct e1000_hw *hw);
105 static void igb_hw_control_acquire(struct e1000_hw *hw);
106 static void igb_hw_control_release(struct e1000_hw *hw);
107 static void igb_init_manageability(struct e1000_hw *hw);
108 static void igb_release_manageability(struct e1000_hw *hw);
109
110 static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
111
112 static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
113                 uint16_t vlan_id, int on);
114 static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
115 static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
116
117 static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
118 static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
119 static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
120 static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
121 static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
122 static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
123
124 static int eth_igb_led_on(struct rte_eth_dev *dev);
125 static int eth_igb_led_off(struct rte_eth_dev *dev);
126
127 static void igb_intr_disable(struct e1000_hw *hw);
128 static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
129 static void eth_igb_rar_set(struct rte_eth_dev *dev,
130                 struct ether_addr *mac_addr,
131                 uint32_t index, uint32_t pool);
132 static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
133
134 static void igbvf_intr_disable(struct e1000_hw *hw);
135 static int igbvf_dev_configure(struct rte_eth_dev *dev);
136 static int igbvf_dev_start(struct rte_eth_dev *dev);
137 static void igbvf_dev_stop(struct rte_eth_dev *dev);
138 static void igbvf_dev_close(struct rte_eth_dev *dev);
139 static int eth_igbvf_link_update(struct e1000_hw *hw);
140 static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
141 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
142 static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
143                 uint16_t vlan_id, int on);
144 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
145 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
146 static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
147                                    struct rte_eth_rss_reta_entry64 *reta_conf,
148                                    uint16_t reta_size);
149 static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
150                                   struct rte_eth_rss_reta_entry64 *reta_conf,
151                                   uint16_t reta_size);
152 static int eth_igb_add_syn_filter(struct rte_eth_dev *dev,
153                         struct rte_syn_filter *filter, uint16_t rx_queue);
154 static int eth_igb_remove_syn_filter(struct rte_eth_dev *dev);
155 static int eth_igb_get_syn_filter(struct rte_eth_dev *dev,
156                         struct rte_syn_filter *filter, uint16_t *rx_queue);
157 static int eth_igb_add_2tuple_filter(struct rte_eth_dev *dev,
158                         uint16_t index,
159                         struct rte_2tuple_filter *filter, uint16_t rx_queue);
160 static int eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
161                         uint16_t index);
162 static int eth_igb_get_2tuple_filter(struct rte_eth_dev *dev,
163                         uint16_t index,
164                         struct rte_2tuple_filter *filter, uint16_t *rx_queue);
165 static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
166                         struct rte_eth_flex_filter *filter,
167                         bool add);
168 static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
169                         struct rte_eth_flex_filter *filter);
170 static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
171                         enum rte_filter_op filter_op,
172                         void *arg);
173 static int eth_igb_add_5tuple_filter(struct rte_eth_dev *dev,
174                         uint16_t index,
175                         struct rte_5tuple_filter *filter, uint16_t rx_queue);
176 static int eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
177                         uint16_t index);
178 static int eth_igb_get_5tuple_filter(struct rte_eth_dev *dev,
179                         uint16_t index,
180                         struct rte_5tuple_filter *filter, uint16_t *rx_queue);
181 static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
182                         struct rte_eth_ethertype_filter *filter,
183                         bool add);
184 static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
185                                 enum rte_filter_op filter_op,
186                                 void *arg);
187 static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
188                         struct rte_eth_ethertype_filter *filter);
189 static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
190                      enum rte_filter_type filter_type,
191                      enum rte_filter_op filter_op,
192                      void *arg);
193
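/*
 * Illustrative sketch (not part of the driver, hence kept under "#if 0"):
 * with the flex filter moved to the generic filter_ctrl API, an application
 * programs it through rte_eth_dev_filter_ctrl() with RTE_ETH_FILTER_FLEX
 * instead of dedicated ethdev callbacks.  The values used below (port id,
 * pattern bytes, mask layout, queue) are placeholders, and the
 * struct rte_eth_flex_filter layout is assumed to match rte_eth_ctrl.h.
 */
#if 0
static int
example_add_flex_filter(uint8_t port_id)
{
        struct rte_eth_flex_filter flex;

        memset(&flex, 0, sizeof(flex));
        flex.len = 16;           /* pattern length in bytes (multiple of 8) */
        flex.bytes[12] = 0x08;   /* e.g. match EtherType 0x0806 (ARP) */
        flex.bytes[13] = 0x06;
        flex.mask[1] = 0x30;     /* assumed: one mask bit per pattern byte */
        flex.priority = 1;
        flex.queue = 2;          /* deliver matching packets to RX queue 2 */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FLEX,
                                       RTE_ETH_FILTER_ADD, &flex);
}
#endif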
194 /*
195  * Macro to update VF statistics from registers that are not cleared on read
196  */
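/*
 * The VF counters keep accumulating rather than clearing on read, so each
 * call adds the delta since the previous snapshot; unsigned 32-bit
 * arithmetic keeps the subtraction correct across counter wrap-around.
 */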
197 #define UPDATE_VF_STAT(reg, last, cur)            \
198 {                                                 \
199         u32 latest = E1000_READ_REG(hw, reg);     \
200         cur += latest - last;                     \
201         last = latest;                            \
202 }
203
204
205 #define IGB_FC_PAUSE_TIME 0x0680
206 #define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
207 #define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
208
209 #define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */
210
211 static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
212
213 /*
214  * The set of PCI devices this driver supports
215  */
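/*
 * rte_pci_dev_ids.h lists the supported devices as RTE_PCI_DEV_ID_DECL_IGB()
 * entries; defining that macro right before including the header expands
 * each entry into an rte_pci_id initializer below.
 */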
216 static struct rte_pci_id pci_id_igb_map[] = {
217
218 #define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
219 #include "rte_pci_dev_ids.h"
220
221 {.device_id = 0},
222 };
223
224 /*
225  * The set of PCI devices this driver supports (82576 and I350 VF)
226  */
227 static struct rte_pci_id pci_id_igbvf_map[] = {
228
229 #define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
230 #include "rte_pci_dev_ids.h"
231
232 {.device_id = 0},
233 };
234
235 static struct eth_dev_ops eth_igb_ops = {
236         .dev_configure        = eth_igb_configure,
237         .dev_start            = eth_igb_start,
238         .dev_stop             = eth_igb_stop,
239         .dev_close            = eth_igb_close,
240         .promiscuous_enable   = eth_igb_promiscuous_enable,
241         .promiscuous_disable  = eth_igb_promiscuous_disable,
242         .allmulticast_enable  = eth_igb_allmulticast_enable,
243         .allmulticast_disable = eth_igb_allmulticast_disable,
244         .link_update          = eth_igb_link_update,
245         .stats_get            = eth_igb_stats_get,
246         .stats_reset          = eth_igb_stats_reset,
247         .dev_infos_get        = eth_igb_infos_get,
248         .mtu_set              = eth_igb_mtu_set,
249         .vlan_filter_set      = eth_igb_vlan_filter_set,
250         .vlan_tpid_set        = eth_igb_vlan_tpid_set,
251         .vlan_offload_set     = eth_igb_vlan_offload_set,
252         .rx_queue_setup       = eth_igb_rx_queue_setup,
253         .rx_queue_release     = eth_igb_rx_queue_release,
254         .rx_queue_count       = eth_igb_rx_queue_count,
255         .rx_descriptor_done   = eth_igb_rx_descriptor_done,
256         .tx_queue_setup       = eth_igb_tx_queue_setup,
257         .tx_queue_release     = eth_igb_tx_queue_release,
258         .dev_led_on           = eth_igb_led_on,
259         .dev_led_off          = eth_igb_led_off,
260         .flow_ctrl_get        = eth_igb_flow_ctrl_get,
261         .flow_ctrl_set        = eth_igb_flow_ctrl_set,
262         .mac_addr_add         = eth_igb_rar_set,
263         .mac_addr_remove      = eth_igb_rar_clear,
264         .reta_update          = eth_igb_rss_reta_update,
265         .reta_query           = eth_igb_rss_reta_query,
266         .rss_hash_update      = eth_igb_rss_hash_update,
267         .rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
268         .add_syn_filter          = eth_igb_add_syn_filter,
269         .remove_syn_filter       = eth_igb_remove_syn_filter,
270         .get_syn_filter          = eth_igb_get_syn_filter,
271         .add_2tuple_filter       = eth_igb_add_2tuple_filter,
272         .remove_2tuple_filter    = eth_igb_remove_2tuple_filter,
273         .get_2tuple_filter       = eth_igb_get_2tuple_filter,
274         .add_5tuple_filter       = eth_igb_add_5tuple_filter,
275         .remove_5tuple_filter    = eth_igb_remove_5tuple_filter,
276         .get_5tuple_filter       = eth_igb_get_5tuple_filter,
277         .filter_ctrl             = eth_igb_filter_ctrl,
278 };
279
280 /*
281  * dev_ops for virtual function; only the bare necessities for basic VF
282  * operation are implemented
283  */
284 static struct eth_dev_ops igbvf_eth_dev_ops = {
285         .dev_configure        = igbvf_dev_configure,
286         .dev_start            = igbvf_dev_start,
287         .dev_stop             = igbvf_dev_stop,
288         .dev_close            = igbvf_dev_close,
289         .link_update          = eth_igb_link_update,
290         .stats_get            = eth_igbvf_stats_get,
291         .stats_reset          = eth_igbvf_stats_reset,
292         .vlan_filter_set      = igbvf_vlan_filter_set,
293         .dev_infos_get        = eth_igbvf_infos_get,
294         .rx_queue_setup       = eth_igb_rx_queue_setup,
295         .rx_queue_release     = eth_igb_rx_queue_release,
296         .tx_queue_setup       = eth_igb_tx_queue_setup,
297         .tx_queue_release     = eth_igb_tx_queue_release,
298 };
299
300 /**
301  * Atomically reads the link status information from global
302  * structure rte_eth_dev.
303  *
304  * @param dev
305  *   - Pointer to the structure rte_eth_dev to read from.
306  *   - Pointer to the buffer in which the link status is stored.
307  *
308  * @return
309  *   - On success, zero.
310  *   - On failure, negative value.
311  */
312 static inline int
313 rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
314                                 struct rte_eth_link *link)
315 {
316         struct rte_eth_link *dst = link;
317         struct rte_eth_link *src = &(dev->data->dev_link);
318
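        /*
         * The link status fits in 64 bits, so a single compare-and-set copies
         * it atomically; a return of 0 means a concurrent writer raced with us.
         */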
319         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
320                                         *(uint64_t *)src) == 0)
321                 return -1;
322
323         return 0;
324 }
325
326 /**
327  * Atomically writes the link status information into global
328  * structure rte_eth_dev.
329  *
330  * @param dev
331  *   - Pointer to the structure rte_eth_dev to write to.
332  *   - Pointer to the buffer holding the link status to be written.
333  *
334  * @return
335  *   - On success, zero.
336  *   - On failure, negative value.
337  */
338 static inline int
339 rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
340                                 struct rte_eth_link *link)
341 {
342         struct rte_eth_link *dst = &(dev->data->dev_link);
343         struct rte_eth_link *src = link;
344
345         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
346                                         *(uint64_t *)src) == 0)
347                 return -1;
348
349         return 0;
350 }
351
352 static inline void
353 igb_intr_enable(struct rte_eth_dev *dev)
354 {
355         struct e1000_interrupt *intr =
356                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
357         struct e1000_hw *hw =
358                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
359
360         E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
361         E1000_WRITE_FLUSH(hw);
362 }
363
364 static void
365 igb_intr_disable(struct e1000_hw *hw)
366 {
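        /* Writing all ones to the Interrupt Mask Clear register masks every interrupt cause. */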
367         E1000_WRITE_REG(hw, E1000_IMC, ~0);
368         E1000_WRITE_FLUSH(hw);
369 }
370
371 static inline int32_t
372 igb_pf_reset_hw(struct e1000_hw *hw)
373 {
374         uint32_t ctrl_ext;
375         int32_t status;
376
377         status = e1000_reset_hw(hw);
378
379         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
380         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
381         ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
382         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
383         E1000_WRITE_FLUSH(hw);
384
385         return status;
386 }
387
388 static void
389 igb_identify_hardware(struct rte_eth_dev *dev)
390 {
391         struct e1000_hw *hw =
392                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
393
394         hw->vendor_id = dev->pci_dev->id.vendor_id;
395         hw->device_id = dev->pci_dev->id.device_id;
396         hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
397         hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
398
399         e1000_set_mac_type(hw);
400
401         /* need to check if it is a vf device below */
402 }
403
404 static int
405 igb_reset_swfw_lock(struct e1000_hw *hw)
406 {
407         int ret_val;
408
409         /*
410          * Do mac ops initialization manually here, since we will need
411          * some function pointers set by this call.
412          */
413         ret_val = e1000_init_mac_params(hw);
414         if (ret_val)
415                 return ret_val;
416
417         /*
418          * Taking the SMBI lock should not fail at this early stage. If it does,
419          * it is due to an improper exit of the application,
420          * so force the release of the faulty lock.
421          */
422         if (e1000_get_hw_semaphore_generic(hw) < 0) {
423                 PMD_DRV_LOG(DEBUG, "SMBI lock released");
424         }
425         e1000_put_hw_semaphore_generic(hw);
426
427         if (hw->mac.ops.acquire_swfw_sync != NULL) {
428                 uint16_t mask;
429
430                 /*
431                  * Taking the PHY lock should not fail at this early stage. If it
432                  * does, it is due to an improper exit of the application,
433                  * so force the release of the faulty lock.
434                  */
435                 mask = E1000_SWFW_PHY0_SM << hw->bus.func;
436                 if (hw->bus.func > E1000_FUNC_1)
437                         mask <<= 2;
438                 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
439                         PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
440                                     hw->bus.func);
441                 }
442                 hw->mac.ops.release_swfw_sync(hw, mask);
443
444                 /*
445                  * This one is trickier since it is common to all ports; but the
446                  * swfw_sync retries last long enough (1s) to be almost sure that,
447                  * if the lock cannot be taken, it is because the semaphore was
448                  * left improperly locked.
449                  */
450                 mask = E1000_SWFW_EEP_SM;
451                 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
452                         PMD_DRV_LOG(DEBUG, "SWFW common locks released");
453                 }
454                 hw->mac.ops.release_swfw_sync(hw, mask);
455         }
456
457         return E1000_SUCCESS;
458 }
459
460 static int
461 eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
462                    struct rte_eth_dev *eth_dev)
463 {
464         int error = 0;
465         struct rte_pci_device *pci_dev;
466         struct e1000_hw *hw =
467                 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
468         struct e1000_vfta * shadow_vfta =
469                         E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
470         struct e1000_filter_info *filter_info =
471                 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
472         uint32_t ctrl_ext;
473
474         pci_dev = eth_dev->pci_dev;
475         eth_dev->dev_ops = &eth_igb_ops;
476         eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
477         eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
478
479         /* for secondary processes, we don't initialise any further as primary
480          * has already done this work. Only check we don't need a different
481          * RX function */
482         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
483                 if (eth_dev->data->scattered_rx)
484                         eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
485                 return 0;
486         }
487
488         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
489
490         igb_identify_hardware(eth_dev);
491         if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
492                 error = -EIO;
493                 goto err_late;
494         }
495
496         e1000_get_bus_info(hw);
497
498         /* Reset any pending lock */
499         if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
500                 error = -EIO;
501                 goto err_late;
502         }
503
504         /* Finish initialization */
505         if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
506                 error = -EIO;
507                 goto err_late;
508         }
509
510         hw->mac.autoneg = 1;
511         hw->phy.autoneg_wait_to_complete = 0;
512         hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
513
514         /* Copper options */
515         if (hw->phy.media_type == e1000_media_type_copper) {
516                 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
517                 hw->phy.disable_polarity_correction = 0;
518                 hw->phy.ms_type = e1000_ms_hw_default;
519         }
520
521         /*
522          * Start from a known state; this is important for reading the NVM
523          * and MAC address from it.
524          */
525         igb_pf_reset_hw(hw);
526
527         /* Make sure we have a good EEPROM before we read from it */
528         if (e1000_validate_nvm_checksum(hw) < 0) {
529                 /*
530                  * Some PCI-E parts fail the first check because
531                  * the link is in a sleep state. Call it again;
532                  * if it fails a second time, it is a real issue.
533                  */
534                 if (e1000_validate_nvm_checksum(hw) < 0) {
535                         PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
536                         error = -EIO;
537                         goto err_late;
538                 }
539         }
540
541         /* Read the permanent MAC address out of the EEPROM */
542         if (e1000_read_mac_addr(hw) != 0) {
543                 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
544                 error = -EIO;
545                 goto err_late;
546         }
547
548         /* Allocate memory for storing MAC addresses */
549         eth_dev->data->mac_addrs = rte_zmalloc("e1000",
550                 ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
551         if (eth_dev->data->mac_addrs == NULL) {
552                 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
553                                                 "store MAC addresses",
554                                 ETHER_ADDR_LEN * hw->mac.rar_entry_count);
555                 error = -ENOMEM;
556                 goto err_late;
557         }
558
559         /* Copy the permanent MAC address */
560         ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
561
562         /* initialize the vfta */
563         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
564
565         /* Now initialize the hardware */
566         if (igb_hardware_init(hw) != 0) {
567                 PMD_INIT_LOG(ERR, "Hardware initialization failed");
568                 rte_free(eth_dev->data->mac_addrs);
569                 eth_dev->data->mac_addrs = NULL;
570                 error = -ENODEV;
571                 goto err_late;
572         }
573         hw->mac.get_link_status = 1;
574
575         /* Indicate SOL/IDER usage */
576         if (e1000_check_reset_block(hw) < 0) {
577                 PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
578                                         "SOL/IDER session");
579         }
580
581         /* initialize PF if max_vfs not zero */
582         igb_pf_host_init(eth_dev);
583
584         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
585         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
586         ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
587         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
588         E1000_WRITE_FLUSH(hw);
589
590         PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
591                      eth_dev->data->port_id, pci_dev->id.vendor_id,
592                      pci_dev->id.device_id);
593
594         rte_intr_callback_register(&(pci_dev->intr_handle),
595                 eth_igb_interrupt_handler, (void *)eth_dev);
596
597         /* enable uio intr after callback register */
598         rte_intr_enable(&(pci_dev->intr_handle));
599
600         /* enable support intr */
601         igb_intr_enable(eth_dev);
602
603         TAILQ_INIT(&filter_info->flex_list);
604         filter_info->flex_mask = 0;
605
606         return 0;
607
608 err_late:
609         igb_hw_control_release(hw);
610
611         return (error);
612 }
613
614 /*
615  * Virtual Function device init
616  */
617 static int
618 eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
619                 struct rte_eth_dev *eth_dev)
620 {
621         struct rte_pci_device *pci_dev;
622         struct e1000_hw *hw =
623                 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
624         int diag;
625
626         PMD_INIT_FUNC_TRACE();
627
628         eth_dev->dev_ops = &igbvf_eth_dev_ops;
629         eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
630         eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
631
632         /* for secondary processes, we don't initialise any further as primary
633          * has already done this work. Only check we don't need a different
634          * RX function */
635         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
636                 if (eth_dev->data->scattered_rx)
637                         eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
638                 return 0;
639         }
640
641         pci_dev = eth_dev->pci_dev;
642
643         hw->device_id = pci_dev->id.device_id;
644         hw->vendor_id = pci_dev->id.vendor_id;
645         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
646
647         /* Initialize the shared code (base driver) */
648         diag = e1000_setup_init_funcs(hw, TRUE);
649         if (diag != 0) {
650                 PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
651                         diag);
652                 return -EIO;
653         }
654
655         /* init_mailbox_params */
656         hw->mbx.ops.init_params(hw);
657
658         /* Disable the interrupts for VF */
659         igbvf_intr_disable(hw);
660
661         diag = hw->mac.ops.reset_hw(hw);
662
663         /* Allocate memory for storing MAC addresses */
664         eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
665                 hw->mac.rar_entry_count, 0);
666         if (eth_dev->data->mac_addrs == NULL) {
667                 PMD_INIT_LOG(ERR,
668                         "Failed to allocate %d bytes needed to store MAC "
669                         "addresses",
670                         ETHER_ADDR_LEN * hw->mac.rar_entry_count);
671                 return -ENOMEM;
672         }
673
674         /* Copy the permanent MAC address */
675         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
676                         &eth_dev->data->mac_addrs[0]);
677
678         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
679                      "mac.type=%s",
680                      eth_dev->data->port_id, pci_dev->id.vendor_id,
681                      pci_dev->id.device_id, "igb_mac_82576_vf");
682
683         return 0;
684 }
685
686 static struct eth_driver rte_igb_pmd = {
687         {
688                 .name = "rte_igb_pmd",
689                 .id_table = pci_id_igb_map,
690                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
691         },
692         .eth_dev_init = eth_igb_dev_init,
693         .dev_private_size = sizeof(struct e1000_adapter),
694 };
695
696 /*
697  * virtual function driver struct
698  */
699 static struct eth_driver rte_igbvf_pmd = {
700         {
701                 .name = "rte_igbvf_pmd",
702                 .id_table = pci_id_igbvf_map,
703                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
704         },
705         .eth_dev_init = eth_igbvf_dev_init,
706         .dev_private_size = sizeof(struct e1000_adapter),
707 };
708
709 static int
710 rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
711 {
712         rte_eth_driver_register(&rte_igb_pmd);
713         return 0;
714 }
715
716 static void
717 igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
718 {
719         struct e1000_hw *hw =
720                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
721         /* RCTL: enable VLAN filter since VMDq always use VLAN filter */
722         uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
723         rctl |= E1000_RCTL_VFE;
724         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
725 }
726
727 /*
728  * VF Driver initialization routine.
729  * Invoked once at EAL init time.
730  * Registers itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
731  */
732 static int
733 rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
734 {
735         PMD_INIT_FUNC_TRACE();
736
737         rte_eth_driver_register(&rte_igbvf_pmd);
738         return (0);
739 }
740
741 static int
742 eth_igb_configure(struct rte_eth_dev *dev)
743 {
744         struct e1000_interrupt *intr =
745                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
746
747         PMD_INIT_FUNC_TRACE();
748         intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
749         PMD_INIT_FUNC_TRACE();
750
751         return (0);
752 }
753
754 static int
755 eth_igb_start(struct rte_eth_dev *dev)
756 {
757         struct e1000_hw *hw =
758                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
759         int ret, i, mask;
760         uint32_t ctrl_ext;
761
762         PMD_INIT_FUNC_TRACE();
763
764         /* Power up the phy. Needed to make the link go Up */
765         e1000_power_up_phy(hw);
766
767         /*
768          * Packet Buffer Allocation (PBA)
769          * Writing PBA sets the receive portion of the buffer;
770          * the remainder is used for the transmit buffer.
771          */
772         if (hw->mac.type == e1000_82575) {
773                 uint32_t pba;
774
775                 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
776                 E1000_WRITE_REG(hw, E1000_PBA, pba);
777         }
778
779         /* Put the address into the Receive Address Array */
780         e1000_rar_set(hw, hw->mac.addr, 0);
781
782         /* Initialize the hardware */
783         if (igb_hardware_init(hw)) {
784                 PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
785                 return (-EIO);
786         }
787
788         E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
789
790         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
791         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
792         ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
793         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
794         E1000_WRITE_FLUSH(hw);
795
796         /* configure PF module if SRIOV enabled */
797         igb_pf_host_configure(dev);
798
799         /* Configure for OS presence */
800         igb_init_manageability(hw);
801
802         eth_igb_tx_init(dev);
803
804         /* This can fail when allocating mbufs for descriptor rings */
805         ret = eth_igb_rx_init(dev);
806         if (ret) {
807                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
808                 igb_dev_clear_queues(dev);
809                 return ret;
810         }
811
812         e1000_clear_hw_cntrs_base_generic(hw);
813
814         /*
815          * VLAN Offload Settings
816          */
817         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
818                         ETH_VLAN_EXTEND_MASK;
819         eth_igb_vlan_offload_set(dev, mask);
820
821         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
822                 /* Enable VLAN filter since VMDq always use VLAN filter */
823                 igb_vmdq_vlan_hw_filter_enable(dev);
824         }
825
826         /*
827          * Configure the Interrupt Moderation register (EITR) with the maximum
828          * possible value (0xFFFF) to minimize "System Partial Write" issued by
829          * spurious [DMA] memory updates of RX and TX ring descriptors.
830          *
831          * With an EITR granularity of 2 microseconds on the 82576, only 7 or 8
832          * spurious memory updates per second should be expected
833          * ((65535 * 2) / 1,000,000 ~= 0.131 second between updates).
834          *
835          * Because interrupts are not used at all, the MSI-X is not activated
836          * and interrupt moderation is controlled by EITR[0].
837          *
838          * Note that having [almost] disabled memory updates of RX and TX ring
839          * descriptors through the Interrupt Moderation mechanism, memory
840          * updates of ring descriptors are now moderated by the configurable
841          * value of Write-Back Threshold registers.
842          */
843         if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
844                 (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
845                 (hw->mac.type == e1000_i211)) {
846                 uint32_t ivar;
847
848                 /* Enable all RX & TX queues in the IVAR registers */
849                 ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
850                 for (i = 0; i < 8; i++)
851                         E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);
852
853                 /* Configure EITR with the maximum possible value (0xFFFF) */
854                 E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
855         }
856
857         /* Setup link speed and duplex */
858         switch (dev->data->dev_conf.link_speed) {
859         case ETH_LINK_SPEED_AUTONEG:
860                 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
861                         hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
862                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
863                         hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
864                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
865                         hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
866                 else
867                         goto error_invalid_config;
868                 break;
869         case ETH_LINK_SPEED_10:
870                 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
871                         hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
872                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
873                         hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
874                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
875                         hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
876                 else
877                         goto error_invalid_config;
878                 break;
879         case ETH_LINK_SPEED_100:
880                 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
881                         hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
882                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
883                         hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
884                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
885                         hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
886                 else
887                         goto error_invalid_config;
888                 break;
889         case ETH_LINK_SPEED_1000:
890                 if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
891                                 (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
892                         hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
893                 else
894                         goto error_invalid_config;
895                 break;
896         case ETH_LINK_SPEED_10000:
897         default:
898                 goto error_invalid_config;
899         }
900         e1000_setup_link(hw);
901
902         /* check if lsc interrupt feature is enabled */
903         if (dev->data->dev_conf.intr_conf.lsc != 0)
904                 ret = eth_igb_lsc_interrupt_setup(dev);
905
906         /* resume enabled intr since hw reset */
907         igb_intr_enable(dev);
908
909         PMD_INIT_LOG(DEBUG, "<<");
910
911         return (0);
912
913 error_invalid_config:
914         PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
915                      dev->data->dev_conf.link_speed,
916                      dev->data->dev_conf.link_duplex, dev->data->port_id);
917         igb_dev_clear_queues(dev);
918         return (-EINVAL);
919 }
920
921 /*********************************************************************
922  *
923  *  This routine disables all traffic on the adapter by issuing a
924  *  global reset on the MAC.
925  *
926  **********************************************************************/
927 static void
928 eth_igb_stop(struct rte_eth_dev *dev)
929 {
930         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
931         struct e1000_filter_info *filter_info =
932                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
933         struct rte_eth_link link;
934         struct e1000_flex_filter *p_flex;
935
936         igb_intr_disable(hw);
937         igb_pf_reset_hw(hw);
938         E1000_WRITE_REG(hw, E1000_WUC, 0);
939
940         /* Set bit for Go Link disconnect */
941         if (hw->mac.type >= e1000_82580) {
942                 uint32_t phpm_reg;
943
944                 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
945                 phpm_reg |= E1000_82580_PM_GO_LINKD;
946                 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
947         }
948
949         /* Power down the phy. Needed to make the link go Down */
950         e1000_power_down_phy(hw);
951
952         igb_dev_clear_queues(dev);
953
954         /* clear the recorded link status */
955         memset(&link, 0, sizeof(link));
956         rte_igb_dev_atomic_write_link_status(dev, &link);
957
958         /* Remove all flex filters of the device */
959         while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
960                 TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
961                 rte_free(p_flex);
962         }
963         filter_info->flex_mask = 0;
964 }
965
966 static void
967 eth_igb_close(struct rte_eth_dev *dev)
968 {
969         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
970         struct rte_eth_link link;
971
972         eth_igb_stop(dev);
973         e1000_phy_hw_reset(hw);
974         igb_release_manageability(hw);
975         igb_hw_control_release(hw);
976
977         /* Clear bit for Go Link disconnect */
978         if (hw->mac.type >= e1000_82580) {
979                 uint32_t phpm_reg;
980
981                 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
982                 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
983                 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
984         }
985
986         igb_dev_clear_queues(dev);
987
988         memset(&link, 0, sizeof(link));
989         rte_igb_dev_atomic_write_link_status(dev, &link);
990 }
991
992 static int
993 igb_get_rx_buffer_size(struct e1000_hw *hw)
994 {
995         uint32_t rx_buf_size;
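        /*
         * RXPBS/PBA report the RX packet-buffer size in KB; the << 10 shifts
         * convert it to bytes.  The 82580/i350 value is an encoded index that
         * must first go through e1000_rxpbs_adjust_82580().
         */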
996         if (hw->mac.type == e1000_82576) {
997                 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
998         } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
999                 /* PBS needs to be translated according to a lookup table */
1000                 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
1001                 rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
1002                 rx_buf_size = (rx_buf_size << 10);
1003         } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
1004                 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
1005         } else {
1006                 rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
1007         }
1008
1009         return rx_buf_size;
1010 }
1011
1012 /*********************************************************************
1013  *
1014  *  Initialize the hardware
1015  *
1016  **********************************************************************/
1017 static int
1018 igb_hardware_init(struct e1000_hw *hw)
1019 {
1020         uint32_t rx_buf_size;
1021         int diag;
1022
1023         /* Let the firmware know the OS is in control */
1024         igb_hw_control_acquire(hw);
1025
1026         /*
1027          * These parameters control the automatic generation (Tx) and
1028          * response (Rx) to Ethernet PAUSE frames.
1029          * - High water mark should allow for at least two standard size (1518)
1030          *   frames to be received after sending an XOFF.
1031          * - Low water mark works best when it is very near the high water mark.
1032          *   This allows the receiver to restart by sending XON when it has
1033          *   drained a bit. Here we use an arbitrary value of 1500 which will
1034          *   restart after one full frame is pulled from the buffer. There
1035          *   could be several smaller frames in the buffer and if so they will
1036          *   not trigger the XON until their total number reduces the buffer
1037          *   by 1500.
1038          * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1039          */
1040         rx_buf_size = igb_get_rx_buffer_size(hw);
1041
1042         hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
1043         hw->fc.low_water = hw->fc.high_water - 1500;
1044         hw->fc.pause_time = IGB_FC_PAUSE_TIME;
1045         hw->fc.send_xon = 1;
1046
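        /*
         * For example, with a 64 KB RX packet buffer (typical on 82576) this
         * yields high_water = 65536 - 2 * 1518 = 62500 bytes and
         * low_water = 61000 bytes; the 64 KB figure is only illustrative.
         */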
1047         /* Set Flow control, use the tunable location if sane */
1048         if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
1049                 hw->fc.requested_mode = igb_fc_setting;
1050         else
1051                 hw->fc.requested_mode = e1000_fc_none;
1052
1053         /* Issue a global reset */
1054         igb_pf_reset_hw(hw);
1055         E1000_WRITE_REG(hw, E1000_WUC, 0);
1056
1057         diag = e1000_init_hw(hw);
1058         if (diag < 0)
1059                 return (diag);
1060
1061         E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
1062         e1000_get_phy_info(hw);
1063         e1000_check_for_link(hw);
1064
1065         return (0);
1066 }
1067
1068 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
1069 static void
1070 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1071 {
1072         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1073         struct e1000_hw_stats *stats =
1074                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1075         int pause_frames;
1076
1077         if(hw->phy.media_type == e1000_media_type_copper ||
1078             (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1079                 stats->symerrs +=
1080                     E1000_READ_REG(hw,E1000_SYMERRS);
1081                 stats->sec += E1000_READ_REG(hw, E1000_SEC);
1082         }
1083
1084         stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
1085         stats->mpc += E1000_READ_REG(hw, E1000_MPC);
1086         stats->scc += E1000_READ_REG(hw, E1000_SCC);
1087         stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
1088
1089         stats->mcc += E1000_READ_REG(hw, E1000_MCC);
1090         stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
1091         stats->colc += E1000_READ_REG(hw, E1000_COLC);
1092         stats->dc += E1000_READ_REG(hw, E1000_DC);
1093         stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
1094         stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
1095         stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
1096         /*
1097         ** For watchdog management we need to know if we have been
1098         ** paused during the last interval, so capture that here.
1099         */
1100         pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
1101         stats->xoffrxc += pause_frames;
1102         stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
1103         stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
1104         stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
1105         stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
1106         stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
1107         stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
1108         stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
1109         stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
1110         stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
1111         stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
1112         stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
1113         stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
1114
1115         /* For the 64-bit byte counters the low dword must be read first. */
1116         /* Both registers clear on the read of the high dword */
1117
1118         stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
1119         stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
1120         stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
1121         stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
1122
1123         stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
1124         stats->ruc += E1000_READ_REG(hw, E1000_RUC);
1125         stats->rfc += E1000_READ_REG(hw, E1000_RFC);
1126         stats->roc += E1000_READ_REG(hw, E1000_ROC);
1127         stats->rjc += E1000_READ_REG(hw, E1000_RJC);
1128
1129         stats->tor += E1000_READ_REG(hw, E1000_TORH);
1130         stats->tot += E1000_READ_REG(hw, E1000_TOTH);
1131
1132         stats->tpr += E1000_READ_REG(hw, E1000_TPR);
1133         stats->tpt += E1000_READ_REG(hw, E1000_TPT);
1134         stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
1135         stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
1136         stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
1137         stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
1138         stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
1139         stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
1140         stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
1141         stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
1142
1143         /* Interrupt Counts */
1144
1145         stats->iac += E1000_READ_REG(hw, E1000_IAC);
1146         stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
1147         stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
1148         stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
1149         stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
1150         stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
1151         stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
1152         stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
1153         stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
1154
1155         /* Host to Card Statistics */
1156
1157         stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
1158         stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
1159         stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
1160         stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
1161         stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
1162         stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
1163         stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
1164         stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
1165         stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
1166         stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
1167         stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
1168         stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
1169         stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
1170         stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
1171
1172         stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
1173         stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
1174         stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
1175         stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
1176         stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
1177         stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
1178
1179         if (rte_stats == NULL)
1180                 return;
1181
1182         /* Rx Errors */
1183         rte_stats->ibadcrc = stats->crcerrs;
1184         rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
1185         rte_stats->imissed = stats->mpc;
1186         rte_stats->ierrors = rte_stats->ibadcrc +
1187                              rte_stats->ibadlen +
1188                              rte_stats->imissed +
1189                              stats->rxerrc + stats->algnerrc + stats->cexterr;
1190
1191         /* Tx Errors */
1192         rte_stats->oerrors = stats->ecol + stats->latecol;
1193
1194         /* XON/XOFF pause frames */
1195         rte_stats->tx_pause_xon  = stats->xontxc;
1196         rte_stats->rx_pause_xon  = stats->xonrxc;
1197         rte_stats->tx_pause_xoff = stats->xofftxc;
1198         rte_stats->rx_pause_xoff = stats->xoffrxc;
1199
1200         rte_stats->ipackets = stats->gprc;
1201         rte_stats->opackets = stats->gptc;
1202         rte_stats->ibytes   = stats->gorc;
1203         rte_stats->obytes   = stats->gotc;
1204 }
1205
1206 static void
1207 eth_igb_stats_reset(struct rte_eth_dev *dev)
1208 {
1209         struct e1000_hw_stats *hw_stats =
1210                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1211
1212         /* HW registers are cleared on read */
1213         eth_igb_stats_get(dev, NULL);
1214
1215         /* Reset software totals */
1216         memset(hw_stats, 0, sizeof(*hw_stats));
1217 }
1218
1219 static void
1220 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1221 {
1222         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1223         struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1224                           E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1225
1226         /* Good Rx packets, include VF loopback */
1227         UPDATE_VF_STAT(E1000_VFGPRC,
1228             hw_stats->last_gprc, hw_stats->gprc);
1229
1230         /* Good Rx octets, include VF loopback */
1231         UPDATE_VF_STAT(E1000_VFGORC,
1232             hw_stats->last_gorc, hw_stats->gorc);
1233
1234         /* Good Tx packets, include VF loopback */
1235         UPDATE_VF_STAT(E1000_VFGPTC,
1236             hw_stats->last_gptc, hw_stats->gptc);
1237
1238         /* Good Tx octets, include VF loopback */
1239         UPDATE_VF_STAT(E1000_VFGOTC,
1240             hw_stats->last_gotc, hw_stats->gotc);
1241
1242         /* Rx Multicast packets */
1243         UPDATE_VF_STAT(E1000_VFMPRC,
1244             hw_stats->last_mprc, hw_stats->mprc);
1245
1246         /* Good Rx loopback packets */
1247         UPDATE_VF_STAT(E1000_VFGPRLBC,
1248             hw_stats->last_gprlbc, hw_stats->gprlbc);
1249
1250         /* Good Rx loopback octets */
1251         UPDATE_VF_STAT(E1000_VFGORLBC,
1252             hw_stats->last_gorlbc, hw_stats->gorlbc);
1253
1254         /* Good Tx loopback packets */
1255         UPDATE_VF_STAT(E1000_VFGPTLBC,
1256             hw_stats->last_gptlbc, hw_stats->gptlbc);
1257
1258         /* Good Tx loopback octets */
1259         UPDATE_VF_STAT(E1000_VFGOTLBC,
1260             hw_stats->last_gotlbc, hw_stats->gotlbc);
1261
1262         if (rte_stats == NULL)
1263                 return;
1264
1265         rte_stats->ipackets = hw_stats->gprc;
1266         rte_stats->ibytes = hw_stats->gorc;
1267         rte_stats->opackets = hw_stats->gptc;
1268         rte_stats->obytes = hw_stats->gotc;
1269         rte_stats->imcasts = hw_stats->mprc;
1270         rte_stats->ilbpackets = hw_stats->gprlbc;
1271         rte_stats->ilbbytes = hw_stats->gorlbc;
1272         rte_stats->olbpackets = hw_stats->gptlbc;
1273         rte_stats->olbbytes = hw_stats->gotlbc;
1274
1275 }
1276
1277 static void
1278 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
1279 {
1280         struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1281                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1282
1283         /* Sync HW register to the last stats */
1284         eth_igbvf_stats_get(dev, NULL);
1285
1286         /* reset HW current stats */
1287         memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
1288                offsetof(struct e1000_vf_stats, gprc));
1289
1290 }
1291
1292 static void
1293 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1294 {
1295         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1296
1297         dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1298         dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
1299         dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1300         dev_info->rx_offload_capa =
1301                 DEV_RX_OFFLOAD_VLAN_STRIP |
1302                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1303                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1304                 DEV_RX_OFFLOAD_TCP_CKSUM;
1305         dev_info->tx_offload_capa =
1306                 DEV_TX_OFFLOAD_VLAN_INSERT |
1307                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1308                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1309                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1310                 DEV_TX_OFFLOAD_SCTP_CKSUM;
1311
1312         switch (hw->mac.type) {
1313         case e1000_82575:
1314                 dev_info->max_rx_queues = 4;
1315                 dev_info->max_tx_queues = 4;
1316                 dev_info->max_vmdq_pools = 0;
1317                 break;
1318
1319         case e1000_82576:
1320                 dev_info->max_rx_queues = 16;
1321                 dev_info->max_tx_queues = 16;
1322                 dev_info->max_vmdq_pools = ETH_8_POOLS;
1323                 dev_info->vmdq_queue_num = 16;
1324                 break;
1325
1326         case e1000_82580:
1327                 dev_info->max_rx_queues = 8;
1328                 dev_info->max_tx_queues = 8;
1329                 dev_info->max_vmdq_pools = ETH_8_POOLS;
1330                 dev_info->vmdq_queue_num = 8;
1331                 break;
1332
1333         case e1000_i350:
1334                 dev_info->max_rx_queues = 8;
1335                 dev_info->max_tx_queues = 8;
1336                 dev_info->max_vmdq_pools = ETH_8_POOLS;
1337                 dev_info->vmdq_queue_num = 8;
1338                 break;
1339
1340         case e1000_i354:
1341                 dev_info->max_rx_queues = 8;
1342                 dev_info->max_tx_queues = 8;
1343                 break;
1344
1345         case e1000_i210:
1346                 dev_info->max_rx_queues = 4;
1347                 dev_info->max_tx_queues = 4;
1348                 dev_info->max_vmdq_pools = 0;
1349                 break;
1350
1351         case e1000_i211:
1352                 dev_info->max_rx_queues = 2;
1353                 dev_info->max_tx_queues = 2;
1354                 dev_info->max_vmdq_pools = 0;
1355                 break;
1356
1357         default:
1358                 /* Should not happen */
1359                 break;
1360         }
1361         dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1362
1363         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1364                 .rx_thresh = {
1365                         .pthresh = IGB_DEFAULT_RX_PTHRESH,
1366                         .hthresh = IGB_DEFAULT_RX_HTHRESH,
1367                         .wthresh = IGB_DEFAULT_RX_WTHRESH,
1368                 },
1369                 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1370                 .rx_drop_en = 0,
1371         };
1372
1373         dev_info->default_txconf = (struct rte_eth_txconf) {
1374                 .tx_thresh = {
1375                         .pthresh = IGB_DEFAULT_TX_PTHRESH,
1376                         .hthresh = IGB_DEFAULT_TX_HTHRESH,
1377                         .wthresh = IGB_DEFAULT_TX_WTHRESH,
1378                 },
1379                 .txq_flags = 0,
1380         };
1381 }
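
/*
 * Illustrative sketch, not part of the driver: an application normally
 * consumes the capabilities filled in above through the generic ethdev
 * call rte_eth_dev_info_get().  port_id and nb_rx_queues are assumed
 * application-side variables.
 *
 *     struct rte_eth_dev_info dev_info;
 *
 *     rte_eth_dev_info_get(port_id, &dev_info);
 *     if (nb_rx_queues > dev_info.max_rx_queues)
 *             nb_rx_queues = dev_info.max_rx_queues;
 */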
1382
1383 static void
1384 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1385 {
1386         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1387
1388         dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1389         dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
1390         dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1391         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1392                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1393                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1394                                 DEV_RX_OFFLOAD_TCP_CKSUM;
1395         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1396                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1397                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1398                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1399                                 DEV_TX_OFFLOAD_SCTP_CKSUM;
1400         switch (hw->mac.type) {
1401         case e1000_vfadapt:
1402                 dev_info->max_rx_queues = 2;
1403                 dev_info->max_tx_queues = 2;
1404                 break;
1405         case e1000_vfadapt_i350:
1406                 dev_info->max_rx_queues = 1;
1407                 dev_info->max_tx_queues = 1;
1408                 break;
1409         default:
1410                 /* Should not happen */
1411                 break;
1412         }
1413
1414         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1415                 .rx_thresh = {
1416                         .pthresh = IGB_DEFAULT_RX_PTHRESH,
1417                         .hthresh = IGB_DEFAULT_RX_HTHRESH,
1418                         .wthresh = IGB_DEFAULT_RX_WTHRESH,
1419                 },
1420                 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1421                 .rx_drop_en = 0,
1422         };
1423
1424         dev_info->default_txconf = (struct rte_eth_txconf) {
1425                 .tx_thresh = {
1426                         .pthresh = IGB_DEFAULT_TX_PTHRESH,
1427                         .hthresh = IGB_DEFAULT_TX_HTHRESH,
1428                         .wthresh = IGB_DEFAULT_TX_WTHRESH,
1429                 },
1430                 .txq_flags = 0,
1431         };
1432 }
1433
1434 /* return 0 means link status changed, -1 means not changed */
1435 static int
1436 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1437 {
1438         struct e1000_hw *hw =
1439                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1440         struct rte_eth_link link, old;
1441         int link_check, count;
1442
1443         link_check = 0;
1444         hw->mac.get_link_status = 1;
1445
1446         /* possible wait-to-complete in up to 9 seconds */
1447         for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
1448                 /* Read the real link status */
1449                 switch (hw->phy.media_type) {
1450                 case e1000_media_type_copper:
1451                         /* Do the work to read phy */
1452                         e1000_check_for_link(hw);
1453                         link_check = !hw->mac.get_link_status;
1454                         break;
1455
1456                 case e1000_media_type_fiber:
1457                         e1000_check_for_link(hw);
1458                         link_check = (E1000_READ_REG(hw, E1000_STATUS) &
1459                                       E1000_STATUS_LU);
1460                         break;
1461
1462                 case e1000_media_type_internal_serdes:
1463                         e1000_check_for_link(hw);
1464                         link_check = hw->mac.serdes_has_link;
1465                         break;
1466
1467                 /* VF device is type_unknown */
1468                 case e1000_media_type_unknown:
1469                         eth_igbvf_link_update(hw);
1470                         link_check = !hw->mac.get_link_status;
1471                         break;
1472
1473                 default:
1474                         break;
1475                 }
1476                 if (link_check || wait_to_complete == 0)
1477                         break;
1478                 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
1479         }
1480         memset(&link, 0, sizeof(link));
1481         rte_igb_dev_atomic_read_link_status(dev, &link);
1482         old = link;
1483
1484         /* Now we check if a transition has happened */
1485         if (link_check) {
1486                 hw->mac.ops.get_link_up_info(hw, &link.link_speed,
1487                                           &link.link_duplex);
1488                 link.link_status = 1;
1489         } else if (!link_check) {
1490                 link.link_speed = 0;
1491                 link.link_duplex = 0;
1492                 link.link_status = 0;
1493         }
1494         rte_igb_dev_atomic_write_link_status(dev, &link);
1495
1496         /* not changed */
1497         if (old.link_status == link.link_status)
1498                 return -1;
1499
1500         /* changed */
1501         return 0;
1502 }
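
/*
 * Illustrative sketch, not part of the driver: eth_igb_link_update() is
 * reached through the generic ethdev link calls, where wait_to_complete
 * selects between the bounded polling loop above and a single check.
 * port_id is an assumed application-side variable.
 *
 *     struct rte_eth_link link;
 *
 *     rte_eth_link_get(port_id, &link);          (wait_to_complete == 1)
 *     rte_eth_link_get_nowait(port_id, &link);   (wait_to_complete == 0)
 *     if (link.link_status)
 *             printf("link up, %u Mbps\n", link.link_speed);
 */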
1503
1504 /*
1505  * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
1506  * For ASF and Pass Through versions of f/w this means
1507  * that the driver is loaded.
1508  */
1509 static void
1510 igb_hw_control_acquire(struct e1000_hw *hw)
1511 {
1512         uint32_t ctrl_ext;
1513
1514         /* Let firmware know the driver has taken over */
1515         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1516         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1517 }
1518
1519 /*
1520  * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
1521  * For ASF and Pass Through versions of f/w this means that the
1522  * driver is no longer loaded.
1523  */
1524 static void
1525 igb_hw_control_release(struct e1000_hw *hw)
1526 {
1527         uint32_t ctrl_ext;
1528
1529         /* Let firmware take over control of h/w */
1530         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1531         E1000_WRITE_REG(hw, E1000_CTRL_EXT,
1532                         ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1533 }
1534
1535 /*
1536  * Bit of a misnomer; what this really means is
1537  * to enable OS management of the system, i.e.
1538  * to disable special hardware management features.
1539  */
1540 static void
1541 igb_init_manageability(struct e1000_hw *hw)
1542 {
1543         if (e1000_enable_mng_pass_thru(hw)) {
1544                 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
1545                 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1546
1547                 /* disable hardware interception of ARP */
1548                 manc &= ~(E1000_MANC_ARP_EN);
1549
1550                 /* enable receiving management packets to the host */
1551                 manc |= E1000_MANC_EN_MNG2HOST;
1552                 manc2h |= 1 << 5;  /* Mng Port 623 */
1553                 manc2h |= 1 << 6;  /* Mng Port 664 */
1554                 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
1555                 E1000_WRITE_REG(hw, E1000_MANC, manc);
1556         }
1557 }
1558
1559 static void
1560 igb_release_manageability(struct e1000_hw *hw)
1561 {
1562         if (e1000_enable_mng_pass_thru(hw)) {
1563                 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1564
1565                 manc |= E1000_MANC_ARP_EN;
1566                 manc &= ~E1000_MANC_EN_MNG2HOST;
1567
1568                 E1000_WRITE_REG(hw, E1000_MANC, manc);
1569         }
1570 }
1571
1572 static void
1573 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
1574 {
1575         struct e1000_hw *hw =
1576                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1577         uint32_t rctl;
1578
1579         rctl = E1000_READ_REG(hw, E1000_RCTL);
1580         rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1581         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1582 }
1583
1584 static void
1585 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
1586 {
1587         struct e1000_hw *hw =
1588                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1589         uint32_t rctl;
1590
1591         rctl = E1000_READ_REG(hw, E1000_RCTL);
1592         rctl &= (~E1000_RCTL_UPE);
1593         if (dev->data->all_multicast == 1)
1594                 rctl |= E1000_RCTL_MPE;
1595         else
1596                 rctl &= (~E1000_RCTL_MPE);
1597         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1598 }
1599
1600 static void
1601 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
1602 {
1603         struct e1000_hw *hw =
1604                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1605         uint32_t rctl;
1606
1607         rctl = E1000_READ_REG(hw, E1000_RCTL);
1608         rctl |= E1000_RCTL_MPE;
1609         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1610 }
1611
1612 static void
1613 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
1614 {
1615         struct e1000_hw *hw =
1616                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1617         uint32_t rctl;
1618
1619         if (dev->data->promiscuous == 1)
1620                 return; /* must remain in all_multicast mode */
1621         rctl = E1000_READ_REG(hw, E1000_RCTL);
1622         rctl &= (~E1000_RCTL_MPE);
1623         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1624 }
1625
1626 static int
1627 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1628 {
1629         struct e1000_hw *hw =
1630                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1631         struct e1000_vfta * shadow_vfta =
1632                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1633         uint32_t vfta;
1634         uint32_t vid_idx;
1635         uint32_t vid_bit;
1636
1637         vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
1638                               E1000_VFTA_ENTRY_MASK);
1639         vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
1640         vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
1641         if (on)
1642                 vfta |= vid_bit;
1643         else
1644                 vfta &= ~vid_bit;
1645         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
1646
1647         /* update local VFTA copy */
1648         shadow_vfta->vfta[vid_idx] = vfta;
1649
1650         return 0;
1651 }
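
/*
 * VFTA indexing, worked example: the VLAN filter table is an array of
 * 32-bit words with one bit per VLAN id.  Assuming E1000_VFTA_ENTRY_SHIFT
 * is 5 and E1000_VFTA_ENTRY_MASK is 0x7F (the literal values used in
 * igbvf_vlan_filter_set() below), enabling vlan_id = 100 gives:
 *
 *     vid_idx = (100 >> 5) & 0x7F = 3          word 3 of the table
 *     vid_bit = 1 << (100 & 0x1F) = 1 << 4     bit 4 within that word
 *
 * so bit 4 of VFTA word 3 is set, and the same value is mirrored into
 * shadow_vfta so the table can be restored later.
 */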
1652
1653 static void
1654 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
1655 {
1656         struct e1000_hw *hw =
1657                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1658         uint32_t reg = ETHER_TYPE_VLAN;
1659
1660         reg |= (tpid << 16);
1661         E1000_WRITE_REG(hw, E1000_VET, reg);
1662 }
1663
1664 static void
1665 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1666 {
1667         struct e1000_hw *hw =
1668                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1669         uint32_t reg;
1670
1671         /* Filter Table Disable */
1672         reg = E1000_READ_REG(hw, E1000_RCTL);
1673         reg &= ~E1000_RCTL_CFIEN;
1674         reg &= ~E1000_RCTL_VFE;
1675         E1000_WRITE_REG(hw, E1000_RCTL, reg);
1676 }
1677
1678 static void
1679 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1680 {
1681         struct e1000_hw *hw =
1682                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1683         struct e1000_vfta * shadow_vfta =
1684                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1685         uint32_t reg;
1686         int i;
1687
1688         /* Filter Table Enable, CFI not used for packet acceptance */
1689         reg = E1000_READ_REG(hw, E1000_RCTL);
1690         reg &= ~E1000_RCTL_CFIEN;
1691         reg |= E1000_RCTL_VFE;
1692         E1000_WRITE_REG(hw, E1000_RCTL, reg);
1693
1694         /* restore VFTA table */
1695         for (i = 0; i < IGB_VFTA_SIZE; i++)
1696                 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
1697 }
1698
1699 static void
1700 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
1701 {
1702         struct e1000_hw *hw =
1703                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1704         uint32_t reg;
1705
1706         /* VLAN Mode Disable */
1707         reg = E1000_READ_REG(hw, E1000_CTRL);
1708         reg &= ~E1000_CTRL_VME;
1709         E1000_WRITE_REG(hw, E1000_CTRL, reg);
1710 }
1711
1712 static void
1713 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
1714 {
1715         struct e1000_hw *hw =
1716                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1717         uint32_t reg;
1718
1719         /* VLAN Mode Enable */
1720         reg = E1000_READ_REG(hw, E1000_CTRL);
1721         reg |= E1000_CTRL_VME;
1722         E1000_WRITE_REG(hw, E1000_CTRL, reg);
1723 }
1724
1725 static void
1726 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1727 {
1728         struct e1000_hw *hw =
1729                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1730         uint32_t reg;
1731
1732         /* CTRL_EXT: Extended VLAN */
1733         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1734         reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
1735         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1736
1737         /* Update maximum packet length */
1738         if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1739                 E1000_WRITE_REG(hw, E1000_RLPML,
1740                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1741                                                 VLAN_TAG_SIZE);
1742 }
1743
1744 static void
1745 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1746 {
1747         struct e1000_hw *hw =
1748                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1749         uint32_t reg;
1750
1751         /* CTRL_EXT: Extended VLAN */
1752         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1753         reg |= E1000_CTRL_EXT_EXTEND_VLAN;
1754         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1755
1756         /* Update maximum packet length */
1757         if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1758                 E1000_WRITE_REG(hw, E1000_RLPML,
1759                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1760                                                 2 * VLAN_TAG_SIZE);
1761 }
1762
1763 static void
1764 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1765 {
1766         if (mask & ETH_VLAN_STRIP_MASK) {
1767                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1768                         igb_vlan_hw_strip_enable(dev);
1769                 else
1770                         igb_vlan_hw_strip_disable(dev);
1771         }
1772
1773         if (mask & ETH_VLAN_FILTER_MASK) {
1774                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1775                         igb_vlan_hw_filter_enable(dev);
1776                 else
1777                         igb_vlan_hw_filter_disable(dev);
1778         }
1779
1780         if (mask & ETH_VLAN_EXTEND_MASK) {
1781                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1782                         igb_vlan_hw_extend_enable(dev);
1783                 else
1784                         igb_vlan_hw_extend_disable(dev);
1785         }
1786 }
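
/*
 * Illustrative sketch, an assumption rather than code taken from the
 * ethdev layer: the mask argument selects which of the three
 * dev_conf.rxmode VLAN settings above to (re)apply, so applying all of
 * the currently configured VLAN settings at once would look like:
 *
 *     eth_igb_vlan_offload_set(dev,
 *             ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
 *             ETH_VLAN_EXTEND_MASK);
 */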
1787
1788
1789 /**
1790  * It enables the link status change interrupt in the interrupt mask.
1791  *
1792  * @param dev
1793  *  Pointer to struct rte_eth_dev.
1794  *
1795  * @return
1796  *  - On success, zero.
1797  *  - On failure, a negative value.
1798  */
1799 static int
1800 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
1801 {
1802         struct e1000_interrupt *intr =
1803                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1804
1805         intr->mask |= E1000_ICR_LSC;
1806
1807         return 0;
1808 }
1809
1810 /*
1811  * It reads ICR and gets interrupt causes, checks them and sets a bit flag
1812  * to update link status.
1813  *
1814  * @param dev
1815  *  Pointer to struct rte_eth_dev.
1816  *
1817  * @return
1818  *  - On success, zero.
1819  *  - On failure, a negative value.
1820  */
1821 static int
1822 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
1823 {
1824         uint32_t icr;
1825         struct e1000_hw *hw =
1826                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1827         struct e1000_interrupt *intr =
1828                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1829
1830         igb_intr_disable(hw);
1831
1832         /* read-on-clear nic registers here */
1833         icr = E1000_READ_REG(hw, E1000_ICR);
1834
1835         intr->flags = 0;
1836         if (icr & E1000_ICR_LSC) {
1837                 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
1838         }
1839
1840         if (icr & E1000_ICR_VMMB)
1841                 intr->flags |= E1000_FLAG_MAILBOX;
1842
1843         return 0;
1844 }
1845
1846 /*
1847  * It executes link_update after knowing an interrupt is present.
1848  *
1849  * @param dev
1850  *  Pointer to struct rte_eth_dev.
1851  *
1852  * @return
1853  *  - On success, zero.
1854  *  - On failure, a negative value.
1855  */
1856 static int
1857 eth_igb_interrupt_action(struct rte_eth_dev *dev)
1858 {
1859         struct e1000_hw *hw =
1860                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1861         struct e1000_interrupt *intr =
1862                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1863         uint32_t tctl, rctl;
1864         struct rte_eth_link link;
1865         int ret;
1866
1867         if (intr->flags & E1000_FLAG_MAILBOX) {
1868                 igb_pf_mbx_process(dev);
1869                 intr->flags &= ~E1000_FLAG_MAILBOX;
1870         }
1871
1872         igb_intr_enable(dev);
1873         rte_intr_enable(&(dev->pci_dev->intr_handle));
1874
1875         if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
1876                 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
1877
1878                 /* set get_link_status to check register later */
1879                 hw->mac.get_link_status = 1;
1880                 ret = eth_igb_link_update(dev, 0);
1881
1882                 /* check if link has changed */
1883                 if (ret < 0)
1884                         return 0;
1885
1886                 memset(&link, 0, sizeof(link));
1887                 rte_igb_dev_atomic_read_link_status(dev, &link);
1888                 if (link.link_status) {
1889                         PMD_INIT_LOG(INFO,
1890                                      " Port %d: Link Up - speed %u Mbps - %s",
1891                                      dev->data->port_id,
1892                                      (unsigned)link.link_speed,
1893                                      link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1894                                      "full-duplex" : "half-duplex");
1895                 } else {
1896                         PMD_INIT_LOG(INFO, " Port %d: Link Down",
1897                                      dev->data->port_id);
1898                 }
1899                 PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
1900                              dev->pci_dev->addr.domain,
1901                              dev->pci_dev->addr.bus,
1902                              dev->pci_dev->addr.devid,
1903                              dev->pci_dev->addr.function);
1904                 tctl = E1000_READ_REG(hw, E1000_TCTL);
1905                 rctl = E1000_READ_REG(hw, E1000_RCTL);
1906                 if (link.link_status) {
1907                         /* enable Tx/Rx */
1908                         tctl |= E1000_TCTL_EN;
1909                         rctl |= E1000_RCTL_EN;
1910                 } else {
1911                         /* disable Tx/Rx */
1912                         tctl &= ~E1000_TCTL_EN;
1913                         rctl &= ~E1000_RCTL_EN;
1914                 }
1915                 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1916                 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1917                 E1000_WRITE_FLUSH(hw);
1918                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
1919         }
1920
1921         return 0;
1922 }
1923
1924 /**
1925  * Interrupt handler which shall be registered first.
1926  *
1927  * @param handle
1928  *  Pointer to interrupt handle.
1929  * @param param
1930  *  The address of the parameter (struct rte_eth_dev *) registered before.
1931  *
1932  * @return
1933  *  void
1934  */
1935 static void
1936 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
1937                                                         void *param)
1938 {
1939         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1940
1941         eth_igb_interrupt_get_status(dev);
1942         eth_igb_interrupt_action(dev);
1943 }
1944
1945 static int
1946 eth_igb_led_on(struct rte_eth_dev *dev)
1947 {
1948         struct e1000_hw *hw;
1949
1950         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1951         return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
1952 }
1953
1954 static int
1955 eth_igb_led_off(struct rte_eth_dev *dev)
1956 {
1957         struct e1000_hw *hw;
1958
1959         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1960         return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
1961 }
1962
1963 static int
1964 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1965 {
1966         struct e1000_hw *hw;
1967         uint32_t ctrl;
1968         int tx_pause;
1969         int rx_pause;
1970
1971         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1972         fc_conf->pause_time = hw->fc.pause_time;
1973         fc_conf->high_water = hw->fc.high_water;
1974         fc_conf->low_water = hw->fc.low_water;
1975         fc_conf->send_xon = hw->fc.send_xon;
1976         fc_conf->autoneg = hw->mac.autoneg;
1977
1978         /*
1979          * Return rx_pause and tx_pause status according to actual setting of
1980          * the TFCE and RFCE bits in the CTRL register.
1981          */
1982         ctrl = E1000_READ_REG(hw, E1000_CTRL);
1983         if (ctrl & E1000_CTRL_TFCE)
1984                 tx_pause = 1;
1985         else
1986                 tx_pause = 0;
1987
1988         if (ctrl & E1000_CTRL_RFCE)
1989                 rx_pause = 1;
1990         else
1991                 rx_pause = 0;
1992
1993         if (rx_pause && tx_pause)
1994                 fc_conf->mode = RTE_FC_FULL;
1995         else if (rx_pause)
1996                 fc_conf->mode = RTE_FC_RX_PAUSE;
1997         else if (tx_pause)
1998                 fc_conf->mode = RTE_FC_TX_PAUSE;
1999         else
2000                 fc_conf->mode = RTE_FC_NONE;
2001
2002         return 0;
2003 }
2004
2005 static int
2006 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2007 {
2008         struct e1000_hw *hw;
2009         int err;
2010         enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2011                 e1000_fc_none,
2012                 e1000_fc_rx_pause,
2013                 e1000_fc_tx_pause,
2014                 e1000_fc_full
2015         };
2016         uint32_t rx_buf_size;
2017         uint32_t max_high_water;
2018         uint32_t rctl;
2019
2020         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2021         if (fc_conf->autoneg != hw->mac.autoneg)
2022                 return -ENOTSUP;
2023         rx_buf_size = igb_get_rx_buffer_size(hw);
2024         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2025
2026         /* At least reserve one Ethernet frame for watermark */
2027         max_high_water = rx_buf_size - ETHER_MAX_LEN;
2028         if ((fc_conf->high_water > max_high_water) ||
2029             (fc_conf->high_water < fc_conf->low_water)) {
2030                 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
2031                 PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
2032                 return (-EINVAL);
2033         }
2034
2035         hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2036         hw->fc.pause_time     = fc_conf->pause_time;
2037         hw->fc.high_water     = fc_conf->high_water;
2038         hw->fc.low_water      = fc_conf->low_water;
2039         hw->fc.send_xon       = fc_conf->send_xon;
2040
2041         err = e1000_setup_link_generic(hw);
2042         if (err == E1000_SUCCESS) {
2043
2044                 /* check if we want to forward MAC frames - driver doesn't have native
2045                  * capability to do that, so we'll write the registers ourselves */
2046
2047                 rctl = E1000_READ_REG(hw, E1000_RCTL);
2048
2049                 /* set or clear RCTL.PMCF bit depending on configuration */
2050                 if (fc_conf->mac_ctrl_frame_fwd != 0)
2051                         rctl |= E1000_RCTL_PMCF;
2052                 else
2053                         rctl &= ~E1000_RCTL_PMCF;
2054
2055                 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2056                 E1000_WRITE_FLUSH(hw);
2057
2058                 return 0;
2059         }
2060
2061         PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
2062         return (-EIO);
2063 }
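
/*
 * Illustrative sketch, not part of the driver: an application reaches the
 * handler above through rte_eth_dev_flow_ctrl_set().  The watermark and
 * pause-time values are arbitrary examples; fc.autoneg must match the
 * current hw->mac.autoneg setting (see the check above) and the
 * watermarks must respect the bounds derived from the Rx buffer size.
 *
 *     struct rte_eth_fc_conf fc = {
 *             .mode       = RTE_FC_FULL,
 *             .high_water = 0x5000,
 *             .low_water  = 0x3000,
 *             .pause_time = 0x680,
 *             .send_xon   = 1,
 *             .autoneg    = 1,
 *     };
 *
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */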
2064
2065 #define E1000_RAH_POOLSEL_SHIFT      (18)
2066 static void
2067 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2068                 uint32_t index, uint32_t pool)
2069 {
2070         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2071         uint32_t rah;
2072
2073         e1000_rar_set(hw, mac_addr->addr_bytes, index);
2074         rah = E1000_READ_REG(hw, E1000_RAH(index));
2075         rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2076         E1000_WRITE_REG(hw, E1000_RAH(index), rah);
2077 }
2078
2079 static void
2080 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2081 {
2082         uint8_t addr[ETHER_ADDR_LEN];
2083         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2084
2085         memset(addr, 0, sizeof(addr));
2086
2087         e1000_rar_set(hw, addr, index);
2088 }
2089
2090 /*
2091  * Virtual Function operations
2092  */
2093 static void
2094 igbvf_intr_disable(struct e1000_hw *hw)
2095 {
2096         PMD_INIT_FUNC_TRACE();
2097
2098         /* Clear interrupt mask to stop interrupts from being generated */
2099         E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
2100
2101         E1000_WRITE_FLUSH(hw);
2102 }
2103
2104 static void
2105 igbvf_stop_adapter(struct rte_eth_dev *dev)
2106 {
2107         u32 reg_val;
2108         u16 i;
2109         struct rte_eth_dev_info dev_info;
2110         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2111
2112         memset(&dev_info, 0, sizeof(dev_info));
2113         eth_igbvf_infos_get(dev, &dev_info);
2114
2115         /* Clear interrupt mask to stop interrupts from being generated */
2116         igbvf_intr_disable(hw);
2117
2118         /* Clear any pending interrupts, flush previous writes */
2119         E1000_READ_REG(hw, E1000_EICR);
2120
2121         /* Disable the transmit unit.  Each queue must be disabled. */
2122         for (i = 0; i < dev_info.max_tx_queues; i++)
2123                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
2124
2125         /* Disable the receive unit by stopping each queue */
2126         for (i = 0; i < dev_info.max_rx_queues; i++) {
2127                 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
2128                 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
2129                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
2130                 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
2131                         ;
2132         }
2133
2134         /* flush all queue disable writes */
2135         E1000_WRITE_FLUSH(hw);
2136         msec_delay(2);
2137 }
2138
2139 static int eth_igbvf_link_update(struct e1000_hw *hw)
2140 {
2141         struct e1000_mbx_info *mbx = &hw->mbx;
2142         struct e1000_mac_info *mac = &hw->mac;
2143         int ret_val = E1000_SUCCESS;
2144
2145         PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
2146
2147         /*
2148          * We only want to run this if there has been a reset asserted.
2149          * In this case that could mean a link change, a device reset,
2150          * or a virtual function reset.
2151          */
2152
2153         /* If we were hit with a reset or timeout, drop the link */
2154         if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
2155                 mac->get_link_status = TRUE;
2156
2157         if (!mac->get_link_status)
2158                 goto out;
2159
2160         /* if link status is down no point in checking to see if pf is up */
2161         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
2162                 goto out;
2163
2164         /* if we passed all the tests above then the link is up and we no
2165          * longer need to check for link */
2166         mac->get_link_status = FALSE;
2167
2168 out:
2169         return ret_val;
2170 }
2171
2172
2173 static int
2174 igbvf_dev_configure(struct rte_eth_dev *dev)
2175 {
2176         struct rte_eth_conf* conf = &dev->data->dev_conf;
2177
2178         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
2179                      dev->data->port_id);
2180
2181         /*
2182          * The VF has no ability to enable/disable HW CRC stripping.
2183          * Keep the behavior consistent with the host PF.
2184          */
2185 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
2186         if (!conf->rxmode.hw_strip_crc) {
2187                 PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
2188                 conf->rxmode.hw_strip_crc = 1;
2189         }
2190 #else
2191         if (conf->rxmode.hw_strip_crc) {
2192                 PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
2193                 conf->rxmode.hw_strip_crc = 0;
2194         }
2195 #endif
2196
2197         return 0;
2198 }
2199
2200 static int
2201 igbvf_dev_start(struct rte_eth_dev *dev)
2202 {
2203         struct e1000_hw *hw =
2204                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2205         int ret;
2206
2207         PMD_INIT_FUNC_TRACE();
2208
2209         hw->mac.ops.reset_hw(hw);
2210
2211         /* Set all vfta */
2212         igbvf_set_vfta_all(dev,1);
2213
2214         eth_igbvf_tx_init(dev);
2215
2216         /* This can fail when allocating mbufs for descriptor rings */
2217         ret = eth_igbvf_rx_init(dev);
2218         if (ret) {
2219                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2220                 igb_dev_clear_queues(dev);
2221                 return ret;
2222         }
2223
2224         return 0;
2225 }
2226
2227 static void
2228 igbvf_dev_stop(struct rte_eth_dev *dev)
2229 {
2230         PMD_INIT_FUNC_TRACE();
2231
2232         igbvf_stop_adapter(dev);
2233
2234         /*
2235           * Clear what we set, but we still keep shadow_vfta to
2236           * restore after device starts
2237           */
2238         igbvf_set_vfta_all(dev,0);
2239
2240         igb_dev_clear_queues(dev);
2241 }
2242
2243 static void
2244 igbvf_dev_close(struct rte_eth_dev *dev)
2245 {
2246         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2247
2248         PMD_INIT_FUNC_TRACE();
2249
2250         e1000_reset_hw(hw);
2251
2252         igbvf_dev_stop(dev);
2253 }
2254
2255 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
2256 {
2257         struct e1000_mbx_info *mbx = &hw->mbx;
2258         uint32_t msgbuf[2];
2259
2260         /* After setting the VLAN, VLAN stripping is also enabled in the igb driver */
2261         msgbuf[0] = E1000_VF_SET_VLAN;
2262         msgbuf[1] = vid;
2263         /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
2264         if (on)
2265                 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
2266
2267         return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
2268 }
2269
2270 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
2271 {
2272         struct e1000_hw *hw =
2273                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2274         struct e1000_vfta * shadow_vfta =
2275                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2276         int i = 0, j = 0, vfta = 0, mask = 1;
2277
2278         for (i = 0; i < IGB_VFTA_SIZE; i++) {
2279                 vfta = shadow_vfta->vfta[i];
2280                 if (vfta) {
2281                         mask = 1;
2282                         for (j = 0; j < 32; j++) {
2283                                 if (vfta & mask)
2284                                         igbvf_set_vfta(hw,
2285                                                 (uint16_t)((i << 5) + j), on);
2286                                 mask <<= 1;
2287                         }
2288                 }
2289         }
2290
2291 }
2292
2293 static int
2294 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2295 {
2296         struct e1000_hw *hw =
2297                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2298         struct e1000_vfta * shadow_vfta =
2299                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2300         uint32_t vid_idx = 0;
2301         uint32_t vid_bit = 0;
2302         int ret = 0;
2303
2304         PMD_INIT_FUNC_TRACE();
2305
2306         /* vind is not used in the VF driver, set to 0; check ixgbe_set_vfta_vf */
2307         ret = igbvf_set_vfta(hw, vlan_id, !!on);
2308         if (ret) {
2309                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
2310                 return ret;
2311         }
2312         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
2313         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
2314
2315         /* Save what we set and restore it after device reset */
2316         if (on)
2317                 shadow_vfta->vfta[vid_idx] |= vid_bit;
2318         else
2319                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
2320
2321         return 0;
2322 }
2323
2324 static int
2325 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
2326                         struct rte_eth_rss_reta_entry64 *reta_conf,
2327                         uint16_t reta_size)
2328 {
2329         uint8_t i, j, mask;
2330         uint32_t reta, r;
2331         uint16_t idx, shift;
2332         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2333
2334         if (reta_size != ETH_RSS_RETA_SIZE_128) {
2335                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2336                         "(%d) doesn't match the number hardware can support "
2337                         "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2338                 return -EINVAL;
2339         }
2340
2341         for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2342                 idx = i / RTE_RETA_GROUP_SIZE;
2343                 shift = i % RTE_RETA_GROUP_SIZE;
2344                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2345                                                 IGB_4_BIT_MASK);
2346                 if (!mask)
2347                         continue;
2348                 if (mask == IGB_4_BIT_MASK)
2349                         r = 0;
2350                 else
2351                         r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2352                 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
2353                         if (mask & (0x1 << j))
2354                                 reta |= reta_conf[idx].reta[shift + j] <<
2355                                                         (CHAR_BIT * j);
2356                         else
2357                                 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
2358                 }
2359                 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
2360         }
2361
2362         return 0;
2363 }
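
/*
 * RETA layout, worked note: the 128-entry redirection table is spread over
 * 32 RETA registers holding four 8-bit entries each, so the loop above
 * consumes IGB_4_BIT_WIDTH (4) entries per pass and addresses register
 * i >> 2.  A group mask of IGB_4_BIT_MASK (all four bits set) rewrites the
 * whole register, skipping the read-modify-write of the partial case.
 *
 * A minimal application-side sketch, assuming the generic
 * rte_eth_dev_rss_reta_update() entry point; the queue id is arbitrary:
 *
 *     struct rte_eth_rss_reta_entry64 conf[ETH_RSS_RETA_SIZE_128 /
 *                                          RTE_RETA_GROUP_SIZE];
 *
 *     memset(conf, 0, sizeof(conf));
 *     conf[0].mask = ~0ULL;
 *     conf[0].reta[0] = 1;                       (entry 0 -> queue 1)
 *     rte_eth_dev_rss_reta_update(port_id, conf, ETH_RSS_RETA_SIZE_128);
 */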
2364
2365 static int
2366 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
2367                        struct rte_eth_rss_reta_entry64 *reta_conf,
2368                        uint16_t reta_size)
2369 {
2370         uint8_t i, j, mask;
2371         uint32_t reta;
2372         uint16_t idx, shift;
2373         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2374
2375         if (reta_size != ETH_RSS_RETA_SIZE_128) {
2376                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2377                         "(%d) doesn't match the number hardware can support "
2378                         "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2379                 return -EINVAL;
2380         }
2381
2382         for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2383                 idx = i / RTE_RETA_GROUP_SIZE;
2384                 shift = i % RTE_RETA_GROUP_SIZE;
2385                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2386                                                 IGB_4_BIT_MASK);
2387                 if (!mask)
2388                         continue;
2389                 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2390                 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
2391                         if (mask & (0x1 << j))
2392                                 reta_conf[idx].reta[shift + j] =
2393                                         ((reta >> (CHAR_BIT * j)) &
2394                                                 IGB_8_BIT_MASK);
2395                 }
2396         }
2397
2398         return 0;
2399 }
2400
2401 #define MAC_TYPE_FILTER_SUP(type)    do {\
2402         if ((type) != e1000_82580 && (type) != e1000_i350 &&\
2403                 (type) != e1000_82576)\
2404                 return -ENOTSUP;\
2405 } while (0)
2406
2407 /*
2408  * add the syn filter
2409  *
2410  * @param
2411  * dev: Pointer to struct rte_eth_dev.
2412  * filter: pointer to the filter that will be added.
2413  * rx_queue: the queue id the filter is assigned to.
2414  *
2415  * @return
2416  *    - On success, zero.
2417  *    - On failure, a negative value.
2418  */
2419 static int
2420 eth_igb_add_syn_filter(struct rte_eth_dev *dev,
2421                         struct rte_syn_filter *filter, uint16_t rx_queue)
2422 {
2423         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2424         uint32_t synqf, rfctl;
2425
2426         MAC_TYPE_FILTER_SUP(hw->mac.type);
2427
2428         if (rx_queue >= IGB_MAX_RX_QUEUE_NUM)
2429                 return -EINVAL;
2430
2431         synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2432         if (synqf & E1000_SYN_FILTER_ENABLE)
2433                 return -EINVAL;
2434
2435         synqf = (uint32_t)(((rx_queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
2436                 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
2437
2438         rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2439         if (filter->hig_pri)
2440                 rfctl |= E1000_RFCTL_SYNQFP;
2441         else
2442                 rfctl &= ~E1000_RFCTL_SYNQFP;
2443
2444         E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
2445         E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2446         return 0;
2447 }
2448
2449 /*
2450  * remove the syn filter
2451  *
2452  * @param
2453  * dev: Pointer to struct rte_eth_dev.
2454  *
2455  * @return
2456  *    - On success, zero.
2457  *    - On failure, a negative value.
2458  */
2459 static int
2460 eth_igb_remove_syn_filter(struct rte_eth_dev *dev)
2461 {
2462         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2463
2464         MAC_TYPE_FILTER_SUP(hw->mac.type);
2465
2466         E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
2467         return 0;
2468 }
2469
2470 /*
2471  * get the syn filter's info
2472  *
2473  * @param
2474  * dev: Pointer to struct rte_eth_dev.
2475  * filter: pointer to the filter that is returned.
2476  * *rx_queue: pointer to the queue id the filter is assigned to.
2477  *
2478  * @return
2479  *    - On success, zero.
2480  *    - On failure, a negative value.
2481  */
2482 static int
2483 eth_igb_get_syn_filter(struct rte_eth_dev *dev,
2484                         struct rte_syn_filter *filter, uint16_t *rx_queue)
2485 {
2486         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2487         uint32_t synqf, rfctl;
2488
2489         MAC_TYPE_FILTER_SUP(hw->mac.type);
2490         synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2491         if (synqf & E1000_SYN_FILTER_ENABLE) {
2492                 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2493                 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
2494                 *rx_queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
2495                                 E1000_SYN_FILTER_QUEUE_SHIFT);
2496                 return 0;
2497         }
2498         return -ENOENT;
2499 }
2500
2501 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
2502         if ((type) != e1000_82580 && (type) != e1000_i350)\
2503                 return -ENOSYS; \
2504 } while (0)
2505
2506 /*
2507  * add a 2tuple filter
2508  *
2509  * @param
2510  * dev: Pointer to struct rte_eth_dev.
2511  * index: the index the filter allocates.
2512  * filter: pointer to the filter that will be added.
2513  * rx_queue: the queue id the filter is assigned to.
2514  *
2515  * @return
2516  *    - On success, zero.
2517  *    - On failure, a negative value.
2518  */
2519 static int
2520 eth_igb_add_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
2521                         struct rte_2tuple_filter *filter, uint16_t rx_queue)
2522 {
2523         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2524         uint32_t ttqf, imir = 0;
2525         uint32_t imir_ext = 0;
2526
2527         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
2528
2529         if (index >= E1000_MAX_TTQF_FILTERS ||
2530                 rx_queue >= IGB_MAX_RX_QUEUE_NUM ||
2531                 filter->priority > E1000_2TUPLE_MAX_PRI)
2532                 return -EINVAL;  /* filter index, queue id or priority is out of range. */
2533         if (filter->tcp_flags > TCP_FLAG_ALL)
2534                 return -EINVAL;  /* flags are invalid. */
2535
2536         ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
2537         if (ttqf & E1000_TTQF_QUEUE_ENABLE)
2538                 return -EINVAL;  /* filter index is in use. */
2539
2540         imir = (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
2541         if (filter->dst_port_mask == 1) /* 1b means do not compare. */
2542                 imir |= E1000_IMIR_PORT_BP;
2543         else
2544                 imir &= ~E1000_IMIR_PORT_BP;
2545
2546         imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;
2547
2548         ttqf = 0;
2549         ttqf |= E1000_TTQF_QUEUE_ENABLE;
2550         ttqf |= (uint32_t)(rx_queue << E1000_TTQF_QUEUE_SHIFT);
2551         ttqf |= (uint32_t)(filter->protocol & E1000_TTQF_PROTOCOL_MASK);
2552         if (filter->protocol_mask == 1)
2553                 ttqf |= E1000_TTQF_MASK_ENABLE;
2554         else
2555                 ttqf &= ~E1000_TTQF_MASK_ENABLE;
2556
2557         imir_ext |= E1000_IMIR_EXT_SIZE_BP;
2558         /* tcp flags bits setting. */
2559         if (filter->tcp_flags & TCP_FLAG_ALL) {
2560                 if (filter->tcp_flags & TCP_UGR_FLAG)
2561                         imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
2562                 if (filter->tcp_flags & TCP_ACK_FLAG)
2563                         imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
2564                 if (filter->tcp_flags & TCP_PSH_FLAG)
2565                         imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
2566                 if (filter->tcp_flags & TCP_RST_FLAG)
2567                         imir_ext |= E1000_IMIR_EXT_CTRL_RST;
2568                 if (filter->tcp_flags & TCP_SYN_FLAG)
2569                         imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
2570                 if (filter->tcp_flags & TCP_FIN_FLAG)
2571                         imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
2572                 imir_ext &= ~E1000_IMIR_EXT_CTRL_BP;
2573         } else
2574                 imir_ext |= E1000_IMIR_EXT_CTRL_BP;
2575         E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
2576         E1000_WRITE_REG(hw, E1000_TTQF(index), ttqf);
2577         E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
2578         return 0;
2579 }
2580
2581 /*
2582  * remove a 2tuple filter
2583  *
2584  * @param
2585  * dev: Pointer to struct rte_eth_dev.
2586  * index: the index the filter allocates.
2587  *
2588  * @return
2589  *    - On success, zero.
2590  *    - On failure, a negative value.
2591  */
2592 static int
2593 eth_igb_remove_2tuple_filter(struct rte_eth_dev *dev,
2594                         uint16_t index)
2595 {
2596         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2597
2598         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
2599
2600         if (index >= E1000_MAX_TTQF_FILTERS)
2601                 return -EINVAL;  /* filter index is out of range */
2602
2603         E1000_WRITE_REG(hw, E1000_TTQF(index), 0);
2604         E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
2605         E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
2606         return 0;
2607 }
2608
2609 /*
2610  * get a 2tuple filter
2611  *
2612  * @param
2613  * dev: Pointer to struct rte_eth_dev.
2614  * index: the index the filter allocates.
2615  * filter: pointer to the filter that is returned.
2616  * *rx_queue: pointer to the queue id the filter is assigned to.
2617  *
2618  * @return
2619  *    - On success, zero.
2620  *    - On failure, a negative value.
2621  */
2622 static int
2623 eth_igb_get_2tuple_filter(struct rte_eth_dev *dev, uint16_t index,
2624                         struct rte_2tuple_filter *filter, uint16_t *rx_queue)
2625 {
2626         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2627         uint32_t imir, ttqf, imir_ext;
2628
2629         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
2630
2631         if (index >= E1000_MAX_TTQF_FILTERS)
2632                 return -EINVAL;  /* filter index is out of range. */
2633
2634         ttqf = E1000_READ_REG(hw, E1000_TTQF(index));
2635         if (ttqf & E1000_TTQF_QUEUE_ENABLE) {
2636                 imir = E1000_READ_REG(hw, E1000_IMIR(index));
2637                 filter->protocol = ttqf & E1000_TTQF_PROTOCOL_MASK;
2638                 filter->protocol_mask = (ttqf & E1000_TTQF_MASK_ENABLE) ? 1 : 0;
2639                 *rx_queue = (ttqf & E1000_TTQF_RX_QUEUE_MASK) >>
2640                                 E1000_TTQF_QUEUE_SHIFT;
2641                 filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
2642                 filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
2643                 filter->priority = (imir & E1000_IMIR_PRIORITY) >>
2644                         E1000_IMIR_PRIORITY_SHIFT;
2645
2646                 imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
2647                 if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
2648                         if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
2649                                 filter->tcp_flags |= TCP_UGR_FLAG;
2650                         if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
2651                                 filter->tcp_flags |= TCP_ACK_FLAG;
2652                         if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
2653                                 filter->tcp_flags |= TCP_PSH_FLAG;
2654                         if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
2655                                 filter->tcp_flags |= TCP_RST_FLAG;
2656                         if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
2657                                 filter->tcp_flags |= TCP_SYN_FLAG;
2658                         if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
2659                                 filter->tcp_flags |= TCP_FIN_FLAG;
2660                 } else
2661                         filter->tcp_flags = 0;
2662                 return 0;
2663         }
2664         return -ENOENT;
2665 }
2666
2667 static inline struct e1000_flex_filter *
2668 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
2669                         struct e1000_flex_filter_info *key)
2670 {
2671         struct e1000_flex_filter *it;
2672
2673         TAILQ_FOREACH(it, filter_list, entries) {
2674                 if (memcmp(key, &it->filter_info,
2675                         sizeof(struct e1000_flex_filter_info)) == 0)
2676                         return it;
2677         }
2678
2679         return NULL;
2680 }
2681
2682 static int
2683 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
2684                         struct rte_eth_flex_filter *filter,
2685                         bool add)
2686 {
2687         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2688         struct e1000_filter_info *filter_info =
2689                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2690         struct e1000_flex_filter *flex_filter, *it;
2691         uint32_t wufc, queueing, mask;
2692         uint32_t reg_off;
2693         uint8_t shift, i, j = 0;
2694
2695         flex_filter = rte_zmalloc("e1000_flex_filter",
2696                         sizeof(struct e1000_flex_filter), 0);
2697         if (flex_filter == NULL)
2698                 return -ENOMEM;
2699
2700         flex_filter->filter_info.len = filter->len;
2701         flex_filter->filter_info.priority = filter->priority;
2702         memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
2703         for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
2704                 mask = 0;
2705                 /* reverse bits in flex filter's mask*/
2706                 for (shift = 0; shift < CHAR_BIT; shift++) {
2707                         if (filter->mask[i] & (0x01 << shift))
2708                                 mask |= (0x80 >> shift);
2709                 }
2710                 flex_filter->filter_info.mask[i] = mask;
2711         }
2712
2713         wufc = E1000_READ_REG(hw, E1000_WUFC);
2714         if (flex_filter->index < E1000_MAX_FHFT)
2715                 reg_off = E1000_FHFT(flex_filter->index);
2716         else
2717                 reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
2718
2719         if (add) {
2720                 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
2721                                 &flex_filter->filter_info) != NULL) {
2722                         PMD_DRV_LOG(ERR, "filter exists.");
2723                         rte_free(flex_filter);
2724                         return -EEXIST;
2725                 }
2726                 flex_filter->queue = filter->queue;
2727                 /*
2728                  * look for an unused flex filter index
2729                  * and insert the filter into the list.
2730                  */
2731                 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
2732                         if (!(filter_info->flex_mask & (1 << i))) {
2733                                 filter_info->flex_mask |= 1 << i;
2734                                 flex_filter->index = i;
2735                                 TAILQ_INSERT_TAIL(&filter_info->flex_list,
2736                                         flex_filter,
2737                                         entries);
2738                                 break;
2739                         }
2740                 }
2741                 if (i >= E1000_MAX_FLEX_FILTERS) {
2742                         PMD_DRV_LOG(ERR, "flex filters are full.");
2743                         rte_free(flex_filter);
2744                         return -ENOSYS;
2745                 }
2746
2747                 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
2748                                 (E1000_WUFC_FLX0 << flex_filter->index));
2749                 queueing = filter->len |
2750                         (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
2751                         (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
2752                 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
2753                                 queueing);
2754                 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
2755                         E1000_WRITE_REG(hw, reg_off,
2756                                         flex_filter->filter_info.dwords[j]);
2757                         reg_off += sizeof(uint32_t);
2758                         E1000_WRITE_REG(hw, reg_off,
2759                                         flex_filter->filter_info.dwords[++j]);
2760                         reg_off += sizeof(uint32_t);
2761                         E1000_WRITE_REG(hw, reg_off,
2762                                 (uint32_t)flex_filter->filter_info.mask[i]);
2763                         reg_off += sizeof(uint32_t) * 2;
2764                         ++j;
2765                 }
2766         } else {
2767                 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
2768                                 &flex_filter->filter_info);
2769                 if (it == NULL) {
2770                         PMD_DRV_LOG(ERR, "filter doesn't exist.");
2771                         rte_free(flex_filter);
2772                         return -ENOENT;
2773                 }
2774
2775                 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
2776                         E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
2777                 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
2778                         (~(E1000_WUFC_FLX0 << it->index)));
2779
2780                 filter_info->flex_mask &= ~(1 << it->index);
2781                 TAILQ_REMOVE(&filter_info->flex_list, it, entries);
2782                 rte_free(it);
2783                 rte_free(flex_filter);
2784         }
2785
2786         return 0;
2787 }
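
/*
 * Flex filter mask, worked example: filter->mask carries one bit per
 * pattern byte and the loop above reverses the bit order within each mask
 * byte before it is stored and programmed.  For filter->mask[0] = 0x0C
 * (pattern bytes 2 and 3 significant):
 *
 *     bit 2 (0x04) -> 0x80 >> 2 = 0x20
 *     bit 3 (0x08) -> 0x80 >> 3 = 0x10
 *
 * so 0x30 is written to the hardware.  The register loop then programs
 * each FHFT row as two pattern dwords followed by the corresponding mask
 * byte, with one reserved dword skipped per row.
 */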
2788
2789 static int
2790 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
2791                         struct rte_eth_flex_filter *filter)
2792 {
2793         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2794         struct e1000_filter_info *filter_info =
2795                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2796         struct e1000_flex_filter flex_filter, *it;
2797         uint32_t wufc, queueing, wufc_en = 0;
2798
2799         memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
2800         flex_filter.filter_info.len = filter->len;
2801         flex_filter.filter_info.priority = filter->priority;
2802         memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
2803         memcpy(flex_filter.filter_info.mask, filter->mask,
2804                         RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);
2805
2806         it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
2807                                 &flex_filter.filter_info);
2808         if (it == NULL) {
2809                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
2810                 return -ENOENT;
2811         }
2812
2813         wufc = E1000_READ_REG(hw, E1000_WUFC);
2814         wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
2815
2816         if ((wufc & wufc_en) == wufc_en) {
2817                 uint32_t reg_off = 0;
2818                 if (it->index < E1000_MAX_FHFT)
2819                         reg_off = E1000_FHFT(it->index);
2820                 else
2821                         reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
2822
2823                 queueing = E1000_READ_REG(hw,
2824                                 reg_off + E1000_FHFT_QUEUEING_OFFSET);
2825                 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
2826                 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
2827                         E1000_FHFT_QUEUEING_PRIO_SHIFT;
2828                 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
2829                         E1000_FHFT_QUEUEING_QUEUE_SHIFT;
2830                 return 0;
2831         }
2832         return -ENOENT;
2833 }
2834
2835 static int
2836 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
2837                         enum rte_filter_op filter_op,
2838                         void *arg)
2839 {
2840         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2841         struct rte_eth_flex_filter *filter;
2842         int ret = 0;
2843
2844         MAC_TYPE_FILTER_SUP(hw->mac.type);
2845
2846         if (filter_op == RTE_ETH_FILTER_NOP)
2847                 return ret;
2848
2849         if (arg == NULL) {
2850                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
2851                             filter_op);
2852                 return -EINVAL;
2853         }
2854
2855         filter = (struct rte_eth_flex_filter *)arg;
2856         if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
2857             || filter->len % sizeof(uint64_t) != 0) {
2858                 PMD_DRV_LOG(ERR, "filter's length is out of range");
2859                 return -EINVAL;
2860         }
2861         if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
2862                 PMD_DRV_LOG(ERR, "filter's priority is out of range");
2863                 return -EINVAL;
2864         }
2865
2866         switch (filter_op) {
2867         case RTE_ETH_FILTER_ADD:
2868                 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
2869                 break;
2870         case RTE_ETH_FILTER_DELETE:
2871                 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
2872                 break;
2873         case RTE_ETH_FILTER_GET:
2874                 ret = eth_igb_get_flex_filter(dev, filter);
2875                 break;
2876         default:
2877                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
2878                 ret = -EINVAL;
2879                 break;
2880         }
2881
2882         return ret;
2883 }
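
/*
 * Illustrative sketch (editorial, not part of the original file): how an
 * application reaches the handler above through the generic filter API.
 * "port_id" is assumed to be an already configured igb port; the structure
 * fields follow the checks in the handler.
 *
 *     struct rte_eth_flex_filter flex = {
 *             .len = 16,               // multiple of 8 bytes, at most 128
 *             .bytes = { 0x01, 0x00 }, // pattern starts at the first byte of the frame
 *             .mask = { 0x03 },        // one bit per pattern byte: compare bytes 0 and 1 only
 *             .priority = 1,
 *             .queue = 1,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FLEXIBLE,
 *                             RTE_ETH_FILTER_ADD, &flex);
 */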
2884
2885 /*
2886  * add a 5tuple filter
2887  *
2888  * @param
2889  * dev: Pointer to struct rte_eth_dev.
2890  * index: the index at which the filter is programmed.
2891  * filter: pointer to the filter that will be added.
2892  * rx_queue: the queue id the filter is assigned to.
2893  *
2894  * @return
2895  *    - On success, zero.
2896  *    - On failure, a negative value.
2897  */
2898 static int
2899 eth_igb_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
2900                         struct rte_5tuple_filter *filter, uint16_t rx_queue)
2901 {
2902         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2903         uint32_t ftqf, spqf = 0;
2904         uint32_t imir = 0;
2905         uint32_t imir_ext = 0;
2906
2907         if (hw->mac.type != e1000_82576)
2908                 return -ENOSYS;
2909
2910         if (index >= E1000_MAX_FTQF_FILTERS ||
2911                 rx_queue >= IGB_MAX_RX_QUEUE_NUM_82576)
2912                 return -EINVAL;  /* filter index is out of range. */
2913
2914         ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
2915         if (ftqf & E1000_FTQF_QUEUE_ENABLE)
2916                 return -EINVAL;  /* filter index is in use. */
2917
2918         ftqf = 0;
2919         ftqf |= filter->protocol & E1000_FTQF_PROTOCOL_MASK;
2920         if (filter->src_ip_mask == 1) /* 1b means not compare. */
2921                 ftqf |= E1000_FTQF_SOURCE_ADDR_MASK;
2922         if (filter->dst_ip_mask == 1)
2923                 ftqf |= E1000_FTQF_DEST_ADDR_MASK;
2924         if (filter->src_port_mask == 1)
2925                 ftqf |= E1000_FTQF_SOURCE_PORT_MASK;
2926         if (filter->protocol_mask == 1)
2927                 ftqf |= E1000_FTQF_PROTOCOL_COMP_MASK;
2928         ftqf |= (rx_queue << E1000_FTQF_QUEUE_SHIFT) & E1000_FTQF_QUEUE_MASK;
2929         ftqf |= E1000_FTQF_VF_MASK_EN;
2930         ftqf |= E1000_FTQF_QUEUE_ENABLE;
2931         E1000_WRITE_REG(hw, E1000_FTQF(index), ftqf);
2932         E1000_WRITE_REG(hw, E1000_DAQF(index), filter->dst_ip);
2933         E1000_WRITE_REG(hw, E1000_SAQF(index), filter->src_ip);
2934
2935         spqf |= filter->src_port & E1000_SPQF_SRCPORT;
2936         E1000_WRITE_REG(hw, E1000_SPQF(index), spqf);
2937
2938         imir |= (uint32_t)(filter->dst_port & E1000_IMIR_DSTPORT);
2939         if (filter->dst_port_mask == 1) /* 1b means not compare. */
2940                 imir |= E1000_IMIR_PORT_BP;
2941         else
2942                 imir &= ~E1000_IMIR_PORT_BP;
2943         imir |= filter->priority << E1000_IMIR_PRIORITY_SHIFT;
2944
2945         imir_ext |= E1000_IMIR_EXT_SIZE_BP;
2946         /* tcp flags bits setting. */
2947         if (filter->tcp_flags & TCP_FLAG_ALL) {
2948                 if (filter->tcp_flags & TCP_UGR_FLAG)
2949                         imir_ext |= E1000_IMIR_EXT_CTRL_UGR;
2950                 if (filter->tcp_flags & TCP_ACK_FLAG)
2951                         imir_ext |= E1000_IMIR_EXT_CTRL_ACK;
2952                 if (filter->tcp_flags & TCP_PSH_FLAG)
2953                         imir_ext |= E1000_IMIR_EXT_CTRL_PSH;
2954                 if (filter->tcp_flags & TCP_RST_FLAG)
2955                         imir_ext |= E1000_IMIR_EXT_CTRL_RST;
2956                 if (filter->tcp_flags & TCP_SYN_FLAG)
2957                         imir_ext |= E1000_IMIR_EXT_CTRL_SYN;
2958                 if (filter->tcp_flags & TCP_FIN_FLAG)
2959                         imir_ext |= E1000_IMIR_EXT_CTRL_FIN;
2960         } else
2961                 imir_ext |= E1000_IMIR_EXT_CTRL_BP;
2962         E1000_WRITE_REG(hw, E1000_IMIR(index), imir);
2963         E1000_WRITE_REG(hw, E1000_IMIREXT(index), imir_ext);
2964         return 0;
2965 }
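
/*
 * Illustrative sketch (editorial): the 5tuple filter is still driven through
 * the legacy per-filter ethdev API in this release; the call name below is an
 * assumption, while the structure fields match the handler above.
 *
 *     struct rte_5tuple_filter f = {
 *             .dst_port = 80,
 *             .protocol = IPPROTO_TCP,
 *             .src_ip_mask = 1,       // 1 means "do not compare"
 *             .dst_ip_mask = 1,
 *             .src_port_mask = 1,
 *             .dst_port_mask = 0,     // 0 means the field is compared
 *             .protocol_mask = 0,
 *             .priority = 1,
 *     };
 *     rte_eth_dev_add_5tuple_filter(port_id, 0, &f, 1); // index 0, rx queue 1
 */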
2966
2967 /*
2968  * remove a 5tuple filter
2969  *
2970  * @param
2971  * dev: Pointer to struct rte_eth_dev.
2972  * index: the index of the filter to be removed
2973  *
2974  * @return
2975  *    - On success, zero.
2976  *    - On failure, a negative value.
2977  */
2978 static int
2979 eth_igb_remove_5tuple_filter(struct rte_eth_dev *dev,
2980                                 uint16_t index)
2981 {
2982         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2983
2984         if (hw->mac.type != e1000_82576)
2985                 return -ENOSYS;
2986
2987         if (index >= E1000_MAX_FTQF_FILTERS)
2988                 return -EINVAL;  /* filter index is out of range. */
2989
2990         E1000_WRITE_REG(hw, E1000_FTQF(index), 0);
2991         E1000_WRITE_REG(hw, E1000_DAQF(index), 0);
2992         E1000_WRITE_REG(hw, E1000_SAQF(index), 0);
2993         E1000_WRITE_REG(hw, E1000_SPQF(index), 0);
2994         E1000_WRITE_REG(hw, E1000_IMIR(index), 0);
2995         E1000_WRITE_REG(hw, E1000_IMIREXT(index), 0);
2996         return 0;
2997 }
2998
2999 /*
3000  * get a 5tuple filter
3001  *
3002  * @param
3003  * dev: Pointer to struct rte_eth_dev.
3004  * index: the index of the filter to be read back
3005  * filter: pointer to the filter that is returned
3006  * *rx_queue: pointer to the queue id the filter is assigned to
3007  *
3008  * @return
3009  *    - On success, zero.
3010  *    - On failure, a negative value.
3011  */
3012 static int
3013 eth_igb_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
3014                         struct rte_5tuple_filter *filter, uint16_t *rx_queue)
3015 {
3016         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3017         uint32_t spqf, ftqf, imir, imir_ext;
3018
3019         if (hw->mac.type != e1000_82576)
3020                 return -ENOSYS;
3021
3022         if (index >= E1000_MAX_FTQF_FILTERS)
3023                 return -EINVAL;  /* filter index is out of range. */
3024
3025         ftqf = E1000_READ_REG(hw, E1000_FTQF(index));
3026         if (ftqf & E1000_FTQF_QUEUE_ENABLE) {
3027                 filter->src_ip_mask =
3028                         (ftqf & E1000_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
3029                 filter->dst_ip_mask =
3030                         (ftqf & E1000_FTQF_DEST_ADDR_MASK) ? 1 : 0;
3031                 filter->src_port_mask =
3032                         (ftqf & E1000_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
3033                 filter->protocol_mask =
3034                         (ftqf & E1000_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
3035                 filter->protocol =
3036                         (uint8_t)(ftqf & E1000_FTQF_PROTOCOL_MASK);
3037                 *rx_queue = (uint16_t)((ftqf & E1000_FTQF_QUEUE_MASK) >>
3038                                 E1000_FTQF_QUEUE_SHIFT);
3039
3040                 spqf = E1000_READ_REG(hw, E1000_SPQF(index));
3041                 filter->src_port = spqf & E1000_SPQF_SRCPORT;
3042
3043                 filter->dst_ip = E1000_READ_REG(hw, E1000_DAQF(index));
3044                 filter->src_ip = E1000_READ_REG(hw, E1000_SAQF(index));
3045
3046                 imir = E1000_READ_REG(hw, E1000_IMIR(index));
3047                 filter->dst_port_mask = (imir & E1000_IMIR_PORT_BP) ? 1 : 0;
3048                 filter->dst_port = (uint16_t)(imir & E1000_IMIR_DSTPORT);
3049                 filter->priority = (imir & E1000_IMIR_PRIORITY) >>
3050                         E1000_IMIR_PRIORITY_SHIFT;
3051
3052                 imir_ext = E1000_READ_REG(hw, E1000_IMIREXT(index));
3053                 if (!(imir_ext & E1000_IMIR_EXT_CTRL_BP)) {
3054                         if (imir_ext & E1000_IMIR_EXT_CTRL_UGR)
3055                                 filter->tcp_flags |= TCP_UGR_FLAG;
3056                         if (imir_ext & E1000_IMIR_EXT_CTRL_ACK)
3057                                 filter->tcp_flags |= TCP_ACK_FLAG;
3058                         if (imir_ext & E1000_IMIR_EXT_CTRL_PSH)
3059                                 filter->tcp_flags |= TCP_PSH_FLAG;
3060                         if (imir_ext & E1000_IMIR_EXT_CTRL_RST)
3061                                 filter->tcp_flags |= TCP_RST_FLAG;
3062                         if (imir_ext & E1000_IMIR_EXT_CTRL_SYN)
3063                                 filter->tcp_flags |= TCP_SYN_FLAG;
3064                         if (imir_ext & E1000_IMIR_EXT_CTRL_FIN)
3065                                 filter->tcp_flags |= TCP_FIN_FLAG;
3066                 } else
3067                         filter->tcp_flags = 0;
3068                 return 0;
3069         }
3070         return -ENOENT;
3071 }
3072
3073 static int
3074 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3075 {
3076         uint32_t rctl;
3077         struct e1000_hw *hw;
3078         struct rte_eth_dev_info dev_info;
3079         uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
3080                                      VLAN_TAG_SIZE);
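        /* e.g. an MTU of 1500 gives frame_size = 1500 + 14 + 4 + 4 = 1522,
         * i.e. a standard Ethernet frame plus one VLAN tag. */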
3081
3082         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3083
3084 #ifdef RTE_LIBRTE_82571_SUPPORT
3085         /* XXX: not bigger than max_rx_pktlen */
3086         if (hw->mac.type == e1000_82571)
3087                 return -ENOTSUP;
3088 #endif
3089         eth_igb_infos_get(dev, &dev_info);
3090
3091         /* check that mtu is within the allowed range */
3092         if ((mtu < ETHER_MIN_MTU) ||
3093             (frame_size > dev_info.max_rx_pktlen))
3094                 return -EINVAL;
3095
3096         /* refuse an mtu that requires scattered packet support when that
3097          * feature has not already been enabled. */
3098         if (!dev->data->scattered_rx &&
3099             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
3100                 return -EINVAL;
3101
3102         rctl = E1000_READ_REG(hw, E1000_RCTL);
3103
3104         /* switch to jumbo mode if needed */
3105         if (frame_size > ETHER_MAX_LEN) {
3106                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
3107                 rctl |= E1000_RCTL_LPE;
3108         } else {
3109                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
3110                 rctl &= ~E1000_RCTL_LPE;
3111         }
3112         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3113
3114         /* update max frame size */
3115         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3116
3117         E1000_WRITE_REG(hw, E1000_RLPML,
3118                         dev->data->dev_conf.rxmode.max_rx_pkt_len);
3119
3120         return 0;
3121 }
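
/*
 * Illustrative sketch (editorial): the handler above backs the generic MTU
 * accessor, e.g.
 *
 *     if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *             printf("MTU rejected (out of range or scattered rx disabled)\n");
 */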
3122
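/*
 * The three helpers below keep the driver's software bookkeeping for the
 * ETQF registers: ethertype_mask is a bitmap of in-use ETQF indexes and
 * ethertype_filters[] records the ether_type programmed at each index, so
 * filters can be looked up and released without touching the hardware.
 */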
3123 static inline int
3124 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
3125                         uint16_t ethertype)
3126 {
3127         int i;
3128
3129         for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3130                 if (filter_info->ethertype_filters[i] == ethertype &&
3131                     (filter_info->ethertype_mask & (1 << i)))
3132                         return i;
3133         }
3134         return -1;
3135 }
3136
3137 static inline int
3138 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
3139                         uint16_t ethertype)
3140 {
3141         int i;
3142
3143         for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
3144                 if (!(filter_info->ethertype_mask & (1 << i))) {
3145                         filter_info->ethertype_mask |= 1 << i;
3146                         filter_info->ethertype_filters[i] = ethertype;
3147                         return i;
3148                 }
3149         }
3150         return -1;
3151 }
3152
3153 static inline int
3154 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
3155                         uint8_t idx)
3156 {
3157         if (idx >= E1000_MAX_ETQF_FILTERS)
3158                 return -1;
3159         filter_info->ethertype_mask &= ~(1 << idx);
3160         filter_info->ethertype_filters[idx] = 0;
3161         return idx;
3162 }
3163
3164
3165 static int
3166 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
3167                         struct rte_eth_ethertype_filter *filter,
3168                         bool add)
3169 {
3170         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3171         struct e1000_filter_info *filter_info =
3172                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3173         uint32_t etqf = 0;
3174         int ret;
3175
3176         if (filter->ether_type == ETHER_TYPE_IPv4 ||
3177                 filter->ether_type == ETHER_TYPE_IPv6) {
3178                 PMD_DRV_LOG(ERR, "unsupported ether_type (0x%04x) in"
3179                         " ethertype filter.", filter->ether_type);
3180                 return -EINVAL;
3181         }
3182
3183         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
3184                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
3185                 return -EINVAL;
3186         }
3187         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
3188                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
3189                 return -EINVAL;
3190         }
3191
3192         ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3193         if (ret >= 0 && add) {
3194                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
3195                             filter->ether_type);
3196                 return -EEXIST;
3197         }
3198         if (ret < 0 && !add) {
3199                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3200                             filter->ether_type);
3201                 return -ENOENT;
3202         }
3203
3204         if (add) {
3205                 ret = igb_ethertype_filter_insert(filter_info,
3206                         filter->ether_type);
3207                 if (ret < 0) {
3208                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
3209                         return -ENOSYS;
3210                 }
3211
3212                 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
3213                 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
3214                 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
3215         } else {
3216                 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
3217                 if (ret < 0)
3218                         return -ENOSYS;
3219         }
3220         E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
3221         E1000_WRITE_FLUSH(hw);
3222
3223         return 0;
3224 }
3225
3226 static int
3227 igb_get_ethertype_filter(struct rte_eth_dev *dev,
3228                         struct rte_eth_ethertype_filter *filter)
3229 {
3230         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3231         struct e1000_filter_info *filter_info =
3232                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3233         uint32_t etqf;
3234         int ret;
3235
3236         ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
3237         if (ret < 0) {
3238                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
3239                             filter->ether_type);
3240                 return -ENOENT;
3241         }
3242
3243         etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
3244         if (etqf & E1000_ETQF_FILTER_ENABLE) {
3245                 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
3246                 filter->flags = 0;
3247                 filter->queue = (etqf & E1000_ETQF_QUEUE) >>
3248                                 E1000_ETQF_QUEUE_SHIFT;
3249                 return 0;
3250         }
3251
3252         return -ENOENT;
3253 }
3254
3255 /*
3256  * igb_ethertype_filter_handle - Handle operations for ethertype filter.
3257  * @dev: pointer to rte_eth_dev structure
3258  * @filter_op: the operation to be taken
3259  * @arg: a pointer to specific structure corresponding to the filter_op
3260  */
3261 static int
3262 igb_ethertype_filter_handle(struct rte_eth_dev *dev,
3263                                 enum rte_filter_op filter_op,
3264                                 void *arg)
3265 {
3266         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3267         int ret;
3268
3269         MAC_TYPE_FILTER_SUP(hw->mac.type);
3270
3271         if (filter_op == RTE_ETH_FILTER_NOP)
3272                 return 0;
3273
3274         if (arg == NULL) {
3275                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3276                             filter_op);
3277                 return -EINVAL;
3278         }
3279
3280         switch (filter_op) {
3281         case RTE_ETH_FILTER_ADD:
3282                 ret = igb_add_del_ethertype_filter(dev,
3283                         (struct rte_eth_ethertype_filter *)arg,
3284                         TRUE);
3285                 break;
3286         case RTE_ETH_FILTER_DELETE:
3287                 ret = igb_add_del_ethertype_filter(dev,
3288                         (struct rte_eth_ethertype_filter *)arg,
3289                         FALSE);
3290                 break;
3291         case RTE_ETH_FILTER_GET:
3292                 ret = igb_get_ethertype_filter(dev,
3293                         (struct rte_eth_ethertype_filter *)arg);
3294                 break;
3295         default:
3296                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3297                 ret = -EINVAL;
3298                 break;
3299         }
3300         return ret;
3301 }
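
/*
 * Illustrative sketch (editorial): adding an ethertype filter through the
 * generic filter API; "port_id" is assumed to be a configured igb port.
 *
 *     struct rte_eth_ethertype_filter etf = {
 *             .ether_type = 0x88F7,   // e.g. PTP over Ethernet
 *             .flags = 0,             // MAC compare / drop flags are rejected above
 *             .queue = 2,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                             RTE_ETH_FILTER_ADD, &etf);
 */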
3302
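/*
 * eth_igb_filter_ctrl - entry point for rte_eth_dev_filter_ctrl() requests;
 * dispatches to the handler for the requested filter type.  Only ethertype
 * and flexible filters are wired up here.
 */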
3303 static int
3304 eth_igb_filter_ctrl(struct rte_eth_dev *dev,
3305                      enum rte_filter_type filter_type,
3306                      enum rte_filter_op filter_op,
3307                      void *arg)
3308 {
3309         int ret = -EINVAL;
3310
3311         switch (filter_type) {
3312         case RTE_ETH_FILTER_ETHERTYPE:
3313                 ret = igb_ethertype_filter_handle(dev, filter_op, arg);
3314                 break;
3315         case RTE_ETH_FILTER_FLEXIBLE:
3316                 ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
3317                 break;
3318         default:
3319                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3320                                                         filter_type);
3321                 break;
3322         }
3323
3324         return ret;
3325 }
3326
3327 static struct rte_driver pmd_igb_drv = {
3328         .type = PMD_PDEV,
3329         .init = rte_igb_pmd_init,
3330 };
3331
3332 static struct rte_driver pmd_igbvf_drv = {
3333         .type = PMD_PDEV,
3334         .init = rte_igbvf_pmd_init,
3335 };
3336
3337 PMD_REGISTER_DRIVER(pmd_igb_drv);
3338 PMD_REGISTER_DRIVER(pmd_igbvf_drv);