tailq: remove unneeded inclusions
[dpdk.git] / lib / librte_pmd_e1000 / igb_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <stdarg.h>
39
40 #include <rte_common.h>
41 #include <rte_interrupts.h>
42 #include <rte_byteorder.h>
43 #include <rte_log.h>
44 #include <rte_debug.h>
45 #include <rte_pci.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_memory.h>
49 #include <rte_memzone.h>
50 #include <rte_eal.h>
51 #include <rte_atomic.h>
52 #include <rte_malloc.h>
53 #include <rte_dev.h>
54
55 #include "e1000_logs.h"
56 #include "e1000/e1000_api.h"
57 #include "e1000_ethdev.h"
58
59 /*
60  * Default values for port configuration
61  */
62 #define IGB_DEFAULT_RX_FREE_THRESH  32
63 #define IGB_DEFAULT_RX_PTHRESH      8
64 #define IGB_DEFAULT_RX_HTHRESH      8
65 #define IGB_DEFAULT_RX_WTHRESH      0
66
67 #define IGB_DEFAULT_TX_PTHRESH      32
68 #define IGB_DEFAULT_TX_HTHRESH      0
69 #define IGB_DEFAULT_TX_WTHRESH      0
70
71 /* Bit shift and mask */
72 #define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
73 #define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
74 #define IGB_8_BIT_WIDTH  CHAR_BIT
75 #define IGB_8_BIT_MASK   UINT8_MAX
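/*
 * Worked expansion of the masks above (a sketch, assuming CHAR_BIT == 8):
 *   IGB_4_BIT_WIDTH = 8 / 2 = 4
 *   IGB_4_BIT_MASK  = RTE_LEN2MASK(4, uint8_t) = 0x0F
 *   IGB_8_BIT_MASK  = UINT8_MAX                = 0xFF
 * The 4-bit variants are used further down in the RSS code, e.g. to isolate
 * the four mask bits that correspond to one 32-bit RETA register.
 */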
76
77 static int  eth_igb_configure(struct rte_eth_dev *dev);
78 static int  eth_igb_start(struct rte_eth_dev *dev);
79 static void eth_igb_stop(struct rte_eth_dev *dev);
80 static void eth_igb_close(struct rte_eth_dev *dev);
81 static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
82 static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
83 static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
84 static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
85 static int  eth_igb_link_update(struct rte_eth_dev *dev,
86                                 int wait_to_complete);
87 static void eth_igb_stats_get(struct rte_eth_dev *dev,
88                                 struct rte_eth_stats *rte_stats);
89 static void eth_igb_stats_reset(struct rte_eth_dev *dev);
90 static void eth_igb_infos_get(struct rte_eth_dev *dev,
91                               struct rte_eth_dev_info *dev_info);
92 static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
93                                 struct rte_eth_dev_info *dev_info);
94 static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
95                                 struct rte_eth_fc_conf *fc_conf);
96 static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
97                                 struct rte_eth_fc_conf *fc_conf);
98 static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
99 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
100 static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
101 static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
102                                                         void *param);
103 static int  igb_hardware_init(struct e1000_hw *hw);
104 static void igb_hw_control_acquire(struct e1000_hw *hw);
105 static void igb_hw_control_release(struct e1000_hw *hw);
106 static void igb_init_manageability(struct e1000_hw *hw);
107 static void igb_release_manageability(struct e1000_hw *hw);
108
109 static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
110
111 static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
112                 uint16_t vlan_id, int on);
113 static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
114 static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
115
116 static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
117 static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
118 static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
119 static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
120 static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
121 static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
122
123 static int eth_igb_led_on(struct rte_eth_dev *dev);
124 static int eth_igb_led_off(struct rte_eth_dev *dev);
125
126 static void igb_intr_disable(struct e1000_hw *hw);
127 static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
128 static void eth_igb_rar_set(struct rte_eth_dev *dev,
129                 struct ether_addr *mac_addr,
130                 uint32_t index, uint32_t pool);
131 static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
132
133 static void igbvf_intr_disable(struct e1000_hw *hw);
134 static int igbvf_dev_configure(struct rte_eth_dev *dev);
135 static int igbvf_dev_start(struct rte_eth_dev *dev);
136 static void igbvf_dev_stop(struct rte_eth_dev *dev);
137 static void igbvf_dev_close(struct rte_eth_dev *dev);
138 static int eth_igbvf_link_update(struct e1000_hw *hw);
139 static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats);
140 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
141 static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
142                 uint16_t vlan_id, int on);
143 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
144 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
145 static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
146                                    struct rte_eth_rss_reta_entry64 *reta_conf,
147                                    uint16_t reta_size);
148 static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
149                                   struct rte_eth_rss_reta_entry64 *reta_conf,
150                                   uint16_t reta_size);
151
152 static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
153                         struct rte_eth_syn_filter *filter,
154                         bool add);
155 static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
156                         struct rte_eth_syn_filter *filter);
157 static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
158                         enum rte_filter_op filter_op,
159                         void *arg);
160 static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
161                         struct rte_eth_ntuple_filter *ntuple_filter);
162 static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
163                         struct rte_eth_ntuple_filter *ntuple_filter);
164 static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
165                         struct rte_eth_flex_filter *filter,
166                         bool add);
167 static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
168                         struct rte_eth_flex_filter *filter);
169 static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
170                         enum rte_filter_op filter_op,
171                         void *arg);
172 static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
173                         struct rte_eth_ntuple_filter *ntuple_filter);
174 static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
175                         struct rte_eth_ntuple_filter *ntuple_filter);
176 static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
177                         struct rte_eth_ntuple_filter *filter,
178                         bool add);
179 static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
180                         struct rte_eth_ntuple_filter *filter);
181 static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
182                                 enum rte_filter_op filter_op,
183                                 void *arg);
184 static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
185                         struct rte_eth_ethertype_filter *filter,
186                         bool add);
187 static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
188                                 enum rte_filter_op filter_op,
189                                 void *arg);
190 static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
191                         struct rte_eth_ethertype_filter *filter);
192 static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
193                      enum rte_filter_type filter_type,
194                      enum rte_filter_op filter_op,
195                      void *arg);
196
197 /*
198  * Define a VF stats update macro for non-"clear on read" registers
199  */
200 #define UPDATE_VF_STAT(reg, last, cur)            \
201 {                                                 \
202         u32 latest = E1000_READ_REG(hw, reg);     \
203         cur += latest - last;                     \
204         last = latest;                            \
205 }
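/*
 * Example of the accumulation above (illustrative values): if the register
 * read 100 on the previous poll and reads 142 now, cur grows by 42, so the
 * running total keeps increasing even though the hardware register is never
 * cleared. The delta arithmetic assumes the counter does not wrap between
 * two polls.
 */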
206
207
208 #define IGB_FC_PAUSE_TIME 0x0680
209 #define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
210 #define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
211
212 #define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */
213
214 static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
215
216 /*
217  * The set of PCI devices this driver supports
218  */
219 static struct rte_pci_id pci_id_igb_map[] = {
220
221 #define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
222 #include "rte_pci_dev_ids.h"
223
224 {.device_id = 0},
225 };
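/*
 * The include above acts as an X-macro: rte_pci_dev_ids.h expands
 * RTE_PCI_DEV_ID_DECL_IGB once per supported NIC, so each entry ends up as
 * e.g. {RTE_PCI_DEVICE(0x8086, 0x10C9)} for an 82576. The zeroed device_id
 * entry terminates the table for the PCI probe loop.
 */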
226
227 /*
228  * The set of PCI devices this driver supports (for 82576 & I350 VF)
229  */
230 static struct rte_pci_id pci_id_igbvf_map[] = {
231
232 #define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
233 #include "rte_pci_dev_ids.h"
234
235 {.device_id = 0},
236 };
237
238 static struct eth_dev_ops eth_igb_ops = {
239         .dev_configure        = eth_igb_configure,
240         .dev_start            = eth_igb_start,
241         .dev_stop             = eth_igb_stop,
242         .dev_close            = eth_igb_close,
243         .promiscuous_enable   = eth_igb_promiscuous_enable,
244         .promiscuous_disable  = eth_igb_promiscuous_disable,
245         .allmulticast_enable  = eth_igb_allmulticast_enable,
246         .allmulticast_disable = eth_igb_allmulticast_disable,
247         .link_update          = eth_igb_link_update,
248         .stats_get            = eth_igb_stats_get,
249         .stats_reset          = eth_igb_stats_reset,
250         .dev_infos_get        = eth_igb_infos_get,
251         .mtu_set              = eth_igb_mtu_set,
252         .vlan_filter_set      = eth_igb_vlan_filter_set,
253         .vlan_tpid_set        = eth_igb_vlan_tpid_set,
254         .vlan_offload_set     = eth_igb_vlan_offload_set,
255         .rx_queue_setup       = eth_igb_rx_queue_setup,
256         .rx_queue_release     = eth_igb_rx_queue_release,
257         .rx_queue_count       = eth_igb_rx_queue_count,
258         .rx_descriptor_done   = eth_igb_rx_descriptor_done,
259         .tx_queue_setup       = eth_igb_tx_queue_setup,
260         .tx_queue_release     = eth_igb_tx_queue_release,
261         .dev_led_on           = eth_igb_led_on,
262         .dev_led_off          = eth_igb_led_off,
263         .flow_ctrl_get        = eth_igb_flow_ctrl_get,
264         .flow_ctrl_set        = eth_igb_flow_ctrl_set,
265         .mac_addr_add         = eth_igb_rar_set,
266         .mac_addr_remove      = eth_igb_rar_clear,
267         .reta_update          = eth_igb_rss_reta_update,
268         .reta_query           = eth_igb_rss_reta_query,
269         .rss_hash_update      = eth_igb_rss_hash_update,
270         .rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
271         .filter_ctrl          = eth_igb_filter_ctrl,
272 };
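/*
 * These callbacks are reached indirectly through the librte_ether API: a
 * call such as rte_eth_dev_start(port_id) resolves to
 * (*dev->dev_ops->dev_start)(dev), i.e. eth_igb_start() for ports bound to
 * this driver.
 */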
273
274 /*
275  * dev_ops for the virtual function; only the bare necessities for
276  * basic VF operation are implemented
277  */
278 static struct eth_dev_ops igbvf_eth_dev_ops = {
279         .dev_configure        = igbvf_dev_configure,
280         .dev_start            = igbvf_dev_start,
281         .dev_stop             = igbvf_dev_stop,
282         .dev_close            = igbvf_dev_close,
283         .link_update          = eth_igb_link_update,
284         .stats_get            = eth_igbvf_stats_get,
285         .stats_reset          = eth_igbvf_stats_reset,
286         .vlan_filter_set      = igbvf_vlan_filter_set,
287         .dev_infos_get        = eth_igbvf_infos_get,
288         .rx_queue_setup       = eth_igb_rx_queue_setup,
289         .rx_queue_release     = eth_igb_rx_queue_release,
290         .tx_queue_setup       = eth_igb_tx_queue_setup,
291         .tx_queue_release     = eth_igb_tx_queue_release,
292 };
293
294 /**
295  * Atomically reads the link status information from the global
296  * structure rte_eth_dev.
297  *
298  * @param dev
299  *   Pointer to the structure rte_eth_dev to read from.
300  * @param link
301  *   Pointer to the buffer in which the link status is saved.
302  * @return
303  *   - On success, zero.
304  *   - On failure, negative value.
305  */
306 static inline int
307 rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
308                                 struct rte_eth_link *link)
309 {
310         struct rte_eth_link *dst = link;
311         struct rte_eth_link *src = &(dev->data->dev_link);
312
313         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
314                                         *(uint64_t *)src) == 0)
315                 return -1;
316
317         return 0;
318 }
319
320 /**
321  * Atomically writes the link status information into the global
322  * structure rte_eth_dev.
323  *
324  * @param dev
325  *   Pointer to the structure rte_eth_dev to write to.
326  * @param link
327  *   Pointer to the link status to be written.
328  * @return
329  *   - On success, zero.
330  *   - On failure, negative value.
331  */
332 static inline int
333 rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
334                                 struct rte_eth_link *link)
335 {
336         struct rte_eth_link *dst = &(dev->data->dev_link);
337         struct rte_eth_link *src = link;
338
339         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
340                                         *(uint64_t *)src) == 0)
341                 return -1;
342
343         return 0;
344 }
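/*
 * A note on the idiom above: rte_atomic64_cmpset(dst, exp, src) stores src
 * only if *dst still equals exp, so passing the freshly read *dst as exp
 * turns the call into an atomic 64-bit copy of the link status. It fails
 * (returns 0) only if another thread raced in between. Typical use:
 *
 *   struct rte_eth_link link;
 *   if (rte_igb_dev_atomic_read_link_status(dev, &link) == 0 &&
 *       link.link_status)
 *           PMD_DRV_LOG(INFO, "link up at %u Mbps", link.link_speed);
 */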
345
346 static inline void
347 igb_intr_enable(struct rte_eth_dev *dev)
348 {
349         struct e1000_interrupt *intr =
350                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
351         struct e1000_hw *hw =
352                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
353
354         E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
355         E1000_WRITE_FLUSH(hw);
356 }
357
358 static void
359 igb_intr_disable(struct e1000_hw *hw)
360 {
361         E1000_WRITE_REG(hw, E1000_IMC, ~0);
362         E1000_WRITE_FLUSH(hw);
363 }
364
365 static inline int32_t
366 igb_pf_reset_hw(struct e1000_hw *hw)
367 {
368         uint32_t ctrl_ext;
369         int32_t status;
370
371         status = e1000_reset_hw(hw);
372
373         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
374         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
375         ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
376         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
377         E1000_WRITE_FLUSH(hw);
378
379         return status;
380 }
381
382 static void
383 igb_identify_hardware(struct rte_eth_dev *dev)
384 {
385         struct e1000_hw *hw =
386                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
387
388         hw->vendor_id = dev->pci_dev->id.vendor_id;
389         hw->device_id = dev->pci_dev->id.device_id;
390         hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
391         hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
392
393         e1000_set_mac_type(hw);
394
395         /* need to check if it is a vf device below */
396 }
397
398 static int
399 igb_reset_swfw_lock(struct e1000_hw *hw)
400 {
401         int ret_val;
402
403         /*
404          * Do mac ops initialization manually here, since we will need
405          * some function pointers set by this call.
406          */
407         ret_val = e1000_init_mac_params(hw);
408         if (ret_val)
409                 return ret_val;
410
411         /*
412          * SMBI lock should not fail in this early stage. If this is the case,
413          * it is due to an improper exit of the application.
414          * So force the release of the faulty lock.
415          */
416         if (e1000_get_hw_semaphore_generic(hw) < 0) {
417                 PMD_DRV_LOG(DEBUG, "SMBI lock released");
418         }
419         e1000_put_hw_semaphore_generic(hw);
420
421         if (hw->mac.ops.acquire_swfw_sync != NULL) {
422                 uint16_t mask;
423
424                 /*
425                  * Phy lock should not fail in this early stage. If this is the case,
426                  * it is due to an improper exit of the application.
427                  * So force the release of the faulty lock.
428                  */
429                 mask = E1000_SWFW_PHY0_SM << hw->bus.func;
430                 if (hw->bus.func > E1000_FUNC_1)
431                         mask <<= 2;
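                /*
                 * Illustrative mapping, assuming the usual e1000 SWFW bit
                 * layout (PHY0_SM=0x02, PHY1_SM=0x04, CSR_SM=0x08,
                 * PHY2_SM=0x20, PHY3_SM=0x40): funcs 0/1 reach PHY0/PHY1 by
                 * the plain shift, and the extra << 2 lets funcs 2/3 skip
                 * over the CSR bit to land on PHY2_SM/PHY3_SM.
                 */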
432                 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
433                         PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
434                                     hw->bus.func);
435                 }
436                 hw->mac.ops.release_swfw_sync(hw, mask);
437
438                 /*
439                  * This one is trickier since it is common to all ports; but the
440                  * swfw_sync retries last long enough (1s) to be almost sure that,
441                  * if the lock cannot be taken, it is due to an improper exit that
442                  * left the semaphore locked.
443                  */
444                 mask = E1000_SWFW_EEP_SM;
445                 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
446                         PMD_DRV_LOG(DEBUG, "SWFW common locks released");
447                 }
448                 hw->mac.ops.release_swfw_sync(hw, mask);
449         }
450
451         return E1000_SUCCESS;
452 }
453
454 static int
455 eth_igb_dev_init(struct rte_eth_dev *eth_dev)
456 {
457         int error = 0;
458         struct rte_pci_device *pci_dev;
459         struct e1000_hw *hw =
460                 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
461         struct e1000_vfta * shadow_vfta =
462                         E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
463         struct e1000_filter_info *filter_info =
464                 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
465         uint32_t ctrl_ext;
466
467         pci_dev = eth_dev->pci_dev;
468         eth_dev->dev_ops = &eth_igb_ops;
469         eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
470         eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
471
472         /* for secondary processes, we don't initialise any further as primary
473          * has already done this work. Only check we don't need a different
474          * RX function */
475         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
476                 if (eth_dev->data->scattered_rx)
477                         eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
478                 return 0;
479         }
480
481         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
482
483         igb_identify_hardware(eth_dev);
484         if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
485                 error = -EIO;
486                 goto err_late;
487         }
488
489         e1000_get_bus_info(hw);
490
491         /* Reset any pending lock */
492         if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
493                 error = -EIO;
494                 goto err_late;
495         }
496
497         /* Finish initialization */
498         if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
499                 error = -EIO;
500                 goto err_late;
501         }
502
503         hw->mac.autoneg = 1;
504         hw->phy.autoneg_wait_to_complete = 0;
505         hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
506
507         /* Copper options */
508         if (hw->phy.media_type == e1000_media_type_copper) {
509                 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
510                 hw->phy.disable_polarity_correction = 0;
511                 hw->phy.ms_type = e1000_ms_hw_default;
512         }
513
514         /*
515          * Start from a known state; this is important for reading the NVM
516          * and the MAC address from it.
517          */
518         igb_pf_reset_hw(hw);
519
520         /* Make sure we have a good EEPROM before we read from it */
521         if (e1000_validate_nvm_checksum(hw) < 0) {
522                 /*
523                  * Some PCI-E parts fail the first check due to
524                  * the link being in sleep state; call it again.
525                  * If it fails a second time, it is a real issue.
526                  */
527                 if (e1000_validate_nvm_checksum(hw) < 0) {
528                         PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
529                         error = -EIO;
530                         goto err_late;
531                 }
532         }
533
534         /* Read the permanent MAC address out of the EEPROM */
535         if (e1000_read_mac_addr(hw) != 0) {
536                 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
537                 error = -EIO;
538                 goto err_late;
539         }
540
541         /* Allocate memory for storing MAC addresses */
542         eth_dev->data->mac_addrs = rte_zmalloc("e1000",
543                 ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
544         if (eth_dev->data->mac_addrs == NULL) {
545                 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
546                                                 "store MAC addresses",
547                                 ETHER_ADDR_LEN * hw->mac.rar_entry_count);
548                 error = -ENOMEM;
549                 goto err_late;
550         }
551
552         /* Copy the permanent MAC address */
553         ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
554
555         /* initialize the vfta */
556         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
557
558         /* Now initialize the hardware */
559         if (igb_hardware_init(hw) != 0) {
560                 PMD_INIT_LOG(ERR, "Hardware initialization failed");
561                 rte_free(eth_dev->data->mac_addrs);
562                 eth_dev->data->mac_addrs = NULL;
563                 error = -ENODEV;
564                 goto err_late;
565         }
566         hw->mac.get_link_status = 1;
567
568         /* Indicate SOL/IDER usage */
569         if (e1000_check_reset_block(hw) < 0) {
570                 PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
571                                         "SOL/IDER session");
572         }
573
574         /* initialize PF if max_vfs not zero */
575         igb_pf_host_init(eth_dev);
576
577         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
578         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
579         ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
580         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
581         E1000_WRITE_FLUSH(hw);
582
583         PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
584                      eth_dev->data->port_id, pci_dev->id.vendor_id,
585                      pci_dev->id.device_id);
586
587         rte_intr_callback_register(&(pci_dev->intr_handle),
588                 eth_igb_interrupt_handler, (void *)eth_dev);
589
590         /* enable uio intr after callback register */
591         rte_intr_enable(&(pci_dev->intr_handle));
592
593         /* enable support intr */
594         igb_intr_enable(eth_dev);
595
596         TAILQ_INIT(&filter_info->flex_list);
597         filter_info->flex_mask = 0;
598         TAILQ_INIT(&filter_info->twotuple_list);
599         filter_info->twotuple_mask = 0;
600         TAILQ_INIT(&filter_info->fivetuple_list);
601         filter_info->fivetuple_mask = 0;
602
603         return 0;
604
605 err_late:
606         igb_hw_control_release(hw);
607
608         return (error);
609 }
610
611 /*
612  * Virtual Function device init
613  */
614 static int
615 eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
616 {
617         struct rte_pci_device *pci_dev;
618         struct e1000_hw *hw =
619                 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
620         int diag;
621
622         PMD_INIT_FUNC_TRACE();
623
624         eth_dev->dev_ops = &igbvf_eth_dev_ops;
625         eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
626         eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
627
628         /* for secondary processes, we don't initialise any further as primary
629          * has already done this work. Only check we don't need a different
630          * RX function */
631         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
632                 if (eth_dev->data->scattered_rx)
633                         eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
634                 return 0;
635         }
636
637         pci_dev = eth_dev->pci_dev;
638
639         hw->device_id = pci_dev->id.device_id;
640         hw->vendor_id = pci_dev->id.vendor_id;
641         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
642
643         /* Initialize the shared code (base driver) */
644         diag = e1000_setup_init_funcs(hw, TRUE);
645         if (diag != 0) {
646                 PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
647                         diag);
648                 return -EIO;
649         }
650
651         /* init_mailbox_params */
652         hw->mbx.ops.init_params(hw);
653
654         /* Disable the interrupts for VF */
655         igbvf_intr_disable(hw);
656
657         diag = hw->mac.ops.reset_hw(hw);
658
659         /* Allocate memory for storing MAC addresses */
660         eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
661                 hw->mac.rar_entry_count, 0);
662         if (eth_dev->data->mac_addrs == NULL) {
663                 PMD_INIT_LOG(ERR,
664                         "Failed to allocate %d bytes needed to store MAC "
665                         "addresses",
666                         ETHER_ADDR_LEN * hw->mac.rar_entry_count);
667                 return -ENOMEM;
668         }
669
670         /* Copy the permanent MAC address */
671         ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
672                         &eth_dev->data->mac_addrs[0]);
673
674         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
675                      "mac.type=%s",
676                      eth_dev->data->port_id, pci_dev->id.vendor_id,
677                      pci_dev->id.device_id, "igb_mac_82576_vf");
678
679         return 0;
680 }
681
682 static struct eth_driver rte_igb_pmd = {
683         {
684                 .name = "rte_igb_pmd",
685                 .id_table = pci_id_igb_map,
686                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
687         },
688         .eth_dev_init = eth_igb_dev_init,
689         .dev_private_size = sizeof(struct e1000_adapter),
690 };
691
692 /*
693  * virtual function driver struct
694  */
695 static struct eth_driver rte_igbvf_pmd = {
696         {
697                 .name = "rte_igbvf_pmd",
698                 .id_table = pci_id_igbvf_map,
699                 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
700         },
701         .eth_dev_init = eth_igbvf_dev_init,
702         .dev_private_size = sizeof(struct e1000_adapter),
703 };
704
705 static int
706 rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
707 {
708         rte_eth_driver_register(&rte_igb_pmd);
709         return 0;
710 }
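/*
 * Registration flow (a sketch of this era's probe path): after
 * rte_eth_driver_register(), the EAL PCI scan matches devices against
 * pci_id_igb_map; for each hit it allocates dev_private_size bytes of
 * per-port private data and invokes .eth_dev_init, i.e. eth_igb_dev_init()
 * above, to finish port setup.
 */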
711
712 static void
713 igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
714 {
715         struct e1000_hw *hw =
716                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
717         /* RCTL: enable VLAN filter since VMDq always uses VLAN filter */
718         uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
719         rctl |= E1000_RCTL_VFE;
720         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
721 }
722
723 /*
724  * VF Driver initialization routine.
725  * Invoked once at EAL init time.
726  * Registers itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
727  */
728 static int
729 rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
730 {
731         PMD_INIT_FUNC_TRACE();
732
733         rte_eth_driver_register(&rte_igbvf_pmd);
734         return (0);
735 }
736
737 static int
738 eth_igb_configure(struct rte_eth_dev *dev)
739 {
740         struct e1000_interrupt *intr =
741                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
742
743         PMD_INIT_FUNC_TRACE();
744
745         intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
746
747         return (0);
748 }
749
750 static int
751 eth_igb_start(struct rte_eth_dev *dev)
752 {
753         struct e1000_hw *hw =
754                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
755         int ret, i, mask;
756         uint32_t ctrl_ext;
757
758         PMD_INIT_FUNC_TRACE();
759
760         /* Power up the phy. Needed to make the link go Up */
761         e1000_power_up_phy(hw);
762
763         /*
764          * Packet Buffer Allocation (PBA)
765          * Writing PBA sets the receive portion of the buffer;
766          * the remainder is used for the transmit buffer.
767          */
768         if (hw->mac.type == e1000_82575) {
769                 uint32_t pba;
770
771                 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
772                 E1000_WRITE_REG(hw, E1000_PBA, pba);
773         }
774
775         /* Put the address into the Receive Address Array */
776         e1000_rar_set(hw, hw->mac.addr, 0);
777
778         /* Initialize the hardware */
779         if (igb_hardware_init(hw)) {
780                 PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
781                 return (-EIO);
782         }
783
784         E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
785
786         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
787         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
788         ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
789         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
790         E1000_WRITE_FLUSH(hw);
791
792         /* configure PF module if SRIOV enabled */
793         igb_pf_host_configure(dev);
794
795         /* Configure for OS presence */
796         igb_init_manageability(hw);
797
798         eth_igb_tx_init(dev);
799
800         /* This can fail when allocating mbufs for descriptor rings */
801         ret = eth_igb_rx_init(dev);
802         if (ret) {
803                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
804                 igb_dev_clear_queues(dev);
805                 return ret;
806         }
807
808         e1000_clear_hw_cntrs_base_generic(hw);
809
810         /*
811          * VLAN Offload Settings
812          */
813         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
814                         ETH_VLAN_EXTEND_MASK;
815         eth_igb_vlan_offload_set(dev, mask);
816
817         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
818                 /* Enable VLAN filter since VMDq always uses VLAN filter */
819                 igb_vmdq_vlan_hw_filter_enable(dev);
820         }
821
822         /*
823          * Configure the Interrupt Moderation register (EITR) with the maximum
824          * possible value (0xFFFF) to minimize "System Partial Write" issued by
825          * spurious [DMA] memory updates of RX and TX ring descriptors.
826          *
827          * With an EITR granularity of 2 microseconds on the 82576, only 7 to 8
828          * spurious memory updates per second should be expected:
829          * (65535 * 2) / (1000 * 1000) ~= 0.131 second between updates.
830          *
831          * Because interrupts are not used at all, MSI-X is not activated
832          * and interrupt moderation is controlled by EITR[0].
833          *
834          * Note that having [almost] disabled memory updates of RX and TX ring
835          * descriptors through the Interrupt Moderation mechanism, memory
836          * updates of ring descriptors are now moderated by the configurable
837          * value of Write-Back Threshold registers.
838          */
839         if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
840                 (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
841                 (hw->mac.type == e1000_i211)) {
842                 uint32_t ivar;
843
844                 /* Enable all RX & TX queues in the IVAR registers */
845                 ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
846                 for (i = 0; i < 8; i++)
847                         E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);
848
849                 /* Configure EITR with the maximum possible value (0xFFFF) */
850                 E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
851         }
852
853         /* Setup link speed and duplex */
854         switch (dev->data->dev_conf.link_speed) {
855         case ETH_LINK_SPEED_AUTONEG:
856                 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
857                         hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
858                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
859                         hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
860                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
861                         hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
862                 else
863                         goto error_invalid_config;
864                 break;
865         case ETH_LINK_SPEED_10:
866                 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
867                         hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
868                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
869                         hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
870                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
871                         hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
872                 else
873                         goto error_invalid_config;
874                 break;
875         case ETH_LINK_SPEED_100:
876                 if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
877                         hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
878                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
879                         hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
880                 else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
881                         hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
882                 else
883                         goto error_invalid_config;
884                 break;
885         case ETH_LINK_SPEED_1000:
886                 if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
887                                 (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
888                         hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
889                 else
890                         goto error_invalid_config;
891                 break;
892         case ETH_LINK_SPEED_10000:
893         default:
894                 goto error_invalid_config;
895         }
896         e1000_setup_link(hw);
897
898         /* check if lsc interrupt feature is enabled */
899         if (dev->data->dev_conf.intr_conf.lsc != 0)
900                 ret = eth_igb_lsc_interrupt_setup(dev);
901
902         /* resume enabled intr since hw reset */
903         igb_intr_enable(dev);
904
905         PMD_INIT_LOG(DEBUG, "<<");
906
907         return (0);
908
909 error_invalid_config:
910         PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
911                      dev->data->dev_conf.link_speed,
912                      dev->data->dev_conf.link_duplex, dev->data->port_id);
913         igb_dev_clear_queues(dev);
914         return (-EINVAL);
915 }
916
917 /*********************************************************************
918  *
919  *  This routine disables all traffic on the adapter by issuing a
920  *  global reset on the MAC.
921  *
922  **********************************************************************/
923 static void
924 eth_igb_stop(struct rte_eth_dev *dev)
925 {
926         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
927         struct e1000_filter_info *filter_info =
928                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
929         struct rte_eth_link link;
930         struct e1000_flex_filter *p_flex;
931         struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
932         struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
933
934         igb_intr_disable(hw);
935         igb_pf_reset_hw(hw);
936         E1000_WRITE_REG(hw, E1000_WUC, 0);
937
938         /* Set bit for Go Link disconnect */
939         if (hw->mac.type >= e1000_82580) {
940                 uint32_t phpm_reg;
941
942                 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
943                 phpm_reg |= E1000_82580_PM_GO_LINKD;
944                 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
945         }
946
947         /* Power down the phy. Needed to make the link go Down */
948         e1000_power_down_phy(hw);
949
950         igb_dev_clear_queues(dev);
951
952         /* clear the recorded link status */
953         memset(&link, 0, sizeof(link));
954         rte_igb_dev_atomic_write_link_status(dev, &link);
955
956         /* Remove all flex filters of the device */
957         while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
958                 TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
959                 rte_free(p_flex);
960         }
961         filter_info->flex_mask = 0;
962
963         /* Remove all ntuple filters of the device */
964         for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
965              p_5tuple != NULL; p_5tuple = p_5tuple_next) {
966                 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
967                 TAILQ_REMOVE(&filter_info->fivetuple_list,
968                              p_5tuple, entries);
969                 rte_free(p_5tuple);
970         }
971         filter_info->fivetuple_mask = 0;
972         for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
973              p_2tuple != NULL; p_2tuple = p_2tuple_next) {
974                 p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
975                 TAILQ_REMOVE(&filter_info->twotuple_list,
976                              p_2tuple, entries);
977                 rte_free(p_2tuple);
978         }
979         filter_info->twotuple_mask = 0;
980 }
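/*
 * Note on the teardown loops above: the successor is captured with
 * TAILQ_NEXT() before TAILQ_REMOVE()/rte_free(), so iteration survives
 * removal of the current node. Where <sys/queue.h> provides it, the same
 * pattern can be written with the safe-iteration macro:
 *
 *   TAILQ_FOREACH_SAFE(p_5tuple, &filter_info->fivetuple_list,
 *                      entries, p_5tuple_next) {
 *           TAILQ_REMOVE(&filter_info->fivetuple_list, p_5tuple, entries);
 *           rte_free(p_5tuple);
 *   }
 */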
981
982 static void
983 eth_igb_close(struct rte_eth_dev *dev)
984 {
985         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
986         struct rte_eth_link link;
987
988         eth_igb_stop(dev);
989         e1000_phy_hw_reset(hw);
990         igb_release_manageability(hw);
991         igb_hw_control_release(hw);
992
993         /* Clear bit for Go Link disconnect */
994         if (hw->mac.type >= e1000_82580) {
995                 uint32_t phpm_reg;
996
997                 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
998                 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
999                 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1000         }
1001
1002         igb_dev_clear_queues(dev);
1003
1004         memset(&link, 0, sizeof(link));
1005         rte_igb_dev_atomic_write_link_status(dev, &link);
1006 }
1007
1008 static int
1009 igb_get_rx_buffer_size(struct e1000_hw *hw)
1010 {
1011         uint32_t rx_buf_size;
1012         if (hw->mac.type == e1000_82576) {
1013                 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
1014         } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
1015                 /* PBS needs to be translated according to a lookup table */
1016                 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
1017                 rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
1018                 rx_buf_size = (rx_buf_size << 10);
1019         } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
1020                 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
1021         } else {
1022                 rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
1023         }
1024
1025         return rx_buf_size;
1026 }
1027
1028 /*********************************************************************
1029  *
1030  *  Initialize the hardware
1031  *
1032  **********************************************************************/
1033 static int
1034 igb_hardware_init(struct e1000_hw *hw)
1035 {
1036         uint32_t rx_buf_size;
1037         int diag;
1038
1039         /* Let the firmware know the OS is in control */
1040         igb_hw_control_acquire(hw);
1041
1042         /*
1043          * These parameters control the automatic generation (Tx) and
1044          * response (Rx) to Ethernet PAUSE frames.
1045          * - High water mark should allow for at least two standard size (1518)
1046          *   frames to be received after sending an XOFF.
1047          * - Low water mark works best when it is very near the high water mark.
1048          *   This allows the receiver to restart by sending XON when it has
1049          *   drained a bit. Here we use an arbitrary value of 1500 which will
1050          *   restart after one full frame is pulled from the buffer. There
1051          *   could be several smaller frames in the buffer and if so they will
1052          *   not trigger the XON until their total number reduces the buffer
1053          *   by 1500.
1054          * - The pause time is fairly large: 0x680 (1664) x 512ns ~= 852 usec.
1055          */
1056         rx_buf_size = igb_get_rx_buffer_size(hw);
1057
1058         hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
1059         hw->fc.low_water = hw->fc.high_water - 1500;
1060         hw->fc.pause_time = IGB_FC_PAUSE_TIME;
1061         hw->fc.send_xon = 1;
1062
1063         /* Set Flow control, use the tunable location if sane */
1064         if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
1065                 hw->fc.requested_mode = igb_fc_setting;
1066         else
1067                 hw->fc.requested_mode = e1000_fc_none;
1068
1069         /* Issue a global reset */
1070         igb_pf_reset_hw(hw);
1071         E1000_WRITE_REG(hw, E1000_WUC, 0);
1072
1073         diag = e1000_init_hw(hw);
1074         if (diag < 0)
1075                 return (diag);
1076
1077         E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
1078         e1000_get_phy_info(hw);
1079         e1000_check_for_link(hw);
1080
1081         return (0);
1082 }
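/*
 * Worked example for the watermarks above (illustrative numbers): on an
 * 82576 whose RXPBS reports a 64 KB Rx packet buffer, rx_buf_size is
 * 0x40 << 10 = 65536, so high_water = 65536 - 2 * 1518 = 62500 and
 * low_water = 62500 - 1500 = 61000, leaving room for two full-size frames
 * after an XOFF goes out.
 */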
1083
1084 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
1085 static void
1086 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1087 {
1088         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1089         struct e1000_hw_stats *stats =
1090                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1091         int pause_frames;
1092
1093         if (hw->phy.media_type == e1000_media_type_copper ||
1094             (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1095                 stats->symerrs +=
1096                     E1000_READ_REG(hw, E1000_SYMERRS);
1097                 stats->sec += E1000_READ_REG(hw, E1000_SEC);
1098         }
1099
1100         stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
1101         stats->mpc += E1000_READ_REG(hw, E1000_MPC);
1102         stats->scc += E1000_READ_REG(hw, E1000_SCC);
1103         stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
1104
1105         stats->mcc += E1000_READ_REG(hw, E1000_MCC);
1106         stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
1107         stats->colc += E1000_READ_REG(hw, E1000_COLC);
1108         stats->dc += E1000_READ_REG(hw, E1000_DC);
1109         stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
1110         stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
1111         stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
1112         /*
1113         ** For watchdog management we need to know if we have been
1114         ** paused during the last interval, so capture that here.
1115         */
1116         pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
1117         stats->xoffrxc += pause_frames;
1118         stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
1119         stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
1120         stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
1121         stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
1122         stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
1123         stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
1124         stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
1125         stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
1126         stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
1127         stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
1128         stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
1129         stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
1130
1131         /* For the 64-bit byte counters the low dword must be read first. */
1132         /* Both registers clear on the read of the high dword */
1133
1134         stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
1135         stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
1136         stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
1137         stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
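        /*
         * For example (illustrative values): GORCL = 0x00000010 and
         * GORCH = 0x00000002 add 0x200000010 (8589934608) bytes to
         * stats->gorc across the two reads above.
         */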
1138
1139         stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
1140         stats->ruc += E1000_READ_REG(hw, E1000_RUC);
1141         stats->rfc += E1000_READ_REG(hw, E1000_RFC);
1142         stats->roc += E1000_READ_REG(hw, E1000_ROC);
1143         stats->rjc += E1000_READ_REG(hw, E1000_RJC);
1144
1145         stats->tor += E1000_READ_REG(hw, E1000_TORH);
1146         stats->tot += E1000_READ_REG(hw, E1000_TOTH);
1147
1148         stats->tpr += E1000_READ_REG(hw, E1000_TPR);
1149         stats->tpt += E1000_READ_REG(hw, E1000_TPT);
1150         stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
1151         stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
1152         stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
1153         stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
1154         stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
1155         stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
1156         stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
1157         stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
1158
1159         /* Interrupt Counts */
1160
1161         stats->iac += E1000_READ_REG(hw, E1000_IAC);
1162         stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
1163         stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
1164         stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
1165         stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
1166         stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
1167         stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
1168         stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
1169         stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
1170
1171         /* Host to Card Statistics */
1172
1173         stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
1174         stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
1175         stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
1176         stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
1177         stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
1178         stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
1179         stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
1180         stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
1181         stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
1182         stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
1183         stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
1184         stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
1185         stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
1186         stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
1187
1188         stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
1189         stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
1190         stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
1191         stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
1192         stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
1193         stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
1194
1195         if (rte_stats == NULL)
1196                 return;
1197
1198         /* Rx Errors */
1199         rte_stats->ibadcrc = stats->crcerrs;
1200         rte_stats->ibadlen = stats->rlec + stats->ruc + stats->roc;
1201         rte_stats->imissed = stats->mpc;
1202         rte_stats->ierrors = rte_stats->ibadcrc +
1203                              rte_stats->ibadlen +
1204                              rte_stats->imissed +
1205                              stats->rxerrc + stats->algnerrc + stats->cexterr;
1206
1207         /* Tx Errors */
1208         rte_stats->oerrors = stats->ecol + stats->latecol;
1209
1210         /* XON/XOFF pause frames */
1211         rte_stats->tx_pause_xon  = stats->xontxc;
1212         rte_stats->rx_pause_xon  = stats->xonrxc;
1213         rte_stats->tx_pause_xoff = stats->xofftxc;
1214         rte_stats->rx_pause_xoff = stats->xoffrxc;
1215
1216         rte_stats->ipackets = stats->gprc;
1217         rte_stats->opackets = stats->gptc;
1218         rte_stats->ibytes   = stats->gorc;
1219         rte_stats->obytes   = stats->gotc;
1220 }
1221
1222 static void
1223 eth_igb_stats_reset(struct rte_eth_dev *dev)
1224 {
1225         struct e1000_hw_stats *hw_stats =
1226                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1227
1228         /* HW registers are cleared on read */
1229         eth_igb_stats_get(dev, NULL);
1230
1231         /* Reset software totals */
1232         memset(hw_stats, 0, sizeof(*hw_stats));
1233 }
1234
1235 static void
1236 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1237 {
1238         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1239         struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1240                           E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1241
1242         /* Good Rx packets, include VF loopback */
1243         UPDATE_VF_STAT(E1000_VFGPRC,
1244             hw_stats->last_gprc, hw_stats->gprc);
1245
1246         /* Good Rx octets, include VF loopback */
1247         UPDATE_VF_STAT(E1000_VFGORC,
1248             hw_stats->last_gorc, hw_stats->gorc);
1249
1250         /* Good Tx packets, include VF loopback */
1251         UPDATE_VF_STAT(E1000_VFGPTC,
1252             hw_stats->last_gptc, hw_stats->gptc);
1253
1254         /* Good Tx octets, include VF loopback */
1255         UPDATE_VF_STAT(E1000_VFGOTC,
1256             hw_stats->last_gotc, hw_stats->gotc);
1257
1258         /* Rx Multicast packets */
1259         UPDATE_VF_STAT(E1000_VFMPRC,
1260             hw_stats->last_mprc, hw_stats->mprc);
1261
1262         /* Good Rx loopback packets */
1263         UPDATE_VF_STAT(E1000_VFGPRLBC,
1264             hw_stats->last_gprlbc, hw_stats->gprlbc);
1265
1266         /* Good Rx loopback octets */
1267         UPDATE_VF_STAT(E1000_VFGORLBC,
1268             hw_stats->last_gorlbc, hw_stats->gorlbc);
1269
1270         /* Good Tx loopback packets */
1271         UPDATE_VF_STAT(E1000_VFGPTLBC,
1272             hw_stats->last_gptlbc, hw_stats->gptlbc);
1273
1274         /* Good Tx loopback octets */
1275         UPDATE_VF_STAT(E1000_VFGOTLBC,
1276             hw_stats->last_gotlbc, hw_stats->gotlbc);
1277
1278         if (rte_stats == NULL)
1279                 return;
1280
1281         rte_stats->ipackets = hw_stats->gprc;
1282         rte_stats->ibytes = hw_stats->gorc;
1283         rte_stats->opackets = hw_stats->gptc;
1284         rte_stats->obytes = hw_stats->gotc;
1285         rte_stats->imcasts = hw_stats->mprc;
1286         rte_stats->ilbpackets = hw_stats->gprlbc;
1287         rte_stats->ilbbytes = hw_stats->gorlbc;
1288         rte_stats->olbpackets = hw_stats->gptlbc;
1289         rte_stats->olbbytes = hw_stats->gotlbc;
1290
1291 }
1292
1293 static void
1294 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
1295 {
1296         struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
1297                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1298
1299         /* Sync HW register to the last stats */
1300         eth_igbvf_stats_get(dev, NULL);
1301
1302         /* reset HW current stats */
1303         memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
1304                offsetof(struct e1000_vf_stats, gprc));
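        /*
         * A sketch of why the memset starts at gprc (assuming the last_*
         * snapshot fields are laid out before gprc in e1000_vf_stats):
         * zeroing from gprc onwards clears the accumulated totals while
         * preserving the last_* values, so the next UPDATE_VF_STAT() still
         * computes correct deltas against the never-reset VF registers.
         */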
1305
1306 }
1307
1308 static void
1309 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1310 {
1311         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1312
1313         dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1314         dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
1315         dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1316         dev_info->rx_offload_capa =
1317                 DEV_RX_OFFLOAD_VLAN_STRIP |
1318                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1319                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1320                 DEV_RX_OFFLOAD_TCP_CKSUM;
1321         dev_info->tx_offload_capa =
1322                 DEV_TX_OFFLOAD_VLAN_INSERT |
1323                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1324                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1325                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1326                 DEV_TX_OFFLOAD_SCTP_CKSUM;
1327
1328         switch (hw->mac.type) {
1329         case e1000_82575:
1330                 dev_info->max_rx_queues = 4;
1331                 dev_info->max_tx_queues = 4;
1332                 dev_info->max_vmdq_pools = 0;
1333                 break;
1334
1335         case e1000_82576:
1336                 dev_info->max_rx_queues = 16;
1337                 dev_info->max_tx_queues = 16;
1338                 dev_info->max_vmdq_pools = ETH_8_POOLS;
1339                 dev_info->vmdq_queue_num = 16;
1340                 break;
1341
1342         case e1000_82580:
1343                 dev_info->max_rx_queues = 8;
1344                 dev_info->max_tx_queues = 8;
1345                 dev_info->max_vmdq_pools = ETH_8_POOLS;
1346                 dev_info->vmdq_queue_num = 8;
1347                 break;
1348
1349         case e1000_i350:
1350                 dev_info->max_rx_queues = 8;
1351                 dev_info->max_tx_queues = 8;
1352                 dev_info->max_vmdq_pools = ETH_8_POOLS;
1353                 dev_info->vmdq_queue_num = 8;
1354                 break;
1355
1356         case e1000_i354:
1357                 dev_info->max_rx_queues = 8;
1358                 dev_info->max_tx_queues = 8;
1359                 break;
1360
1361         case e1000_i210:
1362                 dev_info->max_rx_queues = 4;
1363                 dev_info->max_tx_queues = 4;
1364                 dev_info->max_vmdq_pools = 0;
1365                 break;
1366
1367         case e1000_i211:
1368                 dev_info->max_rx_queues = 2;
1369                 dev_info->max_tx_queues = 2;
1370                 dev_info->max_vmdq_pools = 0;
1371                 break;
1372
1373         default:
1374                 /* Should not happen */
1375                 break;
1376         }
1377         dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1378         dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
1379
1380         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1381                 .rx_thresh = {
1382                         .pthresh = IGB_DEFAULT_RX_PTHRESH,
1383                         .hthresh = IGB_DEFAULT_RX_HTHRESH,
1384                         .wthresh = IGB_DEFAULT_RX_WTHRESH,
1385                 },
1386                 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1387                 .rx_drop_en = 0,
1388         };
1389
1390         dev_info->default_txconf = (struct rte_eth_txconf) {
1391                 .tx_thresh = {
1392                         .pthresh = IGB_DEFAULT_TX_PTHRESH,
1393                         .hthresh = IGB_DEFAULT_TX_HTHRESH,
1394                         .wthresh = IGB_DEFAULT_TX_WTHRESH,
1395                 },
1396                 .txq_flags = 0,
1397         };
1398 }
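
/*
 * Usage sketch (illustrative, not part of the driver): an application
 * reads these limits through the generic ethdev call, which dispatches
 * to eth_igb_infos_get() above; 'port_id' and 'wanted_rxq' stand for
 * the application's own variables:
 *
 *     struct rte_eth_dev_info info;
 *
 *     memset(&info, 0, sizeof(info));
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (wanted_rxq > info.max_rx_queues)
 *             rte_exit(EXIT_FAILURE, "only %u RX queues available\n",
 *                      (unsigned)info.max_rx_queues);
 */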
1399
1400 static void
1401 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1402 {
1403         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1404
1405         dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1406         dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
1407         dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1408         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1409                                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1410                                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1411                                 DEV_RX_OFFLOAD_TCP_CKSUM;
1412         dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1413                                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1414                                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1415                                 DEV_TX_OFFLOAD_TCP_CKSUM   |
1416                                 DEV_TX_OFFLOAD_SCTP_CKSUM;
1417         switch (hw->mac.type) {
1418         case e1000_vfadapt:
1419                 dev_info->max_rx_queues = 2;
1420                 dev_info->max_tx_queues = 2;
1421                 break;
1422         case e1000_vfadapt_i350:
1423                 dev_info->max_rx_queues = 1;
1424                 dev_info->max_tx_queues = 1;
1425                 break;
1426         default:
1427                 /* Should not happen */
1428                 break;
1429         }
1430
1431         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1432                 .rx_thresh = {
1433                         .pthresh = IGB_DEFAULT_RX_PTHRESH,
1434                         .hthresh = IGB_DEFAULT_RX_HTHRESH,
1435                         .wthresh = IGB_DEFAULT_RX_WTHRESH,
1436                 },
1437                 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
1438                 .rx_drop_en = 0,
1439         };
1440
1441         dev_info->default_txconf = (struct rte_eth_txconf) {
1442                 .tx_thresh = {
1443                         .pthresh = IGB_DEFAULT_TX_PTHRESH,
1444                         .hthresh = IGB_DEFAULT_TX_HTHRESH,
1445                         .wthresh = IGB_DEFAULT_TX_WTHRESH,
1446                 },
1447                 .txq_flags = 0,
1448         };
1449 }
1450
1451 /* return 0 if the link status changed, -1 if it did not */
1452 static int
1453 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
1454 {
1455         struct e1000_hw *hw =
1456                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1457         struct rte_eth_link link, old;
1458         int link_check, count;
1459
1460         link_check = 0;
1461         hw->mac.get_link_status = 1;
1462
1463         /* possible wait-to-complete in up to 9 seconds */
1464         for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
1465                 /* Read the real link status */
1466                 switch (hw->phy.media_type) {
1467                 case e1000_media_type_copper:
1468                         /* Do the work to read phy */
1469                         e1000_check_for_link(hw);
1470                         link_check = !hw->mac.get_link_status;
1471                         break;
1472
1473                 case e1000_media_type_fiber:
1474                         e1000_check_for_link(hw);
1475                         link_check = (E1000_READ_REG(hw, E1000_STATUS) &
1476                                       E1000_STATUS_LU);
1477                         break;
1478
1479                 case e1000_media_type_internal_serdes:
1480                         e1000_check_for_link(hw);
1481                         link_check = hw->mac.serdes_has_link;
1482                         break;
1483
1484                 /* VF device is type_unknown */
1485                 case e1000_media_type_unknown:
1486                         eth_igbvf_link_update(hw);
1487                         link_check = !hw->mac.get_link_status;
1488                         break;
1489
1490                 default:
1491                         break;
1492                 }
1493                 if (link_check || wait_to_complete == 0)
1494                         break;
1495                 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
1496         }
1497         memset(&link, 0, sizeof(link));
1498         rte_igb_dev_atomic_read_link_status(dev, &link);
1499         old = link;
1500
1501         /* Now we check if a transition has happened */
1502         if (link_check) {
1503                 hw->mac.ops.get_link_up_info(hw, &link.link_speed,
1504                                           &link.link_duplex);
1505                 link.link_status = 1;
1506         } else {
1507                 link.link_speed = 0;
1508                 link.link_duplex = 0;
1509                 link.link_status = 0;
1510         }
1511         rte_igb_dev_atomic_write_link_status(dev, &link);
1512
1513         /* not changed */
1514         if (old.link_status == link.link_status)
1515                 return -1;
1516
1517         /* changed */
1518         return 0;
1519 }
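
/*
 * Usage sketch (illustrative only): applications reach the callback
 * above through rte_eth_link_get(), which waits for completion, or
 * rte_eth_link_get_nowait(), which corresponds to wait_to_complete == 0:
 *
 *     struct rte_eth_link link;
 *
 *     memset(&link, 0, sizeof(link));
 *     rte_eth_link_get_nowait(port_id, &link);
 *     if (link.link_status)
 *             printf("Port %u: up, %u Mbps\n",
 *                    (unsigned)port_id, (unsigned)link.link_speed);
 */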
1520
1521 /*
1522  * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
1523  * For ASF and Pass Through versions of f/w this means
1524  * that the driver is loaded.
1525  */
1526 static void
1527 igb_hw_control_acquire(struct e1000_hw *hw)
1528 {
1529         uint32_t ctrl_ext;
1530
1531         /* Let firmware know the driver has taken over */
1532         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1533         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1534 }
1535
1536 /*
1537  * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
1538  * For ASF and Pass Through versions of f/w this means that the
1539  * driver is no longer loaded.
1540  */
1541 static void
1542 igb_hw_control_release(struct e1000_hw *hw)
1543 {
1544         uint32_t ctrl_ext;
1545
1546         /* Let firmware take over control of h/w */
1547         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1548         E1000_WRITE_REG(hw, E1000_CTRL_EXT,
1549                         ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1550 }
1551
1552 /*
1553  * Bit of a misnomer, what this really means is
1554  * to enable OS management of the system... aka
1555  * to disable special hardware management features.
1556  */
1557 static void
1558 igb_init_manageability(struct e1000_hw *hw)
1559 {
1560         if (e1000_enable_mng_pass_thru(hw)) {
1561                 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
1562                 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1563
1564                 /* disable hardware interception of ARP */
1565                 manc &= ~(E1000_MANC_ARP_EN);
1566
1567                 /* enable receiving management packets to the host */
1568                 manc |= E1000_MANC_EN_MNG2HOST;
1569                 manc2h |= 1 << 5;  /* Mng Port 623 */
1570                 manc2h |= 1 << 6;  /* Mng Port 664 */
1571                 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
1572                 E1000_WRITE_REG(hw, E1000_MANC, manc);
1573         }
1574 }
1575
1576 static void
1577 igb_release_manageability(struct e1000_hw *hw)
1578 {
1579         if (e1000_enable_mng_pass_thru(hw)) {
1580                 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
1581
1582                 manc |= E1000_MANC_ARP_EN;
1583                 manc &= ~E1000_MANC_EN_MNG2HOST;
1584
1585                 E1000_WRITE_REG(hw, E1000_MANC, manc);
1586         }
1587 }
1588
1589 static void
1590 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
1591 {
1592         struct e1000_hw *hw =
1593                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1594         uint32_t rctl;
1595
1596         rctl = E1000_READ_REG(hw, E1000_RCTL);
1597         rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1598         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1599 }
1600
1601 static void
1602 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
1603 {
1604         struct e1000_hw *hw =
1605                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1606         uint32_t rctl;
1607
1608         rctl = E1000_READ_REG(hw, E1000_RCTL);
1609         rctl &= (~E1000_RCTL_UPE);
1610         if (dev->data->all_multicast == 1)
1611                 rctl |= E1000_RCTL_MPE;
1612         else
1613                 rctl &= (~E1000_RCTL_MPE);
1614         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1615 }
1616
1617 static void
1618 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
1619 {
1620         struct e1000_hw *hw =
1621                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1622         uint32_t rctl;
1623
1624         rctl = E1000_READ_REG(hw, E1000_RCTL);
1625         rctl |= E1000_RCTL_MPE;
1626         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1627 }
1628
1629 static void
1630 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
1631 {
1632         struct e1000_hw *hw =
1633                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1634         uint32_t rctl;
1635
1636         if (dev->data->promiscuous == 1)
1637                 return; /* must remain in all_multicast mode */
1638         rctl = E1000_READ_REG(hw, E1000_RCTL);
1639         rctl &= (~E1000_RCTL_MPE);
1640         E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1641 }
1642
1643 static int
1644 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1645 {
1646         struct e1000_hw *hw =
1647                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1648         struct e1000_vfta * shadow_vfta =
1649                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1650         uint32_t vfta;
1651         uint32_t vid_idx;
1652         uint32_t vid_bit;
1653
1654         vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
1655                               E1000_VFTA_ENTRY_MASK);
1656         vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
1657         vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
1658         if (on)
1659                 vfta |= vid_bit;
1660         else
1661                 vfta &= ~vid_bit;
1662         E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
1663
1664         /* update local VFTA copy */
1665         shadow_vfta->vfta[vid_idx] = vfta;
1666
1667         return 0;
1668 }
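
/*
 * Worked example of the VFTA indexing above, assuming the shift/mask
 * values this driver hard-codes in its VF path (shift 5, bit mask
 * 0x1F): for vlan_id == 100, vid_idx = 100 >> 5 = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of the fourth
 * 32-bit VFTA entry gates VLAN 100.
 */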
1669
1670 static void
1671 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
1672 {
1673         struct e1000_hw *hw =
1674                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1675         uint32_t reg = ETHER_TYPE_VLAN;
1676
1677         reg |= (tpid << 16);
1678         E1000_WRITE_REG(hw, E1000_VET, reg);
1679 }
1680
1681 static void
1682 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1683 {
1684         struct e1000_hw *hw =
1685                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1686         uint32_t reg;
1687
1688         /* Filter Table Disable */
1689         reg = E1000_READ_REG(hw, E1000_RCTL);
1690         reg &= ~E1000_RCTL_CFIEN;
1691         reg &= ~E1000_RCTL_VFE;
1692         E1000_WRITE_REG(hw, E1000_RCTL, reg);
1693 }
1694
1695 static void
1696 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1697 {
1698         struct e1000_hw *hw =
1699                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1700         struct e1000_vfta * shadow_vfta =
1701                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1702         uint32_t reg;
1703         int i;
1704
1705         /* Filter Table Enable, CFI not used for packet acceptance */
1706         reg = E1000_READ_REG(hw, E1000_RCTL);
1707         reg &= ~E1000_RCTL_CFIEN;
1708         reg |= E1000_RCTL_VFE;
1709         E1000_WRITE_REG(hw, E1000_RCTL, reg);
1710
1711         /* restore VFTA table */
1712         for (i = 0; i < IGB_VFTA_SIZE; i++)
1713                 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
1714 }
1715
1716 static void
1717 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
1718 {
1719         struct e1000_hw *hw =
1720                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1721         uint32_t reg;
1722
1723         /* VLAN Mode Disable */
1724         reg = E1000_READ_REG(hw, E1000_CTRL);
1725         reg &= ~E1000_CTRL_VME;
1726         E1000_WRITE_REG(hw, E1000_CTRL, reg);
1727 }
1728
1729 static void
1730 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
1731 {
1732         struct e1000_hw *hw =
1733                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1734         uint32_t reg;
1735
1736         /* VLAN Mode Enable */
1737         reg = E1000_READ_REG(hw, E1000_CTRL);
1738         reg |= E1000_CTRL_VME;
1739         E1000_WRITE_REG(hw, E1000_CTRL, reg);
1740 }
1741
1742 static void
1743 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1744 {
1745         struct e1000_hw *hw =
1746                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1747         uint32_t reg;
1748
1749         /* CTRL_EXT: Extended VLAN */
1750         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1751         reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
1752         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1753
1754         /* Update maximum packet length */
1755         if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1756                 E1000_WRITE_REG(hw, E1000_RLPML,
1757                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1758                                                 VLAN_TAG_SIZE);
1759 }
1760
1761 static void
1762 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1763 {
1764         struct e1000_hw *hw =
1765                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1766         uint32_t reg;
1767
1768         /* CTRL_EXT: Extended VLAN */
1769         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1770         reg |= E1000_CTRL_EXT_EXTEND_VLAN;
1771         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1772
1773         /* Update maximum packet length */
1774         if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1775                 E1000_WRITE_REG(hw, E1000_RLPML,
1776                         dev->data->dev_conf.rxmode.max_rx_pkt_len +
1777                                                 2 * VLAN_TAG_SIZE);
1778 }
1779
1780 static void
1781 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1782 {
1783         if (mask & ETH_VLAN_STRIP_MASK) {
1784                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1785                         igb_vlan_hw_strip_enable(dev);
1786                 else
1787                         igb_vlan_hw_strip_disable(dev);
1788         }
1789
1790         if (mask & ETH_VLAN_FILTER_MASK) {
1791                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1792                         igb_vlan_hw_filter_enable(dev);
1793                 else
1794                         igb_vlan_hw_filter_disable(dev);
1795         }
1796
1797         if (mask & ETH_VLAN_EXTEND_MASK) {
1798                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1799                         igb_vlan_hw_extend_enable(dev);
1800                 else
1801                         igb_vlan_hw_extend_disable(dev);
1802         }
1803 }
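
/*
 * Usage sketch (illustrative only, assuming the ethdev VLAN helpers of
 * this DPDK generation): an application enables stripping and
 * filtering, then admits a VLAN; the offload call ends up in
 * eth_igb_vlan_offload_set() above with the changed bits in 'mask':
 *
 *     rte_eth_dev_set_vlan_offload(port_id,
 *                     ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
 *     rte_eth_dev_vlan_filter(port_id, 100, 1);   // accept VLAN 100
 */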
1804
1805
1806 /**
1807  * It enables the interrupt mask and then enables the interrupt.
1808  *
1809  * @param dev
1810  *  Pointer to struct rte_eth_dev.
1811  *
1812  * @return
1813  *  - On success, zero.
1814  *  - On failure, a negative value.
1815  */
1816 static int
1817 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
1818 {
1819         struct e1000_interrupt *intr =
1820                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1821
1822         intr->mask |= E1000_ICR_LSC;
1823
1824         return 0;
1825 }
1826
1827 /*
1828  * It reads ICR to get the interrupt causes, checks them, and sets a
1829  * bit flag to request a link status update.
1830  *
1831  * @param dev
1832  *  Pointer to struct rte_eth_dev.
1833  *
1834  * @return
1835  *  - On success, zero.
1836  *  - On failure, a negative value.
1837  */
1838 static int
1839 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
1840 {
1841         uint32_t icr;
1842         struct e1000_hw *hw =
1843                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1844         struct e1000_interrupt *intr =
1845                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1846
1847         igb_intr_disable(hw);
1848
1849         /* read-on-clear nic registers here */
1850         icr = E1000_READ_REG(hw, E1000_ICR);
1851
1852         intr->flags = 0;
1853         if (icr & E1000_ICR_LSC) {
1854                 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
1855         }
1856
1857         if (icr & E1000_ICR_VMMB)
1858                 intr->flags |= E1000_FLAG_MAILBOX;
1859
1860         return 0;
1861 }
1862
1863 /*
1864  * It executes link_update after knowing an interrupt is present.
1865  *
1866  * @param dev
1867  *  Pointer to struct rte_eth_dev.
1868  *
1869  * @return
1870  *  - On success, zero.
1871  *  - On failure, a negative value.
1872  */
1873 static int
1874 eth_igb_interrupt_action(struct rte_eth_dev *dev)
1875 {
1876         struct e1000_hw *hw =
1877                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1878         struct e1000_interrupt *intr =
1879                 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1880         uint32_t tctl, rctl;
1881         struct rte_eth_link link;
1882         int ret;
1883
1884         if (intr->flags & E1000_FLAG_MAILBOX) {
1885                 igb_pf_mbx_process(dev);
1886                 intr->flags &= ~E1000_FLAG_MAILBOX;
1887         }
1888
1889         igb_intr_enable(dev);
1890         rte_intr_enable(&(dev->pci_dev->intr_handle));
1891
1892         if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
1893                 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
1894
1895                 /* set get_link_status to check register later */
1896                 hw->mac.get_link_status = 1;
1897                 ret = eth_igb_link_update(dev, 0);
1898
1899                 /* check if link has changed */
1900                 if (ret < 0)
1901                         return 0;
1902
1903                 memset(&link, 0, sizeof(link));
1904                 rte_igb_dev_atomic_read_link_status(dev, &link);
1905                 if (link.link_status) {
1906                         PMD_INIT_LOG(INFO,
1907                                      " Port %d: Link Up - speed %u Mbps - %s",
1908                                      dev->data->port_id,
1909                                      (unsigned)link.link_speed,
1910                                      link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1911                                      "full-duplex" : "half-duplex");
1912                 } else {
1913                         PMD_INIT_LOG(INFO, " Port %d: Link Down",
1914                                      dev->data->port_id);
1915                 }
1916                 PMD_INIT_LOG(INFO, "PCI Address: %04x:%02x:%02x.%d",
1917                              dev->pci_dev->addr.domain,
1918                              dev->pci_dev->addr.bus,
1919                              dev->pci_dev->addr.devid,
1920                              dev->pci_dev->addr.function);
1921                 tctl = E1000_READ_REG(hw, E1000_TCTL);
1922                 rctl = E1000_READ_REG(hw, E1000_RCTL);
1923                 if (link.link_status) {
1924                         /* enable Tx/Rx */
1925                         tctl |= E1000_TCTL_EN;
1926                         rctl |= E1000_RCTL_EN;
1927                 } else {
1928                         /* disable Tx/Rx */
1929                         tctl &= ~E1000_TCTL_EN;
1930                         rctl &= ~E1000_RCTL_EN;
1931                 }
1932                 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1933                 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1934                 E1000_WRITE_FLUSH(hw);
1935                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
1936         }
1937
1938         return 0;
1939 }
1940
1941 /**
1942  * Interrupt handler which shall be registered first.
1943  *
1944  * @param handle
1945  *  Pointer to interrupt handle.
1946  * @param param
1947  *  The address of parameter (struct rte_eth_dev *) registered before.
1948  *
1949  * @return
1950  *  void
1951  */
1952 static void
1953 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
1954                                                         void *param)
1955 {
1956         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1957
1958         eth_igb_interrupt_get_status(dev);
1959         eth_igb_interrupt_action(dev);
1960 }
1961
1962 static int
1963 eth_igb_led_on(struct rte_eth_dev *dev)
1964 {
1965         struct e1000_hw *hw;
1966
1967         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1968         return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
1969 }
1970
1971 static int
1972 eth_igb_led_off(struct rte_eth_dev *dev)
1973 {
1974         struct e1000_hw *hw;
1975
1976         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1977         return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
1978 }
1979
1980 static int
1981 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1982 {
1983         struct e1000_hw *hw;
1984         uint32_t ctrl;
1985         int tx_pause;
1986         int rx_pause;
1987
1988         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1989         fc_conf->pause_time = hw->fc.pause_time;
1990         fc_conf->high_water = hw->fc.high_water;
1991         fc_conf->low_water = hw->fc.low_water;
1992         fc_conf->send_xon = hw->fc.send_xon;
1993         fc_conf->autoneg = hw->mac.autoneg;
1994
1995         /*
1996          * Return rx_pause and tx_pause status according to actual setting of
1997          * the TFCE and RFCE bits in the CTRL register.
1998          */
1999         ctrl = E1000_READ_REG(hw, E1000_CTRL);
2000         if (ctrl & E1000_CTRL_TFCE)
2001                 tx_pause = 1;
2002         else
2003                 tx_pause = 0;
2004
2005         if (ctrl & E1000_CTRL_RFCE)
2006                 rx_pause = 1;
2007         else
2008                 rx_pause = 0;
2009
2010         if (rx_pause && tx_pause)
2011                 fc_conf->mode = RTE_FC_FULL;
2012         else if (rx_pause)
2013                 fc_conf->mode = RTE_FC_RX_PAUSE;
2014         else if (tx_pause)
2015                 fc_conf->mode = RTE_FC_TX_PAUSE;
2016         else
2017                 fc_conf->mode = RTE_FC_NONE;
2018
2019         return 0;
2020 }
2021
2022 static int
2023 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2024 {
2025         struct e1000_hw *hw;
2026         int err;
2027         enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
2028                 e1000_fc_none,
2029                 e1000_fc_rx_pause,
2030                 e1000_fc_tx_pause,
2031                 e1000_fc_full
2032         };
2033         uint32_t rx_buf_size;
2034         uint32_t max_high_water;
2035         uint32_t rctl;
2036
2037         hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2038         if (fc_conf->autoneg != hw->mac.autoneg)
2039                 return -ENOTSUP;
2040         rx_buf_size = igb_get_rx_buffer_size(hw);
2041         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2042
2043         /* At least reserve one Ethernet frame for watermark */
2044         max_high_water = rx_buf_size - ETHER_MAX_LEN;
2045         if ((fc_conf->high_water > max_high_water) ||
2046             (fc_conf->high_water < fc_conf->low_water)) {
2047                 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
2048                 PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
2049                 return (-EINVAL);
2050         }
2051
2052         hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
2053         hw->fc.pause_time     = fc_conf->pause_time;
2054         hw->fc.high_water     = fc_conf->high_water;
2055         hw->fc.low_water      = fc_conf->low_water;
2056         hw->fc.send_xon       = fc_conf->send_xon;
2057
2058         err = e1000_setup_link_generic(hw);
2059         if (err == E1000_SUCCESS) {
2060
2061                 /* check if we want to forward MAC frames - driver doesn't have native
2062                  * capability to do that, so we'll write the registers ourselves */
2063
2064                 rctl = E1000_READ_REG(hw, E1000_RCTL);
2065
2066                 /* set or clear MFLCN.PMCF bit depending on configuration */
2067                 if (fc_conf->mac_ctrl_frame_fwd != 0)
2068                         rctl |= E1000_RCTL_PMCF;
2069                 else
2070                         rctl &= ~E1000_RCTL_PMCF;
2071
2072                 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2073                 E1000_WRITE_FLUSH(hw);
2074
2075                 return 0;
2076         }
2077
2078         PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
2079         return (-EIO);
2080 }
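
/*
 * Usage sketch (illustrative only; the chosen mode is an arbitrary
 * example, not a recommendation): flow control is programmed through
 * the generic API, which dispatches to eth_igb_flow_ctrl_set() above:
 *
 *     struct rte_eth_fc_conf fc;
 *
 *     memset(&fc, 0, sizeof(fc));
 *     rte_eth_dev_flow_ctrl_get(port_id, &fc);  // start from HW state
 *     fc.mode = RTE_FC_FULL;
 *     if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
 *             printf("flow control setup failed\n");
 */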
2081
2082 #define E1000_RAH_POOLSEL_SHIFT      (18)
2083 static void
2084 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
2085                 uint32_t index, uint32_t pool)
2086 {
2087         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2088         uint32_t rah;
2089
2090         e1000_rar_set(hw, mac_addr->addr_bytes, index);
2091         rah = E1000_READ_REG(hw, E1000_RAH(index));
2092         rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
2093         E1000_WRITE_REG(hw, E1000_RAH(index), rah);
2094 }
2095
2096 static void
2097 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
2098 {
2099         uint8_t addr[ETHER_ADDR_LEN];
2100         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2101
2102         memset(addr, 0, sizeof(addr));
2103
2104         e1000_rar_set(hw, addr, index);
2105 }
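
/*
 * Usage sketch (illustrative only; the MAC address is a made-up
 * example): the RAR helpers above back the generic MAC address calls;
 * 'pool' selects which VMDq pool bit is set in the RAH register:
 *
 *     struct ether_addr mac = {
 *             .addr_bytes = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 } };
 *
 *     rte_eth_dev_mac_addr_add(port_id, &mac, 0);    // pool 0
 *     rte_eth_dev_mac_addr_remove(port_id, &mac);
 */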
2106
2107 /*
2108  * Virtual Function operations
2109  */
2110 static void
2111 igbvf_intr_disable(struct e1000_hw *hw)
2112 {
2113         PMD_INIT_FUNC_TRACE();
2114
2115         /* Clear interrupt mask to stop interrupts from being generated */
2116         E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
2117
2118         E1000_WRITE_FLUSH(hw);
2119 }
2120
2121 static void
2122 igbvf_stop_adapter(struct rte_eth_dev *dev)
2123 {
2124         u32 reg_val;
2125         u16 i;
2126         struct rte_eth_dev_info dev_info;
2127         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2128
2129         memset(&dev_info, 0, sizeof(dev_info));
2130         eth_igbvf_infos_get(dev, &dev_info);
2131
2132         /* Clear interrupt mask to stop interrupts from being generated */
2133         igbvf_intr_disable(hw);
2134
2135         /* Clear any pending interrupts, flush previous writes */
2136         E1000_READ_REG(hw, E1000_EICR);
2137
2138         /* Disable the transmit unit.  Each queue must be disabled. */
2139         for (i = 0; i < dev_info.max_tx_queues; i++)
2140                 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
2141
2142         /* Disable the receive unit by stopping each queue */
2143         for (i = 0; i < dev_info.max_rx_queues; i++) {
2144                 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
2145                 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
2146                 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
2147                 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
2148                         ;
2149         }
2150
2151         /* flush all queue disables */
2152         E1000_WRITE_FLUSH(hw);
2153         msec_delay(2);
2154 }
2155
2156 static int eth_igbvf_link_update(struct e1000_hw *hw)
2157 {
2158         struct e1000_mbx_info *mbx = &hw->mbx;
2159         struct e1000_mac_info *mac = &hw->mac;
2160         int ret_val = E1000_SUCCESS;
2161
2162         PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
2163
2164         /*
2165          * We only want to run this if a reset has been asserted.
2166          * In this case that could mean a link change, a device reset,
2167          * or a virtual function reset.
2168          */
2169
2170         /* If we were hit with a reset or timeout drop the link */
2171         if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
2172                 mac->get_link_status = TRUE;
2173
2174         if (!mac->get_link_status)
2175                 goto out;
2176
2177         /* if link status is down no point in checking to see if pf is up */
2178         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
2179                 goto out;
2180
2181         /* if we passed all the tests above then the link is up and we no
2182          * longer need to check for link */
2183         mac->get_link_status = FALSE;
2184
2185 out:
2186         return ret_val;
2187 }
2188
2189
2190 static int
2191 igbvf_dev_configure(struct rte_eth_dev *dev)
2192 {
2193         struct rte_eth_conf *conf = &dev->data->dev_conf;
2194
2195         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
2196                      dev->data->port_id);
2197
2198         /*
2199          * The VF has no ability to enable/disable HW CRC stripping;
2200          * keep the behavior consistent with the host PF.
2201          */
2202 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
2203         if (!conf->rxmode.hw_strip_crc) {
2204                 PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
2205                 conf->rxmode.hw_strip_crc = 1;
2206         }
2207 #else
2208         if (conf->rxmode.hw_strip_crc) {
2209                 PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
2210                 conf->rxmode.hw_strip_crc = 0;
2211         }
2212 #endif
2213
2214         return 0;
2215 }
2216
2217 static int
2218 igbvf_dev_start(struct rte_eth_dev *dev)
2219 {
2220         struct e1000_hw *hw =
2221                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2222         int ret;
2223
2224         PMD_INIT_FUNC_TRACE();
2225
2226         hw->mac.ops.reset_hw(hw);
2227
2228         /* Set all vfta */
2229         igbvf_set_vfta_all(dev, 1);
2230
2231         eth_igbvf_tx_init(dev);
2232
2233         /* This can fail when allocating mbufs for descriptor rings */
2234         ret = eth_igbvf_rx_init(dev);
2235         if (ret) {
2236                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2237                 igb_dev_clear_queues(dev);
2238                 return ret;
2239         }
2240
2241         return 0;
2242 }
2243
2244 static void
2245 igbvf_dev_stop(struct rte_eth_dev *dev)
2246 {
2247         PMD_INIT_FUNC_TRACE();
2248
2249         igbvf_stop_adapter(dev);
2250
2251         /*
2252          * Clear what we set, but keep shadow_vfta so it can be
2253          * restored after the device starts.
2254           */
2255         igbvf_set_vfta_all(dev, 0);
2256
2257         igb_dev_clear_queues(dev);
2258 }
2259
2260 static void
2261 igbvf_dev_close(struct rte_eth_dev *dev)
2262 {
2263         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2264
2265         PMD_INIT_FUNC_TRACE();
2266
2267         e1000_reset_hw(hw);
2268
2269         igbvf_dev_stop(dev);
2270 }
2271
2272 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
2273 {
2274         struct e1000_mbx_info *mbx = &hw->mbx;
2275         uint32_t msgbuf[2];
2276
2277         /* After setting a VLAN, VLAN stripping is also enabled by the igb driver */
2278         msgbuf[0] = E1000_VF_SET_VLAN;
2279         msgbuf[1] = vid;
2280         /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
2281         if (on)
2282                 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
2283
2284         return (mbx->ops.write_posted(hw, msgbuf, 2, 0));
2285 }
2286
2287 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
2288 {
2289         struct e1000_hw *hw =
2290                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2291         struct e1000_vfta * shadow_vfta =
2292                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2293         int i = 0, j = 0, vfta = 0, mask = 1;
2294
2295         for (i = 0; i < IGB_VFTA_SIZE; i++) {
2296                 vfta = shadow_vfta->vfta[i];
2297                 if (vfta) {
2298                         mask = 1;
2299                         for (j = 0; j < 32; j++) {
2300                                 if (vfta & mask)
2301                                         igbvf_set_vfta(hw,
2302                                                 (uint16_t)((i << 5) + j), on);
2303                                 mask <<= 1;
2304                         }
2305                 }
2306         }
2307
2308 }
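
/*
 * Note: the walk above reconstructs each VLAN id from its position in
 * the shadow table, e.g. entry i == 3 with bit j == 4 set yields
 * (3 << 5) + 4 == 100; it is the inverse of the vid_idx/vid_bit split
 * done in eth_igb_vlan_filter_set().
 */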
2309
2310 static int
2311 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2312 {
2313         struct e1000_hw *hw =
2314                 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2315         struct e1000_vfta * shadow_vfta =
2316                 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2317         uint32_t vid_idx = 0;
2318         uint32_t vid_bit = 0;
2319         int ret = 0;
2320
2321         PMD_INIT_FUNC_TRACE();
2322
2323         /* vind is not used in the VF driver, set to 0; check ixgbe_set_vfta_vf */
2324         ret = igbvf_set_vfta(hw, vlan_id, !!on);
2325         if (ret) {
2326                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
2327                 return ret;
2328         }
2329         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
2330         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
2331
2332         /* Save what we set and restore it after device reset */
2333         if (on)
2334                 shadow_vfta->vfta[vid_idx] |= vid_bit;
2335         else
2336                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
2337
2338         return 0;
2339 }
2340
2341 static int
2342 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
2343                         struct rte_eth_rss_reta_entry64 *reta_conf,
2344                         uint16_t reta_size)
2345 {
2346         uint8_t i, j, mask;
2347         uint32_t reta, r;
2348         uint16_t idx, shift;
2349         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2350
2351         if (reta_size != ETH_RSS_RETA_SIZE_128) {
2352                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2353                         "(%d) doesn't match the number supported by hardware "
2354                         "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2355                 return -EINVAL;
2356         }
2357
2358         for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2359                 idx = i / RTE_RETA_GROUP_SIZE;
2360                 shift = i % RTE_RETA_GROUP_SIZE;
2361                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2362                                                 IGB_4_BIT_MASK);
2363                 if (!mask)
2364                         continue;
2365                 if (mask == IGB_4_BIT_MASK)
2366                         r = 0;
2367                 else
2368                         r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2369                 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
2370                         if (mask & (0x1 << j))
2371                                 reta |= reta_conf[idx].reta[shift + j] <<
2372                                                         (CHAR_BIT * j);
2373                         else
2374                                 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
2375                 }
2376                 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
2377         }
2378
2379         return 0;
2380 }
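
/*
 * Usage sketch (illustrative only): the 128-entry table is described
 * to the API in 64-entry groups, and each group's mask selects the
 * entries to update.  Spreading all entries over two queues:
 *
 *     struct rte_eth_rss_reta_entry64 conf[2];
 *     int i;
 *
 *     memset(conf, 0, sizeof(conf));
 *     for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *             conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                             1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                             i % 2;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, conf, ETH_RSS_RETA_SIZE_128);
 */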
2381
2382 static int
2383 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
2384                        struct rte_eth_rss_reta_entry64 *reta_conf,
2385                        uint16_t reta_size)
2386 {
2387         uint8_t i, j, mask;
2388         uint32_t reta;
2389         uint16_t idx, shift;
2390         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2391
2392         if (reta_size != ETH_RSS_RETA_SIZE_128) {
2393                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2394                         "(%d) doesn't match the number supported by hardware "
2395                         "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
2396                 return -EINVAL;
2397         }
2398
2399         for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
2400                 idx = i / RTE_RETA_GROUP_SIZE;
2401                 shift = i % RTE_RETA_GROUP_SIZE;
2402                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2403                                                 IGB_4_BIT_MASK);
2404                 if (!mask)
2405                         continue;
2406                 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
2407                 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
2408                         if (mask & (0x1 << j))
2409                                 reta_conf[idx].reta[shift + j] =
2410                                         ((reta >> (CHAR_BIT * j)) &
2411                                                 IGB_8_BIT_MASK);
2412                 }
2413         }
2414
2415         return 0;
2416 }
2417
2418 #define MAC_TYPE_FILTER_SUP(type)    do {\
2419         if ((type) != e1000_82580 && (type) != e1000_i350 &&\
2420                 (type) != e1000_82576)\
2421                 return -ENOTSUP;\
2422 } while (0)
2423
2424 static int
2425 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
2426                         struct rte_eth_syn_filter *filter,
2427                         bool add)
2428 {
2429         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2430         uint32_t synqf, rfctl;
2431
2432         if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2433                 return -EINVAL;
2434
2435         synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2436
2437         if (add) {
2438                 if (synqf & E1000_SYN_FILTER_ENABLE)
2439                         return -EINVAL;
2440
2441                 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
2442                         E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
2443
2444                 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2445                 if (filter->hig_pri)
2446                         rfctl |= E1000_RFCTL_SYNQFP;
2447                 else
2448                         rfctl &= ~E1000_RFCTL_SYNQFP;
2449
2450                 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
2451         } else {
2452                 if (!(synqf & E1000_SYN_FILTER_ENABLE))
2453                         return -ENOENT;
2454                 synqf = 0;
2455         }
2456
2457         E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
2458         E1000_WRITE_FLUSH(hw);
2459         return 0;
2460 }
2461
2462 static int
2463 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
2464                         struct rte_eth_syn_filter *filter)
2465 {
2466         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2467         uint32_t synqf, rfctl;
2468
2469         synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
2470         if (synqf & E1000_SYN_FILTER_ENABLE) {
2471                 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
2472                 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
2473                 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
2474                                 E1000_SYN_FILTER_QUEUE_SHIFT);
2475                 return 0;
2476         }
2477
2478         return -ENOENT;
2479 }
2480
2481 static int
2482 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
2483                         enum rte_filter_op filter_op,
2484                         void *arg)
2485 {
2486         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2487         int ret;
2488
2489         MAC_TYPE_FILTER_SUP(hw->mac.type);
2490
2491         if (filter_op == RTE_ETH_FILTER_NOP)
2492                 return 0;
2493
2494         if (arg == NULL) {
2495                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
2496                             filter_op);
2497                 return -EINVAL;
2498         }
2499
2500         switch (filter_op) {
2501         case RTE_ETH_FILTER_ADD:
2502                 ret = eth_igb_syn_filter_set(dev,
2503                                 (struct rte_eth_syn_filter *)arg,
2504                                 TRUE);
2505                 break;
2506         case RTE_ETH_FILTER_DELETE:
2507                 ret = eth_igb_syn_filter_set(dev,
2508                                 (struct rte_eth_syn_filter *)arg,
2509                                 FALSE);
2510                 break;
2511         case RTE_ETH_FILTER_GET:
2512                 ret = eth_igb_syn_filter_get(dev,
2513                                 (struct rte_eth_syn_filter *)arg);
2514                 break;
2515         default:
2516                 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
2517                 ret = -EINVAL;
2518                 break;
2519         }
2520
2521         return ret;
2522 }
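
/*
 * Usage sketch (illustrative only; queue 1 is an arbitrary example):
 * SYN filters are reached through the generic filter API, which lands
 * in eth_igb_syn_filter_handle() above:
 *
 *     struct rte_eth_syn_filter syn;
 *
 *     memset(&syn, 0, sizeof(syn));
 *     syn.hig_pri = 1;   // match SYN ahead of other filters
 *     syn.queue = 1;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *                             RTE_ETH_FILTER_ADD, &syn);
 */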
2523
2524 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
2525         if ((type) != e1000_82580 && (type) != e1000_i350)\
2526                 return -ENOSYS; \
2527 } while (0)
2528
2529 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
2530 static inline int
2531 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
2532                         struct e1000_2tuple_filter_info *filter_info)
2533 {
2534         if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
2535                 return -EINVAL;
2536         if (filter->priority > E1000_2TUPLE_MAX_PRI)
2537                 return -EINVAL;  /* priority is out of range. */
2538         if (filter->tcp_flags > TCP_FLAG_ALL)
2539                 return -EINVAL;  /* flags are invalid. */
2540
2541         switch (filter->dst_port_mask) {
2542         case UINT16_MAX:
2543                 filter_info->dst_port_mask = 0;
2544                 filter_info->dst_port = filter->dst_port;
2545                 break;
2546         case 0:
2547                 filter_info->dst_port_mask = 1;
2548                 break;
2549         default:
2550                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2551                 return -EINVAL;
2552         }
2553
2554         switch (filter->proto_mask) {
2555         case UINT8_MAX:
2556                 filter_info->proto_mask = 0;
2557                 filter_info->proto = filter->proto;
2558                 break;
2559         case 0:
2560                 filter_info->proto_mask = 1;
2561                 break;
2562         default:
2563                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
2564                 return -EINVAL;
2565         }
2566
2567         filter_info->priority = (uint8_t)filter->priority;
2568         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
2569                 filter_info->tcp_flags = filter->tcp_flags;
2570         else
2571                 filter_info->tcp_flags = 0;
2572
2573         return 0;
2574 }
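
/*
 * Example translation (illustrative only): an ntuple filter that must
 * match TCP packets to destination port 80 exactly would carry
 * dst_port = 80 with dst_port_mask = UINT16_MAX and proto = IPPROTO_TCP
 * with proto_mask = UINT8_MAX.  Note the inversion above: an all-ones
 * software mask (exact match) becomes a hardware mask bit of 0
 * (compare the field), while a zero software mask (wildcard) becomes 1
 * (bypass the compare).
 */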
2575
2576 static inline struct e1000_2tuple_filter *
2577 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
2578                         struct e1000_2tuple_filter_info *key)
2579 {
2580         struct e1000_2tuple_filter *it;
2581
2582         TAILQ_FOREACH(it, filter_list, entries) {
2583                 if (memcmp(key, &it->filter_info,
2584                         sizeof(struct e1000_2tuple_filter_info)) == 0) {
2585                         return it;
2586                 }
2587         }
2588         return NULL;
2589 }
2590
2591 /*
2592  * igb_add_2tuple_filter - add a 2tuple filter
2593  *
2594  * @param
2595  * dev: Pointer to struct rte_eth_dev.
2596  * ntuple_filter: pointer to the filter that will be added.
2597  *
2598  * @return
2599  *    - On success, zero.
2600  *    - On failure, a negative value.
2601  */
2602 static int
2603 igb_add_2tuple_filter(struct rte_eth_dev *dev,
2604                         struct rte_eth_ntuple_filter *ntuple_filter)
2605 {
2606         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2607         struct e1000_filter_info *filter_info =
2608                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2609         struct e1000_2tuple_filter *filter;
2610         uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
2611         uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
2612         int i, ret;
2613
2614         filter = rte_zmalloc("e1000_2tuple_filter",
2615                         sizeof(struct e1000_2tuple_filter), 0);
2616         if (filter == NULL)
2617                 return -ENOMEM;
2618
2619         ret = ntuple_filter_to_2tuple(ntuple_filter,
2620                                       &filter->filter_info);
2621         if (ret < 0) {
2622                 rte_free(filter);
2623                 return ret;
2624         }
2625         if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2626                                          &filter->filter_info) != NULL) {
2627                 PMD_DRV_LOG(ERR, "filter exists.");
2628                 rte_free(filter);
2629                 return -EEXIST;
2630         }
2631         filter->queue = ntuple_filter->queue;
2632
2633         /*
2634          * look for an unused 2tuple filter index,
2635          * and insert the filter to list.
2636          */
2637         for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
2638                 if (!(filter_info->twotuple_mask & (1 << i))) {
2639                         filter_info->twotuple_mask |= 1 << i;
2640                         filter->index = i;
2641                         TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
2642                                           filter,
2643                                           entries);
2644                         break;
2645                 }
2646         }
2647         if (i >= E1000_MAX_TTQF_FILTERS) {
2648                 PMD_DRV_LOG(ERR, "2tuple filters are full.");
2649                 rte_free(filter);
2650                 return -ENOSYS;
2651         }
2652
2653         imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
2654         if (filter->filter_info.dst_port_mask == 1) /* 1b means do not compare. */
2655                 imir |= E1000_IMIR_PORT_BP;
2656         else
2657                 imir &= ~E1000_IMIR_PORT_BP;
2658
2659         imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
2660
2661         ttqf |= E1000_TTQF_QUEUE_ENABLE;
2662         ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
2663         ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
2664         if (filter->filter_info.proto_mask == 0)
2665                 ttqf &= ~E1000_TTQF_MASK_ENABLE;
2666
2667         /* tcp flags bits setting. */
2668         if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
2669                 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
2670                         imir_ext |= E1000_IMIREXT_CTRL_URG;
2671                 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
2672                         imir_ext |= E1000_IMIREXT_CTRL_ACK;
2673                 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
2674                         imir_ext |= E1000_IMIREXT_CTRL_PSH;
2675                 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
2676                         imir_ext |= E1000_IMIREXT_CTRL_RST;
2677                 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
2678                         imir_ext |= E1000_IMIREXT_CTRL_SYN;
2679                 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
2680                         imir_ext |= E1000_IMIREXT_CTRL_FIN;
2681         } else
2682                 imir_ext |= E1000_IMIREXT_CTRL_BP;
2683         E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
2684         E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
2685         E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
2686         return 0;
2687 }
2688
2689 /*
2690  * igb_remove_2tuple_filter - remove a 2tuple filter
2691  *
2692  * @param
2693  * dev: Pointer to struct rte_eth_dev.
2694  * ntuple_filter: pointer to the filter that will be removed.
2695  *
2696  * @return
2697  *    - On success, zero.
2698  *    - On failure, a negative value.
2699  */
2700 static int
2701 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
2702                         struct rte_eth_ntuple_filter *ntuple_filter)
2703 {
2704         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2705         struct e1000_filter_info *filter_info =
2706                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2707         struct e1000_2tuple_filter_info filter_2tuple;
2708         struct e1000_2tuple_filter *filter;
2709         int ret;
2710
2711         memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
2712         ret = ntuple_filter_to_2tuple(ntuple_filter,
2713                                       &filter_2tuple);
2714         if (ret < 0)
2715                 return ret;
2716
2717         filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
2718                                          &filter_2tuple);
2719         if (filter == NULL) {
2720                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
2721                 return -ENOENT;
2722         }
2723
2724         filter_info->twotuple_mask &= ~(1 << filter->index);
2725         TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
2726         rte_free(filter);
2727
2728         E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
2729         E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
2730         E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
2731         return 0;
2732 }
2733
2734 static inline struct e1000_flex_filter *
2735 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
2736                         struct e1000_flex_filter_info *key)
2737 {
2738         struct e1000_flex_filter *it;
2739
2740         TAILQ_FOREACH(it, filter_list, entries) {
2741                 if (memcmp(key, &it->filter_info,
2742                         sizeof(struct e1000_flex_filter_info)) == 0)
2743                         return it;
2744         }
2745
2746         return NULL;
2747 }
2748
2749 static int
2750 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
2751                         struct rte_eth_flex_filter *filter,
2752                         bool add)
2753 {
2754         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2755         struct e1000_filter_info *filter_info =
2756                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2757         struct e1000_flex_filter *flex_filter, *it;
2758         uint32_t wufc, queueing, mask;
2759         uint32_t reg_off;
2760         uint8_t shift, i, j = 0;
2761
2762         flex_filter = rte_zmalloc("e1000_flex_filter",
2763                         sizeof(struct e1000_flex_filter), 0);
2764         if (flex_filter == NULL)
2765                 return -ENOMEM;
2766
2767         flex_filter->filter_info.len = filter->len;
2768         flex_filter->filter_info.priority = filter->priority;
2769         memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
2770         for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
2771                 mask = 0;
2772                 /* reverse bits in flex filter's mask */
2773                 for (shift = 0; shift < CHAR_BIT; shift++) {
2774                         if (filter->mask[i] & (0x01 << shift))
2775                                 mask |= (0x80 >> shift);
2776                 }
2777                 flex_filter->filter_info.mask[i] = mask;
2778         }
2779
2780         wufc = E1000_READ_REG(hw, E1000_WUFC);
2785
2786         if (add) {
2787                 if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
2788                                 &flex_filter->filter_info) != NULL) {
2789                         PMD_DRV_LOG(ERR, "filter exists.");
2790                         rte_free(flex_filter);
2791                         return -EEXIST;
2792                 }
2793                 flex_filter->queue = filter->queue;
2794                 /*
2795                  * look for an unused flex filter index
2796                  * and insert the filter into the list.
2797                  */
2798                 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
2799                         if (!(filter_info->flex_mask & (1 << i))) {
2800                                 filter_info->flex_mask |= 1 << i;
2801                                 flex_filter->index = i;
2802                                 TAILQ_INSERT_TAIL(&filter_info->flex_list,
2803                                         flex_filter,
2804                                         entries);
2805                                 break;
2806                         }
2807                 }
2808                 if (i >= E1000_MAX_FLEX_FILTERS) {
2809                         PMD_DRV_LOG(ERR, "flex filters are full.");
2810                         rte_free(flex_filter);
2811                         return -ENOSYS;
2812                 }
2813
                /* now that the filter index is known, locate the
                 * FHFT register block it selects */
                if (flex_filter->index < E1000_MAX_FHFT)
                        reg_off = E1000_FHFT(flex_filter->index);
                else
                        reg_off = E1000_FHFT_EXT(flex_filter->index -
                                                 E1000_MAX_FHFT);
2814                 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
2815                                 (E1000_WUFC_FLX0 << flex_filter->index));
2816                 queueing = filter->len |
2817                         (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
2818                         (filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
2819                 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
2820                                 queueing);
2821                 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
2822                         E1000_WRITE_REG(hw, reg_off,
2823                                         flex_filter->filter_info.dwords[j]);
2824                         reg_off += sizeof(uint32_t);
2825                         E1000_WRITE_REG(hw, reg_off,
2826                                         flex_filter->filter_info.dwords[++j]);
2827                         reg_off += sizeof(uint32_t);
2828                         E1000_WRITE_REG(hw, reg_off,
2829                                 (uint32_t)flex_filter->filter_info.mask[i]);
2830                         reg_off += sizeof(uint32_t) * 2;
2831                         ++j;
2832                 }
        } else {
                it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
                                &flex_filter->filter_info);
                if (it == NULL) {
                        PMD_DRV_LOG(ERR, "filter doesn't exist.");
                        rte_free(flex_filter);
                        return -ENOENT;
                }

                /* Use the index of the filter actually found in the list. */
                if (it->index < E1000_MAX_FHFT)
                        reg_off = E1000_FHFT(it->index);
                else
                        reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);

                for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
                        E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
                E1000_WRITE_REG(hw, E1000_WUFC, wufc &
                        (~(E1000_WUFC_FLX0 << it->index)));

                filter_info->flex_mask &= ~(1 << it->index);
                TAILQ_REMOVE(&filter_info->flex_list, it, entries);
                rte_free(it);
                rte_free(flex_filter);
        }

        return 0;
}

static int
eth_igb_get_flex_filter(struct rte_eth_dev *dev,
                        struct rte_eth_flex_filter *filter)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct e1000_flex_filter flex_filter, *it;
        uint32_t wufc, queueing, wufc_en = 0;

        memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
        flex_filter.filter_info.len = filter->len;
        flex_filter.filter_info.priority = filter->priority;
        memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
        /* one mask bit covers one pattern byte, so len/CHAR_BIT mask bytes */
        memcpy(flex_filter.filter_info.mask, filter->mask,
                        RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);

        it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
                                &flex_filter.filter_info);
        if (it == NULL) {
                PMD_DRV_LOG(ERR, "filter doesn't exist.");
                return -ENOENT;
        }

        wufc = E1000_READ_REG(hw, E1000_WUFC);
        wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);

        if ((wufc & wufc_en) == wufc_en) {
                uint32_t reg_off = 0;

                if (it->index < E1000_MAX_FHFT)
                        reg_off = E1000_FHFT(it->index);
                else
                        reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);

                queueing = E1000_READ_REG(hw,
                                reg_off + E1000_FHFT_QUEUEING_OFFSET);
                filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
                filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
                        E1000_FHFT_QUEUEING_PRIO_SHIFT;
                filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
                        E1000_FHFT_QUEUEING_QUEUE_SHIFT;
                return 0;
        }
        return -ENOENT;
}

static int
eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_flex_filter *filter;
        int ret = 0;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        if (filter_op == RTE_ETH_FILTER_NOP)
                return ret;

        if (arg == NULL) {
                PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
                            filter_op);
                return -EINVAL;
        }

        filter = (struct rte_eth_flex_filter *)arg;
        if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
            || filter->len % sizeof(uint64_t) != 0) {
                PMD_DRV_LOG(ERR, "filter's length is out of range");
                return -EINVAL;
        }
        if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
                PMD_DRV_LOG(ERR, "filter's priority is out of range");
                return -EINVAL;
        }

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
                break;
        case RTE_ETH_FILTER_GET:
                ret = eth_igb_get_flex_filter(dev, filter);
                break;
        default:
                PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
                ret = -EINVAL;
                break;
        }

        return ret;
}
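
/*
 * Usage sketch (illustrative application code, not part of the driver):
 * installing a flex filter through the generic filter API, which ends up in
 * eth_igb_flex_filter_handle() above. The pattern, port_id and queue values
 * are hypothetical; the port must be started and the NIC must support
 * flexible filters. len must be a non-zero multiple of 8, and each mask bit
 * enables the comparison of one pattern byte.
 *
 *	uint8_t pattern[16];		// 16 bytes to match, filled in by the app
 *	struct rte_eth_flex_filter flex;
 *
 *	memset(&flex, 0, sizeof(flex));
 *	flex.len = 16;			// multiple of sizeof(uint64_t)
 *	memcpy(flex.bytes, pattern, 16);
 *	flex.mask[0] = 0xFF;		// compare bytes 0-7
 *	flex.mask[1] = 0xFF;		// compare bytes 8-15
 *	flex.priority = 0;
 *	flex.queue = 1;
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FLEXIBLE,
 *				      RTE_ETH_FILTER_ADD, &flex);
 */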

/* Translate elements in struct rte_eth_ntuple_filter
 * to struct e1000_5tuple_filter_info.
 */
static inline int
ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
                        struct e1000_5tuple_filter_info *filter_info)
{
        if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
                return -EINVAL;
        if (filter->priority > E1000_2TUPLE_MAX_PRI)
                return -EINVAL;  /* priority is out of range. */
        if (filter->tcp_flags > TCP_FLAG_ALL)
                return -EINVAL;  /* flags are invalid. */

        switch (filter->dst_ip_mask) {
        case UINT32_MAX:
                filter_info->dst_ip_mask = 0;
                filter_info->dst_ip = filter->dst_ip;
                break;
        case 0:
                filter_info->dst_ip_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
                return -EINVAL;
        }

        switch (filter->src_ip_mask) {
        case UINT32_MAX:
                filter_info->src_ip_mask = 0;
                filter_info->src_ip = filter->src_ip;
                break;
        case 0:
                filter_info->src_ip_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid src_ip mask.");
                return -EINVAL;
        }

        switch (filter->dst_port_mask) {
        case UINT16_MAX:
                filter_info->dst_port_mask = 0;
                filter_info->dst_port = filter->dst_port;
                break;
        case 0:
                filter_info->dst_port_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid dst_port mask.");
                return -EINVAL;
        }

        switch (filter->src_port_mask) {
        case UINT16_MAX:
                filter_info->src_port_mask = 0;
                filter_info->src_port = filter->src_port;
                break;
        case 0:
                filter_info->src_port_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid src_port mask.");
                return -EINVAL;
        }

        switch (filter->proto_mask) {
        case UINT8_MAX:
                filter_info->proto_mask = 0;
                filter_info->proto = filter->proto;
                break;
        case 0:
                filter_info->proto_mask = 1;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid protocol mask.");
                return -EINVAL;
        }

        filter_info->priority = (uint8_t)filter->priority;
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
                filter_info->tcp_flags = filter->tcp_flags;
        else
                filter_info->tcp_flags = 0;

        return 0;
}
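
/*
 * Mask convention note (illustrative): in struct rte_eth_ntuple_filter a
 * field mask of all ones means "compare this field" and 0 means "ignore
 * it", while the hardware encoding produced above is inverted (0 = compare,
 * 1 = ignore). A hypothetical filter that only matches the destination port
 * would therefore set:
 *
 *	filter->dst_port      = rte_cpu_to_be_16(80);	// big endian
 *	filter->dst_port_mask = UINT16_MAX;		// compare dst port
 *	filter->src_port_mask = 0;			// ignore src port
 *	filter->dst_ip_mask   = 0;			// ignore dst IP
 *	filter->src_ip_mask   = 0;			// ignore src IP
 *	filter->proto_mask    = 0;			// ignore protocol
 */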

static inline struct e1000_5tuple_filter *
igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
                        struct e1000_5tuple_filter_info *key)
{
        struct e1000_5tuple_filter *it;

        TAILQ_FOREACH(it, filter_list, entries) {
                if (memcmp(key, &it->filter_info,
                        sizeof(struct e1000_5tuple_filter_info)) == 0) {
                        return it;
                }
        }
        return NULL;
}

/*
 * igb_add_5tuple_filter_82576 - add a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct e1000_5tuple_filter *filter;
        uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
        uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
        uint8_t i;
        int ret;

        filter = rte_zmalloc("e1000_5tuple_filter",
                        sizeof(struct e1000_5tuple_filter), 0);
        if (filter == NULL)
                return -ENOMEM;

        ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
                                            &filter->filter_info);
        if (ret < 0) {
                rte_free(filter);
                return ret;
        }

        if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
                                         &filter->filter_info) != NULL) {
                PMD_DRV_LOG(ERR, "filter exists.");
                rte_free(filter);
                return -EEXIST;
        }
        filter->queue = ntuple_filter->queue;

        /*
         * look for an unused 5tuple filter index,
         * and insert the filter into the list.
         */
        for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
                if (!(filter_info->fivetuple_mask & (1 << i))) {
                        filter_info->fivetuple_mask |= 1 << i;
                        filter->index = i;
                        TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
                                          filter,
                                          entries);
                        break;
                }
        }
        if (i >= E1000_MAX_FTQF_FILTERS) {
                PMD_DRV_LOG(ERR, "5tuple filters are full.");
                rte_free(filter);
                return -ENOSYS;
        }

        ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
        if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
                ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
        if (filter->filter_info.dst_ip_mask == 0)
                ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
        if (filter->filter_info.src_port_mask == 0)
                ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
        if (filter->filter_info.proto_mask == 0)
                ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
        ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
                E1000_FTQF_QUEUE_MASK;
        ftqf |= E1000_FTQF_QUEUE_ENABLE;
        E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
        E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
        E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);

        spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
        E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);

        imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
        if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
                imir |= E1000_IMIR_PORT_BP;
        else
                imir &= ~E1000_IMIR_PORT_BP;
        imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;

        /* TCP flag bits setting. */
        if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
                if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
                        imir_ext |= E1000_IMIREXT_CTRL_URG;
                if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
                        imir_ext |= E1000_IMIREXT_CTRL_ACK;
                if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
                        imir_ext |= E1000_IMIREXT_CTRL_PSH;
                if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
                        imir_ext |= E1000_IMIREXT_CTRL_RST;
                if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
                        imir_ext |= E1000_IMIREXT_CTRL_SYN;
                if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
                        imir_ext |= E1000_IMIREXT_CTRL_FIN;
        } else {
                imir_ext |= E1000_IMIREXT_CTRL_BP;
        }
        E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
        E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
        return 0;
}
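
/*
 * Register layout recap (derived from the writes above): FTQF(i) carries the
 * protocol, the per-field compare masks and the destination queue;
 * DAQF(i)/SAQF(i) carry the destination/source IPv4 addresses; SPQF(i) the
 * source port; and IMIR(i)/IMIREXT(i) the destination port, the priority and
 * the TCP-flag handling of the 5tuple filter at index i.
 */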

/*
 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
                                struct rte_eth_ntuple_filter *ntuple_filter)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct e1000_5tuple_filter_info filter_5tuple;
        struct e1000_5tuple_filter *filter;
        int ret;

        memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
        ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
                                            &filter_5tuple);
        if (ret < 0)
                return ret;

        filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
                                         &filter_5tuple);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "filter doesn't exist.");
                return -ENOENT;
        }

        filter_info->fivetuple_mask &= ~(1 << filter->index);
        TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);

        E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
                        E1000_FTQF_VF_BP | E1000_FTQF_MASK);
        E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
        E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
        E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
        E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
        E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
        /* free the filter only after its index is no longer needed */
        rte_free(filter);
        return 0;
}

static int
eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        uint32_t rctl;
        struct e1000_hw *hw;
        struct rte_eth_dev_info dev_info;
        uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
                                     VLAN_TAG_SIZE);

        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

#ifdef RTE_LIBRTE_82571_SUPPORT
        /* XXX: not bigger than max_rx_pktlen */
        if (hw->mac.type == e1000_82571)
                return -ENOTSUP;
#endif
        eth_igb_infos_get(dev, &dev_info);

        /* check that mtu is within the allowed range */
        if ((mtu < ETHER_MIN_MTU) ||
            (frame_size > dev_info.max_rx_pktlen))
                return -EINVAL;

        /* refuse mtu that requires the support of scattered packets when this
         * feature has not been enabled before. */
        if (!dev->data->scattered_rx &&
            frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
                return -EINVAL;

        rctl = E1000_READ_REG(hw, E1000_RCTL);

        /* switch to jumbo mode if needed */
        if (frame_size > ETHER_MAX_LEN) {
                dev->data->dev_conf.rxmode.jumbo_frame = 1;
                rctl |= E1000_RCTL_LPE;
        } else {
                dev->data->dev_conf.rxmode.jumbo_frame = 0;
                rctl &= ~E1000_RCTL_LPE;
        }
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);

        /* update max frame size */
        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        E1000_WRITE_REG(hw, E1000_RLPML,
                        dev->data->dev_conf.rxmode.max_rx_pkt_len);

        return 0;
}
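
/*
 * Usage sketch (illustrative, not part of the driver): an application
 * changes the MTU through the generic ethdev API, which lands in
 * eth_igb_mtu_set() above. 9000 and handle_error() are hypothetical; the
 * call fails with -EINVAL if the frame would exceed max_rx_pktlen or would
 * need scattered RX while that feature is disabled.
 *
 *	ret = rte_eth_dev_set_mtu(port_id, 9000);
 *	if (ret < 0)
 *		handle_error(ret);
 */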

/*
 * igb_add_del_ntuple_filter - add or delete an ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 * add: if true, add the filter; if false, remove the filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter,
                        bool add)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        switch (ntuple_filter->flags) {
        case RTE_5TUPLE_FLAGS:
        case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
                if (hw->mac.type != e1000_82576)
                        return -ENOTSUP;
                if (add)
                        ret = igb_add_5tuple_filter_82576(dev,
                                                          ntuple_filter);
                else
                        ret = igb_remove_5tuple_filter_82576(dev,
                                                             ntuple_filter);
                break;
        case RTE_2TUPLE_FLAGS:
        case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
                if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
                        return -ENOTSUP;
                if (add)
                        ret = igb_add_2tuple_filter(dev, ntuple_filter);
                else
                        ret = igb_remove_2tuple_filter(dev, ntuple_filter);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

/*
 * igb_get_ntuple_filter - get an ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct e1000_5tuple_filter_info filter_5tuple;
        struct e1000_2tuple_filter_info filter_2tuple;
        struct e1000_5tuple_filter *p_5tuple_filter;
        struct e1000_2tuple_filter *p_2tuple_filter;
        int ret;

        switch (ntuple_filter->flags) {
        case RTE_5TUPLE_FLAGS:
        case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
                if (hw->mac.type != e1000_82576)
                        return -ENOTSUP;
                memset(&filter_5tuple,
                        0,
                        sizeof(struct e1000_5tuple_filter_info));
                ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
                                                    &filter_5tuple);
                if (ret < 0)
                        return ret;
                p_5tuple_filter = igb_5tuple_filter_lookup_82576(
                                        &filter_info->fivetuple_list,
                                        &filter_5tuple);
                if (p_5tuple_filter == NULL) {
                        PMD_DRV_LOG(ERR, "filter doesn't exist.");
                        return -ENOENT;
                }
                ntuple_filter->queue = p_5tuple_filter->queue;
                break;
        case RTE_2TUPLE_FLAGS:
        case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
                if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
                        return -ENOTSUP;
                memset(&filter_2tuple,
                        0,
                        sizeof(struct e1000_2tuple_filter_info));
                ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
                if (ret < 0)
                        return ret;
                p_2tuple_filter = igb_2tuple_filter_lookup(
                                        &filter_info->twotuple_list,
                                        &filter_2tuple);
                if (p_2tuple_filter == NULL) {
                        PMD_DRV_LOG(ERR, "filter doesn't exist.");
                        return -ENOENT;
                }
                ntuple_filter->queue = p_2tuple_filter->queue;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        /* propagate the error from the default case instead of masking it */
        return ret;
}

/*
 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken
 * @arg: a pointer to the specific structure corresponding to filter_op
 */
static int
igb_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL) {
                PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
                            filter_op);
                return -EINVAL;
        }

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = igb_add_del_ntuple_filter(dev,
                        (struct rte_eth_ntuple_filter *)arg,
                        TRUE);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = igb_add_del_ntuple_filter(dev,
                        (struct rte_eth_ntuple_filter *)arg,
                        FALSE);
                break;
        case RTE_ETH_FILTER_GET:
                ret = igb_get_ntuple_filter(dev,
                        (struct rte_eth_ntuple_filter *)arg);
                break;
        default:
                PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}
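
/*
 * Usage sketch (illustrative application code): steering TCP traffic with
 * destination port 80 to queue 2 via a 5tuple filter on an 82576. All
 * values are examples; unset _mask fields stay 0 and are ignored by the
 * conversion above.
 *
 *	struct rte_eth_ntuple_filter ntuple;
 *
 *	memset(&ntuple, 0, sizeof(ntuple));
 *	ntuple.flags = RTE_5TUPLE_FLAGS;
 *	ntuple.proto = IPPROTO_TCP;			// needs netinet/in.h
 *	ntuple.proto_mask = UINT8_MAX;			// compare protocol
 *	ntuple.dst_port = rte_cpu_to_be_16(80);		// big endian
 *	ntuple.dst_port_mask = UINT16_MAX;		// compare dst port
 *	ntuple.priority = 1;				// 0..7
 *	ntuple.queue = 2;
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *				      RTE_ETH_FILTER_ADD, &ntuple);
 */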

static inline int
igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
                        uint16_t ethertype)
{
        int i;

        for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
                if (filter_info->ethertype_filters[i] == ethertype &&
                    (filter_info->ethertype_mask & (1 << i)))
                        return i;
        }
        return -1;
}

static inline int
igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
                        uint16_t ethertype)
{
        int i;

        for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
                if (!(filter_info->ethertype_mask & (1 << i))) {
                        filter_info->ethertype_mask |= 1 << i;
                        filter_info->ethertype_filters[i] = ethertype;
                        return i;
                }
        }
        return -1;
}

static inline int
igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
                        uint8_t idx)
{
        if (idx >= E1000_MAX_ETQF_FILTERS)
                return -1;
        filter_info->ethertype_mask &= ~(1 << idx);
        filter_info->ethertype_filters[idx] = 0;
        return idx;
}

static int
igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter,
                        bool add)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        uint32_t etqf = 0;
        int ret;

        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                filter->ether_type == ETHER_TYPE_IPv6) {
                PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
                        " ethertype filter.", filter->ether_type);
                return -EINVAL;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                PMD_DRV_LOG(ERR, "mac compare is unsupported.");
                return -EINVAL;
        }
        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                PMD_DRV_LOG(ERR, "drop option is unsupported.");
                return -EINVAL;
        }

        ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
        if (ret >= 0 && add) {
                PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
                            filter->ether_type);
                return -EEXIST;
        }
        if (ret < 0 && !add) {
                PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
                            filter->ether_type);
                return -ENOENT;
        }

        if (add) {
                ret = igb_ethertype_filter_insert(filter_info,
                        filter->ether_type);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "ethertype filters are full.");
                        return -ENOSYS;
                }

                etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
                etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
                etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
        } else {
                ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
                if (ret < 0)
                        return -ENOSYS;
        }
        E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
        E1000_WRITE_FLUSH(hw);

        return 0;
}

static int
igb_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        uint32_t etqf;
        int ret;

        ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
                            filter->ether_type);
                return -ENOENT;
        }

        etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
        if (etqf & E1000_ETQF_FILTER_ENABLE) {
                filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
                filter->flags = 0;
                filter->queue = (etqf & E1000_ETQF_QUEUE) >>
                                E1000_ETQF_QUEUE_SHIFT;
                return 0;
        }

        return -ENOENT;
}

/*
 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken
 * @arg: a pointer to the specific structure corresponding to filter_op
 */
static int
igb_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL) {
                PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
                            filter_op);
                return -EINVAL;
        }

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = igb_add_del_ethertype_filter(dev,
                        (struct rte_eth_ethertype_filter *)arg,
                        TRUE);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = igb_add_del_ethertype_filter(dev,
                        (struct rte_eth_ethertype_filter *)arg,
                        FALSE);
                break;
        case RTE_ETH_FILTER_GET:
                ret = igb_get_ethertype_filter(dev,
                        (struct rte_eth_ethertype_filter *)arg);
                break;
        default:
                PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}
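
/*
 * Usage sketch (illustrative application code): directing all ARP frames
 * (EtherType 0x0806) to queue 0. IPv4/IPv6 EtherTypes and the MAC/DROP
 * flags are rejected by igb_add_del_ethertype_filter() above; values here
 * are examples only.
 *
 *	struct rte_eth_ethertype_filter etype;
 *
 *	memset(&etype, 0, sizeof(etype));
 *	etype.ether_type = ETHER_TYPE_ARP;	// 0x0806, from rte_ether.h
 *	etype.flags = 0;			// no MAC compare, no drop
 *	etype.queue = 0;
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *				      RTE_ETH_FILTER_ADD, &etype);
 */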

static int
eth_igb_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg)
{
        int ret = -EINVAL;

        switch (filter_type) {
        case RTE_ETH_FILTER_NTUPLE:
                ret = igb_ntuple_filter_handle(dev, filter_op, arg);
                break;
        case RTE_ETH_FILTER_ETHERTYPE:
                ret = igb_ethertype_filter_handle(dev, filter_op, arg);
                break;
        case RTE_ETH_FILTER_SYN:
                ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
                break;
        case RTE_ETH_FILTER_FLEXIBLE:
                ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
                break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                                                        filter_type);
                break;
        }

        return ret;
}

static struct rte_driver pmd_igb_drv = {
        .type = PMD_PDEV,
        .init = rte_igb_pmd_init,
};

static struct rte_driver pmd_igbvf_drv = {
        .type = PMD_PDEV,
        .init = rte_igbvf_pmd_init,
};

PMD_REGISTER_DRIVER(pmd_igb_drv);
PMD_REGISTER_DRIVER(pmd_igbvf_drv);