ixgbe: update RX/TX queue configuration
dpdk.git: lib/librte_pmd_ixgbe/ixgbe_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_logs.h"
#include "ixgbe/ixgbe_api.h"
#include "ixgbe/ixgbe_vf.h"
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_ethdev.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
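
/*
 * For example, with the values above: XOFF frames start being sent once
 * 0x80 * 1024 = 128 KB of the receive packet buffer is filled, and XON
 * frames resume traffic once it drains back to 0x40 * 1024 = 64 KB.
 * The pause time 0x680 is the 16-bit value carried in transmitted XOFF
 * frames (IEEE 802.3 counts it in quanta of 512 bit times).
 */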

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
                struct rte_eth_dev *eth_dev);
static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
static int  ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static void ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                                  uint16_t vlan_id,
                                  int on);
static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
                                                        void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
                struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);

/*
 * Define VF Stats macros for non "cleared on read" registers.
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
        u32 latest = IXGBE_READ_REG(hw, reg);                   \
        cur += latest - last;                                   \
        last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
        u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
        u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
        u64 latest = ((new_msb << 32) | new_lsb);                \
        cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
        last = latest;                                           \
}
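
/*
 * The VF byte counters are 36 bits wide and wrap at 2^36. Adding 2^36
 * (0x1000000000) before subtracting and masking back to 36 bits yields the
 * correct delta even across a wrap. For example, last = 0xFFFFFFFFF and
 * latest = 0x5 give (0x1000000000 + 0x5 - 0xFFFFFFFFF) & 0xFFFFFFFFF = 0x6,
 * i.e. the six counts accumulated since the previous read.
 */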

/*
 * The set of PCI devices this driver supports
 */
static struct rte_pci_id pci_id_ixgbe_map[] = {

#undef RTE_LIBRTE_IGB_PMD
#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};


/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static struct rte_pci_id pci_id_ixgbevf_map[] = {
{
        .vendor_id = PCI_VENDOR_ID_INTEL,
        .device_id = IXGBE_DEV_ID_82599_VF,
        .subsystem_vendor_id = PCI_ANY_ID,
        .subsystem_device_id = PCI_ANY_ID,
},
{ .vendor_id = 0, /* sentinel */ },
};

static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
        .dev_close            = ixgbe_dev_close,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
        .allmulticast_disable = ixgbe_dev_allmulticast_disable,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbe_dev_stats_get,
        .stats_reset          = ixgbe_dev_stats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
        .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
        .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
        .fdir_infos_get               = ixgbe_fdir_info_get,
        .fdir_add_perfect_filter      = ixgbe_fdir_add_perfect_filter,
        .fdir_update_perfect_filter   = ixgbe_fdir_update_perfect_filter,
        .fdir_remove_perfect_filter   = ixgbe_fdir_remove_perfect_filter,
        .fdir_set_masks               = ixgbe_fdir_set_masks,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure        = ixgbevf_dev_configure,
        .dev_start            = ixgbevf_dev_start,
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_stop,
        .dev_infos_get        = ixgbe_dev_info_get,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
};

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer into which the link status is saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

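        /*
         * The link status is held in a single 64-bit word. Using
         * rte_atomic64_cmpset() with the current value of dst as the
         * expected value makes the copy from src an atomic 64-bit
         * operation, which a plain struct assignment would not
         * guarantee on 32-bit platforms.
         */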
        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to write.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/*
 * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
                return 1;
        default:
                return 0;
        }
}

/*
 * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

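        /*
         * 82598 has a single interrupt mask clear register. On later
         * MACs (82599/X540) the upper half of EIMC holds the
         * miscellaneous causes, while the per-queue vectors are masked
         * through the two EIMC_EX registers, so all of them are
         * cleared here.
         */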
        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
        }
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
        uint32_t i;

        for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
}

static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
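        /*
         * Each 32-bit RQSMR/TQSM register packs four 8-bit queue-to-stat
         * mapping fields. For example, queue_id 10 maps to register
         * n = 10 / 4 = 2 at field offset 10 % 4 = 2, i.e. bits 23:16 of
         * RQSMR(2) (or TQSM(2) for a TX queue).
         */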

        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
                return -ENOSYS;

        PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);

        n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
                return -EIO;
        }
        offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG;

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsmr[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
                     "%s[%d] = 0x%08x\n",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
                     is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

        /* Now write the mapping in the appropriate register */
        if (is_rx) {
                PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        } else {
                PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
        return 0;
}

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
        for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
                /* rx */
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

                /* tx */
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
        }
}

/*
 * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                     struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ixgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
                return 0;
        }
        pci_dev = eth_dev->pci_dev;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource.addr;

        /* Initialize the shared code */
        diag = ixgbe_init_shared_code(hw);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
                return -EIO;
        }

        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = ixgbe_fc_full;
        hw->fc.current_mode = ixgbe_fc_full;
        hw->fc.pause_time = IXGBE_FC_PAUSE;
        hw->fc.low_water = IXGBE_FC_LO;
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                hw->fc.high_water[i] = IXGBE_FC_HI;
        hw->fc.send_xon = 1;

        ixgbe_disable_intr(hw);

        /* Make sure we have a good EEPROM before we read from it */
        diag = ixgbe_validate_eeprom_checksum(hw, &csum);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
                return -EIO;
        }

        diag = ixgbe_init_hw(hw);

        /*
         * Devices with copper phys will fail to initialise if ixgbe_init_hw()
         * is called too soon after the kernel driver unbinding/binding occurs.
         * The failure occurs in ixgbe_identify_phy_generic() for all devices,
         * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
         * also called. See ixgbe_identify_phy_82599(). The reason for the
         * failure is not known, and only occurs when virtualisation features
         * are disabled in the bios. A delay of 100ms was found to be enough by
         * trial-and-error, and is doubled to be safe.
         */
        if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
                rte_delay_ms(200);
                diag = ixgbe_init_hw(hw);
        }

        if (diag == IXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                    "LOM.  Please be aware there may be issues associated "
                    "with your hardware.\n If you are experiencing problems "
                    "please contact your Intel or hardware representative "
                    "who provided you with this hardware.\n");
        } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
        if (diag) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
                return -EIO;
        }

        /* pick up the PCI bus settings for reporting later */
        ixgbe_get_bus_info(hw);

        /* reset mappings for queue statistics hw counters */
        ixgbe_reset_qstat_mappings(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
                        hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        /* let hardware know driver is loaded */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
                PMD_INIT_LOG(DEBUG,
                             "MAC: %d, PHY: %d, SFP+: %d\n",
                             (int) hw->mac.type, (int) hw->phy.type,
                             (int) hw->phy.sfp_type);
        else
                PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
                             (int) hw->mac.type, (int) hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);

        rte_intr_callback_register(&(pci_dev->intr_handle),
                ixgbe_dev_interrupt_handler, (void *)eth_dev);

        return 0;
}

/*
 * Virtual Function device init
 */
static int
eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                     struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        int diag;

        PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");

        eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
        pci_dev = eth_dev->pci_dev;

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource.addr;

        /* Initialize the shared code */
        diag = ixgbe_init_shared_code(hw);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
                return -EIO;
        }

        /* init_mailbox_params */
        hw->mbx.ops.init_params(hw);

        /* Disable the interrupts for VF */
        ixgbevf_intr_disable(hw);

        hw->mac.num_rar_entries = hw->mac.max_rx_queues;
        diag = hw->mac.ops.reset_hw(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
                        hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* reset the hardware with the new settings */
        diag = hw->mac.ops.start_hw(hw);
        switch (diag) {
        case 0:
                break;
        default:
                PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
                return (diag);
        }

        PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
                         eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
                         "ixgbe_mac_82599_vf");

        return 0;
}

static struct eth_driver rte_ixgbe_pmd = {
        {
                .name = "rte_ixgbe_pmd",
                .id_table = pci_id_ixgbe_map,
                .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_ixgbevf_pmd = {
        {
                .name = "rte_ixgbevf_pmd",
                .id_table = pci_id_ixgbevf_map,
                .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
        },
        .eth_dev_init = eth_ixgbevf_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
 */
int
rte_ixgbe_pmd_init(void)
{
        PMD_INIT_FUNC_TRACE();

        rte_eth_driver_register(&rte_ixgbe_pmd);
        return 0;
}

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
 */
int
rte_ixgbevf_pmd_init(void)
{
        DEBUGFUNC("rte_ixgbevf_pmd_init");

        rte_eth_driver_register(&rte_ixgbevf_pmd);
        return (0);
}

static void
ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;

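        /*
         * The 4096-entry VLAN filter table is spread over 128 32-bit
         * VFTA registers: bits 11:5 of the VLAN id select the register
         * and bits 4:0 select the bit within it. For example, vlan_id
         * 100 maps to VFTA(3), bit 4.
         */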
        vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
        vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
        vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
        if (on)
                vfta |= vid_bit;
        else
                vfta &= ~vid_bit;
        IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);

        /* update local VFTA copy */
        shadow_vfta->vfta[vid_idx] = vfta;
}

static void
ixgbe_vlan_hw_support_disable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t vlnctrl;
        uint32_t rxdctl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Disable */
        vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        vlnctrl &= ~IXGBE_VLNCTRL_VFE;

        if (hw->mac.type == ixgbe_mac_82598EB)
                vlnctrl &= ~IXGBE_VLNCTRL_VME;
        else {
                /* On 82599 the VLAN enable is per-queue in RXDCTL */
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                        rxdctl &= ~IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
                }
        }
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

static void
ixgbe_vlan_hw_support_enable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t vlnctrl;
        uint32_t rxdctl;
        uint16_t i;

        PMD_INIT_FUNC_TRACE();

        /* Filter Table Enable */
        vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
        vlnctrl |= IXGBE_VLNCTRL_VFE;

        if (hw->mac.type == ixgbe_mac_82598EB)
                vlnctrl |= IXGBE_VLNCTRL_VME;
        else {
                /* On 82599 the VLAN enable is per-queue in RXDCTL */
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                        rxdctl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
                }
        }
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

        /* write whatever is in local vfta copy */
        for (i = 0; i < IXGBE_VFTA_SIZE; i++)
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
}

static int
ixgbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        /* set flag to update link status after init */
        intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

        return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ixgbe_dev_start(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;

        PMD_INIT_FUNC_TRACE();

        /* IXGBE devices don't support half duplex */
        if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
                        (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
                PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
                                dev->data->dev_conf.link_duplex,
                                dev->data->port_id);
                return -EINVAL;
        }

        /* stop adapter */
        hw->adapter_stopped = FALSE;
        ixgbe_stop_adapter(hw);

        /* reinitialize adapter
         * this calls reset and start */
        ixgbe_init_hw(hw);

        /* initialize transmission unit */
        ixgbe_dev_tx_init(dev);

        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbe_dev_rx_init(dev);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
                return err;
        }

        ixgbe_dev_rxtx_start(dev);

        if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
                err = hw->mac.ops.setup_sfp(hw);
                if (err)
                        goto error;
        }

        /* Turn on the laser */
        if (hw->phy.multispeed_fiber)
                ixgbe_enable_tx_laser(hw);

        err = ixgbe_check_link(hw, &speed, &link_up, 0);
        if (err)
                goto error;
        err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
        if (err)
                goto error;

        switch (dev->data->dev_conf.link_speed) {
        case ETH_LINK_SPEED_AUTONEG:
                speed = (hw->mac.type != ixgbe_mac_82598EB) ?
                                IXGBE_LINK_SPEED_82599_AUTONEG :
                                IXGBE_LINK_SPEED_82598_AUTONEG;
                break;
        case ETH_LINK_SPEED_100:
                /*
                 * Invalid for 82598 but error will be detected by
                 * ixgbe_setup_link()
                 */
                speed = IXGBE_LINK_SPEED_100_FULL;
                break;
        case ETH_LINK_SPEED_1000:
                speed = IXGBE_LINK_SPEED_1GB_FULL;
                break;
        case ETH_LINK_SPEED_10000:
                speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        default:
                PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
                                dev->data->dev_conf.link_speed, dev->data->port_id);
                return -EINVAL;
        }

        err = ixgbe_setup_link(hw, speed, negotiate, link_up);
        if (err)
                goto error;

        /* check if lsc interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.lsc != 0) {
                err = ixgbe_dev_interrupt_setup(dev);
                if (err)
                        goto error;
        }

        /*
         * If VLAN filtering is enabled, set up VLAN tag offload and filtering
         * and restore VFTA.
         */
        if (dev->data->dev_conf.rxmode.hw_vlan_filter)
                ixgbe_vlan_hw_support_enable(dev);
        else
                ixgbe_vlan_hw_support_disable(dev);

        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
                err = ixgbe_fdir_configure(dev);
                if (err)
                        goto error;
        }

        ixgbe_restore_statistics_mapping(dev);

        return (0);

error:
        PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
        ixgbe_dev_clear_queues(dev);
        return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
ixgbe_dev_stop(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        /* disable interrupts */
        ixgbe_disable_intr(hw);

        /* reset the NIC */
        ixgbe_reset_hw(hw);
        hw->adapter_stopped = FALSE;

        /* stop adapter */
        ixgbe_stop_adapter(hw);

        /* Turn off the laser */
        if (hw->phy.multispeed_fiber)
                ixgbe_disable_tx_laser(hw);

        ixgbe_dev_clear_queues(dev);

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_ixgbe_dev_atomic_write_link_status(dev, &link);
}

/*
 * Reset and stop device.
 */
static void
ixgbe_dev_close(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        ixgbe_reset_hw(hw);

        ixgbe_dev_stop(dev);
        hw->adapter_stopped = 1;

        ixgbe_disable_pcie_master(hw);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}

/*
 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
 */
static void
ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ixgbe_hw *hw =
                        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_stats *hw_stats =
                        IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        uint32_t bprc, lxon, lxoff, total;
        uint64_t total_missed_rx, total_qbrc, total_qprc;
        unsigned i;

        total_missed_rx = 0;
        total_qbrc = 0;
        total_qprc = 0;

        hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
        hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
        hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

        for (i = 0; i < 8; i++) {
                uint32_t mp;
                mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
                /* global total per queue */
                hw_stats->mpc[i] += mp;
                /* Running comprehensive total for stats display */
                total_missed_rx += hw_stats->mpc[i];
                if (hw->mac.type == ixgbe_mac_82598EB)
                        hw_stats->rnbc[i] +=
                            IXGBE_READ_REG(hw, IXGBE_RNBC(i));
                hw_stats->pxontxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                hw_stats->pxonrxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                hw_stats->pxofftxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
                hw_stats->pxoffrxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                hw_stats->pxon2offc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
        }
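        /*
         * The per-queue byte counters are wider than 32 bits and are
         * exposed as low/high register pairs (QBRC_L/QBRC_H and
         * QBTC_L/QBTC_H); both halves are recombined into the 64-bit
         * software totals below.
         */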
        for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
                hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
                hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
                hw_stats->qbrc[i] +=
                    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
                hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
                hw_stats->qbtc[i] +=
                    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
                hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));

                total_qprc += hw_stats->qprc[i];
                total_qbrc += hw_stats->qbrc[i];
        }
        hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
        hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
        hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

        /* Note that gprc counts missed packets */
        hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);

        if (hw->mac.type != ixgbe_mac_82598EB) {
                hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
                    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
                hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
                    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
                hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
                    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
                hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        } else {
                hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
                hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
                /* 82598 only has a counter in the high register */
                hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
                hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
                hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        }

        /*
         * Workaround: mprc hardware is incorrectly counting
         * broadcasts, so for now we subtract those.
         */
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        hw_stats->bprc += bprc;
        hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
        if (hw->mac.type == ixgbe_mac_82598EB)
                hw_stats->mprc -= bprc;

        hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
        hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
        hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
        hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
        hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
        hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

        lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
        hw_stats->lxontxc += lxon;
        lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
        hw_stats->lxofftxc += lxoff;
        total = lxon + lxoff;

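        /*
         * Transmitted XON/XOFF pause frames are included in the hardware
         * Tx counters, so the pause-frame total is subtracted below to
         * keep GPTC/MPTC/PTC64 (and the matching octet count, at
         * ETHER_MIN_LEN bytes per pause frame) reflecting data traffic.
         */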
        hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
        hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
        hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
        hw_stats->gptc -= total;
        hw_stats->mptc -= total;
        hw_stats->ptc64 -= total;
        hw_stats->gotc -= total * ETHER_MIN_LEN;

        hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
        hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
        hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
        hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
        hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
        hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
        hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
        hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
        hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
        hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
        hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
        hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
        hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
        hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
        hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
        hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
        hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
        hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
        /* Only read FCOE on 82599 */
        if (hw->mac.type != ixgbe_mac_82598EB) {
                hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
                hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
                hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
                hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
        }

        if (stats == NULL)
                return;

        /* Fill out the rte_eth_stats statistics structure */
        stats->ipackets = total_qprc;
        stats->ibytes = total_qbrc;
        stats->opackets = hw_stats->gptc;
        stats->obytes = hw_stats->gotc;
        stats->imcasts = hw_stats->mprc;

        for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
                stats->q_ipackets[i] = hw_stats->qprc[i];
                stats->q_opackets[i] = hw_stats->qptc[i];
                stats->q_ibytes[i] = hw_stats->qbrc[i];
                stats->q_obytes[i] = hw_stats->qbtc[i];
                stats->q_errors[i] = hw_stats->qprdc[i];
        }

        /* Rx Errors */
        stats->ierrors = total_missed_rx + hw_stats->crcerrs +
                hw_stats->rlec;

        stats->oerrors  = 0;

        /* Flow Director Stats registers */
        hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
        hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
        stats->fdirmatch = hw_stats->fdirmatch;
        stats->fdirmiss = hw_stats->fdirmiss;
}

static void
ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct ixgbe_hw_stats *stats =
                        IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* HW registers are cleared on read */
        ixgbe_dev_stats_get(dev, NULL);

        /* Reset software totals */
        memset(stats, 0, sizeof(*stats));
}

static void
ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
                          IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* Good Rx packets, include VF loopback */
        UPDATE_VF_STAT(IXGBE_VFGPRC,
            hw_stats->last_vfgprc, hw_stats->vfgprc);

        /* Good Rx octets, include VF loopback */
        UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
            hw_stats->last_vfgorc, hw_stats->vfgorc);

        /* Good Tx packets, include VF loopback */
        UPDATE_VF_STAT(IXGBE_VFGPTC,
            hw_stats->last_vfgptc, hw_stats->vfgptc);

        /* Good Tx octets, include VF loopback */
        UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
            hw_stats->last_vfgotc, hw_stats->vfgotc);

        /* Rx Multicast packets */
        UPDATE_VF_STAT(IXGBE_VFMPRC,
            hw_stats->last_vfmprc, hw_stats->vfmprc);

        if (stats == NULL)
                return;

        memset(stats, 0, sizeof(*stats));
        stats->ipackets = hw_stats->vfgprc;
        stats->ibytes = hw_stats->vfgorc;
        stats->opackets = hw_stats->vfgptc;
        stats->obytes = hw_stats->vfgotc;
        stats->imcasts = hw_stats->vfmprc;
}

static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
                        IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

        /* Sync HW register to the last stats */
        ixgbevf_dev_stats_get(dev, NULL);

        /* reset HW current stats */
        hw_stats->vfgprc = 0;
        hw_stats->vfgorc = 0;
        hw_stats->vfgptc = 0;
        hw_stats->vfgotc = 0;
        hw_stats->vfmprc = 0;
}

static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_info->max_rx_queues = hw->mac.max_rx_queues;
        dev_info->max_tx_queues = hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
        dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
}

/* return 0 means link status changed, -1 means not changed */
static int
ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link, old;
        ixgbe_link_speed link_speed;
        int link_up;
        int diag;

        link.link_status = 0;
        link.link_speed = 0;
        link.link_duplex = 0;
        memset(&old, 0, sizeof(old));
        rte_ixgbe_dev_atomic_read_link_status(dev, &old);

        /* check if it needs to wait to complete, if lsc interrupt is enabled */
        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
                diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
        else
                diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
        if (diag != 0) {
                link.link_speed = ETH_LINK_SPEED_100;
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                rte_ixgbe_dev_atomic_write_link_status(dev, &link);
                if (link.link_status == old.link_status)
                        return -1;
                return 0;
        }

        if (link_up == 0) {
                rte_ixgbe_dev_atomic_write_link_status(dev, &link);
                if (link.link_status == old.link_status)
                        return -1;
                return 0;
        }
        link.link_status = 1;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (link_speed) {
        default:
        case IXGBE_LINK_SPEED_UNKNOWN:
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                link.link_speed = ETH_LINK_SPEED_100;
                break;

        case IXGBE_LINK_SPEED_100_FULL:
                link.link_speed = ETH_LINK_SPEED_100;
                break;

        case IXGBE_LINK_SPEED_1GB_FULL:
                link.link_speed = ETH_LINK_SPEED_1000;
                break;

        case IXGBE_LINK_SPEED_10GB_FULL:
                link.link_speed = ETH_LINK_SPEED_10000;
                break;
        }
        rte_ixgbe_dev_atomic_write_link_status(dev, &link);

        if (link.link_status == old.link_status)
                return -1;

        return 0;
}

static void
ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fctrl;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fctrl;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl &= (~IXGBE_FCTRL_UPE);
        if (dev->data->all_multicast == 1)
                fctrl |= IXGBE_FCTRL_MPE;
        else
                fctrl &= (~IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fctrl;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_MPE;
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fctrl;

        if (dev->data->promiscuous == 1)
                return; /* must remain in all_multicast mode */

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl &= (~IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ixgbe_dev_link_status_print(dev);
        IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
        IXGBE_WRITE_FLUSH(hw);
        rte_intr_enable(&(dev->pci_dev->intr_handle));

        return 0;
}

/*
 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
        uint32_t eicr;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
        IXGBE_WRITE_FLUSH(hw);

        /* read-on-clear nic registers here */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        PMD_INIT_LOG(INFO, "eicr %x", eicr);
        if (eicr & IXGBE_EICR_LSC) {
                /* set flag for async link update */
                intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        }

        return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        rte_ixgbe_dev_atomic_read_link_status(dev, &link);
        if (link.link_status) {
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned)link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, "Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
        PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
                                dev->pci_dev->addr.domain,
                                dev->pci_dev->addr.bus,
                                dev->pci_dev->addr.devid,
                                dev->pci_dev->addr.function);
}
1428
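/*
 * Example output (values illustrative only, derived from the format
 * strings above):
 *
 *        Port 0: Link Up - speed 10000 Mbps - full-duplex
 *        PCI Address: 0000:03:00.0
 */
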
/*
 * Executes link_update once it is known that an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
                return -1;
        }
        ixgbe_dev_link_update(dev, 0);

        return 0;
}

/**
 * Interrupt handler registered as an alarm callback, so that handling of a
 * link-state interrupt can be delayed until the NIC has reached a stable
 * state. The ixgbe interrupt state is not stable right after the link goes
 * down, so the handler waits up to 4 seconds before reading the final
 * status.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* read EICR to clear any causes latched while the alarm was pending */
        IXGBE_READ_REG(hw, IXGBE_EICR);
        ixgbe_dev_interrupt_action(dev);
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
                rte_intr_enable(&(dev->pci_dev->intr_handle));
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
                IXGBE_WRITE_FLUSH(hw);
                ixgbe_dev_link_status_print(dev);
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
        }
}

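/*
 * Illustrative sketch (not part of the driver): the callback run by
 * _rte_eth_dev_callback_process() above is one an application registered
 * with rte_eth_dev_callback_register(); the callback signature is assumed
 * from rte_ethdev.h of this release.
 *
 *        static void
 *        lsc_event_cb(uint8_t port_id, enum rte_eth_event_type type,
 *                        void *arg)
 *        {
 *                struct rte_eth_link link;
 *
 *                rte_eth_link_get(port_id, &link);
 *                printf("port %u link %s\n", port_id,
 *                        link.link_status ? "up" : "down");
 *        }
 *
 *        rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                        lsc_event_cb, NULL);
 */
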
/**
 * Interrupt handler triggered by the NIC for handling a specific
 * interrupt, typically a link-state change.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
{
        int64_t timeout;
        struct rte_eth_link link;
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        /* get the link status before the update, to predict its direction */
        memset(&link, 0, sizeof(link));
        rte_ixgbe_dev_atomic_read_link_status(dev, &link);
        ixgbe_dev_interrupt_get_status(dev);
        ixgbe_dev_interrupt_action(dev);

        if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
                return;

        if (!link.link_status)
                /* link likely coming up: recheck 1 sec later, once stable */
                timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
        else
                /* link likely going down: recheck 4 sec later, once stable */
                timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;

        ixgbe_dev_link_status_print(dev);
        if (rte_eal_alarm_set(timeout * 1000,
                ixgbe_dev_interrupt_delayed_handler, param) < 0)
                PMD_INIT_LOG(ERR, "Error setting alarm");
}

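/*
 * The timeout constants above are in milliseconds while rte_eal_alarm_set()
 * takes microseconds, hence the "timeout * 1000" conversion.
 */
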
static int
ixgbe_dev_led_on(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
}

static int
ixgbe_dev_led_off(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw;

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
}

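/*
 * These two handlers back the generic LED control API, useful for
 * physically identifying a port; the wrapper names below are assumed from
 * rte_ethdev.h of this release.
 *
 *        rte_eth_led_on(port_id);
 *        rte_eth_led_off(port_id);
 */
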
static int
ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
        struct ixgbe_hw *hw;
        int err;
        uint32_t rx_buf_size;
        uint32_t max_high_water;
        enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
                ixgbe_fc_none,
                ixgbe_fc_rx_pause,
                ixgbe_fc_tx_pause,
                ixgbe_fc_full
        };

        PMD_INIT_FUNC_TRACE();

        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
        PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x\n", rx_buf_size);

        /*
         * Reserve at least one Ethernet frame for the watermark;
         * high_water/low_water are expressed in kilobytes on ixgbe.
         */
        max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
        if ((fc_conf->high_water > max_high_water) ||
                (fc_conf->high_water < fc_conf->low_water)) {
                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
                PMD_INIT_LOG(ERR, "high_water must be <= 0x%x\n", max_high_water);
                return (-EINVAL);
        }

        hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
        hw->fc.pause_time     = fc_conf->pause_time;
        hw->fc.high_water[0]  = fc_conf->high_water;
        hw->fc.low_water      = fc_conf->low_water;
        hw->fc.send_xon       = fc_conf->send_xon;

        err = ixgbe_fc_enable(hw, 0);
        /* Not negotiated is not an error case */
        if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
                return 0;
        }

        PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x\n", err);
        return -EIO;
}

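/*
 * Illustrative sketch (not part of the driver): the mapping array above
 * implies the generic mode ordering RTE_FC_NONE, RTE_FC_RX_PAUSE,
 * RTE_FC_TX_PAUSE, RTE_FC_FULL. A caller might fill the generic config as
 * below; the wrapper name is assumed from rte_ethdev.h of this release and
 * the watermark values are illustrative only.
 *
 *        struct rte_eth_fc_conf fc_conf = {
 *                .mode       = RTE_FC_FULL,
 *                .high_water = 0x80,        // KB, must fit in the Rx buffer
 *                .low_water  = 0x40,        // KB, must not exceed high_water
 *                .pause_time = 0x680,       // timer carried in XOFF frames
 *                .send_xon   = 1,
 *        };
 *        if (rte_eth_dev_flow_ctrl_set(port_id, &fc_conf) != 0)
 *                // handle -EINVAL / -EIO
 */
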
static void
ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t enable_addr = 1;       /* mark the RAR entry valid (RAH.AV) */

        ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
}

static void
ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ixgbe_clear_rar(hw, index);
}

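/*
 * Illustrative sketch (not part of the driver): RAR entries are normally
 * managed through the generic MAC address API; the wrapper names are
 * assumed from rte_ethdev.h of this release and the address is made up.
 *
 *        struct ether_addr addr = {{ 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 }};
 *
 *        rte_eth_dev_mac_addr_add(port_id, &addr, 0);    // pool 0
 *        rte_eth_dev_mac_addr_remove(port_id, &addr);
 */
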
/*
 * Virtual Function operations
 */
static void
ixgbevf_intr_disable(struct ixgbe_hw *hw)
{
        PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");

        /* Clear interrupt mask to stop interrupts from being generated */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

        IXGBE_WRITE_FLUSH(hw);
}

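/*
 * VTEIMC is the VF counterpart of the PF's EIMC register: writing
 * IXGBE_VF_IRQ_CLEAR_MASK masks every interrupt cause the VF owns.
 */
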
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
        struct rte_eth_conf *conf = &dev->data->dev_conf;

        if (!conf->rxmode.hw_strip_crc) {
                /*
                 * The VF has no ability to enable/disable HW CRC stripping;
                 * keep the behavior consistent with the host PF.
                 */
                PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
                conf->rxmode.hw_strip_crc = 1;
        }

        return 0;
}

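/*
 * Illustrative sketch (not part of the driver): a VF port should therefore
 * be configured with CRC stripping left enabled; the call names are
 * assumed from rte_ethdev.h of this release.
 *
 *        struct rte_eth_conf port_conf;
 *
 *        memset(&port_conf, 0, sizeof(port_conf));
 *        port_conf.rxmode.hw_strip_crc = 1;    // VF cannot turn this off
 *        rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 */
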
static int
ixgbevf_dev_start(struct rte_eth_dev *dev)
{
        int err = 0;
        PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");

        ixgbevf_dev_tx_init(dev);
        err = ixgbevf_dev_rx_init(dev);
        if (err) {
                ixgbe_dev_clear_queues(dev);
                PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
                return err;
        }
        ixgbevf_dev_rxtx_start(dev);

        return 0;
}

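/*
 * RX init is the only step above that can fail; on error the queues are
 * cleared with ixgbe_dev_clear_queues() before the error code is returned.
 */
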
static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");

        ixgbe_reset_hw(hw);
        hw->adapter_stopped = 0;
        ixgbe_stop_adapter(hw);
        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}