/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "ixgbe_regs.h"
/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40
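/*
 * Example with the defaults above: XOFF is requested once the RX packet
 * buffer fills past 0x80 * 1024 = 128 KB, and XON lets traffic resume once
 * it drains below 0x40 * 1024 = 64 KB.
 */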
/* Default minimum inter-interrupt interval for EITR configuration */
#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
/*
 *  Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

#define IXGBE_HKEY_MAX_INDEX 10
/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
#define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
#define DEFAULT_ETAG_ETYPE                     0x893f
#define IXGBE_ETAG_ETYPE                       0x00005084
#define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID                 0x80000000
#define IXGBE_RAH_ADTYPE                       0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
#define IXGBE_VMVIR_TAGA_MASK                  0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG                    0x00000004
#define IXGBE_VTEICR_MASK                      0x07

#define IXGBE_EXVET_VET_EXT_SHIFT              16
#define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
				  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
					     uint16_t queue_id,
					     uint8_t stat_idx,
					     uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
				size_t fw_size);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
			       enum rte_vlan_type vlan_type,
			       uint16_t tpid);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
		int on);
static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
			 uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
				       struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
				struct rte_pci_driver *drv);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
				  struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int  ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
				    uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
					 uint16_t queue, int on);
static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					    uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					     uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
				 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
		struct rte_eth_mirror_conf *mirror_conf,
		uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
		uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					  uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					   uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
			       uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
				      uint16_t queue_idx, uint16_t tx_rate);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
					 struct ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);
static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
			  struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);
static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp,
					    uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
				    struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
				     const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);
static int ixgbe_dev_l2_tunnel_eth_type_conf
	(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
static int ixgbe_dev_l2_tunnel_offload_set
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
	 uint32_t mask,
	 uint8_t en);
static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
					     enum rte_filter_op filter_op,
					     void *arg);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
	uint32_t latest = IXGBE_READ_REG(hw, reg);              \
	cur += (latest - last) & UINT_MAX;                      \
	last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
	u64 latest = ((new_msb << 32) | new_lsb);                \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;                                           \
}
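/*
 * Example of the 36-bit wraparound handling above: if the previous snapshot
 * was last = 0xFFFFFFF00 and the hardware counter wrapped around to
 * latest = 0x100, then (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL
 * yields 0x200, i.e. the 512 units that actually elapsed, instead of a
 * huge bogus delta.
 */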
#define IXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
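/*
 * Example: for queue q = 70 with 32-bit bitmap words (sizeof(uint32_t) *
 * NBBY = 32), IXGBE_SET_HWSTRIP sets bit 70 % 32 = 6 of bitmap[70 / 32 = 2],
 * and IXGBE_GET_HWSTRIP reads that same bit back into r.
 */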
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
	{ .vendor_id = 0, /* sentinel */ },
};
/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
	.nb_seg_max = IXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure        = ixgbe_dev_configure,
	.dev_start            = ixgbe_dev_start,
	.dev_stop             = ixgbe_dev_stop,
	.dev_set_link_up      = ixgbe_dev_set_link_up,
	.dev_set_link_down    = ixgbe_dev_set_link_down,
	.dev_close            = ixgbe_dev_close,
	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update          = ixgbe_dev_link_update,
	.stats_get            = ixgbe_dev_stats_get,
	.xstats_get           = ixgbe_dev_xstats_get,
	.xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
	.stats_reset          = ixgbe_dev_stats_reset,
	.xstats_reset         = ixgbe_dev_xstats_reset,
	.xstats_get_names     = ixgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	.fw_version_get       = ixgbe_fw_version_get,
	.dev_infos_get        = ixgbe_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbe_dev_mtu_set,
	.vlan_filter_set      = ixgbe_vlan_filter_set,
	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
	.vlan_offload_set     = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	.rx_queue_start	      = ixgbe_dev_rx_queue_start,
	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
	.tx_queue_start	      = ixgbe_dev_tx_queue_start,
	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.rx_queue_count       = ixgbe_dev_rx_queue_count,
	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.dev_led_on           = ixgbe_dev_led_on,
	.dev_led_off          = ixgbe_dev_led_off,
	.flow_ctrl_get        = ixgbe_flow_ctrl_get,
	.flow_ctrl_set        = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	.mac_addr_add         = ixgbe_add_rar,
	.mac_addr_remove      = ixgbe_remove_rar,
	.mac_addr_set         = ixgbe_set_default_mac_addr,
	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
	.mirror_rule_set      = ixgbe_mirror_rule_set,
	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
	.filter_ctrl          = ixgbe_dev_filter_ctrl,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.timesync_enable      = ixgbe_timesync_enable,
	.timesync_disable     = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
	.get_reg              = ixgbe_get_regs,
	.get_eeprom_length    = ixgbe_get_eeprom_length,
	.get_eeprom           = ixgbe_get_eeprom,
	.set_eeprom           = ixgbe_set_eeprom,
	.get_dcb_info         = ixgbe_dev_get_dcb_info,
	.timesync_adjust_time = ixgbe_timesync_adjust_time,
	.timesync_read_time   = ixgbe_timesync_read_time,
	.timesync_write_time  = ixgbe_timesync_write_time,
	.l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
	.l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
	.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
	.tm_ops_get           = ixgbe_tm_ops_get,
};
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure        = ixgbevf_dev_configure,
	.dev_start            = ixgbevf_dev_start,
	.dev_stop             = ixgbevf_dev_stop,
	.link_update          = ixgbevf_dev_link_update,
	.stats_get            = ixgbevf_dev_stats_get,
	.xstats_get           = ixgbevf_dev_xstats_get,
	.stats_reset          = ixgbevf_dev_stats_reset,
	.xstats_reset         = ixgbevf_dev_stats_reset,
	.xstats_get_names     = ixgbevf_dev_xstats_get_names,
	.dev_close            = ixgbevf_dev_close,
	.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
	.dev_infos_get        = ixgbevf_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbevf_dev_set_mtu,
	.vlan_filter_set      = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set     = ixgbevf_vlan_offload_set,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add         = ixgbevf_add_mac_addr,
	.mac_addr_remove      = ixgbevf_remove_mac_addr,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.mac_addr_set         = ixgbevf_set_default_mac_addr,
	.get_reg              = ixgbevf_get_regs,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
};
/* store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_add)},
	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_remove)},
	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fadd)},
	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fremove)},
	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
		fdirmatch)},
	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
		fdirmiss)},

	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
		fclast)},
	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
		fcoe_noddp)},
	{"rx_fcoe_no_direct_data_placement_ext_buff",
		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxontxc)},
	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxonrxc)},
	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxofftxc)},
	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxoffrxc)},
	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))
/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		out_pkts_untagged)},
	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_pkts_encrypted)},
	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
		out_pkts_protected)},
	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_octets_encrypted)},
	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
		out_octets_protected)},
	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		in_pkts_untagged)},
	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
		in_pkts_badtag)},
	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_nosci)},
	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unknownsci)},
	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
		in_octets_decrypted)},
	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
		in_octets_validated)},
	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unchecked)},
	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
		in_pkts_delayed)},
	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
		in_pkts_late)},
	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
		in_pkts_ok)},
	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_invalid)},
	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notvalid)},
	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unusedsa)},
	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
			   sizeof(rte_ixgbe_macsec_strings[0]))
/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
			   sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
		pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
			   sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
		sizeof(rte_ixgbevf_stats_strings[0]))
/*
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/*
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
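/*
 * Note on the two helpers above: struct rte_eth_link packs into 64 bits,
 * so a single rte_atomic64_cmpset() copies the whole link status in one
 * shot. The compare value is the destination's own current contents, so
 * the swap only fails if another writer updated the status between the
 * read of *dst and the cmpset itself.
 */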
/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
		status = IXGBE_SUCCESS;
	return status;
}
static void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
	IXGBE_WRITE_FLUSH(hw);
}
/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}
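/*
 * Note: the 82598 exposes a single EIMC register, while later MACs spread
 * the extended interrupt mask across EIMC plus two EIMC_EX registers,
 * hence the three writes in the non-82598 branch above.
 */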
/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}
static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if ((hw->mac.type != ixgbe_mac_82599EB) &&
	    (hw->mac.type != ixgbe_mac_X540) &&
	    (hw->mac.type != ixgbe_mac_X550) &&
	    (hw->mac.type != ixgbe_mac_X550EM_x) &&
	    (hw->mac.type != ixgbe_mac_X550EM_a))
		return -ENOSYS;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}
	return 0;
}
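/*
 * Worked example for the mapping above (hypothetical values): queue_id = 5
 * with NB_QMAP_FIELDS_PER_QSM_REG = 4 selects register n = 5 / 4 = 1 and
 * field offset = 5 % 4 = 1, so the 4-bit stat_idx is shifted left by
 * 8 * 1 = 8 bits before being OR'd into RQSMR[1] (RX) or TQSM[1] (TX).
 */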
static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i;

	/* write whatever was in stat mapping table to the NIC */
	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
		/* rx */
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

		/* tx */
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
	}
}
static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
	for (i = 0; i < dcb_max_tc; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
				 (uint8_t)(100/dcb_max_tc + (i & 1));
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
				 (uint8_t)(100/dcb_max_tc + (i & 1));
		tc->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities in 82599 */
	dcb_config->support.capabilities = 0xFF;

	/* we only support 4 TCs for X540, X550 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		dcb_config->num_tcs.pg_tcs = 4;
		dcb_config->num_tcs.pfc_tcs = 4;
	}
}
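/*
 * Example of the bandwidth split above: with 8 traffic classes,
 * 100/8 = 12 and the (i & 1) term alternates 12%/13% shares, so the
 * eight classes sum to exactly 4*12 + 4*13 = 100 percent.
 */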
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
	uint32_t mask;

	/*
	 * Phy lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock. Release of common lock
	 * is done automatically by swfw_sync function.
	 */
	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
	}
	ixgbe_release_swfw_semaphore(hw, mask);

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
	}
	ixgbe_release_swfw_semaphore(hw, mask);
}
/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_bw_conf *bw_conf =
		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i;

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not have been initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;
	/* Initialize the shared code (base driver) */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_shared_code(hw);
#else
	diag = ixgbe_init_shared_code(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}
	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

	/* Initialize DCB configuration */
	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
	ixgbe_dcb_init(hw, dcb_config);
	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		hw->fc.low_water[i] = IXGBE_FC_LO;
		hw->fc.high_water[i] = IXGBE_FC_HI;
	}
	hw->fc.send_xon = 1;
	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_hw(hw);
#else
	diag = ixgbe_init_hw(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */
	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the BIOS. A delay of 100ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
		diag = IXGBE_SUCCESS;

	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your Intel or hardware representative "
			     "who provided you with this hardware.");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	else if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}
	/* Reset the hw statistics */
	ixgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ixgbe_disable_intr(hw);

	/* reset mappings for queue statistics hw counters */
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
						    IXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}
	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ixgbe_pf_host_init(eth_dev);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* let hardware know driver is loaded */
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int) hw->mac.type, (int) hw->phy.type,
			     (int) hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int) hw->mac.type, (int) hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);
	rte_intr_callback_register(intr_handle,
				   ixgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ixgbe_enable_intr(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct ixgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize flow director filter list & hash */
	ixgbe_fdir_filter_init(eth_dev);

	/* initialize l2 tunnel filter list & hash */
	ixgbe_l2_tn_filter_init(eth_dev);

	TAILQ_INIT(&filter_ntuple_list);
	TAILQ_INIT(&filter_ethertype_list);
	TAILQ_INIT(&filter_syn_list);
	TAILQ_INIT(&filter_fdir_list);
	TAILQ_INIT(&filter_l2_tunnel_list);
	TAILQ_INIT(&ixgbe_flow_list);

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));

	return 0;
}
static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		ixgbe_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ixgbe_dev_interrupt_handler, eth_dev);

	/* uninitialize PF if max_vfs not zero */
	ixgbe_pf_host_uninit(eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	rte_free(eth_dev->data->hash_mac_addrs);
	eth_dev->data->hash_mac_addrs = NULL;

	/* remove all the fdir filters & hash */
	ixgbe_fdir_filter_uninit(eth_dev);

	/* remove all the L2 tunnel filters & hash */
	ixgbe_l2_tn_filter_uninit(eth_dev);

	/* Remove all ntuple filters of the device */
	ixgbe_ntuple_filter_uninit(eth_dev);

	/* clear all the filters list */
	ixgbe_filterlist_flush();

	return 0;
}
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple,
			     entries);
		rte_free(p_5tuple);
	}
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	struct ixgbe_fdir_filter *fdir_filter;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_handle)
		rte_hash_free(fdir_info->hash_handle);

	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	return 0;
}
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;

	if (l2_tn_info->hash_map)
		rte_free(l2_tn_info->hash_map);
	if (l2_tn_info->hash_handle)
		rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
			     l2_tn_filter,
			     entries);
		rte_free(l2_tn_filter);
	}

	return 0;
}
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(union ixgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", eth_dev->device->name);
	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ixgbe",
					  sizeof(struct ixgbe_fdir_filter *) *
					  IXGBE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		return -ENOMEM;
	}
	fdir_info->mask_added = FALSE;

	return 0;
}
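/*
 * Design note on the function above: flow-director state is kept in two
 * structures: the TAILQ preserves insertion order for full walks (flushing
 * or restoring filters after a reset), while the rte_hash keyed on the
 * whole union ixgbe_atr_input gives O(1) duplicate detection, and hash_map
 * translates a hash slot back to the matching filter entry.
 */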
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters l2_tn_hash_params = {
		.name = l2_tn_hash_name,
		.entries = IXGBE_MAX_L2_TN_FILTER_NUM,
		.key_len = sizeof(struct ixgbe_l2_tn_key),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&l2_tn_info->l2_tn_list);
	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
		 "l2_tn_%s", eth_dev->device->name);
	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
	if (!l2_tn_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
		return -EINVAL;
	}
	l2_tn_info->hash_map = rte_zmalloc("ixgbe",
					   sizeof(struct ixgbe_l2_tn_filter *) *
					   IXGBE_MAX_L2_TN_FILTER_NUM,
					   0);
	if (!l2_tn_info->hash_map) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate memory for L2 TN hash map!");
		return -ENOMEM;
	}
	l2_tn_info->e_tag_en = FALSE;
	l2_tn_info->e_tag_fwd_en = FALSE;
	l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;

	return 0;
}
1544 * Negotiate mailbox API version with the PF.
1545 * After a reset, the API version is always set to the basic one (ixgbe_mbox_api_10).
1546 * Then we try to negotiate starting with the most recent one.
1547 * If all negotiation attempts fail, then we will proceed with
1548 * the default one (ixgbe_mbox_api_10).
1551 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1555 /* start with highest supported, proceed down */
1556 static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1563 i != RTE_DIM(sup_ver) &&
1564 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1570 generate_random_mac_addr(struct ether_addr *mac_addr)
1574 /* Set Organizationally Unique Identifier (OUI) prefix. */
1575 mac_addr->addr_bytes[0] = 0x00;
1576 mac_addr->addr_bytes[1] = 0x09;
1577 mac_addr->addr_bytes[2] = 0xC0;
1578 /* Force indication of locally assigned MAC address. */
1579 mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1580 /* Generate the last 3 bytes of the MAC address with a random number. */
1581 random = rte_rand();
1582 memcpy(&mac_addr->addr_bytes[3], &random, 3);
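/*
 * For illustration: with the 00:09:C0 OUI prefix and the locally assigned
 * bit (0x02) OR'ed into byte 0, the generated addresses take the form
 * 02:09:C0:xx:xx:xx, where xx:xx:xx are the low three bytes of rte_rand().
 */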
1586 * Virtual Function device init
1589 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1593 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1594 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1595 struct ixgbe_hw *hw =
1596 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1597 struct ixgbe_vfta *shadow_vfta =
1598 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1599 struct ixgbe_hwstrip *hwstrip =
1600 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1601 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1603 PMD_INIT_FUNC_TRACE();
1605 eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1606 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1607 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1609 /* for secondary processes, we don't initialise any further as primary
1610 * has already done this work. Only check we don't need a different
1613 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1614 struct ixgbe_tx_queue *txq;
1615 /* The TX function in the primary process was set by the last queue
1616 * initialized; the Tx queues may not have been initialized by the primary process
1618 if (eth_dev->data->tx_queues) {
1619 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1620 ixgbe_set_tx_function(eth_dev, txq);
1622 /* Use default TX function if we get here */
1623 PMD_INIT_LOG(NOTICE,
1624 "No TX queues configured yet. Using default TX function.");
1627 ixgbe_set_rx_function(eth_dev);
1632 rte_eth_copy_pci_info(eth_dev, pci_dev);
1633 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1635 hw->device_id = pci_dev->id.device_id;
1636 hw->vendor_id = pci_dev->id.vendor_id;
1637 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1639 /* initialize the vfta */
1640 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1642 /* initialize the hw strip bitmap */
1643 memset(hwstrip, 0, sizeof(*hwstrip));
1645 /* Initialize the shared code (base driver) */
1646 diag = ixgbe_init_shared_code(hw);
1647 if (diag != IXGBE_SUCCESS) {
1648 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1652 /* init_mailbox_params */
1653 hw->mbx.ops.init_params(hw);
1655 /* Reset the hw statistics */
1656 ixgbevf_dev_stats_reset(eth_dev);
1658 /* Disable the interrupts for VF */
1659 ixgbevf_intr_disable(hw);
1661 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1662 diag = hw->mac.ops.reset_hw(hw);
1665 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1666 * the underlying PF driver has not assigned a MAC address to the VF.
1667 * In this case, assign a random MAC address.
1669 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1670 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1674 /* negotiate mailbox API version to use with the PF. */
1675 ixgbevf_negotiate_api(hw);
1677 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1678 ixgbevf_get_queues(hw, &tcs, &tc);
1680 /* Allocate memory for storing MAC addresses */
1681 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1682 hw->mac.num_rar_entries, 0);
1683 if (eth_dev->data->mac_addrs == NULL) {
1685 "Failed to allocate %u bytes needed to store "
1687 ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1691 /* Generate a random MAC address, if none was assigned by PF. */
1692 if (is_zero_ether_addr(perm_addr)) {
1693 generate_random_mac_addr(perm_addr);
1694 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1696 rte_free(eth_dev->data->mac_addrs);
1697 eth_dev->data->mac_addrs = NULL;
1700 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1701 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1702 "%02x:%02x:%02x:%02x:%02x:%02x",
1703 perm_addr->addr_bytes[0],
1704 perm_addr->addr_bytes[1],
1705 perm_addr->addr_bytes[2],
1706 perm_addr->addr_bytes[3],
1707 perm_addr->addr_bytes[4],
1708 perm_addr->addr_bytes[5]);
1711 /* Copy the permanent MAC address */
1712 ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1714 /* reset the hardware with the new settings */
1715 diag = hw->mac.ops.start_hw(hw);
1721 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1725 rte_intr_callback_register(intr_handle,
1726 ixgbevf_dev_interrupt_handler, eth_dev);
1727 rte_intr_enable(intr_handle);
1728 ixgbevf_intr_enable(hw);
1730 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1731 eth_dev->data->port_id, pci_dev->id.vendor_id,
1732 pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1737 /* Virtual Function device uninit */
1740 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1742 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1743 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1744 struct ixgbe_hw *hw;
1746 PMD_INIT_FUNC_TRACE();
1748 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1751 hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1753 if (hw->adapter_stopped == 0)
1754 ixgbevf_dev_close(eth_dev);
1756 eth_dev->dev_ops = NULL;
1757 eth_dev->rx_pkt_burst = NULL;
1758 eth_dev->tx_pkt_burst = NULL;
1760 /* Disable the interrupts for VF */
1761 ixgbevf_intr_disable(hw);
1763 rte_free(eth_dev->data->mac_addrs);
1764 eth_dev->data->mac_addrs = NULL;
1766 rte_intr_disable(intr_handle);
1767 rte_intr_callback_unregister(intr_handle,
1768 ixgbevf_dev_interrupt_handler, eth_dev);
1773 static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1774 struct rte_pci_device *pci_dev)
1776 return rte_eth_dev_pci_generic_probe(pci_dev,
1777 sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
1780 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1782 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
1785 static struct rte_pci_driver rte_ixgbe_pmd = {
1786 .id_table = pci_id_ixgbe_map,
1787 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1788 .probe = eth_ixgbe_pci_probe,
1789 .remove = eth_ixgbe_pci_remove,
1792 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1793 struct rte_pci_device *pci_dev)
1795 return rte_eth_dev_pci_generic_probe(pci_dev,
1796 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1799 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1801 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1805 * virtual function driver struct
1807 static struct rte_pci_driver rte_ixgbevf_pmd = {
1808 .id_table = pci_id_ixgbevf_map,
1809 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1810 .probe = eth_ixgbevf_pci_probe,
1811 .remove = eth_ixgbevf_pci_remove,
1815 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1817 struct ixgbe_hw *hw =
1818 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1819 struct ixgbe_vfta *shadow_vfta =
1820 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1825 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1826 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1827 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1832 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1834 /* update local VFTA copy */
1835 shadow_vfta->vfta[vid_idx] = vfta;
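/*
 * Worked example of the VFTA indexing above: the 4096-entry VLAN table is
 * spread over 128 32-bit VFTA registers, so for vlan_id = 100:
 *	vid_idx = (100 >> 5) & 0x7F = 3		-> register VFTA[3]
 *	vid_bit = 1 << (100 & 0x1F) = 1 << 4	-> bit 4 of that register
 */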
1841 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1844 ixgbe_vlan_hw_strip_enable(dev, queue);
1846 ixgbe_vlan_hw_strip_disable(dev, queue);
1850 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1851 enum rte_vlan_type vlan_type,
1854 struct ixgbe_hw *hw =
1855 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1860 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1861 qinq &= IXGBE_DMATXCTL_GDV;
1863 switch (vlan_type) {
1864 case ETH_VLAN_TYPE_INNER:
1866 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1867 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1868 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1869 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1870 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1871 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1872 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1875 PMD_DRV_LOG(ERR, "Inner type is not supported"
1879 case ETH_VLAN_TYPE_OUTER:
1881 /* Only the high 16 bits are valid */
1882 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1883 IXGBE_EXVET_VET_EXT_SHIFT);
1885 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1886 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1887 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1888 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1889 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1890 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1891 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1897 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1905 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1907 struct ixgbe_hw *hw =
1908 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1911 PMD_INIT_FUNC_TRACE();
1913 /* Filter Table Disable */
1914 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1915 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1917 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1921 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1923 struct ixgbe_hw *hw =
1924 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1925 struct ixgbe_vfta *shadow_vfta =
1926 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1930 PMD_INIT_FUNC_TRACE();
1932 /* Filter Table Enable */
1933 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1934 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1935 vlnctrl |= IXGBE_VLNCTRL_VFE;
1937 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1939 /* write whatever is in local vfta copy */
1940 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1941 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1945 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1947 struct ixgbe_hwstrip *hwstrip =
1948 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1949 struct ixgbe_rx_queue *rxq;
1951 if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1955 IXGBE_SET_HWSTRIP(hwstrip, queue);
1957 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1959 if (queue >= dev->data->nb_rx_queues)
1962 rxq = dev->data->rx_queues[queue];
1965 rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
1967 rxq->vlan_flags = PKT_RX_VLAN_PKT;
1971 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1973 struct ixgbe_hw *hw =
1974 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1977 PMD_INIT_FUNC_TRACE();
1979 if (hw->mac.type == ixgbe_mac_82598EB) {
1980 /* No queue-level support */
1981 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
1985 /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
1986 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1987 ctrl &= ~IXGBE_RXDCTL_VME;
1988 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1990 /* record this setting in the per-queue HW strip bitmap */
1991 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1995 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1997 struct ixgbe_hw *hw =
1998 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2001 PMD_INIT_FUNC_TRACE();
2003 if (hw->mac.type == ixgbe_mac_82598EB) {
2004 /* No queue-level support */
2005 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2009 /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
2010 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2011 ctrl |= IXGBE_RXDCTL_VME;
2012 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2014 /* record this setting in the per-queue HW strip bitmap */
2015 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2019 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
2021 struct ixgbe_hw *hw =
2022 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2025 struct ixgbe_rx_queue *rxq;
2027 PMD_INIT_FUNC_TRACE();
2029 if (hw->mac.type == ixgbe_mac_82598EB) {
2030 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2031 ctrl &= ~IXGBE_VLNCTRL_VME;
2032 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2034 /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
2035 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2036 rxq = dev->data->rx_queues[i];
2037 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2038 ctrl &= ~IXGBE_RXDCTL_VME;
2039 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2041 /* record this setting in the per-queue HW strip bitmap */
2042 ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
2048 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
2050 struct ixgbe_hw *hw =
2051 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2054 struct ixgbe_rx_queue *rxq;
2056 PMD_INIT_FUNC_TRACE();
2058 if (hw->mac.type == ixgbe_mac_82598EB) {
2059 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2060 ctrl |= IXGBE_VLNCTRL_VME;
2061 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2063 /* On other 10G NICs, VLAN stripping can be set up per queue in RXDCTL */
2064 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2065 rxq = dev->data->rx_queues[i];
2066 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2067 ctrl |= IXGBE_RXDCTL_VME;
2068 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2070 /* record this setting in the per-queue HW strip bitmap */
2071 ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
2077 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2079 struct ixgbe_hw *hw =
2080 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2083 PMD_INIT_FUNC_TRACE();
2085 /* DMATXCTL: Generic Double VLAN Disable */
2086 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2087 ctrl &= ~IXGBE_DMATXCTL_GDV;
2088 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2090 /* CTRL_EXT: Global Double VLAN Disable */
2091 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2092 ctrl &= ~IXGBE_EXTENDED_VLAN;
2093 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2098 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2100 struct ixgbe_hw *hw =
2101 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2104 PMD_INIT_FUNC_TRACE();
2106 /* DMATXCTL: Generic Double VLAN Enable */
2107 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2108 ctrl |= IXGBE_DMATXCTL_GDV;
2109 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2111 /* CTRL_EXT: Global Double VLAN Enable */
2112 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2113 ctrl |= IXGBE_EXTENDED_VLAN;
2114 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2116 /* Clear pooling mode of PFVTCTL. It's required by X550. */
2117 if (hw->mac.type == ixgbe_mac_X550 ||
2118 hw->mac.type == ixgbe_mac_X550EM_x ||
2119 hw->mac.type == ixgbe_mac_X550EM_a) {
2120 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2121 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2122 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2126 * The VET EXT field in the EXVET register is 0x8100 by default,
2127 * so there is no need to change it. The same applies to the VT field of the DMATXCTL register.
2132 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2134 if (mask & ETH_VLAN_STRIP_MASK) {
2135 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2136 ixgbe_vlan_hw_strip_enable_all(dev);
2138 ixgbe_vlan_hw_strip_disable_all(dev);
2141 if (mask & ETH_VLAN_FILTER_MASK) {
2142 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2143 ixgbe_vlan_hw_filter_enable(dev);
2145 ixgbe_vlan_hw_filter_disable(dev);
2148 if (mask & ETH_VLAN_EXTEND_MASK) {
2149 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2150 ixgbe_vlan_hw_extend_enable(dev);
2152 ixgbe_vlan_hw_extend_disable(dev);
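/*
 * Applications normally reach this handler through the generic ethdev API
 * rather than calling it directly. A minimal sketch, assuming port_id is an
 * initialized ixgbe port:
 *
 *	// request VLAN stripping and filtering on, extend (QinQ) off
 *	rte_eth_dev_set_vlan_offload(port_id,
 *			ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
 */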
2157 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2159 struct ixgbe_hw *hw =
2160 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2161 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2162 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2164 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2165 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2169 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2171 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2176 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2179 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2185 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
2186 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
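/*
 * Worked example, assuming max_vfs = 16 and nb_rx_q = 4: SR-IOV runs in
 * 32-pool mode, each pool owns 4 queues, and the PF's default pool queues
 * start at index 16 * 4 = 64.
 */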
2192 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2194 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2195 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2196 uint16_t nb_rx_q = dev->data->nb_rx_queues;
2197 uint16_t nb_tx_q = dev->data->nb_tx_queues;
2199 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2200 /* check multi-queue mode */
2201 switch (dev_conf->rxmode.mq_mode) {
2202 case ETH_MQ_RX_VMDQ_DCB:
2203 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2205 case ETH_MQ_RX_VMDQ_DCB_RSS:
2206 /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2207 PMD_INIT_LOG(ERR, "SRIOV active,"
2208 " unsupported mq_mode rx %d.",
2209 dev_conf->rxmode.mq_mode);
2212 case ETH_MQ_RX_VMDQ_RSS:
2213 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2214 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2215 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2216 PMD_INIT_LOG(ERR, "SRIOV is active,"
2217 " invalid queue number"
2218 " for VMDQ RSS, allowed"
2219 " values are 1, 2 or 4.");
2223 case ETH_MQ_RX_VMDQ_ONLY:
2224 case ETH_MQ_RX_NONE:
2225 /* if no mq mode is configured, use the default scheme */
2226 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2227 if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
2228 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
2230 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2231 /* SRIOV only works in VMDq-enabled mode */
2232 PMD_INIT_LOG(ERR, "SRIOV is active,"
2233 " wrong mq_mode rx %d.",
2234 dev_conf->rxmode.mq_mode);
2238 switch (dev_conf->txmode.mq_mode) {
2239 case ETH_MQ_TX_VMDQ_DCB:
2240 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2241 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2243 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2244 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2248 /* check valid queue number */
2249 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2250 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2251 PMD_INIT_LOG(ERR, "SRIOV is active,"
2252 " nb_rx_q=%d nb_tx_q=%d queue number"
2253 " must be less than or equal to %d.",
2255 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2259 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2260 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2264 /* check configuration for VMDq+DCB mode */
2265 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2266 const struct rte_eth_vmdq_dcb_conf *conf;
2268 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2269 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2270 IXGBE_VMDQ_DCB_NB_QUEUES);
2273 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2274 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2275 conf->nb_queue_pools == ETH_32_POOLS)) {
2276 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2277 " nb_queue_pools must be %d or %d.",
2278 ETH_16_POOLS, ETH_32_POOLS);
2282 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2283 const struct rte_eth_vmdq_dcb_tx_conf *conf;
2285 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2286 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2287 IXGBE_VMDQ_DCB_NB_QUEUES);
2290 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2291 if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2292 conf->nb_queue_pools == ETH_32_POOLS)) {
2293 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2294 " nb_queue_pools != %d and"
2295 " nb_queue_pools != %d.",
2296 ETH_16_POOLS, ETH_32_POOLS);
2301 /* For DCB mode check our configuration before we go further */
2302 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2303 const struct rte_eth_dcb_rx_conf *conf;
2305 if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
2306 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
2307 IXGBE_DCB_NB_QUEUES);
2310 conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2311 if (!(conf->nb_tcs == ETH_4_TCS ||
2312 conf->nb_tcs == ETH_8_TCS)) {
2313 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2314 " and nb_tcs != %d.",
2315 ETH_4_TCS, ETH_8_TCS);
2320 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2321 const struct rte_eth_dcb_tx_conf *conf;
2323 if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
2324 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
2325 IXGBE_DCB_NB_QUEUES);
2328 conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2329 if (!(conf->nb_tcs == ETH_4_TCS ||
2330 conf->nb_tcs == ETH_8_TCS)) {
2331 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2332 " and nb_tcs != %d.",
2333 ETH_4_TCS, ETH_8_TCS);
2339 * When DCB/VT is off, maximum number of queues changes,
2340 * except for 82598EB, which remains constant.
2342 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2343 hw->mac.type != ixgbe_mac_82598EB) {
2344 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2346 "Neither VT nor DCB are enabled, "
2348 IXGBE_NONE_MODE_TX_NB_QUEUES);
2357 ixgbe_dev_configure(struct rte_eth_dev *dev)
2359 struct ixgbe_interrupt *intr =
2360 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2361 struct ixgbe_adapter *adapter =
2362 (struct ixgbe_adapter *)dev->data->dev_private;
2365 PMD_INIT_FUNC_TRACE();
2366 /* multiple queue mode checking */
2367 ret = ixgbe_check_mq_mode(dev);
2369 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2374 /* set flag to update link status after init */
2375 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2378 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
2379 * allocation or vector Rx preconditions, we will reset it.
2381 adapter->rx_bulk_alloc_allowed = true;
2382 adapter->rx_vec_allowed = true;
2388 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2390 struct ixgbe_hw *hw =
2391 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2392 struct ixgbe_interrupt *intr =
2393 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2396 /* only set it up on X550EM_X */
2397 if (hw->mac.type == ixgbe_mac_X550EM_x) {
2398 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2399 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2400 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2401 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2402 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2407 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2408 uint16_t tx_rate, uint64_t q_msk)
2410 struct ixgbe_hw *hw;
2411 struct ixgbe_vf_info *vfinfo;
2412 struct rte_eth_link link;
2413 uint8_t nb_q_per_pool;
2414 uint32_t queue_stride;
2415 uint32_t queue_idx, idx = 0, vf_idx;
2417 uint16_t total_rate = 0;
2418 struct rte_pci_device *pci_dev;
2420 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2421 rte_eth_link_get_nowait(dev->data->port_id, &link);
2423 if (vf >= pci_dev->max_vfs)
2426 if (tx_rate > link.link_speed)
2432 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2433 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2434 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2435 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2436 queue_idx = vf * queue_stride;
2437 queue_end = queue_idx + nb_q_per_pool - 1;
2438 if (queue_end >= hw->mac.max_tx_queues)
2442 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2445 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2447 total_rate += vfinfo[vf_idx].tx_rate[idx];
2453 /* Store tx_rate for this vf. */
2454 for (idx = 0; idx < nb_q_per_pool; idx++) {
2455 if (((uint64_t)0x1 << idx) & q_msk) {
2456 if (vfinfo[vf].tx_rate[idx] != tx_rate)
2457 vfinfo[vf].tx_rate[idx] = tx_rate;
2458 total_rate += tx_rate;
2462 if (total_rate > dev->data->dev_link.link_speed) {
2463 /* Reset stored TX rate of the VF if it causes exceed
2466 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2470 /* Set RTTBCNRC of each queue/pool for vf X */
2471 for (; queue_idx <= queue_end; queue_idx++) {
2473 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
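/*
 * Worked example of the queue-range math above: with SR-IOV in 32-pool
 * mode, queue_stride = 128 / 32 = 4, so for vf = 5 with nb_q_per_pool = 4
 * the loop programs the rate limit for queues 20..23.
 */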
2481 * Configure device link speed and setup link.
2482 * It returns 0 on success.
2485 ixgbe_dev_start(struct rte_eth_dev *dev)
2487 struct ixgbe_hw *hw =
2488 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2489 struct ixgbe_vf_info *vfinfo =
2490 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2491 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2492 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2493 uint32_t intr_vector = 0;
2494 int err, link_up = 0, negotiate = 0;
2499 uint32_t *link_speeds;
2501 PMD_INIT_FUNC_TRACE();
2503 /* IXGBE devices don't support:
2504 * - half duplex (checked afterwards for valid speeds)
2505 * - fixed speed: TODO implement
2507 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2508 PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fixed speed is not supported",
2509 dev->data->port_id);
2513 /* disable uio/vfio intr/eventfd mapping */
2514 rte_intr_disable(intr_handle);
2517 hw->adapter_stopped = 0;
2518 ixgbe_stop_adapter(hw);
2520 /* reinitialize adapter
2521 * this calls reset and start
2523 status = ixgbe_pf_reset_hw(hw);
2526 hw->mac.ops.start_hw(hw);
2527 hw->mac.get_link_status = true;
2529 /* configure PF module if SRIOV enabled */
2530 ixgbe_pf_host_configure(dev);
2532 ixgbe_dev_phy_intr_setup(dev);
2534 /* check and configure queue intr-vector mapping */
2535 if ((rte_intr_cap_multiple(intr_handle) ||
2536 !RTE_ETH_DEV_SRIOV(dev).active) &&
2537 dev->data->dev_conf.intr_conf.rxq != 0) {
2538 intr_vector = dev->data->nb_rx_queues;
2539 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2540 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2541 IXGBE_MAX_INTR_QUEUE_NUM);
2544 if (rte_intr_efd_enable(intr_handle, intr_vector))
2548 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2549 intr_handle->intr_vec =
2550 rte_zmalloc("intr_vec",
2551 dev->data->nb_rx_queues * sizeof(int), 0);
2552 if (intr_handle->intr_vec == NULL) {
2553 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2554 " intr_vec", dev->data->nb_rx_queues);
2559 /* configure MSI-X for sleep until Rx interrupt */
2560 ixgbe_configure_msix(dev);
2562 /* initialize transmission unit */
2563 ixgbe_dev_tx_init(dev);
2565 /* This can fail when allocating mbufs for descriptor rings */
2566 err = ixgbe_dev_rx_init(dev);
2568 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2572 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2573 ETH_VLAN_EXTEND_MASK;
2574 ixgbe_vlan_offload_set(dev, mask);
2576 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2577 /* Enable vlan filtering for VMDq */
2578 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2581 /* Configure DCB hw */
2582 ixgbe_configure_dcb(dev);
2584 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2585 err = ixgbe_fdir_configure(dev);
2590 /* Restore vf rate limit */
2591 if (vfinfo != NULL) {
2592 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2593 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2594 if (vfinfo[vf].tx_rate[idx] != 0)
2595 ixgbe_set_vf_rate_limit(
2597 vfinfo[vf].tx_rate[idx],
2601 ixgbe_restore_statistics_mapping(dev);
2603 err = ixgbe_dev_rxtx_start(dev);
2605 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2609 /* Skip link setup if loopback mode is enabled for 82599. */
2610 if (hw->mac.type == ixgbe_mac_82599EB &&
2611 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2612 goto skip_link_setup;
2614 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2615 err = hw->mac.ops.setup_sfp(hw);
2620 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2621 /* Turn on the copper */
2622 ixgbe_set_phy_power(hw, true);
2624 /* Turn on the laser */
2625 ixgbe_enable_tx_laser(hw);
2628 err = ixgbe_check_link(hw, &speed, &link_up, 0);
2631 dev->data->dev_link.link_status = link_up;
2633 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2637 link_speeds = &dev->data->dev_conf.link_speeds;
2638 if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2639 ETH_LINK_SPEED_10G)) {
2640 PMD_INIT_LOG(ERR, "Invalid link setting");
2645 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2646 speed = (hw->mac.type != ixgbe_mac_82598EB) ?
2647 IXGBE_LINK_SPEED_82599_AUTONEG :
2648 IXGBE_LINK_SPEED_82598_AUTONEG;
2650 if (*link_speeds & ETH_LINK_SPEED_10G)
2651 speed |= IXGBE_LINK_SPEED_10GB_FULL;
2652 if (*link_speeds & ETH_LINK_SPEED_1G)
2653 speed |= IXGBE_LINK_SPEED_1GB_FULL;
2654 if (*link_speeds & ETH_LINK_SPEED_100M)
2655 speed |= IXGBE_LINK_SPEED_100_FULL;
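/*
 * At this point `speed` holds IXGBE_LINK_SPEED_* bits; e.g. a link_speeds
 * of ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G maps to
 * IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL before being
 * handed to ixgbe_setup_link() below.
 */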
2658 err = ixgbe_setup_link(hw, speed, link_up);
2664 if (rte_intr_allow_others(intr_handle)) {
2665 /* check if lsc interrupt is enabled */
2666 if (dev->data->dev_conf.intr_conf.lsc != 0)
2667 ixgbe_dev_lsc_interrupt_setup(dev);
2668 ixgbe_dev_macsec_interrupt_setup(dev);
2670 rte_intr_callback_unregister(intr_handle,
2671 ixgbe_dev_interrupt_handler, dev);
2672 if (dev->data->dev_conf.intr_conf.lsc != 0)
2673 PMD_INIT_LOG(INFO, "lsc interrupt won't be enabled because"
2674 " there is no interrupt multiplexing");
2677 /* check if rxq interrupt is enabled */
2678 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2679 rte_intr_dp_is_en(intr_handle))
2680 ixgbe_dev_rxq_interrupt_setup(dev);
2682 /* enable uio/vfio intr/eventfd mapping */
2683 rte_intr_enable(intr_handle);
2685 /* resume enabled intr since hw reset */
2686 ixgbe_enable_intr(dev);
2687 ixgbe_l2_tunnel_conf(dev);
2688 ixgbe_filter_restore(dev);
2693 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2694 ixgbe_dev_clear_queues(dev);
2699 * Stop device: disable rx and tx functions to allow for reconfiguring.
2702 ixgbe_dev_stop(struct rte_eth_dev *dev)
2704 struct rte_eth_link link;
2705 struct ixgbe_hw *hw =
2706 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2707 struct ixgbe_vf_info *vfinfo =
2708 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2709 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2710 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2713 PMD_INIT_FUNC_TRACE();
2715 /* disable interrupts */
2716 ixgbe_disable_intr(hw);
2719 ixgbe_pf_reset_hw(hw);
2720 hw->adapter_stopped = 0;
2723 ixgbe_stop_adapter(hw);
2725 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2726 vfinfo[vf].clear_to_send = false;
2728 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2729 /* Turn off the copper */
2730 ixgbe_set_phy_power(hw, false);
2732 /* Turn off the laser */
2733 ixgbe_disable_tx_laser(hw);
2736 ixgbe_dev_clear_queues(dev);
2738 /* Clear stored conf */
2739 dev->data->scattered_rx = 0;
2742 /* Clear recorded link status */
2743 memset(&link, 0, sizeof(link));
2744 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
2746 if (!rte_intr_allow_others(intr_handle))
2747 /* restore the default handler */
2748 rte_intr_callback_register(intr_handle,
2749 ixgbe_dev_interrupt_handler,
2752 /* Clean datapath event and queue/vec mapping */
2753 rte_intr_efd_disable(intr_handle);
2754 if (intr_handle->intr_vec != NULL) {
2755 rte_free(intr_handle->intr_vec);
2756 intr_handle->intr_vec = NULL;
2761 * Set device link up: enable tx.
2764 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2766 struct ixgbe_hw *hw =
2767 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2768 if (hw->mac.type == ixgbe_mac_82599EB) {
2769 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2770 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2771 /* Not supported in bypass mode */
2772 PMD_INIT_LOG(ERR, "Set link up is not supported "
2773 "by device id 0x%x", hw->device_id);
2779 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2780 /* Turn on the copper */
2781 ixgbe_set_phy_power(hw, true);
2783 /* Turn on the laser */
2784 ixgbe_enable_tx_laser(hw);
2791 * Set device link down: disable tx.
2794 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2796 struct ixgbe_hw *hw =
2797 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2798 if (hw->mac.type == ixgbe_mac_82599EB) {
2799 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2800 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2801 /* Not supported in bypass mode */
2802 PMD_INIT_LOG(ERR, "Set link down is not supported "
2803 "by device id 0x%x", hw->device_id);
2809 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2810 /* Turn off the copper */
2811 ixgbe_set_phy_power(hw, false);
2813 /* Turn off the laser */
2814 ixgbe_disable_tx_laser(hw);
2821 * Reset and stop the device.
2824 ixgbe_dev_close(struct rte_eth_dev *dev)
2826 struct ixgbe_hw *hw =
2827 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2829 PMD_INIT_FUNC_TRACE();
2831 ixgbe_pf_reset_hw(hw);
2833 ixgbe_dev_stop(dev);
2834 hw->adapter_stopped = 1;
2836 ixgbe_dev_free_queues(dev);
2838 ixgbe_disable_pcie_master(hw);
2840 /* reprogram the RAR[0] in case user changed it. */
2841 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2845 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
2846 struct ixgbe_hw_stats *hw_stats,
2847 struct ixgbe_macsec_stats *macsec_stats,
2848 uint64_t *total_missed_rx, uint64_t *total_qbrc,
2849 uint64_t *total_qprc, uint64_t *total_qprdc)
2851 uint32_t bprc, lxon, lxoff, total;
2852 uint32_t delta_gprc = 0;
2854 /* Workaround for RX byte count not including CRC bytes when CRC
2855 * strip is enabled. CRC bytes are removed from counters when crc_strip
2858 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
2859 IXGBE_HLREG0_RXCRCSTRP);
2861 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2862 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
2863 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
2864 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
2866 for (i = 0; i < 8; i++) {
2867 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2869 /* global total per queue */
2870 hw_stats->mpc[i] += mp;
2871 /* Running comprehensive total for stats display */
2872 *total_missed_rx += hw_stats->mpc[i];
2873 if (hw->mac.type == ixgbe_mac_82598EB) {
2874 hw_stats->rnbc[i] +=
2875 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2876 hw_stats->pxonrxc[i] +=
2877 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
2878 hw_stats->pxoffrxc[i] +=
2879 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
2881 hw_stats->pxonrxc[i] +=
2882 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
2883 hw_stats->pxoffrxc[i] +=
2884 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
2885 hw_stats->pxon2offc[i] +=
2886 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
2888 hw_stats->pxontxc[i] +=
2889 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
2890 hw_stats->pxofftxc[i] +=
2891 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
2893 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
2894 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
2895 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
2896 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
2898 delta_gprc += delta_qprc;
2900 hw_stats->qprc[i] += delta_qprc;
2901 hw_stats->qptc[i] += delta_qptc;
2903 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
2904 hw_stats->qbrc[i] +=
2905 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
2907 hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
2909 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
2910 hw_stats->qbtc[i] +=
2911 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
2913 hw_stats->qprdc[i] += delta_qprdc;
2914 *total_qprdc += hw_stats->qprdc[i];
2916 *total_qprc += hw_stats->qprc[i];
2917 *total_qbrc += hw_stats->qbrc[i];
2919 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
2920 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
2921 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2924 * An errata states that gprc actually counts good + missed packets:
2925 * the workaround is to set gprc to the sum of the per-queue packet receive counts
2927 hw_stats->gprc = *total_qprc;
2929 if (hw->mac.type != ixgbe_mac_82598EB) {
2930 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
2931 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
2932 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
2933 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
2934 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
2935 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
2936 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
2937 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
2939 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2940 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2941 /* 82598 only has a counter in the high register */
2942 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2943 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2944 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2946 uint64_t old_tpr = hw_stats->tpr;
2948 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2949 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
2952 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
2954 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
2955 hw_stats->gptc += delta_gptc;
2956 hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
2957 hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
2960 * Workaround: mprc hardware is incorrectly counting
2961 * broadcasts, so for now we subtract those.
2963 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2964 hw_stats->bprc += bprc;
2965 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2966 if (hw->mac.type == ixgbe_mac_82598EB)
2967 hw_stats->mprc -= bprc;
2969 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2970 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2971 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2972 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2973 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2974 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2976 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2977 hw_stats->lxontxc += lxon;
2978 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2979 hw_stats->lxofftxc += lxoff;
2980 total = lxon + lxoff;
2982 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2983 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2984 hw_stats->gptc -= total;
2985 hw_stats->mptc -= total;
2986 hw_stats->ptc64 -= total;
2987 hw_stats->gotc -= total * ETHER_MIN_LEN;
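/*
 * Note on the adjustments above: XON/XOFF pause frames are minimum-size
 * (64-byte, ETHER_MIN_LEN) frames that the hardware also counts in gptc,
 * mptc, ptc64 and gotc, so `total` pause frames (and total * 64 bytes) are
 * subtracted to leave only real transmit traffic in those counters.
 */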
2989 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2990 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2991 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2992 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2993 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
2994 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
2995 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
2996 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2997 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2998 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2999 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3000 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3001 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3002 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3003 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3004 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3005 /* Only read FCOE on 82599 */
3006 if (hw->mac.type != ixgbe_mac_82598EB) {
3007 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3008 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3009 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3010 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3011 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3014 /* Flow Director Stats registers */
3015 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3016 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3018 /* MACsec Stats registers */
3019 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3020 macsec_stats->out_pkts_encrypted +=
3021 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3022 macsec_stats->out_pkts_protected +=
3023 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3024 macsec_stats->out_octets_encrypted +=
3025 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3026 macsec_stats->out_octets_protected +=
3027 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3028 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3029 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3030 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3031 macsec_stats->in_pkts_unknownsci +=
3032 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3033 macsec_stats->in_octets_decrypted +=
3034 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3035 macsec_stats->in_octets_validated +=
3036 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3037 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3038 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3039 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3040 for (i = 0; i < 2; i++) {
3041 macsec_stats->in_pkts_ok +=
3042 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3043 macsec_stats->in_pkts_invalid +=
3044 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3045 macsec_stats->in_pkts_notvalid +=
3046 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3048 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3049 macsec_stats->in_pkts_notusingsa +=
3050 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3054 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3057 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3059 struct ixgbe_hw *hw =
3060 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3061 struct ixgbe_hw_stats *hw_stats =
3062 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3063 struct ixgbe_macsec_stats *macsec_stats =
3064 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3065 dev->data->dev_private);
3066 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3069 total_missed_rx = 0;
3074 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3075 &total_qbrc, &total_qprc, &total_qprdc);
3080 /* Fill out the rte_eth_stats statistics structure */
3081 stats->ipackets = total_qprc;
3082 stats->ibytes = total_qbrc;
3083 stats->opackets = hw_stats->gptc;
3084 stats->obytes = hw_stats->gotc;
3086 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3087 stats->q_ipackets[i] = hw_stats->qprc[i];
3088 stats->q_opackets[i] = hw_stats->qptc[i];
3089 stats->q_ibytes[i] = hw_stats->qbrc[i];
3090 stats->q_obytes[i] = hw_stats->qbtc[i];
3091 stats->q_errors[i] = hw_stats->qprdc[i];
3095 stats->imissed = total_missed_rx;
3096 stats->ierrors = hw_stats->crcerrs +
3112 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3114 struct ixgbe_hw_stats *stats =
3115 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3117 /* HW registers are cleared on read */
3118 ixgbe_dev_stats_get(dev, NULL);
3120 /* Reset software totals */
3121 memset(stats, 0, sizeof(*stats));
3124 /* This function calculates the number of xstats based on the current config */
3126 ixgbe_xstats_calc_num(void) {
3127 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3128 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3129 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3132 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3133 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3135 const unsigned cnt_stats = ixgbe_xstats_calc_num();
3136 unsigned stat, i, count;
3138 if (xstats_names != NULL) {
3141 /* Note: limit >= cnt_stats checked upstream
3142 * in rte_eth_xstats_names()
3145 /* Extended stats from ixgbe_hw_stats */
3146 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3147 snprintf(xstats_names[count].name,
3148 sizeof(xstats_names[count].name),
3150 rte_ixgbe_stats_strings[i].name);
3155 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3156 snprintf(xstats_names[count].name,
3157 sizeof(xstats_names[count].name),
3159 rte_ixgbe_macsec_strings[i].name);
3163 /* RX Priority Stats */
3164 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3165 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3166 snprintf(xstats_names[count].name,
3167 sizeof(xstats_names[count].name),
3168 "rx_priority%u_%s", i,
3169 rte_ixgbe_rxq_strings[stat].name);
3174 /* TX Priority Stats */
3175 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3176 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3177 snprintf(xstats_names[count].name,
3178 sizeof(xstats_names[count].name),
3179 "tx_priority%u_%s", i,
3180 rte_ixgbe_txq_strings[stat].name);
3188 static int ixgbe_dev_xstats_get_names_by_id(
3189 struct rte_eth_dev *dev,
3190 struct rte_eth_xstat_name *xstats_names,
3191 const uint64_t *ids,
3195 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3196 unsigned int stat, i, count;
3198 if (xstats_names != NULL) {
3201 /* Note: limit >= cnt_stats checked upstream
3202 * in rte_eth_xstats_names()
3205 /* Extended stats from ixgbe_hw_stats */
3206 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3207 snprintf(xstats_names[count].name,
3208 sizeof(xstats_names[count].name),
3210 rte_ixgbe_stats_strings[i].name);
3215 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3216 snprintf(xstats_names[count].name,
3217 sizeof(xstats_names[count].name),
3219 rte_ixgbe_macsec_strings[i].name);
3223 /* RX Priority Stats */
3224 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3225 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3226 snprintf(xstats_names[count].name,
3227 sizeof(xstats_names[count].name),
3228 "rx_priority%u_%s", i,
3229 rte_ixgbe_rxq_strings[stat].name);
3234 /* TX Priority Stats */
3235 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3236 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3237 snprintf(xstats_names[count].name,
3238 sizeof(xstats_names[count].name),
3239 "tx_priority%u_%s", i,
3240 rte_ixgbe_txq_strings[stat].name);
3249 uint16_t size = ixgbe_xstats_calc_num();
3250 struct rte_eth_xstat_name xstats_names_copy[size];
3252 ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3255 for (i = 0; i < limit; i++) {
3256 if (ids[i] >= size) {
3257 PMD_INIT_LOG(ERR, "id value isn't valid");
3260 strcpy(xstats_names[i].name,
3261 xstats_names_copy[ids[i]].name);
3266 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3267 struct rte_eth_xstat_name *xstats_names, unsigned limit)
3271 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3274 if (xstats_names != NULL)
3275 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3276 snprintf(xstats_names[i].name,
3277 sizeof(xstats_names[i].name),
3278 "%s", rte_ixgbevf_stats_strings[i].name);
3279 return IXGBEVF_NB_XSTATS;
3283 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3286 struct ixgbe_hw *hw =
3287 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3288 struct ixgbe_hw_stats *hw_stats =
3289 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3290 struct ixgbe_macsec_stats *macsec_stats =
3291 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3292 dev->data->dev_private);
3293 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3294 unsigned i, stat, count = 0;
3296 count = ixgbe_xstats_calc_num();
3301 total_missed_rx = 0;
3306 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3307 &total_qbrc, &total_qprc, &total_qprdc);
3309 /* If this is a reset, xstats is NULL and we have already cleared the
3310 * registers by reading them.
3315 /* Extended stats from ixgbe_hw_stats */
3317 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3318 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3319 rte_ixgbe_stats_strings[i].offset);
3320 xstats[count].id = count;
3325 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3326 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3327 rte_ixgbe_macsec_strings[i].offset);
3328 xstats[count].id = count;
3332 /* RX Priority Stats */
3333 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3334 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3335 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3336 rte_ixgbe_rxq_strings[stat].offset +
3337 (sizeof(uint64_t) * i));
3338 xstats[count].id = count;
3343 /* TX Priority Stats */
3344 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3345 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3346 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3347 rte_ixgbe_txq_strings[stat].offset +
3348 (sizeof(uint64_t) * i));
3349 xstats[count].id = count;
3357 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3358 uint64_t *values, unsigned int n)
3361 struct ixgbe_hw *hw =
3362 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3363 struct ixgbe_hw_stats *hw_stats =
3364 IXGBE_DEV_PRIVATE_TO_STATS(
3365 dev->data->dev_private);
3366 struct ixgbe_macsec_stats *macsec_stats =
3367 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3368 dev->data->dev_private);
3369 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3370 unsigned int i, stat, count = 0;
3372 count = ixgbe_xstats_calc_num();
3374 if (!ids && n < count)
3377 total_missed_rx = 0;
3382 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3383 &total_missed_rx, &total_qbrc, &total_qprc,
3386 /* If this is a reset, xstats is NULL and we have already cleared the
3387 * registers by reading them.
3389 if (!ids && !values)
3392 /* Extended stats from ixgbe_hw_stats */
3394 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3395 values[count] = *(uint64_t *)(((char *)hw_stats) +
3396 rte_ixgbe_stats_strings[i].offset);
3401 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3402 values[count] = *(uint64_t *)(((char *)macsec_stats) +
3403 rte_ixgbe_macsec_strings[i].offset);
3407 /* RX Priority Stats */
3408 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3409 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3411 *(uint64_t *)(((char *)hw_stats) +
3412 rte_ixgbe_rxq_strings[stat].offset +
3413 (sizeof(uint64_t) * i));
3418 /* TX Priority Stats */
3419 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3420 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3422 *(uint64_t *)(((char *)hw_stats) +
3423 rte_ixgbe_txq_strings[stat].offset +
3424 (sizeof(uint64_t) * i));
3432 uint16_t size = ixgbe_xstats_calc_num();
3433 uint64_t values_copy[size];
3435 ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3437 for (i = 0; i < n; i++) {
3438 if (ids[i] >= size) {
3439 PMD_INIT_LOG(ERR, "id value isn't valid");
3442 values[i] = values_copy[ids[i]];
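/*
 * A minimal application-side sketch of this by-id path, assuming a DPDK
 * release that provides rte_eth_xstats_get_by_id(); ids index into the
 * full xstats table exposed by ixgbe_dev_xstats_get_names():
 *
 *	uint64_t ids[2] = { 0, 5 };
 *	uint64_t vals[2];
 *	rte_eth_xstats_get_by_id(port_id, ids, vals, 2);
 */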
3448 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3450 struct ixgbe_hw_stats *stats =
3451 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3452 struct ixgbe_macsec_stats *macsec_stats =
3453 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3454 dev->data->dev_private);
3456 unsigned count = ixgbe_xstats_calc_num();
3458 /* HW registers are cleared on read */
3459 ixgbe_dev_xstats_get(dev, NULL, count);
3461 /* Reset software totals */
3462 memset(stats, 0, sizeof(*stats));
3463 memset(macsec_stats, 0, sizeof(*macsec_stats));
3467 ixgbevf_update_stats(struct rte_eth_dev *dev)
3469 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3470 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3471 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3473 /* Good Rx packets, including VF loopback */
3474 UPDATE_VF_STAT(IXGBE_VFGPRC,
3475 hw_stats->last_vfgprc, hw_stats->vfgprc);
3477 /* Good Rx octets, including VF loopback */
3478 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3479 hw_stats->last_vfgorc, hw_stats->vfgorc);
3481 /* Good Tx packets, including VF loopback */
3482 UPDATE_VF_STAT(IXGBE_VFGPTC,
3483 hw_stats->last_vfgptc, hw_stats->vfgptc);
3485 /* Good Tx octets, including VF loopback */
3486 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3487 hw_stats->last_vfgotc, hw_stats->vfgotc);
3489 /* Rx multicast packets */
3490 UPDATE_VF_STAT(IXGBE_VFMPRC,
3491 hw_stats->last_vfmprc, hw_stats->vfmprc);
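/*
 * The UPDATE_VF_STAT* macros above accumulate deltas rather than raw
 * register values: they read the counter, add (current - last) with
 * wraparound masking (32-bit, or 36-bit for the octet variants), and
 * store the new snapshot in `last`, so VF counters survive hardware
 * rollover between polls.
 */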
3495 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3498 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3499 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3502 if (n < IXGBEVF_NB_XSTATS)
3503 return IXGBEVF_NB_XSTATS;
3505 ixgbevf_update_stats(dev);
3510 /* Extended stats */
3511 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3513 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3514 rte_ixgbevf_stats_strings[i].offset);
3517 return IXGBEVF_NB_XSTATS;
3521 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3523 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3524 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3526 ixgbevf_update_stats(dev);
3531 stats->ipackets = hw_stats->vfgprc;
3532 stats->ibytes = hw_stats->vfgorc;
3533 stats->opackets = hw_stats->vfgptc;
3534 stats->obytes = hw_stats->vfgotc;
3538 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3540 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3541 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3543 /* Sync HW registers to the last stats */
3544 ixgbevf_dev_stats_get(dev, NULL);
3546 /* reset HW current stats */
3547 hw_stats->vfgprc = 0;
3548 hw_stats->vfgorc = 0;
3549 hw_stats->vfgptc = 0;
3550 hw_stats->vfgotc = 0;
3554 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3556 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3557 u16 eeprom_verh, eeprom_verl;
3561 ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3562 ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3564 etrack_id = (eeprom_verh << 16) | eeprom_verl;
3565 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3567 ret += 1; /* add the size of '\0' */
3568 if (fw_size < (u32)ret)
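/*
 * A minimal usage sketch for this handler through the generic ethdev API,
 * assuming a DPDK release that provides rte_eth_dev_fw_version_get():
 *
 *	char fw[32];
 *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *		printf("firmware etrack id: %s\n", fw);
 */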
3575 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3577 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3578 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3579 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3581 dev_info->pci_dev = pci_dev;
3582 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3583 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3584 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3586 * When DCB/VT is off, maximum number of queues changes,
3587 * except for 82598EB, which remains constant.
3589 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3590 hw->mac.type != ixgbe_mac_82598EB)
3591 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3593 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3594 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3595 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3596 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3597 dev_info->max_vfs = pci_dev->max_vfs;
3598 if (hw->mac.type == ixgbe_mac_82598EB)
3599 dev_info->max_vmdq_pools = ETH_16_POOLS;
3601 dev_info->max_vmdq_pools = ETH_64_POOLS;
3602 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3603 dev_info->rx_offload_capa =
3604 DEV_RX_OFFLOAD_VLAN_STRIP |
3605 DEV_RX_OFFLOAD_IPV4_CKSUM |
3606 DEV_RX_OFFLOAD_UDP_CKSUM |
3607 DEV_RX_OFFLOAD_TCP_CKSUM;
3610 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
3613 if ((hw->mac.type == ixgbe_mac_82599EB ||
3614 hw->mac.type == ixgbe_mac_X540) &&
3615 !RTE_ETH_DEV_SRIOV(dev).active)
3616 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
3618 if (hw->mac.type == ixgbe_mac_82599EB ||
3619 hw->mac.type == ixgbe_mac_X540)
3620 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
3622 if (hw->mac.type == ixgbe_mac_X550 ||
3623 hw->mac.type == ixgbe_mac_X550EM_x ||
3624 hw->mac.type == ixgbe_mac_X550EM_a)
3625 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
3627 dev_info->tx_offload_capa =
3628 DEV_TX_OFFLOAD_VLAN_INSERT |
3629 DEV_TX_OFFLOAD_IPV4_CKSUM |
3630 DEV_TX_OFFLOAD_UDP_CKSUM |
3631 DEV_TX_OFFLOAD_TCP_CKSUM |
3632 DEV_TX_OFFLOAD_SCTP_CKSUM |
3633 DEV_TX_OFFLOAD_TCP_TSO;
3635 if (hw->mac.type == ixgbe_mac_82599EB ||
3636 hw->mac.type == ixgbe_mac_X540)
3637 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
3639 if (hw->mac.type == ixgbe_mac_X550 ||
3640 hw->mac.type == ixgbe_mac_X550EM_x ||
3641 hw->mac.type == ixgbe_mac_X550EM_a)
3642 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
3644 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3646 .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3647 .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3648 .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3650 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3654 dev_info->default_txconf = (struct rte_eth_txconf) {
3656 .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3657 .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3658 .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3660 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3661 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3662 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3663 ETH_TXQ_FLAGS_NOOFFLOADS,
3666 dev_info->rx_desc_lim = rx_desc_lim;
3667 dev_info->tx_desc_lim = tx_desc_lim;
3669 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3670 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3671 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3673 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3674 if (hw->mac.type == ixgbe_mac_X540 ||
3675 hw->mac.type == ixgbe_mac_X540_vf ||
3676 hw->mac.type == ixgbe_mac_X550 ||
3677 hw->mac.type == ixgbe_mac_X550_vf) {
3678 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3682 static const uint32_t *
3683 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3685 static const uint32_t ptypes[] = {
3686 /* For non-vec functions,
3687 * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3688 * for vec functions,
3689 * refers to _recv_raw_pkts_vec().
3693 RTE_PTYPE_L3_IPV4_EXT,
3695 RTE_PTYPE_L3_IPV6_EXT,
3699 RTE_PTYPE_TUNNEL_IP,
3700 RTE_PTYPE_INNER_L3_IPV6,
3701 RTE_PTYPE_INNER_L3_IPV6_EXT,
3702 RTE_PTYPE_INNER_L4_TCP,
3703 RTE_PTYPE_INNER_L4_UDP,
3707 if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3708 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3709 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3710 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3713 #if defined(RTE_ARCH_X86)
3714 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3715 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3722 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3723 struct rte_eth_dev_info *dev_info)
3725 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3726 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3728 dev_info->pci_dev = pci_dev;
3729 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3730 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3731 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3732 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3733 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3734 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3735 dev_info->max_vfs = pci_dev->max_vfs;
3736 if (hw->mac.type == ixgbe_mac_82598EB)
3737 dev_info->max_vmdq_pools = ETH_16_POOLS;
3738 else
3739 dev_info->max_vmdq_pools = ETH_64_POOLS;
3740 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
3741 DEV_RX_OFFLOAD_IPV4_CKSUM |
3742 DEV_RX_OFFLOAD_UDP_CKSUM |
3743 DEV_RX_OFFLOAD_TCP_CKSUM;
3744 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
3745 DEV_TX_OFFLOAD_IPV4_CKSUM |
3746 DEV_TX_OFFLOAD_UDP_CKSUM |
3747 DEV_TX_OFFLOAD_TCP_CKSUM |
3748 DEV_TX_OFFLOAD_SCTP_CKSUM |
3749 DEV_TX_OFFLOAD_TCP_TSO;
3751 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3753 .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3754 .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3755 .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3757 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3761 dev_info->default_txconf = (struct rte_eth_txconf) {
3763 .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3764 .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3765 .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3767 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3768 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3769 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3770 ETH_TXQ_FLAGS_NOOFFLOADS,
3773 dev_info->rx_desc_lim = rx_desc_lim;
3774 dev_info->tx_desc_lim = tx_desc_lim;
3778 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3779 int *link_up, int wait_to_complete)
3782 * for a quick link status check (wait_to_complete == 0),
3783 * skip the PF link status check
3785 bool no_pflink_check = wait_to_complete == 0;
3786 struct ixgbe_mbx_info *mbx = &hw->mbx;
3787 struct ixgbe_mac_info *mac = &hw->mac;
3788 uint32_t links_reg, in_msg;
3791 /* If we were hit with a reset drop the link */
3792 if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
3793 mac->get_link_status = true;
3795 if (!mac->get_link_status)
3798 /* if link status is down no point in checking to see if pf is up */
3799 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3800 if (!(links_reg & IXGBE_LINKS_UP))
3803 /* for SFP+ modules and DA cables on 82599 it can take up to 500 usecs
3804 * before the link status is correct
3806 if (mac->type == ixgbe_mac_82599_vf) {
3809 for (i = 0; i < 5; i++) {
3811 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3813 if (!(links_reg & IXGBE_LINKS_UP))
3818 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3819 case IXGBE_LINKS_SPEED_10G_82599:
3820 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3821 if (hw->mac.type >= ixgbe_mac_X550) {
3822 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3823 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3826 case IXGBE_LINKS_SPEED_1G_82599:
3827 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3829 case IXGBE_LINKS_SPEED_100_82599:
3830 *speed = IXGBE_LINK_SPEED_100_FULL;
3831 if (hw->mac.type == ixgbe_mac_X550) {
3832 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3833 *speed = IXGBE_LINK_SPEED_5GB_FULL;
3836 case IXGBE_LINKS_SPEED_10_X550EM_A:
3837 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3838 /* This field is reserved in MACs older than X550 */
3839 if (hw->mac.type >= ixgbe_mac_X550)
3840 *speed = IXGBE_LINK_SPEED_10_FULL;
3843 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3846 if (no_pflink_check) {
3847 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
3848 mac->get_link_status = true;
3849 else
3850 mac->get_link_status = false;
3854 /* if the read failed it could just be a mailbox collision, best wait
3855 * until we are called again and don't report an error
3857 if (mbx->ops.read(hw, &in_msg, 1, 0))
3860 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
3861 /* msg is not CTS and is NACK we must have lost CTS status */
3862 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
3867 /* the PF is talking; if we timed out in the past, we reinitialize */
3868 if (!mbx->timeout) {
3873 /* if we passed all the tests above then the link is up and we no
3874 * longer need to check for link
3876 mac->get_link_status = false;
3879 *link_up = !mac->get_link_status;
3883 /* return 0 means link status changed, -1 means not changed */
3885 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
3886 int wait_to_complete, int vf)
3888 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3889 struct rte_eth_link link, old;
3890 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3891 struct ixgbe_interrupt *intr =
3892 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
3897 bool autoneg = false;
3899 link.link_status = ETH_LINK_DOWN;
3900 link.link_speed = 0;
3901 link.link_duplex = ETH_LINK_HALF_DUPLEX;
3902 memset(&old, 0, sizeof(old));
3903 rte_ixgbe_dev_atomic_read_link_status(dev, &old);
3905 hw->mac.get_link_status = true;
3907 if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
3908 ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
3909 speed = hw->phy.autoneg_advertised;
3911 ixgbe_get_link_capabilities(hw, &speed, &autoneg);
3912 ixgbe_setup_link(hw, speed, true);
3915 /* check if it needs to wait to complete, if lsc interrupt is enabled */
3916 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
3920 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
3922 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
3925 link.link_speed = ETH_SPEED_NUM_100M;
3926 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3927 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3928 if (link.link_status == old.link_status)
3934 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3935 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
3936 if (link.link_status == old.link_status)
3940 intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
3941 link.link_status = ETH_LINK_UP;
3942 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3944 switch (link_speed) {
3946 case IXGBE_LINK_SPEED_UNKNOWN:
3947 link.link_duplex = ETH_LINK_FULL_DUPLEX;
3948 link.link_speed = ETH_SPEED_NUM_100M;
3951 case IXGBE_LINK_SPEED_100_FULL:
3952 link.link_speed = ETH_SPEED_NUM_100M;
3955 case IXGBE_LINK_SPEED_1GB_FULL:
3956 link.link_speed = ETH_SPEED_NUM_1G;
3959 case IXGBE_LINK_SPEED_10GB_FULL:
3960 link.link_speed = ETH_SPEED_NUM_10G;
3963 rte_ixgbe_dev_atomic_write_link_status(dev, &link);
3965 if (link.link_status == old.link_status)
3972 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3974 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
3978 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3980 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
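/*
 * Application-side usage sketch (an assumption, not driver code): a
 * non-blocking link query maps to wait_to_complete == 0 above, which on a
 * VF also skips the PF link status check.
 */
static void
example_poll_link(uint8_t port_id)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_link_get_nowait(port_id, &link); /* does not wait for autoneg */
	if (link.link_status)
		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
}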
3984 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
3986 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3989 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3990 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3991 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3995 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
3997 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4000 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4001 fctrl &= (~IXGBE_FCTRL_UPE);
4002 if (dev->data->all_multicast == 1)
4003 fctrl |= IXGBE_FCTRL_MPE;
4004 else
4005 fctrl &= (~IXGBE_FCTRL_MPE);
4006 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4010 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4012 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4015 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4016 fctrl |= IXGBE_FCTRL_MPE;
4017 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4021 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4023 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4026 if (dev->data->promiscuous == 1)
4027 return; /* must remain in all_multicast mode */
4029 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4030 fctrl &= (~IXGBE_FCTRL_MPE);
4031 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4035 * It clears the interrupt causes and enables the interrupt.
4036 * It is called only once during NIC initialization.
4039 * Pointer to struct rte_eth_dev.
4042 * - On success, zero.
4043 * - On failure, a negative value.
4046 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
4048 struct ixgbe_interrupt *intr =
4049 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4051 ixgbe_dev_link_status_print(dev);
4052 intr->mask |= IXGBE_EICR_LSC;
4058 * It clears the interrupt causes and enables the interrupt.
4059 * It is called only once during NIC initialization.
4062 * Pointer to struct rte_eth_dev.
4065 * - On success, zero.
4066 * - On failure, a negative value.
4069 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4071 struct ixgbe_interrupt *intr =
4072 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4074 intr->mask |= IXGBE_EICR_RTX_QUEUE;
4080 * It clears the interrupt causes and enables the interrupt.
4081 * It is called only once during NIC initialization.
4084 * Pointer to struct rte_eth_dev.
4087 * - On success, zero.
4088 * - On failure, a negative value.
4091 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4093 struct ixgbe_interrupt *intr =
4094 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4096 intr->mask |= IXGBE_EICR_LINKSEC;
4102 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
4105 * Pointer to struct rte_eth_dev.
4108 * - On success, zero.
4109 * - On failure, a negative value.
4112 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4115 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4116 struct ixgbe_interrupt *intr =
4117 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4119 /* clear all cause mask */
4120 ixgbe_disable_intr(hw);
4122 /* read-on-clear nic registers here */
4123 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4124 PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4128 /* set flag for async link update */
4129 if (eicr & IXGBE_EICR_LSC)
4130 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4132 if (eicr & IXGBE_EICR_MAILBOX)
4133 intr->flags |= IXGBE_FLAG_MAILBOX;
4135 if (eicr & IXGBE_EICR_LINKSEC)
4136 intr->flags |= IXGBE_FLAG_MACSEC;
4138 if (hw->mac.type == ixgbe_mac_X550EM_x &&
4139 hw->phy.type == ixgbe_phy_x550em_ext_t &&
4140 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4141 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4147 * It gets and then prints the link status.
4150 * Pointer to struct rte_eth_dev.
4153 * - On success, zero.
4154 * - On failure, a negative value.
4157 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4159 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4160 struct rte_eth_link link;
4162 memset(&link, 0, sizeof(link));
4163 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4164 if (link.link_status) {
4165 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4166 (int)(dev->data->port_id),
4167 (unsigned)link.link_speed,
4168 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4169 "full-duplex" : "half-duplex");
4171 PMD_INIT_LOG(INFO, " Port %d: Link Down",
4172 (int)(dev->data->port_id));
4174 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4175 pci_dev->addr.domain,
4177 pci_dev->addr.devid,
4178 pci_dev->addr.function);
4182 * It executes link_update after knowing an interrupt occurred.
4185 * Pointer to struct rte_eth_dev.
4188 * - On success, zero.
4189 * - On failure, a negative value.
4192 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
4193 struct rte_intr_handle *intr_handle)
4195 struct ixgbe_interrupt *intr =
4196 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4198 struct rte_eth_link link;
4199 struct ixgbe_hw *hw =
4200 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4202 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4204 if (intr->flags & IXGBE_FLAG_MAILBOX) {
4205 ixgbe_pf_mbx_process(dev);
4206 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4209 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4210 ixgbe_handle_lasi(hw);
4211 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4214 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4215 /* get the link status before the update, to detect a transition later */
4216 memset(&link, 0, sizeof(link));
4217 rte_ixgbe_dev_atomic_read_link_status(dev, &link);
4219 ixgbe_dev_link_update(dev, 0);
4222 if (!link.link_status)
4223 /* handle it 1 sec later, waiting for it to become stable */
4224 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4225 /* the link is likely going down */
4226 else
4227 /* handle it 4 sec later, waiting for it to become stable */
4228 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4230 ixgbe_dev_link_status_print(dev);
4231 if (rte_eal_alarm_set(timeout * 1000,
4232 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4233 PMD_DRV_LOG(ERR, "Error setting alarm");
4235 /* remember original mask */
4236 intr->mask_original = intr->mask;
4237 /* only disable lsc interrupt */
4238 intr->mask &= ~IXGBE_EIMS_LSC;
4242 PMD_DRV_LOG(DEBUG, "enable intr immediately");
4243 ixgbe_enable_intr(dev);
4244 rte_intr_enable(intr_handle);
4250 * Interrupt handler registered as an alarm callback for delayed handling of
4251 * a specific interrupt, to wait for a stable NIC state. As the ixgbe
4252 * interrupt state is not stable right after the link has just gone down,
4253 * the driver waits 4 seconds before reading the stable status.
4256 * Pointer to interrupt handle.
4258 * The address of parameter (struct rte_eth_dev *) registered before.
4264 ixgbe_dev_interrupt_delayed_handler(void *param)
4266 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4267 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4268 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4269 struct ixgbe_interrupt *intr =
4270 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4271 struct ixgbe_hw *hw =
4272 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4275 ixgbe_disable_intr(hw);
4277 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4278 if (eicr & IXGBE_EICR_MAILBOX)
4279 ixgbe_pf_mbx_process(dev);
4281 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4282 ixgbe_handle_lasi(hw);
4283 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4286 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4287 ixgbe_dev_link_update(dev, 0);
4288 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4289 ixgbe_dev_link_status_print(dev);
4290 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4294 if (intr->flags & IXGBE_FLAG_MACSEC) {
4295 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4297 intr->flags &= ~IXGBE_FLAG_MACSEC;
4300 /* restore original mask */
4301 intr->mask = intr->mask_original;
4302 intr->mask_original = 0;
4304 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4305 ixgbe_enable_intr(dev);
4306 rte_intr_enable(intr_handle);
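/*
 * Sketch of the alarm pairing used above (hypothetical function names):
 * rte_eal_alarm_set() takes microseconds, so the millisecond timeouts are
 * multiplied by 1000; a still-pending alarm can be dropped with
 * rte_eal_alarm_cancel() before it fires.
 */
static void
example_alarm_cb(void *arg)
{
	RTE_SET_USED(arg);
}

static void
example_arm_and_cancel(void)
{
	/* 4000 ms, mirroring IXGBE_LINK_DOWN_CHECK_TIMEOUT */
	if (rte_eal_alarm_set(4000 * 1000, example_alarm_cb, NULL) < 0)
		return;
	rte_eal_alarm_cancel(example_alarm_cb, NULL);
}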
4310 * Interrupt handler triggered by NIC for handling
4311 * specific interrupt.
4314 * Pointer to interrupt handle.
4316 * The address of parameter (struct rte_eth_dev *) registered before.
4322 ixgbe_dev_interrupt_handler(void *param)
4324 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4326 ixgbe_dev_interrupt_get_status(dev);
4327 ixgbe_dev_interrupt_action(dev, dev->intr_handle);
4331 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4333 struct ixgbe_hw *hw;
4335 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4336 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4340 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4342 struct ixgbe_hw *hw;
4344 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4345 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4349 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4351 struct ixgbe_hw *hw;
4357 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4359 fc_conf->pause_time = hw->fc.pause_time;
4360 fc_conf->high_water = hw->fc.high_water[0];
4361 fc_conf->low_water = hw->fc.low_water[0];
4362 fc_conf->send_xon = hw->fc.send_xon;
4363 fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4366 * Return rx_pause status according to the actual setting of
4367 * the MFLCN register
4369 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4370 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4376 * Return tx_pause status according to the actual setting of
4377 * the FCCFG register
4379 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4380 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4385 if (rx_pause && tx_pause)
4386 fc_conf->mode = RTE_FC_FULL;
4387 else if (rx_pause)
4388 fc_conf->mode = RTE_FC_RX_PAUSE;
4389 else if (tx_pause)
4390 fc_conf->mode = RTE_FC_TX_PAUSE;
4391 else
4392 fc_conf->mode = RTE_FC_NONE;
4398 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4400 struct ixgbe_hw *hw;
4402 uint32_t rx_buf_size;
4403 uint32_t max_high_water;
4405 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4412 PMD_INIT_FUNC_TRACE();
4414 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4415 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4416 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4419 * Reserve at least one Ethernet frame for the watermark;
4420 * high_water/low_water are in kilobytes for ixgbe
4422 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4423 if ((fc_conf->high_water > max_high_water) ||
4424 (fc_conf->high_water < fc_conf->low_water)) {
4425 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4426 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4430 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4431 hw->fc.pause_time = fc_conf->pause_time;
4432 hw->fc.high_water[0] = fc_conf->high_water;
4433 hw->fc.low_water[0] = fc_conf->low_water;
4434 hw->fc.send_xon = fc_conf->send_xon;
4435 hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4437 err = ixgbe_fc_enable(hw);
4439 /* Not negotiated is not an error case */
4440 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4442 /* check if we want to forward MAC frames - driver doesn't have native
4443 * capability to do that, so we'll write the registers ourselves */
4445 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4447 /* set or clear MFLCN.PMCF bit depending on configuration */
4448 if (fc_conf->mac_ctrl_frame_fwd != 0)
4449 mflcn |= IXGBE_MFLCN_PMCF;
4451 mflcn &= ~IXGBE_MFLCN_PMCF;
4453 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4454 IXGBE_WRITE_FLUSH(hw);
4459 PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4464 * ixgbe_pfc_enable_generic - Enable flow control
4465 * @hw: pointer to hardware structure
4466 * @tc_num: traffic class number
4467 * Enable flow control according to the current settings.
4470 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4473 uint32_t mflcn_reg, fccfg_reg;
4475 uint32_t fcrtl, fcrth;
4479 /* Validate the water mark configuration */
4480 if (!hw->fc.pause_time) {
4481 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4485 /* Low water mark of zero causes XOFF floods */
4486 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4487 /* High/Low water can not be 0 */
4488 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4489 PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4490 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4494 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4495 PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4496 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4500 /* Negotiate the fc mode to use */
4501 ixgbe_fc_autoneg(hw);
4503 /* Disable any previous flow control settings */
4504 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4505 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4507 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4508 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4510 switch (hw->fc.current_mode) {
4513 * If more than one RX priority flow control is enabled,
4514 * then TX pause cannot be disabled
4517 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4518 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4519 if (reg & IXGBE_FCRTH_FCEN)
4523 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4525 case ixgbe_fc_rx_pause:
4527 * Rx Flow control is enabled and Tx Flow control is
4528 * disabled by software override. Since there really
4529 * isn't a way to advertise that we are capable of RX
4530 * Pause ONLY, we will advertise that we support both
4531 * symmetric and asymmetric Rx PAUSE. Later, we will
4532 * disable the adapter's ability to send PAUSE frames.
4534 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4536 * If more than one RX priority flow control is enabled,
4537 * then TX pause cannot be disabled
4540 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4541 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4542 if (reg & IXGBE_FCRTH_FCEN)
4546 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4548 case ixgbe_fc_tx_pause:
4550 * Tx Flow control is enabled, and Rx Flow control is
4551 * disabled by software override.
4553 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4556 /* Flow control (both Rx and Tx) is enabled by SW override. */
4557 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4558 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4561 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4562 ret_val = IXGBE_ERR_CONFIG;
4566 /* Set 802.3x based flow control settings. */
4567 mflcn_reg |= IXGBE_MFLCN_DPF;
4568 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4569 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4571 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4572 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4573 hw->fc.high_water[tc_num]) {
4574 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4575 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4576 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4578 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4580 * In order to prevent Tx hangs when the internal Tx
4581 * switch is enabled we must set the high water mark
4582 * to the maximum FCRTH value. This allows the Tx
4583 * switch to function even under heavy Rx workloads.
4585 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4587 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4589 /* Configure pause time (2 TCs per register) */
4590 reg = hw->fc.pause_time * 0x00010001;
4591 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4592 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4594 /* Configure flow control refresh threshold value */
4595 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4602 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4604 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4605 int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4607 if (hw->mac.type != ixgbe_mac_82598EB) {
4608 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4614 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4617 uint32_t rx_buf_size;
4618 uint32_t max_high_water;
4620 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4621 struct ixgbe_hw *hw =
4622 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4623 struct ixgbe_dcb_config *dcb_config =
4624 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4626 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4633 PMD_INIT_FUNC_TRACE();
4635 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4636 tc_num = map[pfc_conf->priority];
4637 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4638 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4640 * Reserve at least one Ethernet frame for the watermark;
4641 * high_water/low_water are in kilobytes for ixgbe
4643 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4644 if ((pfc_conf->fc.high_water > max_high_water) ||
4645 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4646 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4647 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4651 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4652 hw->fc.pause_time = pfc_conf->fc.pause_time;
4653 hw->fc.send_xon = pfc_conf->fc.send_xon;
4654 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
4655 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4657 err = ixgbe_dcb_pfc_enable(dev, tc_num);
4659 /* Not negotiated is not an error case */
4660 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4663 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4668 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4669 struct rte_eth_rss_reta_entry64 *reta_conf,
4672 uint16_t i, sp_reta_size;
4675 uint16_t idx, shift;
4676 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4679 PMD_INIT_FUNC_TRACE();
4681 if (!ixgbe_rss_update_sp(hw->mac.type)) {
4682 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4687 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4688 if (reta_size != sp_reta_size) {
4689 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4690 "(%d) doesn't match the number hardware can supported "
4691 "(%d)", reta_size, sp_reta_size);
4695 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4696 idx = i / RTE_RETA_GROUP_SIZE;
4697 shift = i % RTE_RETA_GROUP_SIZE;
4698 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4702 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4703 if (mask == IXGBE_4_BIT_MASK)
4706 r = IXGBE_READ_REG(hw, reta_reg);
4707 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4708 if (mask & (0x1 << j))
4709 reta |= reta_conf[idx].reta[shift + j] <<
4712 reta |= r & (IXGBE_8_BIT_MASK <<
4715 IXGBE_WRITE_REG(hw, reta_reg, reta);
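/*
 * Application-side usage sketch (an assumption, not driver code): spreading
 * a 128-entry RETA (the 82599/X540 size) round-robin over nb_queues through
 * the generic ethdev API, which lands in the update routine above.
 */
static int
example_reta_round_robin(uint8_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 conf[128 / RTE_RETA_GROUP_SIZE];
	uint16_t i;

	memset(conf, 0, sizeof(conf));
	for (i = 0; i < 128; i++) {
		conf[i / RTE_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_RETA_GROUP_SIZE);
		conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, conf, 128);
}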
4722 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4723 struct rte_eth_rss_reta_entry64 *reta_conf,
4726 uint16_t i, sp_reta_size;
4729 uint16_t idx, shift;
4730 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4733 PMD_INIT_FUNC_TRACE();
4734 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4735 if (reta_size != sp_reta_size) {
4736 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4737 "(%d) doesn't match the number hardware can supported "
4738 "(%d)", reta_size, sp_reta_size);
4742 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4743 idx = i / RTE_RETA_GROUP_SIZE;
4744 shift = i % RTE_RETA_GROUP_SIZE;
4745 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4750 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4751 reta = IXGBE_READ_REG(hw, reta_reg);
4752 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4753 if (mask & (0x1 << j))
4754 reta_conf[idx].reta[shift + j] =
4755 ((reta >> (CHAR_BIT * j)) &
4764 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4765 uint32_t index, uint32_t pool)
4767 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4768 uint32_t enable_addr = 1;
4770 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
4775 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4777 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4779 ixgbe_clear_rar(hw, index);
4783 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4785 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4787 ixgbe_remove_rar(dev, 0);
4789 ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
4793 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4795 if (strcmp(dev->device->driver->name, drv->driver.name))
4802 is_ixgbe_supported(struct rte_eth_dev *dev)
4804 return is_device_supported(dev, &rte_ixgbe_pmd);
4808 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4812 struct ixgbe_hw *hw;
4813 struct rte_eth_dev_info dev_info;
4814 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4815 struct rte_eth_dev_data *dev_data = dev->data;
4817 ixgbe_dev_info_get(dev, &dev_info);
4819 /* check that mtu is within the allowed range */
4820 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
4823 /* If device is started, refuse mtu that requires the support of
4824 * scattered packets when this feature has not been enabled before.
4826 if (dev_data->dev_started && !dev_data->scattered_rx &&
4827 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
4828 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
4829 PMD_INIT_LOG(ERR, "Stop port first.");
4833 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4834 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4836 /* switch to jumbo mode if needed */
4837 if (frame_size > ETHER_MAX_LEN) {
4838 dev->data->dev_conf.rxmode.jumbo_frame = 1;
4839 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4840 } else {
4841 dev->data->dev_conf.rxmode.jumbo_frame = 0;
4842 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4843 }
4844 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4846 /* update max frame size */
4847 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4849 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4850 maxfrs &= 0x0000FFFF;
4851 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
4852 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
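/*
 * Worked example (illustrative) of the MTU path above: mtu = 9000 gives
 * frame_size = 9000 + 18 (Ethernet header + CRC) = 9018 > ETHER_MAX_LEN
 * (1518), so jumbo mode is enabled and 9018 is written into the upper 16
 * bits of MAXFRS while the lower 16 bits are preserved.
 */
static inline uint32_t
example_maxfrs_value(uint32_t old_maxfrs, uint16_t frame_size)
{
	return (old_maxfrs & 0x0000FFFF) | ((uint32_t)frame_size << 16);
}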
4858 * Virtual Function operations
4861 ixgbevf_intr_disable(struct ixgbe_hw *hw)
4863 PMD_INIT_FUNC_TRACE();
4865 /* Clear interrupt mask to stop interrupts from being generated */
4866 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
4868 IXGBE_WRITE_FLUSH(hw);
4872 ixgbevf_intr_enable(struct ixgbe_hw *hw)
4874 PMD_INIT_FUNC_TRACE();
4876 /* VF enable interrupt autoclean */
4877 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
4878 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
4879 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
4881 IXGBE_WRITE_FLUSH(hw);
4885 ixgbevf_dev_configure(struct rte_eth_dev *dev)
4887 struct rte_eth_conf *conf = &dev->data->dev_conf;
4888 struct ixgbe_adapter *adapter =
4889 (struct ixgbe_adapter *)dev->data->dev_private;
4891 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
4892 dev->data->port_id);
4895 * The VF has no ability to enable/disable HW CRC stripping.
4896 * Keep the behavior consistent with the host PF.
4898 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
4899 if (!conf->rxmode.hw_strip_crc) {
4900 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
4901 conf->rxmode.hw_strip_crc = 1;
4904 if (conf->rxmode.hw_strip_crc) {
4905 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
4906 conf->rxmode.hw_strip_crc = 0;
4911 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
4912 * allocation or vector Rx preconditions, we will reset it.
4914 adapter->rx_bulk_alloc_allowed = true;
4915 adapter->rx_vec_allowed = true;
4921 ixgbevf_dev_start(struct rte_eth_dev *dev)
4923 struct ixgbe_hw *hw =
4924 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4925 uint32_t intr_vector = 0;
4926 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4927 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4931 PMD_INIT_FUNC_TRACE();
4933 hw->mac.ops.reset_hw(hw);
4934 hw->mac.get_link_status = true;
4936 /* negotiate mailbox API version to use with the PF. */
4937 ixgbevf_negotiate_api(hw);
4939 ixgbevf_dev_tx_init(dev);
4941 /* This can fail when allocating mbufs for descriptor rings */
4942 err = ixgbevf_dev_rx_init(dev);
4944 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
4945 ixgbe_dev_clear_queues(dev);
4950 ixgbevf_set_vfta_all(dev, 1);
4953 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
4954 ETH_VLAN_EXTEND_MASK;
4955 ixgbevf_vlan_offload_set(dev, mask);
4957 ixgbevf_dev_rxtx_start(dev);
4959 /* check and configure queue intr-vector mapping */
4960 if (dev->data->dev_conf.intr_conf.rxq != 0) {
4961 intr_vector = dev->data->nb_rx_queues;
4962 if (rte_intr_efd_enable(intr_handle, intr_vector))
4966 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
4967 intr_handle->intr_vec =
4968 rte_zmalloc("intr_vec",
4969 dev->data->nb_rx_queues * sizeof(int), 0);
4970 if (intr_handle->intr_vec == NULL) {
4971 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
4972 " intr_vec", dev->data->nb_rx_queues);
4976 ixgbevf_configure_msix(dev);
4978 rte_intr_enable(intr_handle);
4980 /* Re-enable interrupt for VF */
4981 ixgbevf_intr_enable(hw);
4987 ixgbevf_dev_stop(struct rte_eth_dev *dev)
4989 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4990 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4991 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4993 PMD_INIT_FUNC_TRACE();
4995 ixgbevf_intr_disable(hw);
4997 hw->adapter_stopped = 1;
4998 ixgbe_stop_adapter(hw);
5001 * Clear what we set, but keep shadow_vfta so it can be
5002 * restored after the device restarts
5004 ixgbevf_set_vfta_all(dev, 0);
5006 /* Clear stored conf */
5007 dev->data->scattered_rx = 0;
5009 ixgbe_dev_clear_queues(dev);
5011 /* Clean datapath event and queue/vec mapping */
5012 rte_intr_efd_disable(intr_handle);
5013 if (intr_handle->intr_vec != NULL) {
5014 rte_free(intr_handle->intr_vec);
5015 intr_handle->intr_vec = NULL;
5020 ixgbevf_dev_close(struct rte_eth_dev *dev)
5022 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5024 PMD_INIT_FUNC_TRACE();
5028 ixgbevf_dev_stop(dev);
5030 ixgbe_dev_free_queues(dev);
5033 * Remove the VF MAC address to ensure
5034 * that the VF traffic goes to the PF
5035 * after stop, close and detach of the VF
5037 ixgbevf_remove_mac_addr(dev, 0);
5040 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5042 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5043 struct ixgbe_vfta *shadow_vfta =
5044 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5045 int i = 0, j = 0, vfta = 0, mask = 1;
5047 for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5048 vfta = shadow_vfta->vfta[i];
5051 for (j = 0; j < 32; j++) {
5053 ixgbe_set_vfta(hw, (i<<5)+j, 0,
5063 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5065 struct ixgbe_hw *hw =
5066 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5067 struct ixgbe_vfta *shadow_vfta =
5068 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5069 uint32_t vid_idx = 0;
5070 uint32_t vid_bit = 0;
5073 PMD_INIT_FUNC_TRACE();
5075 /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
5076 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5078 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5081 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5082 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5084 /* Save what we set and restore it after device reset */
5086 shadow_vfta->vfta[vid_idx] |= vid_bit;
5088 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
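/*
 * Worked example (illustrative) of the shadow VFTA indexing above:
 * vlan_id = 100 -> vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of word 3.
 */
static inline void
example_vfta_locate(uint16_t vlan_id, uint32_t *idx, uint32_t *bit)
{
	*idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	*bit = (uint32_t)(1 << (vlan_id & 0x1F));
}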
5094 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5096 struct ixgbe_hw *hw =
5097 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5100 PMD_INIT_FUNC_TRACE();
5102 if (queue >= hw->mac.max_rx_queues)
5105 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5106 if (on)
5107 ctrl |= IXGBE_RXDCTL_VME;
5108 else
5109 ctrl &= ~IXGBE_RXDCTL_VME;
5110 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5112 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5116 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5118 struct ixgbe_hw *hw =
5119 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5123 /* The VF only supports the HW VLAN strip offload; others are not supported */
5124 if (mask & ETH_VLAN_STRIP_MASK) {
5125 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
5127 for (i = 0; i < hw->mac.max_rx_queues; i++)
5128 ixgbevf_vlan_strip_queue_set(dev, i, on);
5133 ixgbe_vt_check(struct ixgbe_hw *hw)
5137 /* if Virtualization Technology is enabled */
5138 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5139 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5140 PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5148 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
5150 uint32_t vector = 0;
5152 switch (hw->mac.mc_filter_type) {
5153 case 0: /* use bits [47:36] of the address */
5154 vector = ((uc_addr->addr_bytes[4] >> 4) |
5155 (((uint16_t)uc_addr->addr_bytes[5]) << 4));
5157 case 1: /* use bits [46:35] of the address */
5158 vector = ((uc_addr->addr_bytes[4] >> 3) |
5159 (((uint16_t)uc_addr->addr_bytes[5]) << 5));
5161 case 2: /* use bits [45:34] of the address */
5162 vector = ((uc_addr->addr_bytes[4] >> 2) |
5163 (((uint16_t)uc_addr->addr_bytes[5]) << 6));
5165 case 3: /* use bits [43:32] of the address */
5166 vector = ((uc_addr->addr_bytes[4]) |
5167 (((uint16_t)uc_addr->addr_bytes[5]) << 8));
5169 default: /* Invalid mc_filter_type */
5173 /* the vector can only be 12 bits wide or the table boundary will be exceeded */
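/*
 * Worked example (illustrative) for mc_filter_type 0 above, which hashes on
 * bits [47:36] of the MAC address: with addr_bytes[4] = 0x12 and
 * addr_bytes[5] = 0x34, vector = (0x12 >> 4) | (0x34 << 4) = 0x341.
 */
static inline uint32_t
example_uta_vector_type0(uint8_t byte4, uint8_t byte5)
{
	return (uint32_t)((byte4 >> 4) | ((uint16_t)byte5 << 4)) & 0xFFF;
}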
5179 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5187 const uint32_t ixgbe_uta_idx_mask = 0x7F;
5188 const uint32_t ixgbe_uta_bit_shift = 5;
5189 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5190 const uint32_t bit1 = 0x1;
5192 struct ixgbe_hw *hw =
5193 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5194 struct ixgbe_uta_info *uta_info =
5195 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5197 /* The UTA table only exists on 82599 hardware and newer */
5198 if (hw->mac.type < ixgbe_mac_82599EB)
5201 vector = ixgbe_uta_vector(hw, mac_addr);
5202 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5203 uta_shift = vector & ixgbe_uta_bit_mask;
5205 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5209 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5211 uta_info->uta_in_use++;
5212 reg_val |= (bit1 << uta_shift);
5213 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5215 uta_info->uta_in_use--;
5216 reg_val &= ~(bit1 << uta_shift);
5217 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5220 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5222 if (uta_info->uta_in_use > 0)
5223 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5224 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5226 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5232 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5235 struct ixgbe_hw *hw =
5236 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5237 struct ixgbe_uta_info *uta_info =
5238 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5240 /* The UTA table only exists on 82599 hardware and newer */
5241 if (hw->mac.type < ixgbe_mac_82599EB)
5245 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5246 uta_info->uta_shadow[i] = ~0;
5247 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5250 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5251 uta_info->uta_shadow[i] = 0;
5252 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5260 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5262 uint32_t new_val = orig_val;
5264 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5265 new_val |= IXGBE_VMOLR_AUPE;
5266 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5267 new_val |= IXGBE_VMOLR_ROMPE;
5268 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5269 new_val |= IXGBE_VMOLR_ROPE;
5270 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5271 new_val |= IXGBE_VMOLR_BAM;
5272 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5273 new_val |= IXGBE_VMOLR_MPE;
5278 #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
5279 #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
5280 #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
5281 #define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */
5282 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5283 ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5284 ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5287 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5288 struct rte_eth_mirror_conf *mirror_conf,
5289 uint8_t rule_id, uint8_t on)
5291 uint32_t mr_ctl, vlvf;
5292 uint32_t mp_lsb = 0;
5293 uint32_t mv_msb = 0;
5294 uint32_t mv_lsb = 0;
5295 uint32_t mp_msb = 0;
5298 uint64_t vlan_mask = 0;
5300 const uint8_t pool_mask_offset = 32;
5301 const uint8_t vlan_mask_offset = 32;
5302 const uint8_t dst_pool_offset = 8;
5303 const uint8_t rule_mr_offset = 4;
5304 const uint8_t mirror_rule_mask = 0x0F;
5306 struct ixgbe_mirror_info *mr_info =
5307 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5308 struct ixgbe_hw *hw =
5309 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5310 uint8_t mirror_type = 0;
5312 if (ixgbe_vt_check(hw) < 0)
5315 if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5318 if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5319 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5320 mirror_conf->rule_type);
5324 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5325 mirror_type |= IXGBE_MRCTL_VLME;
5326 /* Check if the vlan id is valid and find the corresponding VLAN ID
5329 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5330 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5331 /* search the pool vlan filter for the related vlan id
5334 reg_index = ixgbe_find_vlvf_slot(
5336 mirror_conf->vlan.vlan_id[i],
5340 vlvf = IXGBE_READ_REG(hw,
5341 IXGBE_VLVF(reg_index));
5342 if ((vlvf & IXGBE_VLVF_VIEN) &&
5343 ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5344 mirror_conf->vlan.vlan_id[i]))
5345 vlan_mask |= (1ULL << reg_index);
5352 mv_lsb = vlan_mask & 0xFFFFFFFF;
5353 mv_msb = vlan_mask >> vlan_mask_offset;
5355 mr_info->mr_conf[rule_id].vlan.vlan_mask =
5356 mirror_conf->vlan.vlan_mask;
5357 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5358 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5359 mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5360 mirror_conf->vlan.vlan_id[i];
5365 mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5366 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5367 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5372 * if pool mirroring is enabled, write the related pool mask register;
5373 * if it is disabled, clear the PFMRVM register
5375 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5376 mirror_type |= IXGBE_MRCTL_VPME;
5378 mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5379 mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5380 mr_info->mr_conf[rule_id].pool_mask =
5381 mirror_conf->pool_mask;
5386 mr_info->mr_conf[rule_id].pool_mask = 0;
5389 if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5390 mirror_type |= IXGBE_MRCTL_UPME;
5391 if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5392 mirror_type |= IXGBE_MRCTL_DPME;
5394 /* read mirror control register and recalculate it */
5395 mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5398 mr_ctl |= mirror_type;
5399 mr_ctl &= mirror_rule_mask;
5400 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5402 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5405 mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5406 mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5408 /* write mirror control register */
5409 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5411 /* write pool mirror control register */
5412 if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
5413 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5414 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5417 /* write VLAN mirror control register */
5418 if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
5419 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5420 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5428 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5431 uint32_t lsb_val = 0;
5432 uint32_t msb_val = 0;
5433 const uint8_t rule_mr_offset = 4;
5435 struct ixgbe_hw *hw =
5436 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5437 struct ixgbe_mirror_info *mr_info =
5438 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5440 if (ixgbe_vt_check(hw) < 0)
5443 memset(&mr_info->mr_conf[rule_id], 0,
5444 sizeof(struct rte_eth_mirror_conf));
5446 /* clear PFVMCTL register */
5447 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5449 /* clear pool mask register */
5450 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5451 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5453 /* clear vlan mask register */
5454 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5455 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
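/*
 * Application-side usage sketch (an assumption, not driver code): mirroring
 * the traffic of pools 0 and 1 to pool 3 through the generic ethdev mirror
 * API of this DPDK era, which ends up in ixgbe_mirror_rule_set() above.
 */
static int
example_mirror_pools(uint8_t port_id)
{
	struct rte_eth_mirror_conf mc;

	memset(&mc, 0, sizeof(mc));
	mc.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
	mc.dst_pool = 3;
	mc.pool_mask = 0x3; /* mirror pools 0 and 1 */
	return rte_eth_mirror_rule_set(port_id, &mc, 0 /* rule_id */, 1 /* on */);
}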
5461 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5463 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5464 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5466 struct ixgbe_hw *hw =
5467 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5469 mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5470 mask |= (1 << IXGBE_MISC_VEC_ID);
5471 RTE_SET_USED(queue_id);
5472 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5474 rte_intr_enable(intr_handle);
5480 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5483 struct ixgbe_hw *hw =
5484 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5486 mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
5487 mask &= ~(1 << IXGBE_MISC_VEC_ID);
5488 RTE_SET_USED(queue_id);
5489 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
5495 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5497 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5498 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5500 struct ixgbe_hw *hw =
5501 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5502 struct ixgbe_interrupt *intr =
5503 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5505 if (queue_id < 16) {
5506 ixgbe_disable_intr(hw);
5507 intr->mask |= (1 << queue_id);
5508 ixgbe_enable_intr(dev);
5509 } else if (queue_id < 32) {
5510 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5511 mask &= (1 << queue_id);
5512 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5513 } else if (queue_id < 64) {
5514 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5515 mask &= (1 << (queue_id - 32));
5516 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5518 rte_intr_enable(intr_handle);
5524 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5527 struct ixgbe_hw *hw =
5528 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5529 struct ixgbe_interrupt *intr =
5530 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5532 if (queue_id < 16) {
5533 ixgbe_disable_intr(hw);
5534 intr->mask &= ~(1 << queue_id);
5535 ixgbe_enable_intr(dev);
5536 } else if (queue_id < 32) {
5537 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5538 mask &= ~(1 << queue_id);
5539 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5540 } else if (queue_id < 64) {
5541 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5542 mask &= ~(1 << (queue_id - 32));
5543 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5550 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5551 uint8_t queue, uint8_t msix_vector)
5555 if (direction == -1) {
5557 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5558 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5561 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5563 /* rx or tx cause */
5564 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5565 idx = ((16 * (queue & 1)) + (8 * direction));
5566 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5567 tmp &= ~(0xFF << idx);
5568 tmp |= (msix_vector << idx);
5569 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5574 * set the IVAR registers, mapping interrupt causes to vectors
5576 * pointer to ixgbe_hw struct
5578 * 0 for Rx, 1 for Tx, -1 for other causes
5580 * queue to map the corresponding interrupt to
5582 * the vector to map to the corresponding queue
5585 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5586 uint8_t queue, uint8_t msix_vector)
5590 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5591 if (hw->mac.type == ixgbe_mac_82598EB) {
5592 if (direction == -1)
5594 idx = (((direction * 64) + queue) >> 2) & 0x1F;
5595 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
5596 tmp &= ~(0xFF << (8 * (queue & 0x3)));
5597 tmp |= (msix_vector << (8 * (queue & 0x3)));
5598 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
5599 } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
5600 (hw->mac.type == ixgbe_mac_X540)) {
5601 if (direction == -1) {
5603 idx = ((queue & 1) * 8);
5604 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5605 tmp &= ~(0xFF << idx);
5606 tmp |= (msix_vector << idx);
5607 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
5609 /* rx or tx causes */
5610 idx = ((16 * (queue & 1)) + (8 * direction));
5611 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
5612 tmp &= ~(0xFF << idx);
5613 tmp |= (msix_vector << idx);
5614 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
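/*
 * Worked example (illustrative) of the 82599/X540 IVAR indexing above: each
 * IVAR register holds four one-byte entries covering the Rx and Tx causes of
 * two queues. For queue = 3, direction = 0 (Rx): the register is
 * IVAR(3 >> 1) = IVAR(1) and the bit offset is 16 * (3 & 1) + 8 * 0 = 16.
 */
static inline uint32_t
example_ivar_bit_offset(uint8_t queue, uint8_t direction)
{
	return (16 * (queue & 1)) + (8 * direction);
}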
5620 ixgbevf_configure_msix(struct rte_eth_dev *dev)
5622 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5623 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5624 struct ixgbe_hw *hw =
5625 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5627 uint32_t vector_idx = IXGBE_MISC_VEC_ID;
5629 /* Configure VF other cause ivar */
5630 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
5632 /* don't configure the MSI-X register if no mapping has been done
5633 * between the intr vector and the event fd.
5635 if (!rte_intr_dp_is_en(intr_handle))
5638 /* Configure all RX queues of VF */
5639 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
5640 /* Force all queues to use vector 0,
5641 * as IXGBE_VF_MAXMSIVECTOR = 1
5643 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
5644 intr_handle->intr_vec[q_idx] = vector_idx;
5649 * Sets up the hardware to properly generate MSI-X interrupts
5651 * board private structure
5654 ixgbe_configure_msix(struct rte_eth_dev *dev)
5656 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5657 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5658 struct ixgbe_hw *hw =
5659 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5660 uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
5661 uint32_t vec = IXGBE_MISC_VEC_ID;
5665 /* don't configure the MSI-X register if no mapping has been done
5666 * between the intr vector and the event fd
5668 if (!rte_intr_dp_is_en(intr_handle))
5671 if (rte_intr_allow_others(intr_handle))
5672 vec = base = IXGBE_RX_VEC_START;
5674 /* setup GPIE for MSI-x mode */
5675 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5676 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5677 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
5678 /* auto clearing and auto setting corresponding bits in EIMS
5679 * when MSI-X interrupt is triggered
5681 if (hw->mac.type == ixgbe_mac_82598EB) {
5682 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5684 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5685 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5687 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5689 /* Populate the IVAR table and set the ITR values to the
5690 * corresponding register.
5692 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
5694 /* by default, 1:1 mapping */
5695 ixgbe_set_ivar_map(hw, 0, queue_id, vec);
5696 intr_handle->intr_vec[queue_id] = vec;
5697 if (vec < base + intr_handle->nb_efd - 1)
5701 switch (hw->mac.type) {
5702 case ixgbe_mac_82598EB:
5703 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
5706 case ixgbe_mac_82599EB:
5707 case ixgbe_mac_X540:
5708 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
5713 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
5714 IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
5716 /* set up to autoclear timer, and the vectors */
5717 mask = IXGBE_EIMS_ENABLE_MASK;
5718 mask &= ~(IXGBE_EIMS_OTHER |
5719 IXGBE_EIMS_MAILBOX |
5722 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5725 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
5726 uint16_t queue_idx, uint16_t tx_rate)
5728 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5729 uint32_t rf_dec, rf_int;
5731 uint16_t link_speed = dev->data->dev_link.link_speed;
5733 if (queue_idx >= hw->mac.max_tx_queues)
5737 /* Calculate the rate factor values to set */
5738 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
5739 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
5740 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
5742 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
5743 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
5744 IXGBE_RTTBCNRC_RF_INT_MASK_M);
5745 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
5751 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
5752 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
5753 * set as 0x4.
5755 if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
5756 (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
5757 IXGBE_MAX_JUMBO_FRAME_SIZE))
5758 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5759 IXGBE_MMW_SIZE_JUMBO_FRAME);
5761 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
5762 IXGBE_MMW_SIZE_DEFAULT);
5764 /* Set RTTBCNRC of queue X */
5765 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
5766 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
5767 IXGBE_WRITE_FLUSH(hw);
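/*
 * Worked example (illustrative) of the fixed-point rate factor above,
 * assuming IXGBE_RTTBCNRC_RF_INT_SHIFT is 14: for a 10000 Mbps link capped
 * at 3000 Mbps, rf_int = 10000 / 3000 = 3 and
 * rf_dec = ((10000 % 3000) << 14) / 3000 = (1000 << 14) / 3000 = 5461,
 * i.e. a rate factor of roughly 3.333.
 */
static inline void
example_rate_factor(uint32_t link_speed, uint32_t tx_rate,
		    uint32_t *rf_int, uint32_t *rf_dec)
{
	*rf_int = link_speed / tx_rate;
	*rf_dec = ((link_speed % tx_rate) << 14) / tx_rate;
}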
5773 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5774 __attribute__((unused)) uint32_t index,
5775 __attribute__((unused)) uint32_t pool)
5777 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5781 * On an 82599 VF, adding the same MAC addr again is not an idempotent
5782 * operation. Trap this case to avoid exhausting the [very limited]
5783 * set of PF resources used to store VF MAC addresses.
5785 if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5787 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5789 PMD_DRV_LOG(ERR, "Unable to add MAC address "
5790 "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
5791 mac_addr->addr_bytes[0],
5792 mac_addr->addr_bytes[1],
5793 mac_addr->addr_bytes[2],
5794 mac_addr->addr_bytes[3],
5795 mac_addr->addr_bytes[4],
5796 mac_addr->addr_bytes[5],
5802 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
5804 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5805 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
5806 struct ether_addr *mac_addr;
5811 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
5812 * not support the deletion of a given MAC address.
5813 * Instead, it imposes to delete all MAC addresses, then to add again
5814 * all MAC addresses with the exception of the one to be deleted.
5816 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
5819 * Add again all MAC addresses, with the exception of the deleted one
5820 * and of the permanent MAC address.
5822 for (i = 0, mac_addr = dev->data->mac_addrs;
5823 i < hw->mac.num_rar_entries; i++, mac_addr++) {
5824 /* Skip the deleted MAC address */
5827 /* Skip NULL MAC addresses */
5828 if (is_zero_ether_addr(mac_addr))
5830 /* Skip the permanent MAC address */
5831 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
5833 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
5836 "Adding again MAC address "
5837 "%02x:%02x:%02x:%02x:%02x:%02x failed "
5839 mac_addr->addr_bytes[0],
5840 mac_addr->addr_bytes[1],
5841 mac_addr->addr_bytes[2],
5842 mac_addr->addr_bytes[3],
5843 mac_addr->addr_bytes[4],
5844 mac_addr->addr_bytes[5],
ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);

ixgbe_syn_filter_set(struct rte_eth_dev *dev,
		struct rte_eth_syn_filter *filter,
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)

	syn_info = filter_info->syn_info;

		if (syn_info & IXGBE_SYN_FILTER_ENABLE)
		synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
			IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);

		if (filter->hig_pri)
			synqf |= IXGBE_SYN_FILTER_SYNQFP;
		else
			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
		synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
		if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
		synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);

	filter_info->syn_info = synqf;
	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
	IXGBE_WRITE_FLUSH(hw);

ixgbe_syn_filter_get(struct rte_eth_dev *dev,
		struct rte_eth_syn_filter *filter)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);

	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
		filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
		filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);

ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = ixgbe_syn_filter_get(dev,
				(struct rte_eth_syn_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);

static inline enum ixgbe_5tuple_protocol
convert_protocol_type(uint8_t protocol_value)
	if (protocol_value == IPPROTO_TCP)
		return IXGBE_FILTER_PROTOCOL_TCP;
	else if (protocol_value == IPPROTO_UDP)
		return IXGBE_FILTER_PROTOCOL_UDP;
	else if (protocol_value == IPPROTO_SCTP)
		return IXGBE_FILTER_PROTOCOL_SCTP;

	return IXGBE_FILTER_PROTOCOL_NONE;

/* Inject a 5-tuple filter into the HW */
ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
		struct ixgbe_5tuple_filter *filter)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ftqf, sdpqf;
	uint32_t l34timir = 0;
	uint8_t mask = 0xff;

	sdpqf = (uint32_t)(filter->filter_info.dst_port <<
			IXGBE_SDPQF_DSTPORT_SHIFT);
	sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);

	ftqf = (uint32_t)(filter->filter_info.proto &
			IXGBE_FTQF_PROTOCOL_MASK);
	ftqf |= (uint32_t)((filter->filter_info.priority &
			IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
		mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
	if (filter->filter_info.dst_ip_mask == 0)
		mask &= IXGBE_FTQF_DEST_ADDR_MASK;
	if (filter->filter_info.src_port_mask == 0)
		mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
	if (filter->filter_info.dst_port_mask == 0)
		mask &= IXGBE_FTQF_DEST_PORT_MASK;
	if (filter->filter_info.proto_mask == 0)
		mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
	ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
	ftqf |= IXGBE_FTQF_POOL_MASK_EN;
	ftqf |= IXGBE_FTQF_QUEUE_ENABLE;

	IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
	IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
	IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);

	l34timir |= IXGBE_L34T_IMIR_RESERVE;
	l34timir |= (uint32_t)(filter->queue <<
			IXGBE_L34T_IMIR_QUEUE_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
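/*
 * Illustrative sketch (hypothetical helper, not driver code): filling an
 * ixgbe_5tuple_filter_info that matches only TCP destination port 80,
 * using the convention shown above where a *_mask field of 0 means
 * "compare this field" and 1 means "ignore it". The byte order of the
 * port value is whatever the caller's convention requires (an assumption
 * here).
 */
static inline void
example_5tuple_dst_port_80(struct ixgbe_5tuple_filter_info *info)
{
	memset(info, 0, sizeof(*info));
	info->src_ip_mask = 1;		/* ignore source IP */
	info->dst_ip_mask = 1;		/* ignore destination IP */
	info->src_port_mask = 1;	/* ignore source port */
	info->dst_port_mask = 0;	/* compare destination port */
	info->dst_port = 80;
	info->proto_mask = 0;		/* compare protocol */
	info->proto = IXGBE_FILTER_PROTOCOL_TCP;
	info->priority = 1;		/* lowest valid priority */
}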
/*
 * add a 5-tuple filter
 *
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter to be added (its queue and match fields
 *         are filled in by the caller; the index is allocated here).
 *
 * - On success, zero.
 * - On failure, a negative value.
 */
ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
		struct ixgbe_5tuple_filter *filter)
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	/*
	 * Look for an unused 5-tuple filter index
	 * and insert the filter into the list.
	 */
	for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
		idx = i / (sizeof(uint32_t) * NBBY);
		shift = i % (sizeof(uint32_t) * NBBY);
		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
			filter_info->fivetuple_mask[idx] |= 1 << shift;
			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,

	if (i >= IXGBE_MAX_FTQF_FILTERS) {
		PMD_DRV_LOG(ERR, "5tuple filters are full.");

	ixgbe_inject_5tuple_filter(dev, filter);
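/*
 * Worked example of the bitmap arithmetic above: NBBY is 8 (bits per
 * byte), so each uint32_t word of fivetuple_mask tracks 32 filter slots.
 * Slot 70, for instance, lives in word 70 / 32 = 2 at bit 70 % 32 = 6.
 * A hypothetical helper making the same computation explicit:
 */
static inline int
example_5tuple_slot_is_free(const uint32_t *fivetuple_mask, uint16_t i)
{
	uint16_t idx = i / (sizeof(uint32_t) * NBBY);	/* word index */
	uint16_t shift = i % (sizeof(uint32_t) * NBBY);	/* bit index  */

	return !(fivetuple_mask[idx] & (1u << shift));
}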
/*
 * remove a 5-tuple filter
 *
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter to be removed.
 */
ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
		struct ixgbe_5tuple_filter *filter)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint16_t index = filter->index;

	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
		~(1 << (index % (sizeof(uint32_t) * NBBY)));
	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);

	IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);

ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
	struct ixgbe_hw *hw;
	uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))

	/*
	 * Refuse an MTU that requires scattered-packet support when that
	 * feature has not been enabled beforehand.
	 */
	if (!rx_conf->enable_scatter &&
	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))

	/*
	 * When supported by the underlying PF driver, use the
	 * IXGBE_VF_SET_MTU request of version 2.0 of the mailbox API.
	 * For now, use the IXGBE_VF_SET_LPE request of version 1.0
	 * of the mailbox API.
	 * This IXGBE_VF_SET_LPE request does not work with ixgbe PF drivers
	 * prior to 3.11.33, the version that introduced the following change:
	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
	 */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
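/*
 * Worked example of the frame-size check above, assuming the standard
 * values ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4 and IXGBE_VLAN_TAG_SIZE = 4:
 * an MTU of 9000 gives max_frame = 9000 + 14 + 4 = 9018 bytes, and with
 * two stacked VLAN tags the Rx buffers must hold 9018 + 2 * 4 = 9026
 * bytes (on top of RTE_PKTMBUF_HEADROOM) unless Rx scatter is enabled.
 */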
static inline struct ixgbe_5tuple_filter *
ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
			struct ixgbe_5tuple_filter_info *key)
	struct ixgbe_5tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
		    sizeof(struct ixgbe_5tuple_filter_info)) == 0) {

/* Translate a struct rte_eth_ntuple_filter into a struct ixgbe_5tuple_filter_info */
ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
			struct ixgbe_5tuple_filter_info *filter_info)
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)

	switch (filter->dst_ip_mask) {
		filter_info->dst_ip_mask = 0;
		filter_info->dst_ip = filter->dst_ip;
		filter_info->dst_ip_mask = 1;
		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");

	switch (filter->src_ip_mask) {
		filter_info->src_ip_mask = 0;
		filter_info->src_ip = filter->src_ip;
		filter_info->src_ip_mask = 1;
		PMD_DRV_LOG(ERR, "invalid src_ip mask.");

	switch (filter->dst_port_mask) {
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
		filter_info->dst_port_mask = 1;
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");

	switch (filter->src_port_mask) {
		filter_info->src_port_mask = 0;
		filter_info->src_port = filter->src_port;
		filter_info->src_port_mask = 1;
		PMD_DRV_LOG(ERR, "invalid src_port mask.");

	switch (filter->proto_mask) {
		filter_info->proto_mask = 0;
		filter_info->proto =
			convert_protocol_type(filter->proto);
		filter_info->proto_mask = 1;
		PMD_DRV_LOG(ERR, "invalid protocol mask.");

	filter_info->priority = (uint8_t)filter->priority;
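/*
 * Illustrative sketch (hypothetical, not driver code): an
 * rte_eth_ntuple_filter that the conversion above accepts. The case
 * labels are elided above; this assumes the usual DPDK convention of
 * UINT32_MAX / UINT16_MAX / UINT8_MAX meaning "compare the whole field"
 * and 0 meaning "ignore it".
 */
static inline void
example_ntuple_tcp_dst(struct rte_eth_ntuple_filter *f,
		uint32_t dst_ip, uint16_t dst_port, uint16_t queue)
{
	memset(f, 0, sizeof(*f));
	f->flags = RTE_5TUPLE_FLAGS;
	f->dst_ip = dst_ip;
	f->dst_ip_mask = UINT32_MAX;	/* compare destination IP */
	f->dst_port = dst_port;
	f->dst_port_mask = UINT16_MAX;	/* compare destination port */
	f->proto = IPPROTO_TCP;
	f->proto_mask = UINT8_MAX;	/* compare protocol */
	f->priority = IXGBE_5TUPLE_MIN_PRI;
	f->queue = queue;
}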
/*
 * add or delete an ntuple filter
 *
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 * add: if true, add the filter; if false, remove it
 *
 * - On success, zero.
 * - On failure, a negative value.
 */
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter,
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter_info filter_5tuple;
	struct ixgbe_5tuple_filter *filter;

	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "only 5tuple is supported.");

	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);

	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
	if (filter != NULL && add) {
		PMD_DRV_LOG(ERR, "filter exists.");
	if (filter == NULL && !add) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");

	filter = rte_zmalloc("ixgbe_5tuple_filter",
			sizeof(struct ixgbe_5tuple_filter), 0);
	(void)rte_memcpy(&filter->filter_info,
			sizeof(struct ixgbe_5tuple_filter_info));
	filter->queue = ntuple_filter->queue;
	ret = ixgbe_add_5tuple_filter(dev, filter);

	ixgbe_remove_5tuple_filter(dev, filter);

/*
 * get an ntuple filter
 *
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 *
 * - On success, zero.
 * - On failure, a negative value.
 */
ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter_info filter_5tuple;
	struct ixgbe_5tuple_filter *filter;

	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "only 5tuple is supported.");

	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);

	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
	ntuple_filter->queue = filter->queue;

/*
 * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken
 * @arg: a pointer to the structure corresponding to the filter_op
 *
 * - On success, zero.
 * - On failure, a negative value.
 */
ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_add_del_ntuple_filter(dev,
				(struct rte_eth_ntuple_filter *)arg,
				TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_add_del_ntuple_filter(dev,
				(struct rte_eth_ntuple_filter *)arg,
				FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = ixgbe_get_ntuple_filter(dev,
				(struct rte_eth_ntuple_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_ethertype_filter ethertype_filter;

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
			    " ethertype filter.", filter->ether_type);

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		PMD_DRV_LOG(ERR, "drop option is unsupported.");

	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret >= 0 && add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
			    filter->ether_type);
	if (ret < 0 && !add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);

	etqf = IXGBE_ETQF_FILTER_EN;
	etqf |= (uint32_t)filter->ether_type;
	etqs |= (uint32_t)((filter->queue <<
			IXGBE_ETQS_RX_QUEUE_SHIFT) &
			IXGBE_ETQS_RX_QUEUE);
	etqs |= IXGBE_ETQS_QUEUE_EN;

	ethertype_filter.ethertype = filter->ether_type;
	ethertype_filter.etqf = etqf;
	ethertype_filter.etqs = etqs;
	ethertype_filter.conf = FALSE;
	ret = ixgbe_ethertype_filter_insert(filter_info,
		PMD_DRV_LOG(ERR, "ethertype filters are full.");
	ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);

	IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
	IXGBE_WRITE_FLUSH(hw);

ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf, etqs;

	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);

	etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
	if (etqf & IXGBE_ETQF_FILTER_EN) {
		etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
		filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
		filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
				IXGBE_ETQS_RX_QUEUE_SHIFT;

/*
 * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken
 * @arg: a pointer to the structure corresponding to the filter_op
 */
ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_add_del_ethertype_filter(dev,
				(struct rte_eth_ethertype_filter *)arg,
				TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_add_del_ethertype_filter(dev,
				(struct rte_eth_ethertype_filter *)arg,
				FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = ixgbe_get_ethertype_filter(dev,
				(struct rte_eth_ethertype_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);

ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type filter_type,
		enum rte_filter_op filter_op,
	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_SYN:
		ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &ixgbe_flow_ops;
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",

ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
		u8 **mc_addr_ptr, u32 *vmdq)
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));

ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct ether_addr *mc_addr_set,
		uint32_t nb_mc_addr)
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mc_addr_list = (u8 *)mc_addr_set;
	return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 ixgbe_dev_addr_list_itr, TRUE);

ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t systime_cycles;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* SYSTIML stores ns and SYSTIMH stores seconds. */
		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
				* NSEC_PER_SEC;
		break;
	default:
		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
				<< 32;

	return systime_cycles;
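/*
 * Sketch of the two register layouts handled above: on the X550 family
 * the high register holds seconds and the low register nanoseconds,
 * while older MACs expose one 64-bit free-running cycle counter split
 * across the register pair. Hypothetical illustration only:
 */
static inline uint64_t
example_compose_systime(uint32_t hi, uint32_t lo, int hi_is_seconds)
{
	if (hi_is_seconds)	/* X550 family */
		return (uint64_t)hi * 1000000000ULL + lo;
	return ((uint64_t)hi << 32) | lo;	/* 82599/X540: raw cycles */
}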
ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rx_tstamp_cycles;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
		rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
				* NSEC_PER_SEC;
		break;
	default:
		/* RXSTMPH:RXSTMPL form one 64-bit cycle counter. */
		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
		rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
				<< 32;

	return rx_tstamp_cycles;

ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t tx_tstamp_cycles;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
		tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
				* NSEC_PER_SEC;
		break;
	default:
		/* TXSTMPH:TXSTMPL form one 64-bit cycle counter. */
		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
		tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
				<< 32;

	return tx_tstamp_cycles;

ixgbe_start_timecounters(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	struct rte_eth_link link;
	uint32_t incval = 0;

	/* Get current link speed. */
	memset(&link, 0, sizeof(link));
	ixgbe_dev_link_update(dev, 1);
	rte_ixgbe_dev_atomic_read_link_status(dev, &link);

	switch (link.link_speed) {
	case ETH_SPEED_NUM_100M:
		incval = IXGBE_INCVAL_100;
		shift = IXGBE_INCVAL_SHIFT_100;
		break;
	case ETH_SPEED_NUM_1G:
		incval = IXGBE_INCVAL_1GB;
		shift = IXGBE_INCVAL_SHIFT_1GB;
		break;
	case ETH_SPEED_NUM_10G:
		incval = IXGBE_INCVAL_10GB;
		shift = IXGBE_INCVAL_SHIFT_10GB;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Independent of link speed. */
		/* Cycles read will be interpreted as ns. */
		break;
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
		break;
	case ixgbe_mac_82599EB:
		incval >>= IXGBE_INCVAL_SHIFT_82599;
		shift -= IXGBE_INCVAL_SHIFT_82599;
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
				(1 << IXGBE_INCPER_SHIFT_82599) | incval);
		break;
	default:
		/* Not supported. */

	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
	adapter->systime_tc.cc_shift = shift;
	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
	adapter->rx_tstamp_tc.cc_shift = shift;
	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
	adapter->tx_tstamp_tc.cc_shift = shift;
	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
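/*
 * Hedged sketch of what the timecounter fields above configure: the
 * hardware adds 'incval' to SYSTIM on every clock tick so that SYSTIM
 * effectively counts nanoseconds scaled by 2^shift; a raw cycle delta is
 * then converted back to nanoseconds by a right shift. This is only a
 * minimal model (an assumption here); the authoritative conversion,
 * including fractional-nanosecond carry, lives in rte_timecounter_update().
 */
static inline uint64_t
example_cycles_to_ns(uint64_t cycles, uint32_t shift)
{
	return cycles >> shift;
}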
ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;

	adapter->systime_tc.nsec += delta;
	adapter->rx_tstamp_tc.nsec += delta;
	adapter->tx_tstamp_tc.nsec += delta;

ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;

	ns = rte_timespec_to_ns(ts);
	/* Set the timecounters to a new value. */
	adapter->systime_tc.nsec = ns;
	adapter->rx_tstamp_tc.nsec = ns;
	adapter->tx_tstamp_tc.nsec = ns;

ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
	uint64_t ns, systime_cycles;
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;

	systime_cycles = ixgbe_read_systime_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
	*ts = rte_ns_to_timespec(ns);

ixgbe_timesync_enable(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Stop the timesync system time. */
	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
	/* Reset the timesync system time value. */
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);

	/* Enable system time for platforms where it isn't on by default. */
	tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
	tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);

	ixgbe_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
			IXGBE_ETQF_FILTER_EN |

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	IXGBE_WRITE_FLUSH(hw);

ixgbe_timesync_disable(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);

	/* Stop incrementing the System Time registers. */
	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);

ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp,
		uint32_t flags __rte_unused)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	uint32_t tsync_rxctl;
	uint64_t rx_tstamp_cycles;

	tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)

	rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	uint32_t tsync_txctl;
	uint64_t tx_tstamp_cycles;

	tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)

	tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);
ixgbe_get_reg_length(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
			ixgbe_regs_mac_82598EB : ixgbe_regs_others;

	while ((reg_group = reg_set[g_ind++]))
		count += ixgbe_regs_group_count(reg_group);

ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
	const struct reg_info *reg_group;

	while ((reg_group = ixgbevf_regs[g_ind++]))
		count += ixgbe_regs_group_count(reg_group);

ixgbe_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
			ixgbe_regs_mac_82598EB : ixgbe_regs_others;

	regs->length = ixgbe_get_reg_length(dev);
	regs->width = sizeof(uint32_t);

	/* Support only a full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
		while ((reg_group = reg_set[g_ind++]))
			count += ixgbe_read_regs_group(dev, &data[count],

ixgbevf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	const struct reg_info *reg_group;

	regs->length = ixgbevf_get_reg_length(dev);
	regs->width = sizeof(uint32_t);

	/* Support only a full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
		while ((reg_group = ixgbevf_regs[g_ind++]))
			count += ixgbe_read_regs_group(dev, &data[count],

ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* The returned length is in bytes; the EEPROM is addressed in 16-bit words. */
	return hw->eeprom.word_size * 2;

ixgbe_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *in_eeprom)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	uint16_t *data = in_eeprom->data;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first > hw->eeprom.word_size) ||
	    ((first + length) > hw->eeprom.word_size))

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->ops.read_buffer(hw, first, length, data);
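/*
 * Worked example of the byte/word conversion above: since the EEPROM is
 * addressed in 16-bit words, a request for offset 6 and length 8 (both
 * in bytes) becomes first = 6 >> 1 = 3 and length = 8 >> 1 = 4 (words),
 * while ixgbe_get_eeprom_length() reports word_size * 2 bytes.
 */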
ixgbe_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *in_eeprom)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	uint16_t *data = in_eeprom->data;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first > hw->eeprom.word_size) ||
	    ((first + length) > hw->eeprom.word_size))

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->ops.write_buffer(hw, first, length, data);

ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
	switch (mac_type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return ETH_RSS_RETA_SIZE_512;
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		return ETH_RSS_RETA_SIZE_64;
	default:
		return ETH_RSS_RETA_SIZE_128;

ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
	switch (mac_type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (reta_idx < ETH_RSS_RETA_SIZE_128)
			return IXGBE_RETA(reta_idx >> 2);
		else
			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		return IXGBE_VFRETA(reta_idx >> 2);
	default:
		return IXGBE_RETA(reta_idx >> 2);
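/*
 * Illustrative sketch (hypothetical helper): each 32-bit RETA register
 * packs four 8-bit entries, which is why ixgbe_reta_reg_get() shifts the
 * index right by 2 above; the byte within the register is reta_idx % 4.
 * The register-offset return type is assumed to be uint32_t here.
 */
static inline uint8_t
example_reta_entry_read(struct ixgbe_hw *hw, uint16_t reta_idx)
{
	uint32_t reg = ixgbe_reta_reg_get(hw->mac.type, reta_idx);
	uint32_t val = IXGBE_READ_REG(hw, reg);

	/* Four entries per register; select byte reta_idx % 4. */
	return (uint8_t)(val >> (8 * (reta_idx & 3)));
}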
ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
	switch (mac_type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		return IXGBE_VFMRQC;

ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
	switch (mac_type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		return IXGBE_VFRSSRK(i);
	default:
		return IXGBE_RSSRK(i);

ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
	switch (mac_type) {
	case ixgbe_mac_82599_vf:
	case ixgbe_mac_X540_vf:

ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
		struct rte_eth_dcb_info *dcb_info)
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
	struct ixgbe_dcb_tc_config *tc;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;

	if (dcb_config->vt_mode) { /* vt is enabled */
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
		for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
			for (j = 0; j < dcb_info->nb_tcs; j++) {
				dcb_info->tc_queue.tc_rxq[i][j].base =
					i * dcb_info->nb_tcs + j;
				dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
				dcb_info->tc_queue.tc_txq[i][j].base =
					i * dcb_info->nb_tcs + j;
				dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
	} else { /* vt is disabled */
		struct rte_eth_dcb_rx_conf *rx_conf =
			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
		if (dcb_info->nb_tcs == ETH_4_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 64;
			dcb_info->tc_queue.tc_txq[0][2].base = 96;
			dcb_info->tc_queue.tc_txq[0][3].base = 112;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 32;
			dcb_info->tc_queue.tc_txq[0][2].base = 64;
			dcb_info->tc_queue.tc_txq[0][3].base = 80;
			dcb_info->tc_queue.tc_txq[0][4].base = 96;
			dcb_info->tc_queue.tc_txq[0][5].base = 104;
			dcb_info->tc_queue.tc_txq[0][6].base = 112;
			dcb_info->tc_queue.tc_txq[0][7].base = 120;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
	for (i = 0; i < dcb_info->nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
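/*
 * Worked example of the fixed Tx layouts programmed above; the bases and
 * counts sum to the device's 128 queues in both modes:
 *   8 TCs: TC0 0-31 (32), TC1 32-63 (32), TC2 64-79 (16), TC3 80-95 (16),
 *          TC4 96-103 (8), TC5 104-111 (8), TC6 112-119 (8), TC7 120-127 (8)
 *   4 TCs: TC0 0-63 (64), TC1 64-95 (32), TC2 96-111 (16), TC3 112-127 (16)
 * Rx is simpler: 16 queues per TC, starting at i * 16 (or i * 32 for 4 TCs).
 */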
/* Update E-tag ether type */
ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
			uint16_t ether_type)
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
	etag_etype |= ether_type;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

/* Configure the l2 tunnel ether type */
ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
			struct rte_eth_l2_tunnel_conf *l2_tunnel)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	if (l2_tunnel == NULL)

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
		ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

/* Enable E-tag tunnel */
ixgbe_e_tag_enable(struct ixgbe_hw *hw)
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype |= IXGBE_ETAG_ETYPE_VALID;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

/* Enable l2 tunnel */
ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
			enum rte_eth_tunnel_type l2_tunnel_type)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_en = TRUE;
		ret = ixgbe_e_tag_enable(hw);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

/* Disable E-tag tunnel */
ixgbe_e_tag_disable(struct ixgbe_hw *hw)
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

/* Disable l2 tunnel */
ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
			enum rte_eth_tunnel_type l2_tunnel_type)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_en = FALSE;
		ret = ixgbe_e_tag_disable(hw);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
		struct rte_eth_l2_tunnel_conf *l2_tunnel)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

	rar_entries = ixgbe_get_num_rx_addrs(hw);

	for (i = 1; i < rar_entries; i++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
		rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i));
		if ((rar_high & IXGBE_RAH_AV) &&
		    (rar_high & IXGBE_RAH_ADTYPE) &&
		    ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
		     l2_tunnel->tunnel_id)) {
			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);

			ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);

ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
		struct rte_eth_l2_tunnel_conf *l2_tunnel)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

	/* One entry per tunnel. Remove any existing entry first. */
	ixgbe_e_tag_filter_del(dev, l2_tunnel);

	rar_entries = ixgbe_get_num_rx_addrs(hw);

	for (i = 1; i < rar_entries; i++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
		if (rar_high & IXGBE_RAH_AV) {
			ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
			rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
			rar_low = l2_tunnel->tunnel_id;

			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);

	PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
		     " Please remove a rule before adding a new one.");
static inline struct ixgbe_l2_tn_filter *
ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
			struct ixgbe_l2_tn_key *key)
	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);

	return l2_tn_info->hash_map[ret];

ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
			struct ixgbe_l2_tn_filter *l2_tn_filter)
	ret = rte_hash_add_key(l2_tn_info->hash_handle,
			       &l2_tn_filter->key);
		PMD_DRV_LOG(ERR,
			    "Failed to insert L2 tunnel filter"
			    " into hash table, %d!",

	l2_tn_info->hash_map[ret] = l2_tn_filter;

	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);

ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
			struct ixgbe_l2_tn_key *key)
	struct ixgbe_l2_tn_filter *l2_tn_filter;

	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
		PMD_DRV_LOG(ERR,
			    "No such L2 tunnel filter to delete, %d!",

	l2_tn_filter = l2_tn_info->hash_map[ret];
	l2_tn_info->hash_map[ret] = NULL;

	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
	rte_free(l2_tn_filter);

/* Add l2 tunnel filter */
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			struct rte_eth_l2_tunnel_conf *l2_tunnel,
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_key key;
	struct ixgbe_l2_tn_filter *node;

	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
	key.tn_id = l2_tunnel->tunnel_id;

	node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
		PMD_DRV_LOG(ERR,
			    "The L2 tunnel filter already exists!");

	node = rte_zmalloc("ixgbe_l2_tn",
			   sizeof(struct ixgbe_l2_tn_filter),
	(void)rte_memcpy(&node->key,
			 sizeof(struct ixgbe_l2_tn_key));
	node->pool = l2_tunnel->pool;
	ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

	if ((!restore) && (ret < 0))
		(void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);

/* Delete l2 tunnel filter */
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			struct rte_eth_l2_tunnel_conf *l2_tunnel)
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_key key;

	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
	key.tn_id = l2_tunnel->tunnel_id;
	ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

/*
 * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken
 * @arg: a pointer to the structure corresponding to the filter_op
 */
ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_dev_l2_tunnel_filter_add
			(dev,
			 (struct rte_eth_l2_tunnel_conf *)arg,
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_dev_l2_tunnel_filter_del
			(dev,
			 (struct rte_eth_l2_tunnel_conf *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

	ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
	if (en)
		ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);

/* Enable l2 tunnel forwarding */
ixgbe_dev_l2_tunnel_forwarding_enable
	(struct rte_eth_dev *dev,
	 enum rte_eth_tunnel_type l2_tunnel_type)
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_fwd_en = TRUE;
		ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

/* Disable l2 tunnel forwarding */
ixgbe_dev_l2_tunnel_forwarding_disable
	(struct rte_eth_dev *dev,
	 enum rte_eth_tunnel_type l2_tunnel_type)
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_fwd_en = FALSE;
		ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
		struct rte_eth_l2_tunnel_conf *l2_tunnel,
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint32_t vmtir, vmvir;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
			"VF id %u should be less than %u",

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

		vmtir = l2_tunnel->tunnel_id;

	IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);

	vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
	vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
	if (en)
		vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);

/* Enable l2 tunnel tag insertion */
ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
			struct rte_eth_l2_tunnel_conf *l2_tunnel)
	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

/* Disable l2 tunnel tag insertion */
ixgbe_dev_l2_tunnel_insertion_disable
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel)
	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

	qde = IXGBE_READ_REG(hw, IXGBE_QDE);
	if (en)
		qde |= IXGBE_QDE_STRIP_TAG;
	else
		qde &= ~IXGBE_QDE_STRIP_TAG;
	qde &= ~IXGBE_QDE_READ;
	qde |= IXGBE_QDE_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);

/* Enable l2 tunnel tag stripping */
ixgbe_dev_l2_tunnel_stripping_enable
	(struct rte_eth_dev *dev,
	 enum rte_eth_tunnel_type l2_tunnel_type)
	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

/* Disable l2 tunnel tag stripping */
ixgbe_dev_l2_tunnel_stripping_disable
	(struct rte_eth_dev *dev,
	 enum rte_eth_tunnel_type l2_tunnel_type)
	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

/* Enable/disable l2 tunnel offload functions */
ixgbe_dev_l2_tunnel_offload_set
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
	if (l2_tunnel == NULL)

	if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_insertion_enable(
				dev,
				l2_tunnel);
		else
			ret = ixgbe_dev_l2_tunnel_insertion_disable(
				dev,
				l2_tunnel);
	}

	if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_stripping_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_stripping_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_forwarding_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_forwarding_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
	IXGBE_WRITE_FLUSH(hw);
/*
 * There is only one register for the VxLAN UDP port, so multiple ports
 * cannot be added; adding a port simply updates the register.
 */
ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
		PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");

	return ixgbe_update_vxlan_port(hw, port);

/*
 * The VxLAN port cannot truly be deleted: the register must always hold
 * some value, so deletion resets it to its original value, 0.
 */
ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
	cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);

	if (cur_port != port) {
		PMD_DRV_LOG(ERR, "Port %u does not exist.", port);

	return ixgbe_update_vxlan_port(hw, 0);
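/*
 * Illustrative sketch (hypothetical helper): since a single VXLANCTRL
 * register backs the whole feature, "replacing" the offloaded VxLAN port
 * means resetting the register to 0 and then programming the new value,
 * mirroring the add/del pair above. The int return type of the helpers
 * is an assumption here.
 */
static inline int
example_replace_vxlan_port(struct ixgbe_hw *hw, uint16_t new_port)
{
	int ret = ixgbe_update_vxlan_port(hw, 0);	/* forget current port */

	if (ret < 0)
		return ret;
	return ixgbe_add_vxlan_port(hw, new_port);	/* rejects port 0 */
}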
/* Add a UDP tunneling port */
ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

	if (udp_tunnel == NULL)

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
		break;

	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");

/* Remove a UDP tunneling port */
ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {

	if (udp_tunnel == NULL)

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);

ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);

static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (ixgbe_read_mbx(hw, &in_msg, 1, 0))

	/* PF reset VF event */
	if (in_msg == IXGBE_PF_CONTROL_MSG)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,

ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	ixgbevf_intr_disable(hw);

	/* read-on-clear nic registers here */
	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* only one misc vector supported - mailbox */
	eicr &= IXGBE_VTEICR_MASK;
	if (eicr == IXGBE_MISC_VEC_ID)
		intr->flags |= IXGBE_FLAG_MAILBOX;

ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (intr->flags & IXGBE_FLAG_MAILBOX) {
		ixgbevf_mbx_process(dev);
		intr->flags &= ~IXGBE_FLAG_MAILBOX;

	ixgbevf_intr_enable(hw);

ixgbevf_dev_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ixgbevf_dev_interrupt_get_status(dev);
	ixgbevf_dev_interrupt_action(dev);
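/*
 * Illustrative sketch (hypothetical, application-side): reacting to the
 * RTE_ETH_EVENT_INTR_RESET event that ixgbevf_mbx_process() raises above
 * when the PF resets the VF. The callback is registered with
 * rte_eth_dev_callback_register(); the exact prototype (port id width,
 * ret_param argument) varies between DPDK releases, so treat the
 * signature below as an assumption.
 */
static int
example_vf_reset_cb(uint8_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event == RTE_ETH_EVENT_INTR_RESET)
		PMD_DRV_LOG(INFO, "Port %u: PF requested a VF reset",
			    port_id);
	return 0;
}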
/*
 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
 * @hw: pointer to hardware structure
 *
 * Stops the transmit data path and waits for the HW to internally empty
 * the Tx security block
 */
int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
#define IXGBE_MAX_SECTX_POLL 40

	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
		/* Use interrupt-safe sleep just in case */

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECTX_POLL)
		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
			    "path fully disabled. Continuing with init.");

	return IXGBE_SUCCESS;

/*
 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
 * @hw: pointer to hardware structure
 *
 * Enables the transmit data path.
 */
int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
/* restore n-tuple filters */
ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter *node;

	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
		ixgbe_inject_5tuple_filter(dev, node);

/* restore ethernet type filters */
ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
					filter_info->ethertype_filters[i].etqf);
			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
					filter_info->ethertype_filters[i].etqs);
			IXGBE_WRITE_FLUSH(hw);

/* restore the SYN filter */
ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	synqf = filter_info->syn_info;

	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
		IXGBE_WRITE_FLUSH(hw);

/* restore L2 tunnel filters */
ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_filter *node;
	struct rte_eth_l2_tunnel_conf l2_tn_conf;

	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
		l2_tn_conf.tunnel_id = node->key.tn_id;
		l2_tn_conf.pool = node->pool;
		(void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);

ixgbe_filter_restore(struct rte_eth_dev *dev)
	ixgbe_ntuple_filter_restore(dev);
	ixgbe_ethertype_filter_restore(dev);
	ixgbe_syn_filter_restore(dev);
	ixgbe_fdir_filter_restore(dev);
	ixgbe_l2_tn_filter_restore(dev);

ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (l2_tn_info->e_tag_en)
		(void)ixgbe_e_tag_enable(hw);

	if (l2_tn_info->e_tag_fwd_en)
		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);

	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);

/* remove all the n-tuple filters */
ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		ixgbe_remove_5tuple_filter(dev, p_5tuple);

/* remove all the ether type filters */
ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i) &&
		    !filter_info->ethertype_filters[i].conf) {
			(void)ixgbe_ethertype_filter_remove(filter_info,
			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
			IXGBE_WRITE_FLUSH(hw);

/* remove the SYN filter */
ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
		filter_info->syn_info = 0;

		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
		IXGBE_WRITE_FLUSH(hw);

/* remove all the L2 tunnel filters */
ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_conf;

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
		l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
		l2_tn_conf.pool = l2_tn_filter->pool;
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);

RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");