1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
11 #include <rte_interrupts.h>
13 #include <rte_debug.h>
15 #include <rte_ether.h>
16 #include <rte_ethdev_driver.h>
17 #include <rte_malloc.h>
19 #include "ixgbe_logs.h"
20 #include "base/ixgbe_api.h"
21 #include "base/ixgbe_common.h"
22 #include "ixgbe_ethdev.h"
24 /* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
25 #define FDIRCTRL_PBALLOC_MASK 0x03
27 /* For calculating memory required for FDIR filters */
28 #define PBALLOC_SIZE_SHIFT 15
30 /* Number of bits used to mask bucket hash for different pballoc sizes */
31 #define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */
32 #define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */
33 #define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */
34 #define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */
35 #define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */
36 #define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */
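/*
 * Illustrative sketch, not part of the original file: how the 2-bit PBALLOC
 * field of FDIRCTRL selects the flow-director packet-buffer size (the same
 * arithmetic ixgbe_fdir_configure() uses when shrinking RXPBSIZE(0)), and
 * therefore which of the bucket-hash masks above applies. The helper name is
 * hypothetical.
 */
static inline uint32_t
example_fdir_pbsize_bytes(uint32_t fdirctrl)
{
	/* PBALLOC = 1/2/3 -> 64KB/128KB/256KB of packet-buffer memory */
	return 1u << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK));
}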
37 #define IXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /* default flexbytes offset in bytes */
38 #define IXGBE_FDIR_MAX_FLEX_LEN 2 /* len in bytes of flexbytes */
39 #define IXGBE_MAX_FLX_SOURCE_OFF 62
40 #define IXGBE_FDIRCTRL_FLEX_MASK (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
41 #define IXGBE_FDIRCMD_CMD_INTERVAL_US 10
43 #define IXGBE_FDIR_FLOW_TYPES ( \
44 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
45 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
46 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
47 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
48 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
49 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
50 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
51 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
53 #define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
54 uint8_t ipv6_addr[16]; \
56 rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
58 for (i = 0; i < sizeof(ipv6_addr); i++) { \
59 if (ipv6_addr[i] == UINT8_MAX) \
61 else if (ipv6_addr[i] != 0) { \
62 PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
68 #define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
69 uint8_t ipv6_addr[16]; \
71 for (i = 0; i < sizeof(ipv6_addr); i++) { \
72 if ((ipv6m) & (1 << i)) \
73 ipv6_addr[i] = UINT8_MAX; \
77 rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
80 #define DEFAULT_VXLAN_PORT 4789
81 #define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
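/*
 * Illustrative sketch, not part of the original file: round-tripping an IPv6
 * address mask through the two helpers above. IPV6_ADDR_TO_MASK collapses a
 * 16-byte mask (each byte 0x00 or 0xFF) into a 16-bit bitmap with one bit per
 * byte, and IPV6_MASK_TO_ADDR expands it back. The function name is
 * hypothetical; it returns int because the complete IPV6_ADDR_TO_MASK body
 * (partially elided above) bails out of the enclosing function on a
 * malformed mask.
 */
static inline int
example_ipv6_mask_roundtrip(const uint8_t addr_mask[16], uint8_t restored[16])
{
	uint16_t ipv6m = 0;

	IPV6_ADDR_TO_MASK(addr_mask, ipv6m);	/* all-0xFF mask -> 0xFFFF */
	IPV6_MASK_TO_ADDR(ipv6m, restored);	/* expands back to 16 bytes */
	return ipv6m;
}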
83 static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
84 static int fdir_set_input_mask(struct rte_eth_dev *dev,
85 const struct rte_eth_fdir_masks *input_mask);
86 static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
87 static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
88 static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
89 const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
90 static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
91 static int ixgbe_fdir_filter_to_atr_input(
92 const struct rte_eth_fdir_filter *fdir_filter,
93 union ixgbe_atr_input *input,
94 enum rte_fdir_mode mode);
95 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
97 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
98 enum rte_fdir_pballoc_type pballoc);
99 static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
100 enum rte_fdir_pballoc_type pballoc);
101 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
102 union ixgbe_atr_input *input, uint8_t queue,
103 uint32_t fdircmd, uint32_t fdirhash,
104 enum rte_fdir_mode mode);
105 static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
106 union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
108 static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
109 const struct rte_eth_fdir_filter *fdir_filter,
112 static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
113 static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
114 struct rte_eth_fdir_info *fdir_info);
115 static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
116 struct rte_eth_fdir_stats *fdir_stats);
119 * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
120 * It adds extra configuration of fdirctrl that is common for all filter types.
122 * Initialize Flow Director control registers
123 * @hw: pointer to hardware structure
124 * @fdirctrl: value to write to flow director control register
127 fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
131 PMD_INIT_FUNC_TRACE();
133 /* Prime the keys for hashing */
134 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
135 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
138 * Continue setup of fdirctrl register bits:
139 * Set the maximum length per hash bucket to 0xA filters
140 * Send interrupt when 64 filters are left
142 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
143 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
146 * Poll init-done after we write the register. Estimated times:
147 * 10G: PBALLOC = 11b, timing is 60us
148 * 1G: PBALLOC = 11b, timing is 600us
149 * 100M: PBALLOC = 11b, timing is 6ms
151 * Multiply these timings by 4 if under full Rx load
153 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
154 * 1 msec per poll time. If we're at line rate and drop to 100M, then
155 * this might not finish in our poll time, but we can live with that
158 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
159 IXGBE_WRITE_FLUSH(hw);
160 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
161 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
162 IXGBE_FDIRCTRL_INIT_DONE)
167 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
168 PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
175 * Set appropriate bits in fdirctrl for: variable reporting levels, moving
176 * flexbytes matching field, and drop queue (only for perfect matching mode).
179 configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
183 switch (conf->pballoc) {
184 case RTE_FDIR_PBALLOC_64K:
185 /* 8k - 1 signature filters */
186 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
188 case RTE_FDIR_PBALLOC_128K:
189 /* 16k - 1 signature filters */
190 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
192 case RTE_FDIR_PBALLOC_256K:
193 /* 32k - 1 signature filters */
194 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
198 PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
202 /* status flags: write hash & swindex in the rx descriptor */
203 switch (conf->status) {
204 case RTE_FDIR_NO_REPORT_STATUS:
205 /* do nothing, default mode */
207 case RTE_FDIR_REPORT_STATUS:
208 /* report status when the packet matches a fdir rule */
209 *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
211 case RTE_FDIR_REPORT_STATUS_ALWAYS:
212 /* always report status */
213 *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
217 PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
221 *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
222 IXGBE_FDIRCTRL_FLEX_SHIFT;
224 if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
225 conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
226 *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
227 *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
228 if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
229 *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
230 << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
231 else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
232 *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
233 << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
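/*
 * Illustrative sketch, not part of the original file: composing fdirctrl the
 * way configure_fdir_flags() does for a perfect-match setup with a 64KB
 * packet buffer, hash/sw-index reporting and a drop queue. The helper name
 * and the drop-queue number are hypothetical.
 */
static inline uint32_t
example_perfect_mode_fdirctrl(void)
{
	uint32_t fdirctrl = 0;

	fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;		/* 64KB packet buffer */
	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;	/* write hash to Rx desc */
	fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
			IXGBE_FDIRCTRL_FLEX_SHIFT;	/* flexbytes at byte 12 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
	fdirctrl |= 127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT;	/* hypothetical drop queue */
	return fdirctrl;
}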
240 * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
242 * @hi_dword: Bits 31:16 mask to be bit swapped.
243 * @lo_dword: Bits 15:0 mask to be bit swapped.
245 * Flow director uses several registers to store 2 x 16 bit masks with the
246 * bits reversed, such as FDIRTCPM and FDIRUDPM. The LS bit of the
247 * mask affects the MS bit/byte of the target. This function reverses the
248 * bits in these masks.
250 static inline uint32_t
251 reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
253 uint32_t mask = hi_dword << 16;
256 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
257 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
258 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
259 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
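/*
 * Illustrative sketch, not part of the original file: what
 * reverse_fdir_bitmasks() produces for typical port masks before the result
 * is inverted and written to FDIRTCPM/FDIRUDPM/FDIRSCTPM below. Each 16-bit
 * half is bit-reversed in place, e.g.
 * reverse_fdir_bitmasks(0x00FF, 0xFFFF) == 0xFF00FFFF. The helper name is
 * hypothetical.
 */
static inline uint32_t
example_port_masks_to_fdirtcpm(uint16_t dst_port_mask_be,
			       uint16_t src_port_mask_be)
{
	uint32_t fdirtcpm = reverse_fdir_bitmasks(
			rte_be_to_cpu_16(dst_port_mask_be),
			rte_be_to_cpu_16(src_port_mask_be));

	/* the hardware expects the inverse, as in fdir_set_input_mask_82599() */
	return ~fdirtcpm;
}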
263 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
264 * but makes use of the rte_fdir_masks structure to see which bits to set.
267 fdir_set_input_mask_82599(struct rte_eth_dev *dev)
269 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
270 struct ixgbe_hw_fdir_info *info =
271 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
273 * mask VM pool and DIPv6 since they are currently not supported;
274 * mask the FLEX byte; it will be set in flex_conf
276 uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
277 uint32_t fdirtcpm; /* TCP source and destination port masks. */
278 uint32_t fdiripv6m; /* IPv6 source and destination masks. */
279 volatile uint32_t *reg;
281 PMD_INIT_FUNC_TRACE();
284 * Program the relevant mask registers. If src/dst_port or src/dst_addr
285 * are zero, then assume a full mask for that field. Also assume that
286 * a VLAN of 0 is unspecified, so mask that out as well. L4type
287 * cannot be masked out in this implementation.
289 if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
290 /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
291 fdirm |= IXGBE_FDIRM_L4P;
293 if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
294 /* mask VLAN Priority */
295 fdirm |= IXGBE_FDIRM_VLANP;
296 else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
298 fdirm |= IXGBE_FDIRM_VLANID;
299 else if (info->mask.vlan_tci_mask == 0)
300 /* mask VLAN ID and Priority */
301 fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
302 else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
303 PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
308 if (info->mask.flex_bytes_mask == 0)
309 fdirm |= IXGBE_FDIRM_FLEX;
311 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
313 /* store the TCP/UDP port masks, bit reversed from port layout */
314 fdirtcpm = reverse_fdir_bitmasks(
315 rte_be_to_cpu_16(info->mask.dst_port_mask),
316 rte_be_to_cpu_16(info->mask.src_port_mask));
318 /* write all the same so that UDP, TCP and SCTP use the same mask
321 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
322 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
323 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
325 /* Store source and destination IPv4 masks (big-endian),
326 * cannot use IXGBE_WRITE_REG.
328 reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
329 *reg = ~(info->mask.src_ipv4_mask);
330 reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
331 *reg = ~(info->mask.dst_ipv4_mask);
333 if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
335 * Store source and destination IPv6 masks (bit reversed)
337 fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
338 info->mask.src_ipv6_mask;
340 IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
343 return IXGBE_SUCCESS;
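/*
 * Illustrative sketch, not part of the original file: the FDIRM VLAN bits
 * derived from vlan_tci_mask, mirroring the checks in
 * fdir_set_input_mask_82599() above (and repeated for x550 below). Only the
 * four mask values handled there are accepted; the helper name is
 * hypothetical.
 */
static inline int
example_vlan_tci_mask_to_fdirm(uint16_t vlan_tci_mask, uint32_t *fdirm)
{
	if (vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		*fdirm |= IXGBE_FDIRM_VLANP;		/* match VLAN ID only */
	else if (vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		*fdirm |= IXGBE_FDIRM_VLANID;		/* match priority only */
	else if (vlan_tci_mask == 0)
		*fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (vlan_tci_mask != rte_cpu_to_be_16(0xEFFF))
		return -EINVAL;				/* unsupported mask */
	return 0;
}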
347 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
348 * but makes use of the rte_fdir_masks structure to see which bits to set.
351 fdir_set_input_mask_x550(struct rte_eth_dev *dev)
353 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
354 struct ixgbe_hw_fdir_info *info =
355 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
356 /* mask VM pool and DIPv6 since they are currently not supported;
357 * mask the FLEX byte; it will be set in flex_conf
359 uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
362 enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
365 PMD_INIT_FUNC_TRACE();
367 /* set the default UDP port for VxLAN */
368 if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
369 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);
371 /* some bits must be set for mac vlan or tunnel mode */
372 fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
374 if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
375 /* mask VLAN Priority */
376 fdirm |= IXGBE_FDIRM_VLANP;
377 else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
379 fdirm |= IXGBE_FDIRM_VLANID;
380 else if (info->mask.vlan_tci_mask == 0)
381 /* mask VLAN ID and Priority */
382 fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
383 else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
384 PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
388 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
390 fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
391 fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
392 if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
393 fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
394 IXGBE_FDIRIP6M_TNI_VNI;
396 if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
397 mac_mask = info->mask.mac_addr_byte_mask;
398 fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
399 & IXGBE_FDIRIP6M_INNER_MAC;
401 switch (info->mask.tunnel_type_mask) {
403 /* Mask tunnel type */
404 fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
409 PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
413 switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
416 fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
419 fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
424 PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
429 IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
430 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
431 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
432 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
433 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
434 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
436 return IXGBE_SUCCESS;
440 ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
441 const struct rte_eth_fdir_masks *input_mask)
443 struct ixgbe_hw_fdir_info *info =
444 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
445 uint16_t dst_ipv6m = 0;
446 uint16_t src_ipv6m = 0;
448 memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
449 info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
450 info->mask.src_port_mask = input_mask->src_port_mask;
451 info->mask.dst_port_mask = input_mask->dst_port_mask;
452 info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
453 info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
454 IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
455 IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
456 info->mask.src_ipv6_mask = src_ipv6m;
457 info->mask.dst_ipv6_mask = dst_ipv6m;
459 return IXGBE_SUCCESS;
463 ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
464 const struct rte_eth_fdir_masks *input_mask)
466 struct ixgbe_hw_fdir_info *info =
467 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
469 memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
470 info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
471 info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
472 info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
473 info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
475 return IXGBE_SUCCESS;
479 ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
480 const struct rte_eth_fdir_masks *input_mask)
482 enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
484 if (mode >= RTE_FDIR_MODE_SIGNATURE &&
485 mode <= RTE_FDIR_MODE_PERFECT)
486 return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
487 else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
488 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
489 return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
491 PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
496 ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
498 enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
500 if (mode >= RTE_FDIR_MODE_SIGNATURE &&
501 mode <= RTE_FDIR_MODE_PERFECT)
502 return fdir_set_input_mask_82599(dev);
503 else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
504 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
505 return fdir_set_input_mask_x550(dev);
507 PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
512 ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
515 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
519 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
521 fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
522 fdirctrl |= ((offset >> 1) /* convert to word offset */
523 << IXGBE_FDIRCTRL_FLEX_SHIFT);
525 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
526 IXGBE_WRITE_FLUSH(hw);
527 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
528 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
529 IXGBE_FDIRCTRL_INIT_DONE)
537 fdir_set_input_mask(struct rte_eth_dev *dev,
538 const struct rte_eth_fdir_masks *input_mask)
542 ret = ixgbe_fdir_store_input_mask(dev, input_mask);
546 return ixgbe_fdir_set_input_mask(dev);
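/*
 * Illustrative sketch, not part of the original file: the kind of
 * rte_eth_fdir_masks an application places in dev_conf.fdir_conf.mask, which
 * ixgbe_fdir_configure() then hands to fdir_set_input_mask() above. The
 * helper name and the chosen values are hypothetical.
 */
static inline void
example_fill_fdir_masks(struct rte_eth_fdir_masks *mask)
{
	memset(mask, 0, sizeof(*mask));
	mask->vlan_tci_mask = rte_cpu_to_be_16(0x0FFF);	/* match VLAN ID only */
	mask->ipv4_mask.src_ip = UINT32_MAX;		/* match full source IP */
	mask->ipv4_mask.dst_ip = UINT32_MAX;		/* match full dest IP */
	mask->src_port_mask = UINT16_MAX;		/* match full source port */
	mask->dst_port_mask = UINT16_MAX;		/* match full dest port */
}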
550 * ixgbe_set_fdir_flex_conf - check that the flex payload and mask configuration
551 * arguments are valid and program them
554 ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
555 const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
557 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
558 struct ixgbe_hw_fdir_info *info =
559 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
560 const struct rte_eth_flex_payload_cfg *flex_cfg;
561 const struct rte_eth_fdir_flex_mask *flex_mask;
563 uint16_t flexbytes = 0;
566 fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);
569 PMD_DRV_LOG(ERR, "NULL pointer.");
573 for (i = 0; i < conf->nb_payloads; i++) {
574 flex_cfg = &conf->flex_set[i];
575 if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
576 PMD_DRV_LOG(ERR, "unsupported payload type.");
579 if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
580 (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
581 (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
582 *fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
584 (flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
585 IXGBE_FDIRCTRL_FLEX_SHIFT;
587 PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
592 for (i = 0; i < conf->nb_flexmasks; i++) {
593 flex_mask = &conf->flex_mask[i];
594 if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
595 PMD_DRV_LOG(ERR, "flexmask should be set globally.");
598 flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
599 ((flex_mask->mask[1]) & 0xFF));
600 if (flexbytes == UINT16_MAX)
601 fdirm &= ~IXGBE_FDIRM_FLEX;
602 else if (flexbytes != 0) {
603 /* IXGBE_FDIRM_FLEX is set by default when the mask is set */
604 PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
608 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
609 info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
610 info->flex_bytes_offset = (uint8_t)((*fdirctrl &
611 IXGBE_FDIRCTRL_FLEX_MASK) >>
612 IXGBE_FDIRCTRL_FLEX_SHIFT);
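/*
 * Illustrative sketch, not part of the original file: a flex configuration
 * that passes the checks above -- one RAW payload at an even offset no larger
 * than IXGBE_MAX_FLX_SOURCE_OFF, plus one global (RTE_ETH_FLOW_UNKNOWN)
 * two-byte mask that is either all-ones or all-zeroes. The helper name is
 * hypothetical.
 */
static inline void
example_fill_flex_conf(struct rte_eth_fdir_flex_conf *conf)
{
	memset(conf, 0, sizeof(*conf));
	conf->nb_payloads = 1;
	conf->flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
	conf->flex_set[0].src_offset[0] = IXGBE_DEFAULT_FLEXBYTES_OFFSET;
	conf->flex_set[0].src_offset[1] = IXGBE_DEFAULT_FLEXBYTES_OFFSET + 1;
	conf->nb_flexmasks = 1;
	conf->flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
	conf->flex_mask[0].mask[0] = 0xFF;	/* match both flex bytes */
	conf->flex_mask[0].mask[1] = 0xFF;
}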
617 ixgbe_fdir_configure(struct rte_eth_dev *dev)
619 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
621 uint32_t fdirctrl, pbsize;
623 enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
625 PMD_INIT_FUNC_TRACE();
627 if (hw->mac.type != ixgbe_mac_82599EB &&
628 hw->mac.type != ixgbe_mac_X540 &&
629 hw->mac.type != ixgbe_mac_X550 &&
630 hw->mac.type != ixgbe_mac_X550EM_x &&
631 hw->mac.type != ixgbe_mac_X550EM_a)
634 /* x550 supports MAC VLAN and tunnel modes, but other NICs do not */
635 if (hw->mac.type != ixgbe_mac_X550 &&
636 hw->mac.type != ixgbe_mac_X550EM_x &&
637 hw->mac.type != ixgbe_mac_X550EM_a &&
638 mode != RTE_FDIR_MODE_SIGNATURE &&
639 mode != RTE_FDIR_MODE_PERFECT)
642 err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
647 * Before enabling Flow Director, the Rx Packet Buffer size
648 * must be reduced. The new value is the current size minus
649 * flow director memory usage size.
651 pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
652 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
653 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
656 * The defaults in the HW for RX PB 1-7 are not zero and so should be
657 * initialized to zero for non-DCB mode; otherwise the actual total RX PB
658 * would be bigger than programmed and filter space would run into
661 for (i = 1; i < 8; i++)
662 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
664 err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
666 PMD_INIT_LOG(ERR, " Error on setting FD mask");
669 err = ixgbe_set_fdir_flex_conf(dev,
670 &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
672 PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
676 err = fdir_enable_82599(hw, fdirctrl);
678 PMD_INIT_LOG(ERR, " Error on enabling FD.");
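/*
 * Illustrative sketch, not part of the original file: the fdir_conf an
 * application embeds in struct rte_eth_conf so that ixgbe_fdir_configure()
 * above can set up perfect-match filtering. The helper name and the
 * drop-queue number are hypothetical; mask and flex_conf would be filled as
 * sketched earlier.
 */
static inline void
example_fill_fdir_conf(struct rte_fdir_conf *fdir_conf)
{
	memset(fdir_conf, 0, sizeof(*fdir_conf));
	fdir_conf->mode = RTE_FDIR_MODE_PERFECT;
	fdir_conf->pballoc = RTE_FDIR_PBALLOC_64K;
	fdir_conf->status = RTE_FDIR_REPORT_STATUS;
	fdir_conf->drop_queue = 127;
}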
685 * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used
686 * by the IXGBE driver code.
689 ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
690 union ixgbe_atr_input *input, enum rte_fdir_mode mode)
692 input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
693 input->formatted.flex_bytes = (uint16_t)(
694 (fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
695 (fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));
697 switch (fdir_filter->input.flow_type) {
698 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
699 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
701 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
702 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
704 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
705 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
707 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
708 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
710 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
711 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
713 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
714 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
716 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
717 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
719 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
720 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
726 switch (fdir_filter->input.flow_type) {
727 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
728 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
729 input->formatted.src_port =
730 fdir_filter->input.flow.udp4_flow.src_port;
731 input->formatted.dst_port =
732 fdir_filter->input.flow.udp4_flow.dst_port;
734 /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
735 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
736 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
737 input->formatted.src_ip[0] =
738 fdir_filter->input.flow.ip4_flow.src_ip;
739 input->formatted.dst_ip[0] =
740 fdir_filter->input.flow.ip4_flow.dst_ip;
743 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
744 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
745 input->formatted.src_port =
746 fdir_filter->input.flow.udp6_flow.src_port;
747 input->formatted.dst_port =
748 fdir_filter->input.flow.udp6_flow.dst_port;
750 /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
751 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
752 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
753 rte_memcpy(input->formatted.src_ip,
754 fdir_filter->input.flow.ipv6_flow.src_ip,
755 sizeof(input->formatted.src_ip));
756 rte_memcpy(input->formatted.dst_ip,
757 fdir_filter->input.flow.ipv6_flow.dst_ip,
758 sizeof(input->formatted.dst_ip));
764 if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
766 input->formatted.inner_mac,
767 fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
768 sizeof(input->formatted.inner_mac));
769 } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
771 input->formatted.inner_mac,
772 fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
773 sizeof(input->formatted.inner_mac));
774 if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
775 RTE_FDIR_TUNNEL_TYPE_VXLAN)
776 input->formatted.tunnel_type =
777 IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
778 else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
779 RTE_FDIR_TUNNEL_TYPE_NVGRE)
780 input->formatted.tunnel_type =
781 IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
783 PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");
785 input->formatted.tni_vni =
786 fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
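/*
 * Illustrative sketch, not part of the original file: an rte_eth_fdir_filter
 * for an IPv4/UDP flow, filled with the same union members the conversion
 * above reads. Addresses, ports and the queue number are hypothetical.
 */
static inline void
example_fill_udp4_fdir_filter(struct rte_eth_fdir_filter *f)
{
	memset(f, 0, sizeof(*f));
	f->soft_id = 1;
	f->input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	f->input.flow.ip4_flow.src_ip = rte_cpu_to_be_32(0xC0A80001);	/* 192.168.0.1 */
	f->input.flow.ip4_flow.dst_ip = rte_cpu_to_be_32(0xC0A80002);	/* 192.168.0.2 */
	f->input.flow.udp4_flow.src_port = rte_cpu_to_be_16(1024);
	f->input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4000);
	f->action.behavior = RTE_ETH_FDIR_ACCEPT;	/* direct, don't drop */
	f->action.rx_queue = 4;
}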
793 * The function below is taken from the FreeBSD IXGBE driver release
794 * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
795 * before returning, as the signature hash can use 16 bits.
797 * The newer driver has optimised functions for calculating bucket and
798 * signature hashes. However, they don't support IPv6-type packets for
799 * signature filters, so they are not used here.
801 * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
804 * Compute the hashes for SW ATR
805 * @stream: input bitstream to compute the hash on
806 * @key: 32-bit hash key
809 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
813 * The algorithm is as follows:
814 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
815 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
816 * and A[n] x B[n] is bitwise AND between same length strings
818 * K[n] is 16 bits, defined as:
819 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
820 * for n modulo 32 < 15, K[n] =
821 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
823 * S[n] is 16 bits, defined as:
824 * for n >= 15, S[n] = S[n:n - 15]
825 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
827 * To simplify for programming, the algorithm is implemented
828 * in software this way:
830 * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
832 * for (i = 0; i < 352; i+=32)
833 * hi_hash_dword[31:0] ^= Stream[(i+31):i];
835 * lo_hash_dword[15:0] ^= Stream[15:0];
836 * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
837 * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
839 * hi_hash_dword[31:0] ^= Stream[351:320];
842 * hash[15:0] ^= Stream[15:0];
844 * for (i = 0; i < 16; i++) {
846 * hash[15:0] ^= lo_hash_dword[(i+15):i];
848 * hash[15:0] ^= hi_hash_dword[(i+15):i];
852 __be32 common_hash_dword = 0;
853 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
857 /* record the flow_vm_vlan bits as they are a key part to the hash */
858 flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
860 /* generate common hash dword */
861 for (i = 1; i <= 13; i++)
862 common_hash_dword ^= atr_input->dword_stream[i];
864 hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
866 /* low dword is word swapped version of common */
867 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
869 /* apply flow ID/VM pool/VLAN ID bits to hash words */
870 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
872 /* Process bits 0 and 16 */
874 hash_result ^= lo_hash_dword;
875 if (key & 0x00010000)
876 hash_result ^= hi_hash_dword;
879 * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we had to
880 * delay this because bit 0 of the stream should not be processed,
881 * so we do not add the VLAN until after bit 0 has been processed
883 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
886 /* process the remaining 30 bits in the key 2 bits at a time */
887 for (i = 15; i; i--) {
888 if (key & (0x0001 << i))
889 hash_result ^= lo_hash_dword >> i;
890 if (key & (0x00010000 << i))
891 hash_result ^= hi_hash_dword >> i;
898 atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
899 enum rte_fdir_pballoc_type pballoc)
901 if (pballoc == RTE_FDIR_PBALLOC_256K)
902 return ixgbe_atr_compute_hash_82599(input,
903 IXGBE_ATR_BUCKET_HASH_KEY) &
904 PERFECT_BUCKET_256KB_HASH_MASK;
905 else if (pballoc == RTE_FDIR_PBALLOC_128K)
906 return ixgbe_atr_compute_hash_82599(input,
907 IXGBE_ATR_BUCKET_HASH_KEY) &
908 PERFECT_BUCKET_128KB_HASH_MASK;
910 return ixgbe_atr_compute_hash_82599(input,
911 IXGBE_ATR_BUCKET_HASH_KEY) &
912 PERFECT_BUCKET_64KB_HASH_MASK;
916 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
917 * @hw: pointer to hardware structure
920 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
924 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
925 *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
926 if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
928 rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
935 * Calculate the hash value needed for signature-match filters. In the FreeBSD
936 * driver, this is done by the optimised function
937 * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
938 * doesn't support calculating a hash for an IPv6 filter.
941 atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
942 enum rte_fdir_pballoc_type pballoc)
944 uint32_t bucket_hash, sig_hash;
946 if (pballoc == RTE_FDIR_PBALLOC_256K)
947 bucket_hash = ixgbe_atr_compute_hash_82599(input,
948 IXGBE_ATR_BUCKET_HASH_KEY) &
949 SIG_BUCKET_256KB_HASH_MASK;
950 else if (pballoc == RTE_FDIR_PBALLOC_128K)
951 bucket_hash = ixgbe_atr_compute_hash_82599(input,
952 IXGBE_ATR_BUCKET_HASH_KEY) &
953 SIG_BUCKET_128KB_HASH_MASK;
955 bucket_hash = ixgbe_atr_compute_hash_82599(input,
956 IXGBE_ATR_BUCKET_HASH_KEY) &
957 SIG_BUCKET_64KB_HASH_MASK;
959 sig_hash = ixgbe_atr_compute_hash_82599(input,
960 IXGBE_ATR_SIGNATURE_HASH_KEY);
962 return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
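/*
 * Illustrative sketch, not part of the original file: how
 * ixgbe_fdir_filter_program() later builds the FDIRHASH value from the two
 * helpers above -- a masked bucket hash in the low word, plus either the
 * caller's soft_id (perfect filters) or the signature hash (signature
 * filters) in the high word. The helper name is hypothetical.
 */
static inline uint32_t
example_compose_fdirhash(union ixgbe_atr_input *input, uint32_t soft_id,
			 enum rte_fdir_pballoc_type pballoc, bool is_perfect)
{
	if (is_perfect)
		return atr_compute_perfect_hash_82599(input, pballoc) |
			(soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT);

	/* atr_compute_sig_hash_82599() already packs both halves */
	return atr_compute_sig_hash_82599(input, pballoc);
}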
966 * This is based on ixgbe_fdir_write_perfect_filter_82599() in
967 * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
968 * added, and IPv6 support also added. The hash value is also pre-calculated
969 * as the pballoc value is needed to do it.
972 fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
973 union ixgbe_atr_input *input, uint8_t queue,
974 uint32_t fdircmd, uint32_t fdirhash,
975 enum rte_fdir_mode mode)
977 uint32_t fdirport, fdirvlan;
978 u32 addr_low, addr_high;
981 volatile uint32_t *reg;
983 if (mode == RTE_FDIR_MODE_PERFECT) {
984 /* record the IPv4 address (big-endian)
985 * cannot use IXGBE_WRITE_REG.
987 reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
988 *reg = input->formatted.src_ip[0];
989 reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
990 *reg = input->formatted.dst_ip[0];
992 /* record source and destination port (little-endian)*/
993 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
994 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
995 fdirport |= IXGBE_NTOHS(input->formatted.src_port);
996 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
997 } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
998 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
999 /* for mac vlan and tunnel modes */
1000 addr_low = ((u32)input->formatted.inner_mac[0] |
1001 ((u32)input->formatted.inner_mac[1] << 8) |
1002 ((u32)input->formatted.inner_mac[2] << 16) |
1003 ((u32)input->formatted.inner_mac[3] << 24));
1004 addr_high = ((u32)input->formatted.inner_mac[4] |
1005 ((u32)input->formatted.inner_mac[5] << 8));
1007 if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1008 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
1009 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
1010 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
1013 if (input->formatted.tunnel_type)
1014 tunnel_type = 0x80000000;
1015 tunnel_type |= addr_high;
1016 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
1017 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
1018 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
1019 input->formatted.tni_vni);
1021 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
1022 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
1023 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
1026 /* record vlan (little-endian) and flex_bytes (big-endian) */
1027 fdirvlan = input->formatted.flex_bytes;
1028 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1029 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1030 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1032 /* configure FDIRHASH register */
1033 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1036 * flush all previous writes to make certain registers are
1037 * programmed prior to issuing the command
1039 IXGBE_WRITE_FLUSH(hw);
1041 /* configure FDIRCMD register */
1042 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
1043 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1044 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1045 fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1046 fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1048 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1050 PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
1052 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1054 PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
1060 * This function is based on ixgbe_atr_add_signature_filter_82599() in
1061 * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
1062 * setting extra fields in the FDIRCMD register, and removes the code that was
1063 * verifying the flow_type field. According to the documentation, a flow type of
1064 * 00 (i.e. not TCP, UDP, or SCTP) is not supported; however, it appears to
1067 * Adds a signature hash filter
1068 * @hw: pointer to hardware structure
1069 * @input: unique input dword
1070 * @queue: queue index to direct traffic to
1071 * @fdircmd: any extra flags to set in fdircmd register
1072 * @fdirhash: pre-calculated hash value for the filter
1075 fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1076 union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
1081 PMD_INIT_FUNC_TRACE();
1083 /* configure FDIRCMD register */
1084 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
1085 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1086 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1087 fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1089 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1090 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1092 PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
1094 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1096 PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
1102 * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
1103 * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
1104 * that it can be used for removing signature and perfect filters.
1107 fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
1109 uint32_t fdircmd = 0;
1112 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1114 /* flush hash to HW */
1115 IXGBE_WRITE_FLUSH(hw);
1117 /* Query if filter is present */
1118 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1120 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1122 PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
1126 /* if filter exists in hardware then remove it */
1127 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1128 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1129 IXGBE_WRITE_FLUSH(hw);
1130 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1131 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1133 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1135 PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
1140 static inline struct ixgbe_fdir_filter *
1141 ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
1142 union ixgbe_atr_input *key)
1146 ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
1150 return fdir_info->hash_map[ret];
1154 ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
1155 struct ixgbe_fdir_filter *fdir_filter)
1159 ret = rte_hash_add_key(fdir_info->hash_handle,
1160 &fdir_filter->ixgbe_fdir);
1164 "Failed to insert fdir filter to hash table %d!",
1169 fdir_info->hash_map[ret] = fdir_filter;
1171 TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
1177 ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
1178 union ixgbe_atr_input *key)
1181 struct ixgbe_fdir_filter *fdir_filter;
1183 ret = rte_hash_del_key(fdir_info->hash_handle, key);
1186 PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
1190 fdir_filter = fdir_info->hash_map[ret];
1191 fdir_info->hash_map[ret] = NULL;
1193 TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
1194 rte_free(fdir_filter);
1200 ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
1201 const struct rte_eth_fdir_filter *fdir_filter,
1202 struct ixgbe_fdir_rule *rule)
1204 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1207 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1209 err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
1215 rule->mode = fdir_mode;
1216 if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
1217 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1218 rule->queue = fdir_filter->action.rx_queue;
1219 rule->soft_id = fdir_filter->soft_id;
1225 ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
1226 struct ixgbe_fdir_rule *rule,
1230 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1231 uint32_t fdircmd_flags;
1234 bool is_perfect = FALSE;
1236 struct ixgbe_hw_fdir_info *info =
1237 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1238 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1239 struct ixgbe_fdir_filter *node;
1240 bool add_node = FALSE;
1242 if (fdir_mode == RTE_FDIR_MODE_NONE ||
1243 fdir_mode != rule->mode)
1247 * Sanity check for x550.
1248 * When adding a new filter with flow type set to IPv4,
1249 * the flow director mask must be configured beforehand,
1250 * with the L4 protocol and ports masked.
1253 (hw->mac.type == ixgbe_mac_X550 ||
1254 hw->mac.type == ixgbe_mac_X550EM_x ||
1255 hw->mac.type == ixgbe_mac_X550EM_a) &&
1256 (rule->ixgbe_fdir.formatted.flow_type ==
1257 IXGBE_ATR_FLOW_TYPE_IPV4 ||
1258 rule->ixgbe_fdir.formatted.flow_type ==
1259 IXGBE_ATR_FLOW_TYPE_IPV6) &&
1260 (info->mask.src_port_mask != 0 ||
1261 info->mask.dst_port_mask != 0) &&
1262 (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
1263 rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
1264 PMD_DRV_LOG(ERR, "This device does not support"
1265 " IPv4/IPv6 flows unless the L4 protocol"
1266 " and ports are masked!");
1270 if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
1271 fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1275 if (rule->ixgbe_fdir.formatted.flow_type &
1276 IXGBE_ATR_L4TYPE_IPV6_MASK) {
1277 PMD_DRV_LOG(ERR, "IPv6 is not supported in"
1281 fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
1282 dev->data->dev_conf.fdir_conf.pballoc);
1283 fdirhash |= rule->soft_id <<
1284 IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1286 fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
1287 dev->data->dev_conf.fdir_conf.pballoc);
1290 err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
1294 err = fdir_erase_filter_82599(hw, fdirhash);
1296 PMD_DRV_LOG(ERR, "Failed to delete FDIR filter!");
1298 PMD_DRV_LOG(DEBUG, "Successfully deleted FDIR filter!");
1301 /* add or update an fdir filter*/
1302 fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
1303 if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
1305 queue = dev->data->dev_conf.fdir_conf.drop_queue;
1306 fdircmd_flags |= IXGBE_FDIRCMD_DROP;
1308 PMD_DRV_LOG(ERR, "Drop option is not supported in"
1309 " signature mode.");
1312 } else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
1313 queue = (uint8_t)rule->queue;
1317 node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
1320 node->fdirflags = fdircmd_flags;
1321 node->fdirhash = fdirhash;
1322 node->queue = queue;
1324 PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
1329 node = rte_zmalloc("ixgbe_fdir",
1330 sizeof(struct ixgbe_fdir_filter),
1334 rte_memcpy(&node->ixgbe_fdir,
1336 sizeof(union ixgbe_atr_input));
1337 node->fdirflags = fdircmd_flags;
1338 node->fdirhash = fdirhash;
1339 node->queue = queue;
1341 err = ixgbe_insert_fdir_filter(info, node);
1349 err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
1350 queue, fdircmd_flags,
1351 fdirhash, fdir_mode);
1353 err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
1354 queue, fdircmd_flags,
1358 PMD_DRV_LOG(ERR, "Failed to add FDIR filter!");
1361 (void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
1363 PMD_DRV_LOG(DEBUG, "Successfully added FDIR filter");
1369 /* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
1370 * @dev: pointer to the structure rte_eth_dev
1371 * @fdir_filter: fdir filter entry
1372 * @del: 1 - delete, 0 - add
1373 * @update: 1 - update
1376 ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
1377 const struct rte_eth_fdir_filter *fdir_filter,
1381 struct ixgbe_fdir_rule rule;
1384 err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
1389 return ixgbe_fdir_filter_program(dev, &rule, del, update);
1393 ixgbe_fdir_flush(struct rte_eth_dev *dev)
1395 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1396 struct ixgbe_hw_fdir_info *info =
1397 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1400 ret = ixgbe_reinit_fdir_tables_82599(hw);
1402 PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
1414 #define FDIRENTRIES_NUM_SHIFT 10
1416 ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
1418 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1419 struct ixgbe_hw_fdir_info *info =
1420 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1421 uint32_t fdirctrl, max_num, i;
1424 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1425 offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
1426 IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);
1428 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
1429 max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
1430 (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
1431 if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
1432 fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1433 fdir_info->guarant_spc = max_num;
1434 else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
1435 fdir_info->guarant_spc = max_num * 4;
1437 fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
1438 fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
1439 fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
1440 IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
1441 fdir_info->mask.ipv6_mask.src_ip);
1442 IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
1443 fdir_info->mask.ipv6_mask.dst_ip);
1444 fdir_info->mask.src_port_mask = info->mask.src_port_mask;
1445 fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
1446 fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
1447 fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
1448 fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
1449 fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
1451 if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
1452 fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
1453 fdir_info->flow_types_mask[0] = 0ULL;
1455 fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
1456 for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
1457 fdir_info->flow_types_mask[i] = 0ULL;
1459 fdir_info->flex_payload_unit = sizeof(uint16_t);
1460 fdir_info->max_flex_payload_segment_num = 1;
1461 fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
1462 fdir_info->flex_conf.nb_payloads = 1;
1463 fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
1464 fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
1465 fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
1466 fdir_info->flex_conf.nb_flexmasks = 1;
1467 fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
1468 fdir_info->flex_conf.flex_mask[0].mask[0] =
1469 (uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
1470 fdir_info->flex_conf.flex_mask[0].mask[1] =
1471 (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
1475 ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
1477 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1478 struct ixgbe_hw_fdir_info *info =
1479 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1480 uint32_t reg, max_num;
1481 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1483 /* Get the information from registers */
1484 reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
1485 info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
1486 IXGBE_FDIRFREE_COLL_SHIFT);
1487 info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
1488 IXGBE_FDIRFREE_FREE_SHIFT);
1490 reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1491 info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
1492 IXGBE_FDIRLEN_MAXHASH_SHIFT);
1493 info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
1494 IXGBE_FDIRLEN_MAXLEN_SHIFT);
1496 reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1497 info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
1498 IXGBE_FDIRUSTAT_REMOVE_SHIFT;
1499 info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
1500 IXGBE_FDIRUSTAT_ADD_SHIFT;
1502 reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
1503 info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
1504 IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
1505 info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
1506 IXGBE_FDIRFSTAT_FADD_SHIFT;
1508 /* Copy the new information in the fdir parameter */
1509 fdir_stats->collision = info->collision;
1510 fdir_stats->free = info->free;
1511 fdir_stats->maxhash = info->maxhash;
1512 fdir_stats->maxlen = info->maxlen;
1513 fdir_stats->remove = info->remove;
1514 fdir_stats->add = info->add;
1515 fdir_stats->f_remove = info->f_remove;
1516 fdir_stats->f_add = info->f_add;
1518 reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1519 max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
1520 (reg & FDIRCTRL_PBALLOC_MASK)));
1521 if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
1522 fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1523 fdir_stats->guarant_cnt = max_num - fdir_stats->free;
1524 else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
1525 fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
1530 * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
1531 * @dev: pointer to the structure rte_eth_dev
1532 * @filter_op: operation to be taken
1533 * @arg: a pointer to specific structure corresponding to the filter_op
1536 ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
1537 enum rte_filter_op filter_op, void *arg)
1539 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1542 if (hw->mac.type != ixgbe_mac_82599EB &&
1543 hw->mac.type != ixgbe_mac_X540 &&
1544 hw->mac.type != ixgbe_mac_X550 &&
1545 hw->mac.type != ixgbe_mac_X550EM_x &&
1546 hw->mac.type != ixgbe_mac_X550EM_a)
1549 if (filter_op == RTE_ETH_FILTER_NOP)
1552 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
1555 switch (filter_op) {
1556 case RTE_ETH_FILTER_ADD:
1557 ret = ixgbe_add_del_fdir_filter(dev,
1558 (struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
1560 case RTE_ETH_FILTER_UPDATE:
1561 ret = ixgbe_add_del_fdir_filter(dev,
1562 (struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
1564 case RTE_ETH_FILTER_DELETE:
1565 ret = ixgbe_add_del_fdir_filter(dev,
1566 (struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
1568 case RTE_ETH_FILTER_FLUSH:
1569 ret = ixgbe_fdir_flush(dev);
1571 case RTE_ETH_FILTER_INFO:
1572 ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
1574 case RTE_ETH_FILTER_STATS:
1575 ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
1578 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
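/*
 * Illustrative sketch, not part of the original file: how an application of
 * this DPDK generation reaches ixgbe_fdir_ctrl_func() through the generic
 * filter-control API. The port id and filter pointer are hypothetical.
 */
static inline int
example_add_fdir_filter_via_filter_ctrl(uint16_t port_id,
					struct rte_eth_fdir_filter *filter)
{
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, filter);
}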
1585 /* restore flow director filter */
1587 ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
1589 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1590 struct ixgbe_hw_fdir_info *fdir_info =
1591 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1592 struct ixgbe_fdir_filter *node;
1593 bool is_perfect = FALSE;
1594 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1596 if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
1597 fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1601 TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
1602 (void)fdir_write_perfect_filter_82599(hw,
1610 TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
1611 (void)fdir_add_signature_filter_82599(hw,
1620 /* remove all the flow director filters */
1622 ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
1624 struct ixgbe_hw_fdir_info *fdir_info =
1625 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1626 struct ixgbe_fdir_filter *fdir_filter;
1627 struct ixgbe_fdir_filter *filter_flag;
1630 /* flush flow director */
1631 rte_hash_reset(fdir_info->hash_handle);
1632 memset(fdir_info->hash_map, 0,
1633 sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
1634 filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
1635 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1636 TAILQ_REMOVE(&fdir_info->fdir_list,
1639 rte_free(fdir_filter);
1642 if (filter_flag != NULL)
1643 ret = ixgbe_fdir_flush(dev);