/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_vxlan.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
#define FDIRCTRL_PBALLOC_MASK           0x03

/* For calculating memory required for FDIR filters */
#define PBALLOC_SIZE_SHIFT              15

/* Number of bits used to mask bucket hash for different pballoc sizes */
#define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF  /* 11 bits */
#define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF  /* 12 bits */
#define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF  /* 13 bits */
#define SIG_BUCKET_64KB_HASH_MASK       0x1FFF  /* 13 bits */
#define SIG_BUCKET_128KB_HASH_MASK      0x3FFF  /* 14 bits */
#define SIG_BUCKET_256KB_HASH_MASK      0x7FFF  /* 15 bits */
#define IXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
#define IXGBE_FDIR_MAX_FLEX_LEN         2 /* len in bytes of flexbytes */
#define IXGBE_MAX_FLX_SOURCE_OFF        62
#define IXGBE_FDIRCTRL_FLEX_MASK        (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
#define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
#define IXGBE_FDIR_FLOW_TYPES ( \
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
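
/*
 * Illustrative sketch (not part of the driver): IXGBE_FDIR_FLOW_TYPES is a
 * 64-bit bitmap with one bit per RTE_ETH_FLOW_* value; it is reported
 * verbatim in rte_eth_fdir_info.flow_types_mask[0] by ixgbe_fdir_info_get()
 * below. A flow type is tested the usual way; the helper name below is
 * hypothetical.
 */
static __rte_unused int
fdir_flow_type_supported_demo(uint16_t flow_type)
{
	return (IXGBE_FDIR_FLOW_TYPES & (1ULL << flow_type)) != 0;
}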
#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
	(ipv6m) = 0; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if (ipv6_addr[i] == UINT8_MAX) \
			(ipv6m) |= 1 << i; \
		else if (ipv6_addr[i] != 0) { \
			PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
			return -EINVAL; \
		} \
	} \
} while (0)
#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if ((ipv6m) & (1 << i)) \
			ipv6_addr[i] = UINT8_MAX; \
		else \
			ipv6_addr[i] = 0; \
	} \
	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
} while (0)
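
/*
 * Illustrative sketch (not part of the driver): the two macros above
 * compress a 128-bit IPv6 mask into a 16-bit bitmap with one bit per byte,
 * and expand it back. Each byte of the mask must be either 0xFF or 0x00.
 * A /64 prefix mask round-trips as shown; the helper name is hypothetical.
 */
static __rte_unused int
ipv6_mask_roundtrip_demo(void)
{
	uint8_t prefix64[16] = { UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX,
				 UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX };
	uint8_t restored[16];
	uint16_t bitmap = 0;

	IPV6_ADDR_TO_MASK(prefix64, bitmap);  /* bytes 0-7 set -> 0x00FF */
	IPV6_MASK_TO_ADDR(bitmap, restored);  /* expands back to prefix64 */

	return bitmap == 0x00FF && restored[7] == UINT8_MAX &&
		restored[8] == 0;
}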
#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4

static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
			       const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
static int ixgbe_fdir_filter_to_atr_input(
		const struct rte_eth_fdir_filter *fdir_filter,
		union ixgbe_atr_input *input,
		enum rte_fdir_mode mode);
static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
		uint32_t key);
static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc);
static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc);
static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, uint8_t queue,
		uint32_t fdircmd, uint32_t fdirhash,
		enum rte_fdir_mode mode);
static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
		uint32_t fdirhash);
static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_filter *fdir_filter,
		bool del,
		bool update);
static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
		struct rte_eth_fdir_info *fdir_info);
static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_fdir_stats *fdir_stats);
/**
 * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
 * It adds extra configuration of fdirctrl that is common for all filter types.
 *
 *  Initialize Flow Director control registers
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register
 **/
static int
fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
		return -ETIMEDOUT;
	}
	return 0;
}
/*
 * Set appropriate bits in fdirctrl for: variable reporting levels, moving
 * flexbytes matching field, and drop queue (only for perfect matching mode).
 */
static int
configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
{
	*fdirctrl = 0;

	switch (conf->pballoc) {
	case RTE_FDIR_PBALLOC_64K:
		/* 8k - 1 signature filters */
		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
		break;
	case RTE_FDIR_PBALLOC_128K:
		/* 16k - 1 signature filters */
		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
		break;
	case RTE_FDIR_PBALLOC_256K:
		/* 32k - 1 signature filters */
		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
		break;
	default:
		/* bad value */
		PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
		return -EINVAL;
	};

	/* status flags: write hash & swindex in the rx descriptor */
	switch (conf->status) {
	case RTE_FDIR_NO_REPORT_STATUS:
		/* do nothing, default mode */
		break;
	case RTE_FDIR_REPORT_STATUS:
		/* report status when the packet matches a fdir rule */
		*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
		break;
	case RTE_FDIR_REPORT_STATUS_ALWAYS:
		/* always report status */
		*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
		break;
	default:
		/* bad value */
		PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
		return -EINVAL;
	};

	*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
		     IXGBE_FDIRCTRL_FLEX_SHIFT;

	if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
	    conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
		*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
		if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
					<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
		else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
					<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
	}

	return 0;
}
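
/*
 * Illustrative sketch (not part of the driver), assuming the usual PBALLOC
 * encoding: a 64K/128K/256K allocation reserves 64/128/256 KiB of the Rx
 * packet buffer (see PBALLOC_SIZE_SHIFT above) and holds on the order of
 * 2K/4K/8K perfect filters, or four times as many signature filters. The
 * helper name and the 2048 base below are illustrative.
 */
static __rte_unused uint32_t
fdir_pballoc_capacity_demo(enum rte_fdir_pballoc_type pballoc, bool perfect)
{
	/* RTE_FDIR_PBALLOC_64K == 0, _128K == 1, _256K == 2 */
	uint32_t n_perfect = 2048U << pballoc;

	return perfect ? n_perfect : n_perfect * 4;
}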
/**
 * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
 *
 *  @hi_dword: Bits 31:16 mask to be bit swapped.
 *  @lo_dword: Bits 15:0  mask to be bit swapped.
 *
 *  Flow director uses several registers to store 2 x 16 bit masks with the
 *  bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
 *  mask affects the MS bit/byte of the target. This function reverses the
 *  bits in these masks.
 **/
static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
	uint32_t mask = hi_dword << 16;

	mask |= lo_dword;
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}
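
/*
 * Illustrative check (not part of the driver): the swap sequence above
 * bit-reverses each 16-bit half in place, so the LS bit of hi_dword lands
 * in bit 31 and the MS bit of lo_dword lands in bit 0. This is the layout
 * expected when programming FDIRTCPM/FDIRUDPM below. The helper name is
 * hypothetical.
 */
static __rte_unused int
reverse_fdir_bitmasks_demo(void)
{
	/* 0x0001 reversed is 0x8000; 0x8000 reversed is 0x0001 */
	return reverse_fdir_bitmasks(0x0001, 0x8000) == 0x80000001 &&
		reverse_fdir_bitmasks(0xFFFF, 0x0000) == 0xFFFF0000;
}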
/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 */
static int
fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/*
	 * mask VM pool and DIPv6 since they are currently not supported
	 * mask FLEX byte, it will be set in flex_conf
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
	volatile uint32_t *reg;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 */
	if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= IXGBE_FDIRM_L4P;

	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (info->mask.vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}

	/* flex byte mask */
	if (info->mask.flex_bytes_mask == 0)
		fdirm |= IXGBE_FDIRM_FLEX;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = reverse_fdir_bitmasks(
			rte_be_to_cpu_16(info->mask.dst_port_mask),
			rte_be_to_cpu_16(info->mask.src_port_mask));

	/* write all the same so that UDP, TCP and SCTP use the same mask
	 * (little-endian)
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);

	/* Store source and destination IPv4 masks (big-endian),
	 * can not use IXGBE_WRITE_REG.
	 */
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
	*reg = ~(info->mask.src_ipv4_mask);
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
	*reg = ~(info->mask.dst_ipv4_mask);

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
		/*
		 * Store source and destination IPv6 masks (bit reversed)
		 */
		fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
			    info->mask.src_ipv6_mask;

		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
	}

	return IXGBE_SUCCESS;
}
/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 */
static int
fdir_set_input_mask_x550(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/* mask VM pool and DIPv6 since they are currently not supported
	 * mask FLEX byte, it will be set in flex_conf
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
			 IXGBE_FDIRM_FLEX;
	uint32_t fdiripv6m;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
	uint16_t mac_mask;

	PMD_INIT_FUNC_TRACE();

	/* set the default UDP port for VxLAN */
	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, RTE_VXLAN_DEFAULT_PORT);

	/* some bits must be set for mac vlan or tunnel mode */
	fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;

	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (info->mask.vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
	fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
				IXGBE_FDIRIP6M_TNI_VNI;

	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
		mac_mask = info->mask.mac_addr_byte_mask &
			(IXGBE_FDIRIP6M_INNER_MAC >>
			IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
		fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
				IXGBE_FDIRIP6M_INNER_MAC);

		switch (info->mask.tunnel_type_mask) {
		case 0:
			/* Mask tunnel type */
			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			break;
		case 1:
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
			return -EINVAL;
		}

		switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
		case 0x0:
			/* Mask vxlan id */
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
			return -EINVAL;
		}
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);

	return IXGBE_SUCCESS;
}
static int
ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
				  const struct rte_eth_fdir_masks *input_mask)
{
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint16_t dst_ipv6m = 0;
	uint16_t src_ipv6m = 0;

	memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
	info->mask.src_port_mask = input_mask->src_port_mask;
	info->mask.dst_port_mask = input_mask->dst_port_mask;
	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
	IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
	info->mask.src_ipv6_mask = src_ipv6m;
	info->mask.dst_ipv6_mask = dst_ipv6m;

	return IXGBE_SUCCESS;
}
static int
ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
				 const struct rte_eth_fdir_masks *input_mask)
{
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

	memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
	info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
	info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
	info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;

	return IXGBE_SUCCESS;
}
static int
ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
			    const struct rte_eth_fdir_masks *input_mask)
{
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	if (mode >= RTE_FDIR_MODE_SIGNATURE &&
	    mode <= RTE_FDIR_MODE_PERFECT)
		return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
	else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		return ixgbe_fdir_store_input_mask_x550(dev, input_mask);

	PMD_DRV_LOG(ERR, "Unsupported fdir mode - %d!", mode);
	return -ENOTSUP;
}
int
ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	if (mode >= RTE_FDIR_MODE_SIGNATURE &&
	    mode <= RTE_FDIR_MODE_PERFECT)
		return fdir_set_input_mask_82599(dev);
	else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		return fdir_set_input_mask_x550(dev);

	PMD_DRV_LOG(ERR, "Unsupported fdir mode - %d!", mode);
	return -ENOTSUP;
}
int
ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
				uint16_t offset)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fdirctrl;
	int i;

	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);

	fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
	fdirctrl |= ((offset >> 1) /* convert to word offset */
		<< IXGBE_FDIRCTRL_FLEX_SHIFT);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	return 0;
}
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
		    const struct rte_eth_fdir_masks *input_mask)
{
	int ret;

	ret = ixgbe_fdir_store_input_mask(dev, input_mask);
	if (ret)
		return ret;

	return ixgbe_fdir_set_input_mask(dev);
}
/*
 * ixgbe_set_fdir_flex_conf - check that the flex payload and mask
 * configuration arguments are valid and program them
 */
static int
ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint32_t fdirm;
	uint16_t flexbytes = 0;
	uint16_t i;

	fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);

	if (conf == NULL) {
		PMD_DRV_LOG(ERR, "NULL pointer.");
		return -EINVAL;
	}

	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
			PMD_DRV_LOG(ERR, "unsupported payload type.");
			return -EINVAL;
		}
		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
		    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
		    (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
			*fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
			*fdirctrl |=
				(flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
					IXGBE_FDIRCTRL_FLEX_SHIFT;
		} else {
			PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
			return -EINVAL;
		}
	}

	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
			PMD_DRV_LOG(ERR, "flexmask should be set globally.");
			return -EINVAL;
		}
		flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
					((flex_mask->mask[1]) & 0xFF));
		if (flexbytes == UINT16_MAX)
			fdirm &= ~IXGBE_FDIRM_FLEX;
		else if (flexbytes != 0) {
			/* IXGBE_FDIRM_FLEX is set by default when set mask */
			PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
			return -EINVAL;
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
	info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
	info->flex_bytes_offset = (uint8_t)((*fdirctrl &
					IXGBE_FDIRCTRL_FLEX_MASK) >>
					IXGBE_FDIRCTRL_FLEX_SHIFT);
	return 0;
}
int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err;
	uint32_t fdirctrl, pbsize;
	int i;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOSYS;

	/* x550 supports mac-vlan and tunnel mode but other NICs do not */
	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a &&
	    mode != RTE_FDIR_MODE_SIGNATURE &&
	    mode != RTE_FDIR_MODE_PERFECT)
		return -ENOSYS;

	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
	if (err)
		return err;

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced. The new value is the current size minus
	 * flow director memory usage size.
	 */
	pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD mask");
		return err;
	}
	err = ixgbe_set_fdir_flex_conf(dev,
		&dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
		return err;
	}
	err = fdir_enable_82599(hw, fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on enabling FD.");
		return err;
	}
	return 0;
}
/*
 * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is
 * used by the IXGBE driver code.
 */
static int
ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
		union ixgbe_atr_input *input, enum rte_fdir_mode mode)
{
	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
	input->formatted.flex_bytes = (uint16_t)(
		(fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));

	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
		break;
	default:
		break;
	}

	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		input->formatted.src_port =
			fdir_filter->input.flow.udp4_flow.src_port;
		input->formatted.dst_port =
			fdir_filter->input.flow.udp4_flow.dst_port;
		/* fall-through */
	/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		input->formatted.src_ip[0] =
			fdir_filter->input.flow.ip4_flow.src_ip;
		input->formatted.dst_ip[0] =
			fdir_filter->input.flow.ip4_flow.dst_ip;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		input->formatted.src_port =
			fdir_filter->input.flow.udp6_flow.src_port;
		input->formatted.dst_port =
			fdir_filter->input.flow.udp6_flow.dst_port;
		/* fall-through */
	/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		rte_memcpy(input->formatted.src_ip,
			   fdir_filter->input.flow.ipv6_flow.src_ip,
			   sizeof(input->formatted.src_ip));
		rte_memcpy(input->formatted.dst_ip,
			   fdir_filter->input.flow.ipv6_flow.dst_ip,
			   sizeof(input->formatted.dst_ip));
		break;
	default:
		break;
	}

	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(
			input->formatted.inner_mac,
			fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
			sizeof(input->formatted.inner_mac));
	} else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		rte_memcpy(
			input->formatted.inner_mac,
			fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
			sizeof(input->formatted.inner_mac));
		if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
				RTE_FDIR_TUNNEL_TYPE_VXLAN)
			input->formatted.tunnel_type =
					IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
		else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
				RTE_FDIR_TUNNEL_TYPE_NVGRE)
			input->formatted.tunnel_type =
					IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
		else
			PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");

		input->formatted.tni_vni =
			fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
	}

	return 0;
}
/*
 * The below function is taken from the FreeBSD IXGBE drivers release
 * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
 * before returning, as the signature hash can use 16bits.
 *
 * The newer driver has optimised functions for calculating bucket and
 * signature hashes. However they don't support IPv6 type packets for signature
 * filters so are not used here.
 *
 * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
 * set.
 *
 * Compute the hashes for SW ATR
 *  @stream: input bitstream to compute the hash on
 *  @key: 32-bit hash key
 **/
static uint32_t
ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
		uint32_t key)
{
	/*
	 * The algorithm is as follows:
	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
	 *    and A[n] x B[n] is bitwise AND between same length strings
	 *
	 *    K[n] is 16 bits, defined as:
	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
	 *       for n modulo 32 < 15, K[n] =
	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
	 *
	 *    S[n] is 16 bits, defined as:
	 *       for n >= 15, S[n] = S[n:n - 15]
	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
	 *
	 *    To simplify for programming, the algorithm is implemented
	 *    in software this way:
	 *
	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
	 *
	 *    for (i = 0; i < 352; i+=32)
	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
	 *
	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
	 *
	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
	 *
	 *    if (key[0])
	 *        hash[15:0] ^= Stream[15:0];
	 *
	 *    for (i = 0; i < 16; i++) {
	 *        if (key[i])
	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
	 *        if (key[i + 16])
	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
	 *    }
	 */
	__be32 common_hash_dword = 0;
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 hash_result = 0;
	u8 i;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		common_hash_dword ^= atr_input->dword_stream[i];

	hi_hash_dword = IXGBE_NTOHL(common_hash_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	if (key & 0x0001)
		hash_result ^= lo_hash_dword;
	if (key & 0x00010000)
		hash_result ^= hi_hash_dword;

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* process the remaining 30 bits in the key 2 bits at a time */
	for (i = 15; i; i--) {
		if (key & (0x0001 << i))
			hash_result ^= lo_hash_dword >> i;
		if (key & (0x00010000 << i))
			hash_result ^= hi_hash_dword >> i;
	}

	return hash_result;
}
static uint32_t
atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc)
{
	if (pballoc == RTE_FDIR_PBALLOC_256K)
		return ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				PERFECT_BUCKET_256KB_HASH_MASK;
	else if (pballoc == RTE_FDIR_PBALLOC_128K)
		return ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				PERFECT_BUCKET_128KB_HASH_MASK;
	else
		return ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				PERFECT_BUCKET_64KB_HASH_MASK;
}
/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 **/
static int
ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return 0;
		rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
	}

	return -ETIMEDOUT;
}
/*
 * Calculate the hash value needed for signature-match filters. In the FreeBSD
 * driver, this is done by the optimised function
 * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
 * doesn't support calculating a hash for an IPv6 filter.
 */
static uint32_t
atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc)
{
	uint32_t bucket_hash, sig_hash;

	if (pballoc == RTE_FDIR_PBALLOC_256K)
		bucket_hash = ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				SIG_BUCKET_256KB_HASH_MASK;
	else if (pballoc == RTE_FDIR_PBALLOC_128K)
		bucket_hash = ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				SIG_BUCKET_128KB_HASH_MASK;
	else
		bucket_hash = ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				SIG_BUCKET_64KB_HASH_MASK;

	sig_hash = ixgbe_atr_compute_hash_82599(input,
			IXGBE_ATR_SIGNATURE_HASH_KEY);

	return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
}
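
/*
 * Illustrative sketch (not part of the driver): the value returned above
 * matches the FDIRHASH register layout, with the signature hash in the
 * high half (IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) and the bucket hash in the
 * low bits. Perfect-mode filters reuse the same high half for the rule's
 * soft_id (see ixgbe_fdir_filter_program() below). The helper name is
 * hypothetical.
 */
static __rte_unused uint32_t
fdirhash_layout_demo(uint16_t sig_hash, uint16_t bucket_hash)
{
	return ((uint32_t)sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) |
		bucket_hash;
}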
/*
 * This is based on ixgbe_fdir_write_perfect_filter_82599() in
 * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
 * added, and IPv6 support also added. The hash value is also pre-calculated
 * as the pballoc value is needed to do it.
 */
static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, uint8_t queue,
		uint32_t fdircmd, uint32_t fdirhash,
		enum rte_fdir_mode mode)
{
	uint32_t fdirport, fdirvlan;
	u32 addr_low, addr_high;
	u32 tunnel_type = 0;
	int err = 0;
	volatile uint32_t *reg;

	if (mode == RTE_FDIR_MODE_PERFECT) {
		/* record the IPv4 address (big-endian)
		 * can not use IXGBE_WRITE_REG.
		 */
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
		*reg = input->formatted.src_ip[0];
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
		*reg = input->formatted.dst_ip[0];

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	} else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		   mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		/* for mac vlan and tunnel modes */
		addr_low = ((u32)input->formatted.inner_mac[0] |
			    ((u32)input->formatted.inner_mac[1] << 8) |
			    ((u32)input->formatted.inner_mac[2] << 16) |
			    ((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
			     ((u32)input->formatted.inner_mac[5] << 8));

		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
		} else {
			/* tunnel mode */
			if (input->formatted.tunnel_type)
				tunnel_type = 0x80000000;
			tunnel_type |= addr_high;
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
					input->formatted.tni_vni);
		}
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
	}

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = input->formatted.flex_bytes;
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}
/*
 * This function is based on ixgbe_atr_add_signature_filter_82599() in
 * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
 * setting extra fields in the FDIRCMD register, and removes the code that was
 * verifying the flow_type field. According to the documentation, a flow type of
 * 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to
 * work ok...
 *
 *  Adds a signature hash filter
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @queue: queue index to direct traffic to
 *  @fdircmd: any extra flags to set in fdircmd register
 *  @fdirhash: pre-calculated hash value for the filter
 **/
static int
fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
		uint32_t fdirhash)
{
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	/* configure FDIRCMD register */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}
/*
 * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
 * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
 * that it can be used for removing signature and perfect filters.
 */
static int
fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
{
	uint32_t fdircmd = 0;
	int err = 0;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
	return err;
}
static inline struct ixgbe_fdir_filter *
ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
			 union ixgbe_atr_input *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

static inline int
ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
			 struct ixgbe_fdir_filter *fdir_filter)
{
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_handle,
			       &fdir_filter->ixgbe_fdir);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir filter to hash table %d!",
			    ret);
		return ret;
	}

	fdir_info->hash_map[ret] = fdir_filter;

	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);

	return 0;
}

static inline int
ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
			 union ixgbe_atr_input *key)
{
	int ret;
	struct ixgbe_fdir_filter *fdir_filter;

	ret = rte_hash_del_key(fdir_info->hash_handle, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
		return ret;
	}

	fdir_filter = fdir_info->hash_map[ret];
	fdir_info->hash_map[ret] = NULL;

	TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
	rte_free(fdir_filter);

	return 0;
}
static int
ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
			    const struct rte_eth_fdir_filter *fdir_filter,
			    struct ixgbe_fdir_rule *rule)
{
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
	int err;

	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));

	err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
					     &rule->ixgbe_fdir,
					     fdir_mode);
	if (err)
		return err;

	rule->mode = fdir_mode;
	if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	rule->queue = fdir_filter->action.rx_queue;
	rule->soft_id = fdir_filter->soft_id;

	return 0;
}
static int
ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
			  struct ixgbe_fdir_rule *rule,
			  bool del,
			  bool update)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fdircmd_flags;
	uint32_t fdirhash;
	uint8_t queue;
	bool is_perfect = FALSE;
	int err;
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
	struct ixgbe_fdir_filter *node;
	bool add_node = FALSE;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	/*
	 * Sanity check for x550.
	 * When adding a new filter with flow type set to IPv4,
	 * the flow director mask should be configured beforehand,
	 * and the L4 protocol and ports are masked.
	 */
	if ((!del) &&
	    (hw->mac.type == ixgbe_mac_X550 ||
	     hw->mac.type == ixgbe_mac_X550EM_x ||
	     hw->mac.type == ixgbe_mac_X550EM_a) &&
	    (rule->ixgbe_fdir.formatted.flow_type ==
	     IXGBE_ATR_FLOW_TYPE_IPV4 ||
	     rule->ixgbe_fdir.formatted.flow_type ==
	     IXGBE_ATR_FLOW_TYPE_IPV6) &&
	    (info->mask.src_port_mask != 0 ||
	     info->mask.dst_port_mask != 0) &&
	    (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
	     rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
		PMD_DRV_LOG(ERR, "On this device,"
			    " IPv4 is not supported without"
			    " L4 protocol and ports masked!");
		return -ENOTSUP;
	}

	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		is_perfect = TRUE;

	if (is_perfect) {
		if (rule->ixgbe_fdir.formatted.flow_type &
		    IXGBE_ATR_L4TYPE_IPV6_MASK) {
			PMD_DRV_LOG(ERR, "IPv6 is not supported in"
				    " perfect mode!");
			return -ENOTSUP;
		}
		fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
				dev->data->dev_conf.fdir_conf.pballoc);
		fdirhash |= rule->soft_id <<
			IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	} else
		fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
				dev->data->dev_conf.fdir_conf.pballoc);

	if (del) {
		err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
		if (err < 0)
			return err;

		err = fdir_erase_filter_82599(hw, fdirhash);
		if (err < 0)
			PMD_DRV_LOG(ERR, "Failed to delete FDIR filter!");
		else
			PMD_DRV_LOG(DEBUG, "Successfully deleted FDIR filter!");
		return err;
	}
	/* add or update an fdir filter*/
	fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
	if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
		if (is_perfect) {
			queue = dev->data->dev_conf.fdir_conf.drop_queue;
			fdircmd_flags |= IXGBE_FDIRCMD_DROP;
		} else {
			PMD_DRV_LOG(ERR, "Drop option is not supported in"
				    " signature mode.");
			return -EINVAL;
		}
	} else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
		queue = (uint8_t)rule->queue;
	else
		return -EINVAL;

	node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
	if (node) {
		if (update) {
			node->fdirflags = fdircmd_flags;
			node->fdirhash = fdirhash;
			node->queue = queue;
		} else {
			PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
			return -EINVAL;
		}
	} else {
		add_node = TRUE;
		node = rte_zmalloc("ixgbe_fdir",
				   sizeof(struct ixgbe_fdir_filter),
				   0);
		if (!node)
			return -ENOMEM;
		rte_memcpy(&node->ixgbe_fdir,
			   &rule->ixgbe_fdir,
			   sizeof(union ixgbe_atr_input));
		node->fdirflags = fdircmd_flags;
		node->fdirhash = fdirhash;
		node->queue = queue;

		err = ixgbe_insert_fdir_filter(info, node);
		if (err < 0)
			return err;
	}

	if (is_perfect)
		err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
						      queue, fdircmd_flags,
						      fdirhash, fdir_mode);
	else
		err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
						      queue, fdircmd_flags,
						      fdirhash);
	if (err < 0) {
		PMD_DRV_LOG(ERR, "Failed to add FDIR filter!");

		if (add_node)
			(void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
	} else {
		PMD_DRV_LOG(DEBUG, "Successfully added FDIR filter");
	}

	return err;
}
/* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
 * @dev: pointer to the structure rte_eth_dev
 * @fdir_filter: fdir filter entry
 * @del: 1 - delete, 0 - add
 * @update: 1 - update
 */
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
			  const struct rte_eth_fdir_filter *fdir_filter,
			  bool del,
			  bool update)
{
	struct ixgbe_fdir_rule rule;
	int err;

	err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);

	if (err)
		return err;

	return ixgbe_fdir_filter_program(dev, &rule, del, update);
}
static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	int ret;

	ret = ixgbe_reinit_fdir_tables_82599(hw);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
		return ret;
	}

	info->f_add = 0;
	info->f_remove = 0;
	info->add = 0;
	info->remove = 0;

	return ret;
}
#define FDIRENTRIES_NUM_SHIFT 10
static void
ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t fdirctrl, max_num, i;
	uint8_t offset;

	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
			IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);

	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->guarant_spc = max_num;
	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
		fdir_info->guarant_spc = max_num * 4;

	fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
	fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
	fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
	IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
			  fdir_info->mask.ipv6_mask.src_ip);
	IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
			  fdir_info->mask.ipv6_mask.dst_ip);
	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
	fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;

	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
	    fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->flow_types_mask[0] = 0ULL;
	else
		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
		fdir_info->flow_types_mask[i] = 0ULL;

	fdir_info->flex_payload_unit = sizeof(uint16_t);
	fdir_info->max_flex_payload_segment_num = 1;
	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
	fdir_info->flex_conf.nb_payloads = 1;
	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
	fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
	fdir_info->flex_conf.nb_flexmasks = 1;
	fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
	fdir_info->flex_conf.flex_mask[0].mask[0] =
		(uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
	fdir_info->flex_conf.flex_mask[0].mask[1] =
		(uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
}
static void
ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t reg, max_num;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	/* Get the information from registers */
	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
	info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
				     IXGBE_FDIRFREE_COLL_SHIFT);
	info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
				IXGBE_FDIRFREE_FREE_SHIFT);

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
	info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
				   IXGBE_FDIRLEN_MAXHASH_SHIFT);
	info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
				 IXGBE_FDIRLEN_MAXLEN_SHIFT);

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
			IXGBE_FDIRUSTAT_REMOVE_SHIFT;
	info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
		     IXGBE_FDIRUSTAT_ADD_SHIFT;

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
	info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
			  IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
	info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
		       IXGBE_FDIRFSTAT_FADD_SHIFT;

	/* Copy the new information in the fdir parameter */
	fdir_stats->collision = info->collision;
	fdir_stats->free = info->free;
	fdir_stats->maxhash = info->maxhash;
	fdir_stats->maxlen = info->maxlen;
	fdir_stats->remove = info->remove;
	fdir_stats->add = info->add;
	fdir_stats->f_remove = info->f_remove;
	fdir_stats->f_add = info->f_add;

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
			 (reg & FDIRCTRL_PBALLOC_MASK)));
	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_stats->guarant_cnt = max_num - fdir_stats->free;
	else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
}
/*
 * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
 * @dev: pointer to the structure rte_eth_dev
 * @filter_op: operation to be taken
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
		     enum rte_filter_op filter_op, void *arg)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
		break;
	case RTE_ETH_FILTER_UPDATE:
		ret = ixgbe_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
		break;
	case RTE_ETH_FILTER_FLUSH:
		ret = ixgbe_fdir_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* restore flow director filter */
void
ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct ixgbe_fdir_filter *node;
	bool is_perfect = FALSE;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		is_perfect = TRUE;

	if (is_perfect) {
		TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
			(void)fdir_write_perfect_filter_82599(hw,
							      &node->ixgbe_fdir,
							      node->queue,
							      node->fdirflags,
							      node->fdirhash,
							      fdir_mode);
		}
	} else {
		TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
			(void)fdir_add_signature_filter_82599(hw,
							      &node->ixgbe_fdir,
							      node->queue,
							      node->fdirflags,
							      node->fdirhash);
		}
	}
}
/* remove all the flow director filters */
int
ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct ixgbe_fdir_filter *fdir_filter;
	struct ixgbe_fdir_filter *filter_flag;
	int ret = 0;

	/* flush flow director */
	rte_hash_reset(fdir_info->hash_handle);
	memset(fdir_info->hash_map, 0,
	       sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
	filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	if (filter_flag != NULL)
		ret = ixgbe_fdir_flush(dev);

	return ret;
}