/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
#define FDIRCTRL_PBALLOC_MASK           0x03

/* For calculating memory required for FDIR filters */
#define PBALLOC_SIZE_SHIFT              15

/* Number of bits used to mask bucket hash for different pballoc sizes */
#define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF  /* 11 bits */
#define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF  /* 12 bits */
#define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF  /* 13 bits */
#define SIG_BUCKET_64KB_HASH_MASK       0x1FFF  /* 13 bits */
#define SIG_BUCKET_128KB_HASH_MASK      0x3FFF  /* 14 bits */
#define SIG_BUCKET_256KB_HASH_MASK      0x7FFF  /* 15 bits */
#define IXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
#define IXGBE_FDIR_MAX_FLEX_LEN         2 /* len in bytes of flexbytes */
#define IXGBE_MAX_FLX_SOURCE_OFF        62
#define IXGBE_FDIRCTRL_FLEX_MASK        (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
#define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
#define IXGBE_FDIR_FLOW_TYPES ( \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
	(ipv6m) = 0; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if (ipv6_addr[i] == UINT8_MAX) \
			(ipv6m) |= 1 << i; \
		else if (ipv6_addr[i] != 0) { \
			PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
			return -EINVAL; \
		} \
	} \
} while (0)

#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if ((ipv6m) & (1 << i)) \
			ipv6_addr[i] = UINT8_MAX; \
		else \
			ipv6_addr[i] = 0; \
	} \
	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
} while (0)
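
/*
 * Illustrative note (not part of the upstream driver): the two macros above
 * convert between a per-byte IPv6 mask and a 16-bit bitmap, one bit per
 * address byte. Each byte of the mask must be either 0x00 or 0xFF; bit i of
 * the bitmap corresponds to byte i of the address. A minimal usage sketch,
 * assuming a hypothetical caller that may return -EINVAL on a bad mask:
 *
 *	static int example_ipv6_mask(const struct rte_eth_fdir_masks *m)
 *	{
 *		uint16_t src_bitmap = 0;
 *
 *		IPV6_ADDR_TO_MASK(m->ipv6_mask.src_ip, src_bitmap);
 *		return src_bitmap;	// e.g. a /64 prefix mask -> 0x00FF
 *	}
 */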
#define DEFAULT_VXLAN_PORT 4789
#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_masks *input_mask);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
static int ixgbe_fdir_filter_to_atr_input(
		const struct rte_eth_fdir_filter *fdir_filter,
		union ixgbe_atr_input *input,
		enum rte_fdir_mode mode);
static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
		uint32_t key);
static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc);
static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc);
static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, uint8_t queue,
		uint32_t fdircmd, uint32_t fdirhash,
		enum rte_fdir_mode mode);
static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
		uint32_t fdirhash);
static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_filter *fdir_filter,
		bool del,
		bool update);
static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
		struct rte_eth_fdir_info *fdir_info);
static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_fdir_stats *fdir_stats);
/**
 * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
 * It adds extra configuration of fdirctrl that is common for all filter types.
 *
 *  Initialize Flow Director control registers
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register
 **/
static int
fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
		return -ETIMEDOUT;
	}
	return 0;
}
/*
 * Set appropriate bits in fdirctrl for: variable reporting levels, moving
 * flexbytes matching field, and drop queue (only for perfect matching mode).
 */
static inline int
configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
{
	*fdirctrl = 0;

	switch (conf->pballoc) {
	case RTE_FDIR_PBALLOC_64K:
		/* 8k - 1 signature filters */
		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
		break;
	case RTE_FDIR_PBALLOC_128K:
		/* 16k - 1 signature filters */
		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
		break;
	case RTE_FDIR_PBALLOC_256K:
		/* 32k - 1 signature filters */
		*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
		break;
	default:
		/* bad value */
		PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
		return -EINVAL;
	};

	/* status flags: write hash & swindex in the rx descriptor */
	switch (conf->status) {
	case RTE_FDIR_NO_REPORT_STATUS:
		/* do nothing, default mode */
		break;
	case RTE_FDIR_REPORT_STATUS:
		/* report status when the packet matches a fdir rule */
		*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
		break;
	case RTE_FDIR_REPORT_STATUS_ALWAYS:
		/* always report status */
		*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
		break;
	default:
		/* bad value */
		PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
		return -EINVAL;
	};

	*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
		     IXGBE_FDIRCTRL_FLEX_SHIFT;

	if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
	    conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
		*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
		if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
					<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
		else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
			*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
					<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
	}

	return 0;
}
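
/*
 * Illustrative note (not part of the upstream driver): a minimal sketch of a
 * port configuration that exercises configure_fdir_flags(). The field values
 * are examples only; any queue index used as drop_queue must exist on the
 * port.
 *
 *	struct rte_fdir_conf conf = {
 *		.mode = RTE_FDIR_MODE_PERFECT,
 *		.pballoc = RTE_FDIR_PBALLOC_64K,
 *		.status = RTE_FDIR_REPORT_STATUS,
 *		.drop_queue = 127,
 *	};
 *	uint32_t fdirctrl;
 *
 *	if (configure_fdir_flags(&conf, &fdirctrl) == 0)
 *		;	// fdirctrl now holds the pballoc, status, flex-offset,
 *			// perfect-match and drop-queue bits
 */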
/**
 * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
 *
 *  @hi_dword: Bits 31:16 mask to be bit swapped.
 *  @lo_dword: Bits 15:0  mask to be bit swapped.
 *
 *  Flow director uses several registers to store 2 x 16 bit masks with the
 *  bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
 *  mask affects the MS bit/byte of the target. This function reverses the
 *  bits in these masks.
 **/
static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
	uint32_t mask = hi_dword << 16;

	mask |= lo_dword;
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}
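
/*
 * Illustrative note (not part of the upstream driver): the four swap steps
 * above implement a full 32-bit bit reversal, mapping bit i to bit 31 - i.
 * Two worked examples:
 *
 *	reverse_fdir_bitmasks(0x0000, 0x0001) == 0x80000000
 *	reverse_fdir_bitmasks(0x0000, 0xFFFF) == 0xFFFF0000
 *
 * so a fully-specified source-port mask (lo_dword = 0xFFFF) lands in the
 * upper half of the register value, bit-reversed as the hardware expects.
 */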
/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 */
static int
fdir_set_input_mask_82599(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_masks *input_mask)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/*
	 * mask VM pool and DIPv6 since they are currently not supported
	 * mask FLEX byte, it will be set in flex_conf
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
	uint16_t dst_ipv6m = 0;
	uint16_t src_ipv6m = 0;
	volatile uint32_t *reg;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 */
	if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= IXGBE_FDIRM_L4P;

	if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (input_mask->vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}
	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = reverse_fdir_bitmasks(
			rte_be_to_cpu_16(input_mask->dst_port_mask),
			rte_be_to_cpu_16(input_mask->src_port_mask));

	/* write all the same so that UDP, TCP and SCTP use the same mask
	 * (little-endian)
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
	info->mask.src_port_mask = input_mask->src_port_mask;
	info->mask.dst_port_mask = input_mask->dst_port_mask;

	/* Store source and destination IPv4 masks (big-endian),
	 * can not use IXGBE_WRITE_REG.
	 */
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
	*reg = ~(input_mask->ipv4_mask.src_ip);
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
	*reg = ~(input_mask->ipv4_mask.dst_ip);
	info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
	info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
		/*
		 * Store source and destination IPv6 masks (bit reversed)
		 */
		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
		IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
		fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;

		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
		info->mask.src_ipv6_mask = src_ipv6m;
		info->mask.dst_ipv6_mask = dst_ipv6m;
	}

	return IXGBE_SUCCESS;
}
/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 */
static int
fdir_set_input_mask_x550(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_masks *input_mask)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/* mask VM pool and DIPv6 since they are currently not supported
	 * mask FLEX byte, it will be set in flex_conf
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
			 IXGBE_FDIRM_FLEX;
	uint32_t fdiripv6m;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
	uint16_t mac_mask;

	PMD_INIT_FUNC_TRACE();

	/* set the default UDP port for VxLAN */
	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);

	/* some bits must be set for mac vlan or tunnel mode */
	fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;

	if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (input_mask->vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}
	info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
	fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
				IXGBE_FDIRIP6M_TNI_VNI;

	mac_mask = input_mask->mac_addr_byte_mask;
	fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
			& IXGBE_FDIRIP6M_INNER_MAC;
	info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;

	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		switch (input_mask->tunnel_type_mask) {
		case 0:
			/* Mask tunnel type */
			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			break;
		case 1:
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
			return -EINVAL;
		}
		info->mask.tunnel_type_mask =
			input_mask->tunnel_type_mask;

		switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
		case 0x0:
			/* Mask vxlan id */
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
			return -EINVAL;
		}
		info->mask.tunnel_id_mask =
			input_mask->tunnel_id_mask;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);

	return IXGBE_SUCCESS;
}
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_masks *input_mask)
{
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	if (mode >= RTE_FDIR_MODE_SIGNATURE &&
	    mode <= RTE_FDIR_MODE_PERFECT)
		return fdir_set_input_mask_82599(dev, input_mask);
	else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		return fdir_set_input_mask_x550(dev, input_mask);

	PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
	return -ENOTSUP;
}
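
/*
 * Illustrative note (not part of the upstream driver): a minimal sketch of
 * the mask setup consumed by the dispatcher above, taken from the port's
 * fdir_conf before ixgbe_fdir_configure() runs. Values are examples only;
 * note that every byte of an IPv6 mask must be 0x00 or 0xFF.
 *
 *	struct rte_eth_fdir_masks *mask = &dev_conf.fdir_conf.mask;
 *
 *	mask->vlan_tci_mask = rte_cpu_to_be_16(0xEFFF); // VLAN ID + priority
 *	mask->ipv4_mask.src_ip = rte_cpu_to_be_32(0xFFFFFFFF);
 *	mask->ipv4_mask.dst_ip = rte_cpu_to_be_32(0xFFFFFFFF);
 *	mask->src_port_mask = rte_cpu_to_be_16(0xFFFF);
 *	mask->dst_port_mask = rte_cpu_to_be_16(0xFFFF);
 */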
/*
 * ixgbe_set_fdir_flex_conf - check that the flex payload and mask
 * configuration arguments are valid, and program them.
 */
static int
ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint32_t fdirm;
	uint16_t flexbytes = 0;
	uint16_t i;

	fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);

	if (conf == NULL) {
		PMD_DRV_LOG(ERR, "NULL pointer.");
		return -EINVAL;
	}

	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
			PMD_DRV_LOG(ERR, "unsupported payload type.");
			return -EINVAL;
		}
		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
		    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
		    (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
			*fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
			*fdirctrl |=
				(flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
					IXGBE_FDIRCTRL_FLEX_SHIFT;
		} else {
			PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
			return -EINVAL;
		}
	}

	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
			PMD_DRV_LOG(ERR, "flexmask should be set globally.");
			return -EINVAL;
		}
		flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
					((flex_mask->mask[1]) & 0xFF));
		if (flexbytes == UINT16_MAX)
			fdirm &= ~IXGBE_FDIRM_FLEX;
		else if (flexbytes != 0) {
			/* IXGBE_FDIRM_FLEX is set by default when set mask */
			PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
			return -EINVAL;
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
	info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
	info->flex_bytes_offset = (uint8_t)((*fdirctrl &
					IXGBE_FDIRCTRL_FLEX_MASK) >>
					IXGBE_FDIRCTRL_FLEX_SHIFT);
	return 0;
}
int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err;
	uint32_t fdirctrl, pbsize;
	int i;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOSYS;

	/* x550 supports mac-vlan and tunnel mode but other NICs not */
	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a &&
	    mode != RTE_FDIR_MODE_SIGNATURE &&
	    mode != RTE_FDIR_MODE_PERFECT)
		return -ENOSYS;

	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
	if (err)
		return err;

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced.  The new value is the current size minus
	 * flow director memory usage size.
	 */
	pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD mask");
		return err;
	}
	err = ixgbe_set_fdir_flex_conf(dev,
		&dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
		return err;
	}
	err = fdir_enable_82599(hw, fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on enabling FD.");
		return err;
	}
	return 0;
}
/*
 * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is
 * used by the IXGBE driver code.
 */
static int
ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
		union ixgbe_atr_input *input, enum rte_fdir_mode mode)
{
	input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
	input->formatted.flex_bytes = (uint16_t)(
		(fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));

	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
		break;
	default:
		PMD_DRV_LOG(ERR, " Error on flow_type input");
		return -EINVAL;
	}

	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		input->formatted.src_port =
			fdir_filter->input.flow.udp4_flow.src_port;
		input->formatted.dst_port =
			fdir_filter->input.flow.udp4_flow.dst_port;
	/* for SCTP flow type, port and verify_tag are meaningless in ixgbe. */
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		input->formatted.src_ip[0] =
			fdir_filter->input.flow.ip4_flow.src_ip;
		input->formatted.dst_ip[0] =
			fdir_filter->input.flow.ip4_flow.dst_ip;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		input->formatted.src_port =
			fdir_filter->input.flow.udp6_flow.src_port;
		input->formatted.dst_port =
			fdir_filter->input.flow.udp6_flow.dst_port;
	/* for SCTP flow type, port and verify_tag are meaningless in ixgbe. */
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		rte_memcpy(input->formatted.src_ip,
			   fdir_filter->input.flow.ipv6_flow.src_ip,
			   sizeof(input->formatted.src_ip));
		rte_memcpy(input->formatted.dst_ip,
			   fdir_filter->input.flow.ipv6_flow.dst_ip,
			   sizeof(input->formatted.dst_ip));
		break;
	default:
		break;
	}

	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(
			input->formatted.inner_mac,
			fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
			sizeof(input->formatted.inner_mac));
	} else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		rte_memcpy(
			input->formatted.inner_mac,
			fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
			sizeof(input->formatted.inner_mac));
		input->formatted.tunnel_type =
			fdir_filter->input.flow.tunnel_flow.tunnel_type;
		input->formatted.tni_vni =
			fdir_filter->input.flow.tunnel_flow.tunnel_id;
	}

	return 0;
}
/*
 * The below function is taken from the FreeBSD IXGBE drivers release
 * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
 * before returning, as the signature hash can use the full 16 bits.
 *
 * The newer driver has optimised functions for calculating bucket and
 * signature hashes. However they don't support IPv6 type packets for signature
 * filters so are not used here.
 *
 * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
 * set.
 *
 * Compute the hashes for SW ATR
 *  @stream: input bitstream to compute the hash on
 *  @key: 32-bit hash key
 **/
static uint32_t
ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
		uint32_t key)
{
	/*
	 * The algorithm is as follows:
	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
	 *    and A[n] x B[n] is bitwise AND between same length strings
	 *
	 *    K[n] is 16 bits, defined as:
	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
	 *       for n modulo 32 < 15, K[n] =
	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
	 *
	 *    S[n] is 16 bits, defined as:
	 *       for n >= 15, S[n] = S[n:n - 15]
	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
	 *
	 *    To simplify for programming, the algorithm is implemented
	 *    in software this way:
	 *
	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
	 *
	 *    for (i = 0; i < 352; i+=32)
	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
	 *
	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
	 *
	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
	 *
	 *    if (key[0])
	 *        hash[15:0] ^= Stream[15:0];
	 *
	 *    for (i = 0; i < 16; i++) {
	 *        if (key[i])
	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
	 *        if (key[i + 16])
	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
	 *    }
	 */
	__be32 common_hash_dword = 0;
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 hash_result = 0;
	u8 i;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 13; i++)
		common_hash_dword ^= atr_input->dword_stream[i];

	hi_hash_dword = IXGBE_NTOHL(common_hash_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	if (key & 0x0001)
		hash_result ^= lo_hash_dword;
	if (key & 0x00010000)
		hash_result ^= hi_hash_dword;

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* process the remaining 30 bits in the key 2 bits at a time */
	for (i = 15; i; i--) {
		if (key & (0x0001 << i))
			hash_result ^= lo_hash_dword >> i;
		if (key & (0x00010000 << i))
			hash_result ^= hi_hash_dword >> i;
	}

	return hash_result;
}
static uint32_t
atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc)
{
	if (pballoc == RTE_FDIR_PBALLOC_256K)
		return ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				PERFECT_BUCKET_256KB_HASH_MASK;
	else if (pballoc == RTE_FDIR_PBALLOC_128K)
		return ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				PERFECT_BUCKET_128KB_HASH_MASK;
	else
		return ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				PERFECT_BUCKET_64KB_HASH_MASK;
}
/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: holds the value read from the FDIRCMD register on return
 **/
static int
ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return 0;
		rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
	}

	return -ETIMEDOUT;
}
/*
 * Calculate the hash value needed for signature-match filters. In the FreeBSD
 * driver, this is done by the optimised function
 * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
 * doesn't support calculating a hash for an IPv6 filter.
 */
static uint32_t
atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
		enum rte_fdir_pballoc_type pballoc)
{
	uint32_t bucket_hash, sig_hash;

	if (pballoc == RTE_FDIR_PBALLOC_256K)
		bucket_hash = ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				SIG_BUCKET_256KB_HASH_MASK;
	else if (pballoc == RTE_FDIR_PBALLOC_128K)
		bucket_hash = ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				SIG_BUCKET_128KB_HASH_MASK;
	else
		bucket_hash = ixgbe_atr_compute_hash_82599(input,
				IXGBE_ATR_BUCKET_HASH_KEY) &
				SIG_BUCKET_64KB_HASH_MASK;

	sig_hash = ixgbe_atr_compute_hash_82599(input,
			IXGBE_ATR_SIGNATURE_HASH_KEY);

	return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
}
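
/*
 * Illustrative note (not part of the upstream driver): based on the shift
 * used above (IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT), the returned value packs
 * the two hashes the way the FDIRHASH register expects them:
 *
 *	 31             16 15              0
 *	+-----------------+-----------------+
 *	|  signature hash |   bucket hash   |
 *	+-----------------+-----------------+
 *
 * Perfect-match filters reuse the same upper half to carry the caller's
 * soft_id instead of a signature hash (see ixgbe_add_del_fdir_filter()).
 */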
/*
 * This is based on ixgbe_fdir_write_perfect_filter_82599() in
 * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
 * added, and IPv6 support also added. The hash value is also pre-calculated
 * as the pballoc value is needed to do it.
 */
static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, uint8_t queue,
		uint32_t fdircmd, uint32_t fdirhash,
		enum rte_fdir_mode mode)
{
	uint32_t fdirport, fdirvlan;
	u32 addr_low, addr_high;
	u32 tunnel_type = 0;
	int err = 0;
	volatile uint32_t *reg;

	if (mode == RTE_FDIR_MODE_PERFECT) {
		/* record the IPv4 address (big-endian)
		 * can not use IXGBE_WRITE_REG.
		 */
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
		*reg = input->formatted.src_ip[0];
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
		*reg = input->formatted.dst_ip[0];

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	} else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		   mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		/* for mac vlan and tunnel modes */
		addr_low = ((u32)input->formatted.inner_mac[0] |
			    ((u32)input->formatted.inner_mac[1] << 8) |
			    ((u32)input->formatted.inner_mac[2] << 16) |
			    ((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
			     ((u32)input->formatted.inner_mac[5] << 8));

		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
		} else {
			/* tunnel mode */
			if (input->formatted.tunnel_type !=
			    RTE_FDIR_TUNNEL_TYPE_NVGRE)
				tunnel_type = 0x80000000;
			tunnel_type |= addr_high;
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
					input->formatted.tni_vni);
		}
	}

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = input->formatted.flex_bytes;
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}
/*
 * This function is based on ixgbe_atr_add_signature_filter_82599() in
 * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
 * setting extra fields in the FDIRCMD register, and removes the code that was
 * verifying the flow_type field. According to the documentation, a flow type
 * of 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to
 * work ok...
 *
 *  Adds a signature hash filter
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @queue: queue index to direct traffic to
 *  @fdircmd: any extra flags to set in fdircmd register
 *  @fdirhash: pre-calculated hash value for the filter
 **/
static int
fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
		uint32_t fdirhash)
{
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	/* configure FDIRCMD register */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}
/*
 * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
 * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
 * that it can be used for removing signature and perfect filters.
 */
static int
fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
{
	uint32_t fdircmd = 0;
	int err = 0;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
	return err;
}
/*
 * ixgbe_add_del_fdir_filter - add or remove a flow director filter.
 * @dev: pointer to the structure rte_eth_dev
 * @fdir_filter: fdir filter entry
 * @del: 1 - delete, 0 - add
 * @update: 1 - update
 */
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_filter *fdir_filter,
		bool del,
		bool update)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fdircmd_flags;
	uint32_t fdirhash;
	union ixgbe_atr_input input;
	uint8_t queue;
	bool is_perfect = FALSE;
	int err;
	struct ixgbe_hw_fdir_info *info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (fdir_mode == RTE_FDIR_MODE_NONE)
		return -ENOTSUP;

	/*
	 * Sanity check for x550.
	 * When adding a new filter with flow type set to IPv4-other,
	 * the flow director mask should be configured beforehand,
	 * and the L4 protocol and ports are masked.
	 */
	if ((!del) &&
	    (hw->mac.type == ixgbe_mac_X550 ||
	     hw->mac.type == ixgbe_mac_X550EM_x ||
	     hw->mac.type == ixgbe_mac_X550EM_a) &&
	    (fdir_filter->input.flow_type ==
	     RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
	    (info->mask.src_port_mask != 0 ||
	     info->mask.dst_port_mask != 0)) {
		PMD_DRV_LOG(ERR, "On this device,"
			    " IPv4-other is not supported without"
			    " L4 protocol and ports masked!");
		return -ENOTSUP;
	}

	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		is_perfect = TRUE;

	memset(&input, 0, sizeof(input));

	err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
					     fdir_mode);
	if (err)
		return err;

	if (is_perfect) {
		if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
			PMD_DRV_LOG(ERR, "IPv6 is not supported in"
				    " perfect mode!");
			return -ENOTSUP;
		}
		fdirhash = atr_compute_perfect_hash_82599(&input,
				dev->data->dev_conf.fdir_conf.pballoc);
		fdirhash |= fdir_filter->soft_id <<
				IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	} else
		fdirhash = atr_compute_sig_hash_82599(&input,
				dev->data->dev_conf.fdir_conf.pballoc);

	if (del) {
		err = fdir_erase_filter_82599(hw, fdirhash);
		if (err < 0)
			PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
		else
			PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
		return err;
	}
	/* add or update an fdir filter*/
	fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
	if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
		if (is_perfect) {
			queue = dev->data->dev_conf.fdir_conf.drop_queue;
			fdircmd_flags |= IXGBE_FDIRCMD_DROP;
		} else {
			PMD_DRV_LOG(ERR, "Drop option is not supported in"
				    " signature mode.");
			return -EINVAL;
		}
	} else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
		   fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
		queue = (uint8_t)fdir_filter->action.rx_queue;
	else
		return -EINVAL;

	if (is_perfect) {
		err = fdir_write_perfect_filter_82599(hw, &input, queue,
						      fdircmd_flags, fdirhash,
						      fdir_mode);
	} else {
		err = fdir_add_signature_filter_82599(hw, &input, queue,
						      fdircmd_flags, fdirhash);
	}
	if (err < 0)
		PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
	else
		PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
	return err;
}
static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	int ret;

	ret = ixgbe_reinit_fdir_tables_82599(hw);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
		return ret;
	}

	info->f_add = 0;
	info->f_remove = 0;
	info->add = 0;
	info->remove = 0;

	return ret;
}
#define FDIRENTRIES_NUM_SHIFT 10
static void
ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t fdirctrl, max_num;
	uint8_t offset;

	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
			IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);

	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
			(fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->guarant_spc = max_num;
	else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
		fdir_info->guarant_spc = max_num * 4;

	fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
	fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
	fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
	IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
			  fdir_info->mask.ipv6_mask.src_ip);
	IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
			  fdir_info->mask.ipv6_mask.dst_ip);
	fdir_info->mask.src_port_mask = info->mask.src_port_mask;
	fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
	fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
	fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
	fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
	fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;

	if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
	    fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_info->flow_types_mask[0] = 0;
	else
		fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;

	fdir_info->flex_payload_unit = sizeof(uint16_t);
	fdir_info->max_flex_payload_segment_num = 1;
	fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
	fdir_info->flex_conf.nb_payloads = 1;
	fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
	fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
	fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
	fdir_info->flex_conf.nb_flexmasks = 1;
	fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
	fdir_info->flex_conf.flex_mask[0].mask[0] =
		(uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
	fdir_info->flex_conf.flex_mask[0].mask[1] =
		(uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
}
static void
ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	uint32_t reg, max_num;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	/* Get the information from registers */
	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
	info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
				     IXGBE_FDIRFREE_COLL_SHIFT);
	info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
				IXGBE_FDIRFREE_FREE_SHIFT);

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
	info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
				   IXGBE_FDIRLEN_MAXHASH_SHIFT);
	info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
				 IXGBE_FDIRLEN_MAXLEN_SHIFT);

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
			IXGBE_FDIRUSTAT_REMOVE_SHIFT;
	info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
			IXGBE_FDIRUSTAT_ADD_SHIFT;

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
	info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
			IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
	info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
			IXGBE_FDIRFSTAT_FADD_SHIFT;

	/* Copy the new information in the fdir parameter */
	fdir_stats->collision = info->collision;
	fdir_stats->free = info->free;
	fdir_stats->maxhash = info->maxhash;
	fdir_stats->maxlen = info->maxlen;
	fdir_stats->remove = info->remove;
	fdir_stats->add = info->add;
	fdir_stats->f_remove = info->f_remove;
	fdir_stats->f_add = info->f_add;

	reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
			(reg & FDIRCTRL_PBALLOC_MASK)));
	if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
	    fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
		fdir_stats->guarant_cnt = max_num - fdir_stats->free;
	else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
		fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
}
/*
 * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
 * @dev: pointer to the structure rte_eth_dev
 * @filter_op: operation to be taken
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op, void *arg)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
		break;
	case RTE_ETH_FILTER_UPDATE:
		ret = ixgbe_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
		break;
	case RTE_ETH_FILTER_FLUSH:
		ret = ixgbe_fdir_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
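
/*
 * Illustrative note (not part of the upstream driver): a minimal
 * application-side sketch of adding a perfect-match IPv4/UDP filter that
 * reaches ixgbe_fdir_ctrl_func() through the legacy filter API. Field
 * values (port_id, addresses, ports, queue) are examples only.
 *
 *	struct rte_eth_fdir_filter f;
 *
 *	memset(&f, 0, sizeof(f));
 *	f.soft_id = 1;
 *	f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	f.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(2, 2, 2, 3));
 *	f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(2, 2, 2, 5));
 *	f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(32);
 *	f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(32);
 *	f.action.behavior = RTE_ETH_FDIR_ACCEPT;
 *	f.action.report_status = RTE_ETH_FDIR_REPORT_ID;
 *	f.action.rx_queue = 3;
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_ADD, &f);
 */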