/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
#define FDIRCTRL_PBALLOC_MASK           0x03

/* For calculating memory required for FDIR filters */
#define PBALLOC_SIZE_SHIFT              15

/* Number of bits used to mask bucket hash for different pballoc sizes */
#define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF  /* 11 bits */
#define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF  /* 12 bits */
#define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF  /* 13 bits */
#define SIG_BUCKET_64KB_HASH_MASK       0x1FFF  /* 13 bits */
#define SIG_BUCKET_128KB_HASH_MASK      0x3FFF  /* 14 bits */
#define SIG_BUCKET_256KB_HASH_MASK      0x7FFF  /* 15 bits */
#define IXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
#define IXGBE_FDIR_MAX_FLEX_LEN         2 /* len in bytes of flexbytes */
#define IXGBE_MAX_FLX_SOURCE_OFF        62
#define IXGBE_FDIRCTRL_FLEX_MASK        (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
#define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
#define IXGBE_FDIR_FLOW_TYPES ( \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
        uint8_t ipv6_addr[16]; \
        uint8_t i; \
        rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
        (ipv6m) = 0; \
        for (i = 0; i < sizeof(ipv6_addr); i++) { \
                if (ipv6_addr[i] == UINT8_MAX) \
                        (ipv6m) |= 1 << i; \
                else if (ipv6_addr[i] != 0) { \
                        PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
                        return -EINVAL; \
                } \
        } \
} while (0)
#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
        uint8_t ipv6_addr[16]; \
        uint8_t i; \
        for (i = 0; i < sizeof(ipv6_addr); i++) { \
                if ((ipv6m) & (1 << i)) \
                        ipv6_addr[i] = UINT8_MAX; \
                else \
                        ipv6_addr[i] = 0; \
        } \
        rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
} while (0)
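
/*
 * A brief worked example of the two macros above (illustrative values only):
 * an IPv6 mask whose first two bytes are 0xFF and whose remaining fourteen
 * bytes are zero folds down to the 16-bit value 0x0003, one bit per address
 * byte, and the reverse macro expands it back:
 *
 *     uint8_t addr[16] = { 0xFF, 0xFF };  // bytes 2..15 are zero
 *     uint16_t m = 0;
 *     IPV6_ADDR_TO_MASK(addr, m);         // m == 0x0003
 *     IPV6_MASK_TO_ADDR(m, addr);         // restores 0xFF, 0xFF, 0x00, ...
 *
 * Only all-ones or all-zero mask bytes are representable; any other byte
 * value makes IPV6_ADDR_TO_MASK log an error and fail with -EINVAL.
 */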
#define DEFAULT_VXLAN_PORT 4789
#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
static int ixgbe_fdir_filter_to_atr_input(
                const struct rte_eth_fdir_filter *fdir_filter,
                union ixgbe_atr_input *input,
                enum rte_fdir_mode mode);
static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
                uint32_t key);
static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
                enum rte_fdir_pballoc_type pballoc);
static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                enum rte_fdir_pballoc_type pballoc);
static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                union ixgbe_atr_input *input, uint8_t queue,
                uint32_t fdircmd, uint32_t fdirhash,
                enum rte_fdir_mode mode);
static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
                uint32_t fdirhash);
static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_filter *fdir_filter,
                bool del,
                bool update);
static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
                struct rte_eth_fdir_info *fdir_info);
static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_fdir_stats *fdir_stats);
/*
 * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
 * It adds extra configuration of fdirctrl that is common for all filter types.
 *
 *  Initialize Flow Director control registers
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register
 */
static int
fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        /* Prime the keys for hashing */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

        /*
         * Continue setup of fdirctrl register bits:
         *  Set the maximum length per hash bucket to 0xA filters
         *  Send interrupt when 64 filters are left
         */
        fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
                    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

        /*
         * Poll init-done after we write the register.  Estimated times:
         *      10G: PBALLOC = 11b, timing is 60us
         *       1G: PBALLOC = 11b, timing is 600us
         *     100M: PBALLOC = 11b, timing is 6ms
         *
         *     Multiply these timings by 4 if under full Rx load
         *
         * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
         * 1 msec per poll time. If we're at line rate and drop to 100M, then
         * this might not finish in our poll time, but we can live with that
         * for now.
         */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
        IXGBE_WRITE_FLUSH(hw);
        for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
                                   IXGBE_FDIRCTRL_INIT_DONE)
                        break;
                msec_delay(1);
        }

        if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
                PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
                return -ETIMEDOUT;
        }
        return 0;
}
/*
 * Set appropriate bits in fdirctrl for: variable reporting levels, moving
 * flexbytes matching field, and drop queue (only for perfect matching mode).
 */
static int
configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
{
        *fdirctrl = 0;

        switch (conf->pballoc) {
        case RTE_FDIR_PBALLOC_64K:
                /* 8k - 1 signature filters */
                *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
                break;
        case RTE_FDIR_PBALLOC_128K:
                /* 16k - 1 signature filters */
                *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
                break;
        case RTE_FDIR_PBALLOC_256K:
                /* 32k - 1 signature filters */
                *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
                break;
        default:
                /* bad value */
                PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
                return -EINVAL;
        };

        /* status flags: write hash & swindex in the rx descriptor */
        switch (conf->status) {
        case RTE_FDIR_NO_REPORT_STATUS:
                /* do nothing, default mode */
                break;
        case RTE_FDIR_REPORT_STATUS:
                /* report status when the packet matches a fdir rule */
                *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
                break;
        case RTE_FDIR_REPORT_STATUS_ALWAYS:
                /* always report status */
                *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
                break;
        default:
                /* bad value */
                PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
                return -EINVAL;
        };

        *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
                     IXGBE_FDIRCTRL_FLEX_SHIFT;

        if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
            conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
                *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
                *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
                if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
                        *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
                                        << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
                else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
                        *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
                                        << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
        }

        return 0;
}
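
/*
 * For illustration (values are hypothetical, not from this file), a 64 KB
 * perfect-match configuration such as
 *
 *     struct rte_fdir_conf conf = {
 *             .mode = RTE_FDIR_MODE_PERFECT,
 *             .pballoc = RTE_FDIR_PBALLOC_64K,
 *             .status = RTE_FDIR_REPORT_STATUS,
 *             .drop_queue = 127,
 *     };
 *
 * yields an fdirctrl with PBALLOC_64K, REPORT_STATUS, the default flex
 * offset (12 bytes / sizeof(uint16_t) = 6 words), PERFECT_MATCH and drop
 * queue 127 set.
 */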
/*
 * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
 *
 *  @hi_dword: Bits 31:16 mask to be bit swapped.
 *  @lo_dword: Bits 15:0  mask to be bit swapped.
 *
 *  Flow director uses several registers to store 2 x 16 bit masks with the
 *  bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
 *  mask affects the MS bit/byte of the target. This function reverses the
 *  bits in these masks.
 */
static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
        uint32_t mask = hi_dword << 16;

        mask |= lo_dword;
        mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
        mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
        mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
        return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}
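
/*
 * Worked example: reverse_fdir_bitmasks(0x0001, 0x0000) first builds the
 * 32-bit value 0x00010000 (only bit 16 set); the four swap steps perform a
 * full 32-bit bit reversal, so bit 16 moves to bit 31 - 16 = 15 and the
 * result is 0x00008000. A full port mask of (0xFFFF, 0xFFFF) reverses to
 * itself, 0xFFFFFFFF.
 */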
/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 */
static int
fdir_set_input_mask_82599(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        /*
         * mask VM pool and DIPv6 since they are currently not supported
         * mask FLEX byte, it will be set in flex_conf
         */
        uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
        uint32_t fdirtcpm;  /* TCP source and destination port masks. */
        uint32_t fdiripv6m; /* IPv6 source and destination masks. */
        uint16_t dst_ipv6m = 0;
        uint16_t src_ipv6m = 0;
        volatile uint32_t *reg;

        PMD_INIT_FUNC_TRACE();

        /*
         * Program the relevant mask registers. If src/dst_port or src/dst_addr
         * are zero, then assume a full mask for that field. Also assume that
         * a VLAN of 0 is unspecified, so mask that out as well. L4type
         * cannot be masked out in this implementation.
         */
        if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
                /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
                fdirm |= IXGBE_FDIRM_L4P;

        if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
                /* mask VLAN Priority */
                fdirm |= IXGBE_FDIRM_VLANP;
        else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
                /* mask VLAN ID only */
                fdirm |= IXGBE_FDIRM_VLANID;
        else if (input_mask->vlan_tci_mask == 0)
                /* mask VLAN ID and Priority */
                fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
        else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
                PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
                return -EINVAL;
        }
        info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

        /* store the TCP/UDP port masks, bit reversed from port layout */
        fdirtcpm = reverse_fdir_bitmasks(
                        rte_be_to_cpu_16(input_mask->dst_port_mask),
                        rte_be_to_cpu_16(input_mask->src_port_mask));

        /* write all the same so that UDP, TCP and SCTP use the same mask
         * (little-endian)
         */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
        info->mask.src_port_mask = input_mask->src_port_mask;
        info->mask.dst_port_mask = input_mask->dst_port_mask;

        /* Store source and destination IPv4 masks (big-endian),
         * cannot use IXGBE_WRITE_REG.
         */
        reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
        *reg = ~(input_mask->ipv4_mask.src_ip);
        reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
        *reg = ~(input_mask->ipv4_mask.dst_ip);
        info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
        info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;

        if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
                /*
                 * Store source and destination IPv6 masks (bit reversed)
                 */
                IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
                IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
                fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;

                IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
                info->mask.src_ipv6_mask = src_ipv6m;
                info->mask.dst_ipv6_mask = dst_ipv6m;
        }

        return IXGBE_SUCCESS;
}
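
/*
 * Hedged usage sketch (field names from rte_eth_fdir_masks): an application
 * that wants exact matching on IPv4 addresses and L4 ports while ignoring
 * VLAN would configure
 *
 *     struct rte_eth_fdir_masks mask = {
 *             .vlan_tci_mask = 0,
 *             .ipv4_mask = { .src_ip = UINT32_MAX, .dst_ip = UINT32_MAX },
 *             .src_port_mask = UINT16_MAX,
 *             .dst_port_mask = UINT16_MAX,
 *     };
 *
 * FDIRM then masks VLAN ID and priority, and because the driver writes the
 * inverted masks, the all-ones inputs land as zeroes in FDIRTCPM/FDIRUDPM/
 * FDIRSCTPM and FDIRSIP4M/FDIRDIP4M, i.e. no port or address bit is
 * excluded from the match.
 */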
/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 */
static int
fdir_set_input_mask_x550(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        /* mask VM pool and DIPv6 since they are currently not supported
         * mask FLEX byte, it will be set in flex_conf
         */
        uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
                         IXGBE_FDIRM_FLEX;
        uint32_t fdiripv6m;
        enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
        uint16_t mac_mask;

        PMD_INIT_FUNC_TRACE();

        /* set the default UDP port for VxLAN */
        if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
                IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);

        /* some bits must be set for mac vlan or tunnel mode */
        fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;

        if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
                /* mask VLAN Priority */
                fdirm |= IXGBE_FDIRM_VLANP;
        else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
                /* mask VLAN ID only */
                fdirm |= IXGBE_FDIRM_VLANID;
        else if (input_mask->vlan_tci_mask == 0)
                /* mask VLAN ID and Priority */
                fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
        else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
                PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
                return -EINVAL;
        }
        info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

        fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
        fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
        if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
                fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
                                IXGBE_FDIRIP6M_TNI_VNI;

        if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
                mac_mask = input_mask->mac_addr_byte_mask;
                fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
                                & IXGBE_FDIRIP6M_INNER_MAC;
                info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;

                switch (input_mask->tunnel_type_mask) {
                case 0:
                        /* Mask tunnel type */
                        fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
                        break;
                case 1:
                        break;
                default:
                        PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
                        return -EINVAL;
                }
                info->mask.tunnel_type_mask =
                        input_mask->tunnel_type_mask;

                switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
                case 0x0:
                        /* Mask vxlan id */
                        fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
                        break;
                case 0x00FFFFFF:
                        fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
                        break;
                case 0xFFFFFFFF:
                        break;
                default:
                        PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
                        return -EINVAL;
                }
                info->mask.tunnel_id_mask =
                        input_mask->tunnel_id_mask;
        }

        IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);

        return IXGBE_SUCCESS;
}
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask)
{
        enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

        if (mode >= RTE_FDIR_MODE_SIGNATURE &&
            mode <= RTE_FDIR_MODE_PERFECT)
                return fdir_set_input_mask_82599(dev, input_mask);
        else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
                 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                return fdir_set_input_mask_x550(dev, input_mask);

        PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
        return -ENOTSUP;
}
/*
 * ixgbe_set_fdir_flex_conf - check if the flex payload and mask configuration
 * arguments are valid and program them
 */
static int
ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        const struct rte_eth_flex_payload_cfg *flex_cfg;
        const struct rte_eth_fdir_flex_mask *flex_mask;
        uint32_t fdirm;
        uint16_t flexbytes = 0;
        uint16_t i;

        fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);

        if (conf == NULL) {
                PMD_DRV_LOG(ERR, "NULL pointer.");
                return -EINVAL;
        }

        for (i = 0; i < conf->nb_payloads; i++) {
                flex_cfg = &conf->flex_set[i];
                if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
                        PMD_DRV_LOG(ERR, "unsupported payload type.");
                        return -EINVAL;
                }
                if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
                    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
                    (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
                        *fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
                        *fdirctrl |=
                                (flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
                                        IXGBE_FDIRCTRL_FLEX_SHIFT;
                } else {
                        PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
                        return -EINVAL;
                }
        }

        for (i = 0; i < conf->nb_flexmasks; i++) {
                flex_mask = &conf->flex_mask[i];
                if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
                        PMD_DRV_LOG(ERR, "flexmask should be set globally.");
                        return -EINVAL;
                }
                flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
                                        ((flex_mask->mask[1]) & 0xFF));
                if (flexbytes == UINT16_MAX)
                        fdirm &= ~IXGBE_FDIRM_FLEX;
                else if (flexbytes != 0) {
                        /* IXGBE_FDIRM_FLEX is set by default when set mask */
                        PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
                        return -EINVAL;
                }
        }
        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
        info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
        info->flex_bytes_offset = (uint8_t)((*fdirctrl &
                                        IXGBE_FDIRCTRL_FLEX_MASK) >>
                                        IXGBE_FDIRCTRL_FLEX_SHIFT);
        return 0;
}
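
/*
 * Illustrative flex configuration accepted by the checks above: one raw
 * payload segment covering two adjacent bytes at an even offset, plus a
 * global (RTE_ETH_FLOW_UNKNOWN) all-ones mask so the flex bytes take part
 * in matching. A sketch, assuming a caller-owned rte_eth_fdir_flex_conf:
 *
 *     conf.nb_payloads = 1;
 *     conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
 *     conf.flex_set[0].src_offset[0] = 12;
 *     conf.flex_set[0].src_offset[1] = 13;
 *     conf.nb_flexmasks = 1;
 *     conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
 *     conf.flex_mask[0].mask[0] = 0xFF;
 *     conf.flex_mask[0].mask[1] = 0xFF;
 *
 * The 0xFFFF mask clears IXGBE_FDIRM_FLEX so the two flex bytes are
 * compared rather than ignored.
 */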
int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err;
        uint32_t fdirctrl, pbsize;
        int i;
        enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

        PMD_INIT_FUNC_TRACE();

        if (hw->mac.type != ixgbe_mac_82599EB &&
            hw->mac.type != ixgbe_mac_X540 &&
            hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a)
                return -ENOSYS;

        /* x550 supports mac-vlan and tunnel mode but other NICs not */
        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a &&
            mode != RTE_FDIR_MODE_SIGNATURE &&
            mode != RTE_FDIR_MODE_PERFECT)
                return -ENOSYS;

        err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
        if (err)
                return err;

        /*
         * Before enabling Flow Director, the Rx Packet Buffer size
         * must be reduced. The new value is the current size minus
         * flow director memory usage size.
         */
        pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
            (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

        /*
         * The defaults in the HW for RX PB 1-7 are not zero and so should be
         * initialized to zero for non DCB mode otherwise actual total RX PB
         * would be bigger than programmed and filter space would run into
         * the PB 0 region.
         */
        for (i = 1; i < 8; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

        err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
        if (err < 0) {
                PMD_INIT_LOG(ERR, " Error on setting FD mask");
                return err;
        }
        err = ixgbe_set_fdir_flex_conf(dev,
                &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
        if (err < 0) {
                PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
                return err;
        }
        err = fdir_enable_82599(hw, fdirctrl);
        if (err < 0) {
                PMD_INIT_LOG(ERR, " Error on enabling FD.");
                return err;
        }
        return 0;
}
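
/*
 * Typical call path, as a hedged sketch (not code from this file): the
 * application fills dev_conf.fdir_conf before rte_eth_dev_configure(), and
 * the PMD runs ixgbe_fdir_configure() as part of device start, e.g.
 *
 *     struct rte_eth_conf port_conf = { 0 };
 *     port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *     port_conf.fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;
 *     port_conf.fdir_conf.drop_queue = 127;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */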
/*
 * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is
 * used by the IXGBE driver code.
 */
static int
ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
                union ixgbe_atr_input *input, enum rte_fdir_mode mode)
{
        input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
        input->formatted.flex_bytes = (uint16_t)(
                (fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
                (fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));

        switch (fdir_filter->input.flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
                break;
        default:
                /* should not happen! */
                PMD_DRV_LOG(ERR, "unknown flow type--%d!",
                            fdir_filter->input.flow_type);
                return -ENOTSUP;
        }

        switch (fdir_filter->input.flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
                input->formatted.src_port =
                        fdir_filter->input.flow.udp4_flow.src_port;
                input->formatted.dst_port =
                        fdir_filter->input.flow.udp4_flow.dst_port;
        /* for SCTP flow type, port and verify_tag are meaningless in ixgbe. */
        case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
                input->formatted.src_ip[0] =
                        fdir_filter->input.flow.ip4_flow.src_ip;
                input->formatted.dst_ip[0] =
                        fdir_filter->input.flow.ip4_flow.dst_ip;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
                input->formatted.src_port =
                        fdir_filter->input.flow.udp6_flow.src_port;
                input->formatted.dst_port =
                        fdir_filter->input.flow.udp6_flow.dst_port;
        /* for SCTP flow type, port and verify_tag are meaningless in ixgbe. */
        case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
                rte_memcpy(input->formatted.src_ip,
                           fdir_filter->input.flow.ipv6_flow.src_ip,
                           sizeof(input->formatted.src_ip));
                rte_memcpy(input->formatted.dst_ip,
                           fdir_filter->input.flow.ipv6_flow.dst_ip,
                           sizeof(input->formatted.dst_ip));
                break;
        default:
                break;
        }

        if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
                rte_memcpy(
                        input->formatted.inner_mac,
                        fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
                        sizeof(input->formatted.inner_mac));
        } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
                rte_memcpy(
                        input->formatted.inner_mac,
                        fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
                        sizeof(input->formatted.inner_mac));
                input->formatted.tunnel_type =
                        fdir_filter->input.flow.tunnel_flow.tunnel_type;
                input->formatted.tni_vni =
                        fdir_filter->input.flow.tunnel_flow.tunnel_id;
        }

        return 0;
}
/*
 * The below function is taken from the FreeBSD IXGBE drivers release
 * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
 * before returning, as the signature hash can use 16bits.
 *
 * The newer driver has optimised functions for calculating bucket and
 * signature hashes. However they don't support IPv6 type packets for signature
 * filters so are not used here.
 *
 * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
 * set.
 *
 * Compute the hashes for SW ATR
 *  @stream: input bitstream to compute the hash on
 *  @key: 32-bit hash key
 */
static uint32_t
ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
                uint32_t key)
{
        /*
         * The algorithm is as follows:
         *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
         *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
         *    and A[n] x B[n] is bitwise AND between same length strings
         *
         *    K[n] is 16 bits, defined as:
         *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
         *       for n modulo 32 < 15, K[n] =
         *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
         *
         *    S[n] is 16 bits, defined as:
         *       for n >= 15, S[n] = S[n:n - 15]
         *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
         *
         *    To simplify for programming, the algorithm is implemented
         *    in software this way:
         *
         *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
         *
         *    for (i = 0; i < 352; i+=32)
         *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
         *
         *    lo_hash_dword[15:0]  ^= Stream[15:0];
         *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
         *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
         *
         *    hi_hash_dword[31:0]  ^= Stream[351:320];
         *
         *    if (key[0])
         *        hash[15:0] ^= Stream[15:0];
         *
         *    for (i = 0; i < 16; i++) {
         *        if (key[i])
         *            hash[15:0] ^= lo_hash_dword[(i+15):i];
         *        if (key[i + 16])
         *            hash[15:0] ^= hi_hash_dword[(i+15):i];
         *    }
         */
        __be32 common_hash_dword = 0;
        u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
        u32 hash_result = 0;
        u32 i;

        /* record the flow_vm_vlan bits as they are a key part to the hash */
        flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);

        /* generate common hash dword */
        for (i = 1; i <= 13; i++)
                common_hash_dword ^= atr_input->dword_stream[i];

        hi_hash_dword = IXGBE_NTOHL(common_hash_dword);

        /* low dword is word swapped version of common */
        lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

        /* apply flow ID/VM pool/VLAN ID bits to hash words */
        hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

        /* Process bits 0 and 16 */
        if (key & 0x0001)
                hash_result ^= lo_hash_dword;
        if (key & 0x00010000)
                hash_result ^= hi_hash_dword;

        /*
         * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
         * delay this because bit 0 of the stream should not be processed
         * so we do not add the vlan until after bit 0 was processed
         */
        lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

        /* process the remaining 30 bits in the key 2 bits at a time */
        for (i = 15; i; i--) {
                if (key & (0x0001 << i))
                        hash_result ^= lo_hash_dword >> i;
                if (key & (0x00010000 << i))
                        hash_result ^= hi_hash_dword >> i;
        }

        return hash_result;
}
static uint32_t
atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                enum rte_fdir_pballoc_type pballoc)
{
        if (pballoc == RTE_FDIR_PBALLOC_256K)
                return ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                PERFECT_BUCKET_256KB_HASH_MASK;
        else if (pballoc == RTE_FDIR_PBALLOC_128K)
                return ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                PERFECT_BUCKET_128KB_HASH_MASK;
        else
                return ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                PERFECT_BUCKET_64KB_HASH_MASK;
}
/*
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 */
static inline int
ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
{
        int i;

        for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
                *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
                if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
                        return 0;
                rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
        }

        return -ETIMEDOUT;
}
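
/*
 * Worst-case wait, assuming the base code's IXGBE_FDIRCMD_CMD_POLL of 10
 * polls: 10 * IXGBE_FDIRCMD_CMD_INTERVAL_US = 100 us per programming
 * command before -ETIMEDOUT is returned to the caller.
 */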
/*
 * Calculate the hash value needed for signature-match filters. In the FreeBSD
 * driver, this is done by the optimised function
 * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
 * doesn't support calculating a hash for an IPv6 filter.
 */
static uint32_t
atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
                enum rte_fdir_pballoc_type pballoc)
{
        uint32_t bucket_hash, sig_hash;

        if (pballoc == RTE_FDIR_PBALLOC_256K)
                bucket_hash = ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                SIG_BUCKET_256KB_HASH_MASK;
        else if (pballoc == RTE_FDIR_PBALLOC_128K)
                bucket_hash = ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                SIG_BUCKET_128KB_HASH_MASK;
        else
                bucket_hash = ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                SIG_BUCKET_64KB_HASH_MASK;

        sig_hash = ixgbe_atr_compute_hash_82599(input,
                        IXGBE_ATR_SIGNATURE_HASH_KEY);

        return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
}
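
/*
 * Layout of the resulting FDIRHASH value for signature filters, per the
 * shift above: bits [14:0] carry the bucket hash (at most 15 bits, for the
 * 256 KB pballoc) and bits [31:16] carry the signature hash. For example, a
 * bucket hash of 0x1234 and a signature hash of 0xABCD combine to
 * 0xABCD1234.
 */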
/*
 * This is based on ixgbe_fdir_write_perfect_filter_82599() in
 * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
 * added, and IPv6 support also added. The hash value is also pre-calculated
 * as the pballoc value is needed to do it.
 */
static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                union ixgbe_atr_input *input, uint8_t queue,
                uint32_t fdircmd, uint32_t fdirhash,
                enum rte_fdir_mode mode)
{
        uint32_t fdirport, fdirvlan;
        u32 addr_low, addr_high;
        u32 tunnel_type = 0;
        int err = 0;
        volatile uint32_t *reg;

        if (mode == RTE_FDIR_MODE_PERFECT) {
                /* record the IPv4 address (big-endian),
                 * cannot use IXGBE_WRITE_REG.
                 */
                reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
                *reg = input->formatted.src_ip[0];
                reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
                *reg = input->formatted.dst_ip[0];

                /* record source and destination port (little-endian)*/
                fdirport = IXGBE_NTOHS(input->formatted.dst_port);
                fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
                fdirport |= IXGBE_NTOHS(input->formatted.src_port);
                IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
        } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
                   mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
                /* for mac vlan and tunnel modes */
                addr_low = ((u32)input->formatted.inner_mac[0] |
                            ((u32)input->formatted.inner_mac[1] << 8) |
                            ((u32)input->formatted.inner_mac[2] << 16) |
                            ((u32)input->formatted.inner_mac[3] << 24));
                addr_high = ((u32)input->formatted.inner_mac[4] |
                             ((u32)input->formatted.inner_mac[5] << 8));

                if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
                } else {
                        /* tunnel mode */
                        if (input->formatted.tunnel_type !=
                            RTE_FDIR_TUNNEL_TYPE_NVGRE)
                                tunnel_type = 0x80000000;
                        tunnel_type |= addr_high;
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
                                        input->formatted.tni_vni);
                }
        }

        /* record vlan (little-endian) and flex_bytes(big-endian) */
        fdirvlan = input->formatted.flex_bytes;
        fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
        fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

        /* configure FDIRHASH register */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

        /*
         * flush all previous writes to make certain registers are
         * programmed prior to issuing the command
         */
        IXGBE_WRITE_FLUSH(hw);

        /* configure FDIRCMD register */
        fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
                   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
        fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
        fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
        fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

        PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
        if (err < 0)
                PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

        return err;
}
/*
 * This function is based on ixgbe_atr_add_signature_filter_82599() in
 * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
 * setting extra fields in the FDIRCMD register, and removes the code that was
 * verifying the flow_type field. According to the documentation, a flow type
 * of 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to
 * work ok for this type of filter.
 *
 * Adds a signature hash filter
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @queue: queue index to direct traffic to
 *  @fdircmd: any extra flags to set in fdircmd register
 *  @fdirhash: pre-calculated hash value for the filter
 */
static int
fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
                uint32_t fdirhash)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        /* configure FDIRCMD register */
        fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
                   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
        fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
        fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

        PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
        if (err < 0)
                PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

        return err;
}
/*
 * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
 * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
 * that it can be used for removing signature and perfect filters.
 */
static int
fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
{
        uint32_t fdircmd = 0;
        int err = 0;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

        /* flush hash to HW */
        IXGBE_WRITE_FLUSH(hw);

        /* Query if filter is present */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
                return err;
        }

        /* if filter exists in hardware then remove it */
        if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
                IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
                IXGBE_WRITE_FLUSH(hw);
                IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
                                IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
        }
        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
        if (err < 0)
                PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
        return err;
}
static inline struct ixgbe_fdir_filter *
ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
                union ixgbe_atr_input *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
        if (ret < 0)
                return NULL;
        return fdir_info->hash_map[ret];
}

static inline int
ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
                struct ixgbe_fdir_filter *fdir_filter)
{
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_handle,
                               &fdir_filter->ixgbe_fdir);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir filter to hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = fdir_filter;
        TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
        return 0;
}

static inline int
ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
                union ixgbe_atr_input *key)
{
        int ret;
        struct ixgbe_fdir_filter *fdir_filter;

        ret = rte_hash_del_key(fdir_info->hash_handle, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
                return ret;
        }
        fdir_filter = fdir_info->hash_map[ret];
        fdir_info->hash_map[ret] = NULL;
        TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
        rte_free(fdir_filter);
        return 0;
}
/*
 * ixgbe_add_del_fdir_filter - add or remove a flow director filter.
 * @dev: pointer to the structure rte_eth_dev
 * @fdir_filter: fdir filter entry
 * @del: 1 - delete, 0 - add
 * @update: 1 - update
 */
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_filter *fdir_filter,
                bool del,
                bool update)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fdircmd_flags;
        uint32_t fdirhash;
        union ixgbe_atr_input input;
        uint8_t queue;
        bool is_perfect = FALSE;
        int err;
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
        struct ixgbe_fdir_filter *node;
        bool add_node = FALSE;

        if (fdir_mode == RTE_FDIR_MODE_NONE)
                return -ENOTSUP;

        /*
         * Sanity check for x550.
         * When adding a new filter with flow type set to IPv4-other,
         * the flow director mask should be configured before,
         * and the L4 protocol and ports are masked.
         */
        if ((!del) &&
            (hw->mac.type == ixgbe_mac_X550 ||
             hw->mac.type == ixgbe_mac_X550EM_x ||
             hw->mac.type == ixgbe_mac_X550EM_a) &&
            (fdir_filter->input.flow_type ==
             RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
            (info->mask.src_port_mask != 0 ||
             info->mask.dst_port_mask != 0)) {
                PMD_DRV_LOG(ERR, "By this device,"
                            " IPv4-other is not supported without"
                            " L4 protocol and ports masked!");
                return -ENOTSUP;
        }

        if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
            fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                is_perfect = TRUE;

        memset(&input, 0, sizeof(input));

        err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
                                             fdir_mode);
        if (err)
                return err;

        if (is_perfect) {
                if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
                        PMD_DRV_LOG(ERR, "IPv6 is not supported in"
                                    " perfect mode!");
                        return -ENOTSUP;
                }
                fdirhash = atr_compute_perfect_hash_82599(&input,
                                dev->data->dev_conf.fdir_conf.pballoc);
                fdirhash |= fdir_filter->soft_id <<
                                IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
        } else
                fdirhash = atr_compute_sig_hash_82599(&input,
                                dev->data->dev_conf.fdir_conf.pballoc);

        if (del) {
                err = ixgbe_remove_fdir_filter(info, &input);
                if (err < 0)
                        return err;

                err = fdir_erase_filter_82599(hw, fdirhash);
                if (err < 0)
                        PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
                else
                        PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
                return err;
        }
        /* add or update an fdir filter*/
        fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
        if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
                if (is_perfect) {
                        queue = dev->data->dev_conf.fdir_conf.drop_queue;
                        fdircmd_flags |= IXGBE_FDIRCMD_DROP;
                } else {
                        PMD_DRV_LOG(ERR, "Drop option is not supported in"
                                    " signature mode.");
                        return -EINVAL;
                }
        } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
                   fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
                queue = (uint8_t)fdir_filter->action.rx_queue;
        else
                return -EINVAL;

        node = ixgbe_fdir_filter_lookup(info, &input);
        if (node) {
                if (update) {
                        node->fdirflags = fdircmd_flags;
                        node->fdirhash = fdirhash;
                        node->queue = queue;
                } else {
                        PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
                        return -EINVAL;
                }
        } else {
                add_node = TRUE;
                node = rte_zmalloc("ixgbe_fdir",
                                   sizeof(struct ixgbe_fdir_filter),
                                   0);
                if (!node)
                        return -ENOMEM;
                (void)rte_memcpy(&node->ixgbe_fdir,
                                 &input,
                                 sizeof(union ixgbe_atr_input));
                node->fdirflags = fdircmd_flags;
                node->fdirhash = fdirhash;
                node->queue = queue;

                err = ixgbe_insert_fdir_filter(info, node);
                if (err < 0)
                        return err;
        }

        if (is_perfect)
                err = fdir_write_perfect_filter_82599(hw, &input, queue,
                                                      fdircmd_flags, fdirhash,
                                                      fdir_mode);
        else
                err = fdir_add_signature_filter_82599(hw, &input, queue,
                                                      fdircmd_flags, fdirhash);
        if (err < 0) {
                PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");

                if (add_node)
                        (void)ixgbe_remove_fdir_filter(info, &input);
        } else {
                PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
        }

        return err;
}
static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        int ret;

        ret = ixgbe_reinit_fdir_tables_82599(hw);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
                return ret;
        }

        info->f_add = 0;
        info->f_remove = 0;
        info->add = 0;
        info->remove = 0;

        return ret;
}
#define FDIRENTRIES_NUM_SHIFT 10
static void
ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        uint32_t fdirctrl, max_num;
        uint8_t offset;

        fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
        offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
                        IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);

        fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
        max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
                        (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
        if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
            fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                fdir_info->guarant_spc = max_num;
        else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
                fdir_info->guarant_spc = max_num * 4;

        fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
        fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
        fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
        IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
                          fdir_info->mask.ipv6_mask.src_ip);
        IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
                          fdir_info->mask.ipv6_mask.dst_ip);
        fdir_info->mask.src_port_mask = info->mask.src_port_mask;
        fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
        fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
        fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
        fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
        fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;

        if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
            fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
                fdir_info->flow_types_mask[0] = 0;
        else
                fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;

        fdir_info->flex_payload_unit = sizeof(uint16_t);
        fdir_info->max_flex_payload_segment_num = 1;
        fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
        fdir_info->flex_conf.nb_payloads = 1;
        fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
        fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
        fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
        fdir_info->flex_conf.nb_flexmasks = 1;
        fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
        fdir_info->flex_conf.flex_mask[0].mask[0] =
                (uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
        fdir_info->flex_conf.flex_mask[0].mask[1] =
                (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
}
static void
ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        uint32_t reg, max_num;
        enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

        /* Get the information from registers */
        reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
        info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
                                     IXGBE_FDIRFREE_COLL_SHIFT);
        info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
                                IXGBE_FDIRFREE_FREE_SHIFT);

        reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
        info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
                                   IXGBE_FDIRLEN_MAXHASH_SHIFT);
        info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
                                 IXGBE_FDIRLEN_MAXLEN_SHIFT);

        reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
        info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
                        IXGBE_FDIRUSTAT_REMOVE_SHIFT;
        info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
                     IXGBE_FDIRUSTAT_ADD_SHIFT;

        reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
        info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
                          IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
        info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
                       IXGBE_FDIRFSTAT_FADD_SHIFT;

        /* Copy the new information in the fdir parameter */
        fdir_stats->collision = info->collision;
        fdir_stats->free = info->free;
        fdir_stats->maxhash = info->maxhash;
        fdir_stats->maxlen = info->maxlen;
        fdir_stats->remove = info->remove;
        fdir_stats->add = info->add;
        fdir_stats->f_remove = info->f_remove;
        fdir_stats->f_add = info->f_add;

        reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
        max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
                        (reg & FDIRCTRL_PBALLOC_MASK)));
        if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
            fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                fdir_stats->guarant_cnt = max_num - fdir_stats->free;
        else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
                fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
}
/*
 * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
 * @dev: pointer to the structure rte_eth_dev
 * @filter_op: operation will be taken
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
                enum rte_filter_op filter_op, void *arg)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret = 0;

        if (hw->mac.type != ixgbe_mac_82599EB &&
            hw->mac.type != ixgbe_mac_X540 &&
            hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a)
                return -ENOTSUP;

        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
                return -EINVAL;

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = ixgbe_add_del_fdir_filter(dev,
                        (struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
                break;
        case RTE_ETH_FILTER_UPDATE:
                ret = ixgbe_add_del_fdir_filter(dev,
                        (struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = ixgbe_add_del_fdir_filter(dev,
                        (struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
                break;
        case RTE_ETH_FILTER_FLUSH:
                ret = ixgbe_fdir_flush(dev);
                break;
        case RTE_ETH_FILTER_INFO:
                ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
                break;
        case RTE_ETH_FILTER_STATS:
                ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
                break;
        default:
                PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}
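
/*
 * Hedged usage sketch via the generic filter API of this DPDK generation
 * (the ethdev layer routes RTE_ETH_FILTER_FDIR requests to this function);
 * field paths follow rte_eth_fdir_filter and are illustrative:
 *
 *     struct rte_eth_fdir_filter f = { 0 };
 *     f.soft_id = 1;
 *     f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *     f.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
 *     f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(1024);
 *     f.action.behavior = RTE_ETH_FDIR_ACCEPT;
 *     f.action.rx_queue = 3;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                             RTE_ETH_FILTER_ADD, &f);
 */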
/* restore flow director filter */
void
ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *fdir_info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        struct ixgbe_fdir_filter *node;
        bool is_perfect = FALSE;
        enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

        if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
            fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                is_perfect = TRUE;

        if (is_perfect) {
                TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
                        (void)fdir_write_perfect_filter_82599(hw,
                                        &node->ixgbe_fdir, node->queue,
                                        node->fdirflags, node->fdirhash,
                                        fdir_mode);
                }
        } else {
                TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
                        (void)fdir_add_signature_filter_82599(hw,
                                        &node->ixgbe_fdir, node->queue,
                                        node->fdirflags, node->fdirhash);
                }
        }
}
/* remove all the flow director filters */
int
ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
{
        struct ixgbe_hw_fdir_info *fdir_info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        struct ixgbe_fdir_filter *fdir_filter;
        int ret = 0;

        /* flush flow director */
        rte_hash_reset(fdir_info->hash_handle);
        memset(fdir_info->hash_map, 0,
               sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
        while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
                TAILQ_REMOVE(&fdir_info->fdir_list,
                             fdir_filter,
                             entries);
                rte_free(fdir_filter);
        }
        ret = ixgbe_fdir_flush(dev);

        return ret;
}