[dpdk.git] / drivers / net / ixgbe / ixgbe_fdir.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <stdint.h>
7 #include <stdarg.h>
8 #include <errno.h>
9 #include <sys/queue.h>
10
11 #include <rte_interrupts.h>
12 #include <rte_log.h>
13 #include <rte_debug.h>
14 #include <rte_pci.h>
15 #include <rte_vxlan.h>
16 #include <rte_ethdev_driver.h>
17 #include <rte_malloc.h>
18
19 #include "ixgbe_logs.h"
20 #include "base/ixgbe_api.h"
21 #include "base/ixgbe_common.h"
22 #include "ixgbe_ethdev.h"
23
24 /* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
25 #define FDIRCTRL_PBALLOC_MASK           0x03
26
27 /* For calculating memory required for FDIR filters */
28 #define PBALLOC_SIZE_SHIFT              15
29
30 /* Number of bits used to mask bucket hash for different pballoc sizes */
31 #define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF  /* 11 bits */
32 #define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF  /* 12 bits */
33 #define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF  /* 13 bits */
34 #define SIG_BUCKET_64KB_HASH_MASK       0x1FFF  /* 13 bits */
35 #define SIG_BUCKET_128KB_HASH_MASK      0x3FFF  /* 14 bits */
36 #define SIG_BUCKET_256KB_HASH_MASK      0x7FFF  /* 15 bits */
37 #define IXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
38 #define IXGBE_FDIR_MAX_FLEX_LEN         2 /* len in bytes of flexbytes */
39 #define IXGBE_MAX_FLX_SOURCE_OFF        62
40 #define IXGBE_FDIRCTRL_FLEX_MASK        (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
41 #define IXGBE_FDIRCMD_CMD_INTERVAL_US   10
42
43 #define IXGBE_FDIR_FLOW_TYPES ( \
44         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
45         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
46         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
47         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
48         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
49         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
50         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
51         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
52
/*
 * Collapse a 16-byte IPv6 address mask into a 16-bit per-byte bitmap:
 * bit i of (ipv6m) is set when byte i of (ipaddr) is 0xFF.  A byte that
 * is neither 0x00 nor 0xFF is invalid and makes the ENCLOSING function
 * return -EINVAL — note the hidden return statement; only use inside a
 * function returning int.
 */
#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
	(ipv6m) = 0; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if (ipv6_addr[i] == UINT8_MAX) \
			(ipv6m) |= 1 << i; \
		else if (ipv6_addr[i] != 0) { \
			PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
			return -EINVAL; \
		} \
	} \
} while (0)

/*
 * Inverse of IPV6_ADDR_TO_MASK: expand a 16-bit per-byte bitmap back into
 * a 16-byte IPv6 address mask (bit i set -> byte i becomes 0xFF, else 0).
 */
#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
	uint8_t ipv6_addr[16]; \
	uint8_t i; \
	for (i = 0; i < sizeof(ipv6_addr); i++) { \
		if ((ipv6m) & (1 << i)) \
			ipv6_addr[i] = UINT8_MAX; \
		else \
			ipv6_addr[i] = 0; \
	} \
	rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
} while (0)
79
80 #define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
81
82 static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
83 static int fdir_set_input_mask(struct rte_eth_dev *dev,
84                                const struct rte_eth_fdir_masks *input_mask);
85 static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
86 static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
87 static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
88                 const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
89 static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
90 static int ixgbe_fdir_filter_to_atr_input(
91                 const struct rte_eth_fdir_filter *fdir_filter,
92                 union ixgbe_atr_input *input,
93                 enum rte_fdir_mode mode);
94 static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
95                                  uint32_t key);
96 static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
97                 enum rte_fdir_pballoc_type pballoc);
98 static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
99                 enum rte_fdir_pballoc_type pballoc);
100 static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
101                         union ixgbe_atr_input *input, uint8_t queue,
102                         uint32_t fdircmd, uint32_t fdirhash,
103                         enum rte_fdir_mode mode);
104 static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
105                 union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
106                 uint32_t fdirhash);
107 static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
108                               const struct rte_eth_fdir_filter *fdir_filter,
109                               bool del,
110                               bool update);
111 static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
112 static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
113                         struct rte_eth_fdir_info *fdir_info);
114 static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
115                         struct rte_eth_fdir_stats *fdir_stats);
116
117 /**
118  * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
119  * It adds extra configuration of fdirctrl that is common for all filter types.
120  *
121  *  Initialize Flow Director control registers
122  *  @hw: pointer to hardware structure
123  *  @fdirctrl: value to write to flow director control register
124  **/
static int
fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Continue setup of fdirctrl register bits:
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiple these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	/* Flush posted writes before polling INIT_DONE. */
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	/* Loop ran to completion without seeing INIT_DONE -> hw timeout. */
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
		return -ETIMEDOUT;
	}
	return 0;
}
172
173 /*
174  * Set appropriate bits in fdirctrl for: variable reporting levels, moving
175  * flexbytes matching field, and drop queue (only for perfect matching mode).
176  */
177 static inline int
178 configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
179 {
180         *fdirctrl = 0;
181
182         switch (conf->pballoc) {
183         case RTE_FDIR_PBALLOC_64K:
184                 /* 8k - 1 signature filters */
185                 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
186                 break;
187         case RTE_FDIR_PBALLOC_128K:
188                 /* 16k - 1 signature filters */
189                 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
190                 break;
191         case RTE_FDIR_PBALLOC_256K:
192                 /* 32k - 1 signature filters */
193                 *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
194                 break;
195         default:
196                 /* bad value */
197                 PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
198                 return -EINVAL;
199         };
200
201         /* status flags: write hash & swindex in the rx descriptor */
202         switch (conf->status) {
203         case RTE_FDIR_NO_REPORT_STATUS:
204                 /* do nothing, default mode */
205                 break;
206         case RTE_FDIR_REPORT_STATUS:
207                 /* report status when the packet matches a fdir rule */
208                 *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
209                 break;
210         case RTE_FDIR_REPORT_STATUS_ALWAYS:
211                 /* always report status */
212                 *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
213                 break;
214         default:
215                 /* bad value */
216                 PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
217                 return -EINVAL;
218         };
219
220         *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
221                      IXGBE_FDIRCTRL_FLEX_SHIFT;
222
223         if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
224             conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
225                 *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
226                 *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
227                 if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
228                         *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
229                                         << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
230                 else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
231                         *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
232                                         << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
233         }
234
235         return 0;
236 }
237
238 /**
239  * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
240  *
241  *  @hi_dword: Bits 31:16 mask to be bit swapped.
242  *  @lo_dword: Bits 15:0  mask to be bit swapped.
243  *
244  *  Flow director uses several registers to store 2 x 16 bit masks with the
245  *  bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
246  *  mask affects the MS bit/byte of the target. This function reverses the
247  *  bits in these masks.
248  *  **/
static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
	/*
	 * Cast before shifting: hi_dword is promoted to (signed) int, and
	 * left-shifting a set bit 15 into the sign bit is undefined
	 * behavior in C.  The cast keeps the arithmetic unsigned.
	 */
	uint32_t mask = (uint32_t)hi_dword << 16;

	mask |= lo_dword;
	/* Classic bit-reversal ladder: swap adjacent bits, then 2-bit
	 * pairs, nibbles, and finally bytes.  No 16-bit swap is done, so
	 * each half-word is reversed independently, which matches the
	 * register layout (two 16-bit masks per register).
	 */
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}
260
261 /*
262  * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
263  * but makes use of the rte_fdir_masks structure to see which bits to set.
264  */
static int
fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/*
	 * mask VM pool and DIPv6 since there are currently not supported
	 * mask FLEX byte, it will be set in flex_conf
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
	volatile uint32_t *reg;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 */
	if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
		fdirm |= IXGBE_FDIRM_L4P;

	/* vlan_tci_mask is stored big-endian; only four TCI mask layouts
	 * are supported (priority-only, ID-only, neither, both).
	 */
	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (info->mask.vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}

	/* flex byte mask */
	if (info->mask.flex_bytes_mask == 0)
		fdirm |= IXGBE_FDIRM_FLEX;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = reverse_fdir_bitmasks(
			rte_be_to_cpu_16(info->mask.dst_port_mask),
			rte_be_to_cpu_16(info->mask.src_port_mask));

	/* write all the same so that UDP, TCP and SCTP use the same mask
	 * (little-endian)
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);

	/* Store source and destination IPv4 masks (big-endian),
	 * can not use IXGBE_WRITE_REG.
	 */
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
	*reg = ~(info->mask.src_ipv4_mask);
	reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
	*reg = ~(info->mask.dst_ipv4_mask);

	/* IPv6 masks only apply in signature mode on this hardware path. */
	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
		/*
		 * Store source and destination IPv6 masks (bit reversed)
		 */
		fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
			    info->mask.src_ipv6_mask;

		IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
	}

	return IXGBE_SUCCESS;
}
344
345 /*
346  * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
347  * but makes use of the rte_fdir_masks structure to see which bits to set.
348  */
static int
fdir_set_input_mask_x550(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	/* mask VM pool and DIPv6 since there are currently not supported
	 * mask FLEX byte, it will be set in flex_conf
	 */
	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
			 IXGBE_FDIRM_FLEX;
	uint32_t fdiripv6m;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
	uint16_t mac_mask;

	PMD_INIT_FUNC_TRACE();

	/* set the default UDP port for VxLAN */
	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, RTE_VXLAN_DEFAULT_PORT);

	/* some bits must be set for mac vlan or tunnel mode */
	fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;

	/* vlan_tci_mask is big-endian; only four TCI mask layouts valid. */
	if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
		/* mask VLAN Priority */
		fdirm |= IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
	else if (info->mask.vlan_tci_mask == 0)
		/* mask VLAN ID and Priority */
		fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
	else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
		PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
		return -EINVAL;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* FDIRIP6M is repurposed in mac-vlan/tunnel modes; a set bit here
	 * means "mask out" the corresponding field.
	 */
	fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
	fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
				IXGBE_FDIRIP6M_TNI_VNI;

	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
		fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
		/* Clear (i.e. match on) inner-MAC bits that the user
		 * wants compared; remaining bits stay masked.
		 */
		mac_mask = info->mask.mac_addr_byte_mask &
			(IXGBE_FDIRIP6M_INNER_MAC >>
			IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
		fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
				IXGBE_FDIRIP6M_INNER_MAC);

		switch (info->mask.tunnel_type_mask) {
		case 0:
			/* Mask tunnel type */
			fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
			break;
		case 1:
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
			return -EINVAL;
		}

		/* tunnel_id_mask is big-endian; accept none/24-bit/full. */
		switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
		case 0x0:
			/* Mask vxlan id */
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
			break;
		case 0x00FFFFFF:
			fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
			break;
		case 0xFFFFFFFF:
			break;
		default:
			PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
			return -EINVAL;
		}
	}

	/* L3/L4 fields are fully masked in mac-vlan/tunnel modes. */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);

	return IXGBE_SUCCESS;
}
440
441 static int
442 ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
443                                   const struct rte_eth_fdir_masks *input_mask)
444 {
445         struct ixgbe_hw_fdir_info *info =
446                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
447         uint16_t dst_ipv6m = 0;
448         uint16_t src_ipv6m = 0;
449
450         memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
451         info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
452         info->mask.src_port_mask = input_mask->src_port_mask;
453         info->mask.dst_port_mask = input_mask->dst_port_mask;
454         info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
455         info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
456         IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
457         IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
458         info->mask.src_ipv6_mask = src_ipv6m;
459         info->mask.dst_ipv6_mask = dst_ipv6m;
460
461         return IXGBE_SUCCESS;
462 }
463
464 static int
465 ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
466                                  const struct rte_eth_fdir_masks *input_mask)
467 {
468         struct ixgbe_hw_fdir_info *info =
469                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
470
471         memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
472         info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
473         info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
474         info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
475         info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
476
477         return IXGBE_SUCCESS;
478 }
479
480 static int
481 ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
482                             const struct rte_eth_fdir_masks *input_mask)
483 {
484         enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
485
486         if (mode >= RTE_FDIR_MODE_SIGNATURE &&
487             mode <= RTE_FDIR_MODE_PERFECT)
488                 return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
489         else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
490                  mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
491                 return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
492
493         PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
494         return -ENOTSUP;
495 }
496
497 int
498 ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
499 {
500         enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
501
502         if (mode >= RTE_FDIR_MODE_SIGNATURE &&
503             mode <= RTE_FDIR_MODE_PERFECT)
504                 return fdir_set_input_mask_82599(dev);
505         else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
506                  mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
507                 return fdir_set_input_mask_x550(dev);
508
509         PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
510         return -ENOTSUP;
511 }
512
513 int
514 ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
515                                 uint16_t offset)
516 {
517         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
518         uint32_t fdirctrl;
519         int i;
520
521         fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
522
523         fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
524         fdirctrl |= ((offset >> 1) /* convert to word offset */
525                 << IXGBE_FDIRCTRL_FLEX_SHIFT);
526
527         IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
528         IXGBE_WRITE_FLUSH(hw);
529         for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
530                 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
531                         IXGBE_FDIRCTRL_INIT_DONE)
532                         break;
533                 msec_delay(1);
534         }
535         return 0;
536 }
537
/* Cache the caller-supplied masks, then program them into hardware.
 * Any failure from the caching step is returned without touching hw.
 */
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
		    const struct rte_eth_fdir_masks *input_mask)
{
	int ret = ixgbe_fdir_store_input_mask(dev, input_mask);

	if (ret != 0)
		return ret;

	return ixgbe_fdir_set_input_mask(dev);
}
550
551 /*
552  * ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
553  * arguments are valid
554  */
static int
ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
		const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_fdir_info *info =
			IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint32_t fdirm;
	uint16_t flexbytes = 0;
	uint16_t i;

	/* NOTE(review): FDIRM is read before the NULL check below; harmless
	 * but the read is wasted when conf == NULL.
	 */
	fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);

	if (conf == NULL) {
		PMD_DRV_LOG(ERR, "NULL pointer.");
		return -EINVAL;
	}

	/* Validate each raw-payload config: the two flex bytes must be
	 * adjacent, start on an even (word-aligned) offset, and lie within
	 * the supported range.  The offset is stored in FDIRCTRL in words.
	 */
	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
			PMD_DRV_LOG(ERR, "unsupported payload type.");
			return -EINVAL;
		}
		if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
		    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
		    (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
			*fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
			*fdirctrl |=
				(flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
					IXGBE_FDIRCTRL_FLEX_SHIFT;
		} else {
			PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
			return -EINVAL;
		}
	}

	/* The hardware supports only a global all-or-nothing flex mask:
	 * 0xFFFF enables flex matching, 0 leaves it masked; anything else
	 * is rejected.
	 */
	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
			PMD_DRV_LOG(ERR, "flexmask should be set globally.");
			return -EINVAL;
		}
		flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
					((flex_mask->mask[1]) & 0xFF));
		if (flexbytes == UINT16_MAX)
			fdirm &= ~IXGBE_FDIRM_FLEX;
		else if (flexbytes != 0) {
			/* IXGBE_FDIRM_FLEX is set by default when set mask */
			PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
			return -EINVAL;
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
	/* Cache the effective mask and byte offset for later filter adds. */
	info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
	info->flex_bytes_offset = (uint8_t)((*fdirctrl &
					    IXGBE_FDIRCTRL_FLEX_MASK) >>
					    IXGBE_FDIRCTRL_FLEX_SHIFT);
	return 0;
}
617
int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err;
	uint32_t fdirctrl, pbsize;
	int i;
	enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

	PMD_INIT_FUNC_TRACE();

	/* Flow Director exists only on these MAC generations. */
	if (hw->mac.type != ixgbe_mac_82599EB &&
		hw->mac.type != ixgbe_mac_X540 &&
		hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOSYS;

	/* x550 supports mac-vlan and tunnel mode but other NICs not */
	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a &&
	    mode != RTE_FDIR_MODE_SIGNATURE &&
	    mode != RTE_FDIR_MODE_PERFECT)
		return -ENOSYS;

	/* Translate the user fdir_conf into an FDIRCTRL register value. */
	err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
	if (err)
		return err;

	/*
	 * Before enabling Flow Director, the Rx Packet Buffer size
	 * must be reduced.  The new value is the current size minus
	 * flow director memory usage size.
	 */
	pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

	/*
	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
	 * initialized to zero for non DCB mode otherwise actual total RX PB
	 * would be bigger than programmed and filter space would run into
	 * the PB 0 region.
	 */
	for (i = 1; i < 8; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

	/* Cache and program the field masks before enabling the engine. */
	err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD mask");
		return err;
	}
	/* Apply flexbytes config; may adjust fdirctrl's flex offset field. */
	err = ixgbe_set_fdir_flex_conf(dev,
		&dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
		return err;
	}

	/* Finally write FDIRCTRL and wait for init-done. */
	err = fdir_enable_82599(hw, fdirctrl);
	if (err < 0) {
		PMD_INIT_LOG(ERR, " Error on enabling FD.");
		return err;
	}
	return 0;
}
685
686 /*
687  * Convert DPDK rte_eth_fdir_filter struct to ixgbe_atr_input union that is used
688  * by the IXGBE driver code.
689  */
690 static int
691 ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
692                 union ixgbe_atr_input *input, enum rte_fdir_mode mode)
693 {
694         input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
695         input->formatted.flex_bytes = (uint16_t)(
696                 (fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
697                 (fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));
698
699         switch (fdir_filter->input.flow_type) {
700         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
701                 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
702                 break;
703         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
704                 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
705                 break;
706         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
707                 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
708                 break;
709         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
710                 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
711                 break;
712         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
713                 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
714                 break;
715         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
716                 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
717                 break;
718         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
719                 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
720                 break;
721         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
722                 input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
723                 break;
724         default:
725                 break;
726         }
727
728         switch (fdir_filter->input.flow_type) {
729         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
730         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
731                 input->formatted.src_port =
732                         fdir_filter->input.flow.udp4_flow.src_port;
733                 input->formatted.dst_port =
734                         fdir_filter->input.flow.udp4_flow.dst_port;
735                 /* fall-through */
736         /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
737         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
738         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
739                 input->formatted.src_ip[0] =
740                         fdir_filter->input.flow.ip4_flow.src_ip;
741                 input->formatted.dst_ip[0] =
742                         fdir_filter->input.flow.ip4_flow.dst_ip;
743                 break;
744
745         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
746         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
747                 input->formatted.src_port =
748                         fdir_filter->input.flow.udp6_flow.src_port;
749                 input->formatted.dst_port =
750                         fdir_filter->input.flow.udp6_flow.dst_port;
751                 /* fall-through */
752         /*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
753         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
754         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
755                 rte_memcpy(input->formatted.src_ip,
756                            fdir_filter->input.flow.ipv6_flow.src_ip,
757                            sizeof(input->formatted.src_ip));
758                 rte_memcpy(input->formatted.dst_ip,
759                            fdir_filter->input.flow.ipv6_flow.dst_ip,
760                            sizeof(input->formatted.dst_ip));
761                 break;
762         default:
763                 break;
764         }
765
766         if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
767                 rte_memcpy(
768                         input->formatted.inner_mac,
769                         fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
770                         sizeof(input->formatted.inner_mac));
771         } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
772                 rte_memcpy(
773                         input->formatted.inner_mac,
774                         fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
775                         sizeof(input->formatted.inner_mac));
776                 if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
777                                 RTE_FDIR_TUNNEL_TYPE_VXLAN)
778                         input->formatted.tunnel_type =
779                                         IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
780                 else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
781                                 RTE_FDIR_TUNNEL_TYPE_NVGRE)
782                         input->formatted.tunnel_type =
783                                         IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
784                 else
785                         PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");
786
787                 input->formatted.tni_vni =
788                         fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
789         }
790
791         return 0;
792 }
793
/*
 * The below function is taken from the FreeBSD IXGBE drivers release
 * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
 * before returning, as the signature hash can use 16bits.
 *
 * The newer driver has optimised functions for calculating bucket and
 * signature hashes. However they don't support IPv6 type packets for signature
 * filters so are not used here.
 *
 * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
 * set.
 *
 * Compute the hashes for SW ATR
 *  @atr_input: input bitstream to compute the hash on
 *  @key: 32-bit hash key
 **/
static uint32_t
ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
				 uint32_t key)
{
	/*
	 * The algorithm is as follows:
	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
	 *    and A[n] x B[n] is bitwise AND between same length strings
	 *
	 *    K[n] is 16 bits, defined as:
	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
	 *       for n modulo 32 < 15, K[n] =
	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
	 *
	 *    S[n] is 16 bits, defined as:
	 *       for n >= 15, S[n] = S[n:n - 15]
	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
	 *
	 *    To simplify for programming, the algorithm is implemented
	 *    in software this way:
	 *
	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
	 *
	 *    for (i = 0; i < 352; i+=32)
	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
	 *
	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
	 *
	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
	 *
	 *    if (key[0])
	 *        hash[15:0] ^= Stream[15:0];
	 *
	 *    for (i = 0; i < 16; i++) {
	 *        if (key[i])
	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
	 *        if (key[i + 16])
	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
	 *    }
	 *
	 */
	__be32 common_hash_dword = 0;
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 hash_result = 0;
	u8 i;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);

	/* generate common hash dword: XOR of dwords 1..13 of the stream
	 * (dword 0 is folded in separately below, dwords >13 are unused)
	 */
	for (i = 1; i <= 13; i++)
		common_hash_dword ^= atr_input->dword_stream[i];

	hi_hash_dword = IXGBE_NTOHL(common_hash_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	if (key & 0x0001)
		hash_result ^= lo_hash_dword;
	if (key & 0x00010000)
		hash_result ^= hi_hash_dword;

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);


	/* process the remaining 30 bits in the key 2 bits at a time */
	for (i = 15; i; i--) {
		if (key & (0x0001 << i))
			hash_result ^= lo_hash_dword >> i;
		if (key & (0x00010000 << i))
			hash_result ^= hi_hash_dword >> i;
	}

	/* note: deliberately NOT masked with IXGBE_ATR_HASH_MASK (see above) */
	return hash_result;
}
898
899 static uint32_t
900 atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
901                 enum rte_fdir_pballoc_type pballoc)
902 {
903         if (pballoc == RTE_FDIR_PBALLOC_256K)
904                 return ixgbe_atr_compute_hash_82599(input,
905                                 IXGBE_ATR_BUCKET_HASH_KEY) &
906                                 PERFECT_BUCKET_256KB_HASH_MASK;
907         else if (pballoc == RTE_FDIR_PBALLOC_128K)
908                 return ixgbe_atr_compute_hash_82599(input,
909                                 IXGBE_ATR_BUCKET_HASH_KEY) &
910                                 PERFECT_BUCKET_128KB_HASH_MASK;
911         else
912                 return ixgbe_atr_compute_hash_82599(input,
913                                 IXGBE_ATR_BUCKET_HASH_KEY) &
914                                 PERFECT_BUCKET_64KB_HASH_MASK;
915 }
916
917 /**
918  * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
919  * @hw: pointer to hardware structure
920  */
921 static inline int
922 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
923 {
924         int i;
925
926         for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
927                 *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
928                 if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
929                         return 0;
930                 rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
931         }
932
933         return -ETIMEDOUT;
934 }
935
936 /*
937  * Calculate the hash value needed for signature-match filters. In the FreeBSD
938  * driver, this is done by the optimised function
939  * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
940  * doesn't support calculating a hash for an IPv6 filter.
941  */
942 static uint32_t
943 atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
944                 enum rte_fdir_pballoc_type pballoc)
945 {
946         uint32_t bucket_hash, sig_hash;
947
948         if (pballoc == RTE_FDIR_PBALLOC_256K)
949                 bucket_hash = ixgbe_atr_compute_hash_82599(input,
950                                 IXGBE_ATR_BUCKET_HASH_KEY) &
951                                 SIG_BUCKET_256KB_HASH_MASK;
952         else if (pballoc == RTE_FDIR_PBALLOC_128K)
953                 bucket_hash = ixgbe_atr_compute_hash_82599(input,
954                                 IXGBE_ATR_BUCKET_HASH_KEY) &
955                                 SIG_BUCKET_128KB_HASH_MASK;
956         else
957                 bucket_hash = ixgbe_atr_compute_hash_82599(input,
958                                 IXGBE_ATR_BUCKET_HASH_KEY) &
959                                 SIG_BUCKET_64KB_HASH_MASK;
960
961         sig_hash = ixgbe_atr_compute_hash_82599(input,
962                         IXGBE_ATR_SIGNATURE_HASH_KEY);
963
964         return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
965 }
966
/*
 * This is based on ixgbe_fdir_write_perfect_filter_82599() in
 * base/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
 * added, and IPv6 support also added. The hash value is also pre-calculated
 * as the pballoc value is needed to do it.
 */
/**
 * Program one perfect-match filter into hardware and wait for completion.
 * @hw: hardware handle
 * @input: filter fields, stored in network byte order in the ATR input union
 * @queue: RX queue index matched packets are steered to
 * @fdircmd: extra FDIRCMD flags (drop/update) merged into the ADD command
 * @fdirhash: pre-computed FDIRHASH value (bucket hash plus soft id)
 * @mode: RTE_FDIR_MODE_PERFECT, _PERFECT_MAC_VLAN or _PERFECT_TUNNEL
 *
 * Returns 0 on success, or -ETIMEDOUT if the FDIRCMD command never completes.
 */
static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
			union ixgbe_atr_input *input, uint8_t queue,
			uint32_t fdircmd, uint32_t fdirhash,
			enum rte_fdir_mode mode)
{
	uint32_t fdirport, fdirvlan;
	u32 addr_low, addr_high;
	u32 tunnel_type = 0;
	int err = 0;
	volatile uint32_t *reg;

	if (mode == RTE_FDIR_MODE_PERFECT) {
		/* record the IPv4 address (big-endian)
		 * can not use IXGBE_WRITE_REG.
		 */
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
		*reg = input->formatted.src_ip[0];
		reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
		*reg = input->formatted.dst_ip[0];

		/* record source and destination port (little-endian)*/
		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
	} else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
		   mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
		/* for mac vlan and tunnel modes: the inner MAC address is
		 * split across the FDIRSIPv6 registers (low 4 bytes / high
		 * 2 bytes), reusing the IPv6 source-address fields.
		 */
		addr_low = ((u32)input->formatted.inner_mac[0] |
			    ((u32)input->formatted.inner_mac[1] << 8) |
			    ((u32)input->formatted.inner_mac[2] << 16) |
			    ((u32)input->formatted.inner_mac[3] << 24));
		addr_high = ((u32)input->formatted.inner_mac[4] |
			     ((u32)input->formatted.inner_mac[5] << 8));

		if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
		} else {
			/* tunnel mode: bit 31 flags a non-zero tunnel type
			 * (VXLAN/NVGRE as set by ixgbe_fdir_filter_to_atr_input)
			 */
			if (input->formatted.tunnel_type)
				tunnel_type = 0x80000000;
			tunnel_type |= addr_high;
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
					input->formatted.tni_vni);
		}
		/* IP/port registers are unused in these modes; clear them */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
	}

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = input->formatted.flex_bytes;
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register: writing it kicks off the add command */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}
1060
/**
 * This function is based on ixgbe_atr_add_signature_filter_82599() in
 * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
 * setting extra fields in the FDIRCMD register, and removes the code that was
 * verifying the flow_type field. According to the documentation, a flow type of
 * 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to
 * work ok...
 *
 *  Adds a signature hash filter
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @queue: queue index to direct traffic to
 *  @fdircmd: any extra flags to set in fdircmd register
 *  @fdirhash: pre-calculated hash value for the filter
 *
 *  Returns 0 on success, -ETIMEDOUT if the command does not complete.
 **/
static int
fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
		union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
		uint32_t fdirhash)
{
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	/* configure FDIRCMD register: merge caller flags with the ADD
	 * command, flow type and target RX queue
	 */
	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/* FDIRHASH must hold the filter's hash before the command is issued */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

	return err;
}
1102
/*
 * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
 * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
 * that it can be used for removing signature and perfect filters.
 *
 * Returns 0 on success (including when no matching filter exists in
 * hardware), or -ETIMEDOUT if a command fails to complete.
 */
static int
fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
{
	uint32_t fdircmd = 0;
	int err = 0;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	/* fdircmd receives the query result, including FILTER_VALID */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}
	/* Wait for the remove to finish; when no remove was issued the
	 * command field is already idle and this returns immediately.
	 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err < 0)
		PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
	return err;

}
1141
1142 static inline struct ixgbe_fdir_filter *
1143 ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
1144                          union ixgbe_atr_input *key)
1145 {
1146         int ret;
1147
1148         ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
1149         if (ret < 0)
1150                 return NULL;
1151
1152         return fdir_info->hash_map[ret];
1153 }
1154
1155 static inline int
1156 ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
1157                          struct ixgbe_fdir_filter *fdir_filter)
1158 {
1159         int ret;
1160
1161         ret = rte_hash_add_key(fdir_info->hash_handle,
1162                                &fdir_filter->ixgbe_fdir);
1163
1164         if (ret < 0) {
1165                 PMD_DRV_LOG(ERR,
1166                             "Failed to insert fdir filter to hash table %d!",
1167                             ret);
1168                 return ret;
1169         }
1170
1171         fdir_info->hash_map[ret] = fdir_filter;
1172
1173         TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
1174
1175         return 0;
1176 }
1177
1178 static inline int
1179 ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
1180                          union ixgbe_atr_input *key)
1181 {
1182         int ret;
1183         struct ixgbe_fdir_filter *fdir_filter;
1184
1185         ret = rte_hash_del_key(fdir_info->hash_handle, key);
1186
1187         if (ret < 0) {
1188                 PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
1189                 return ret;
1190         }
1191
1192         fdir_filter = fdir_info->hash_map[ret];
1193         fdir_info->hash_map[ret] = NULL;
1194
1195         TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
1196         rte_free(fdir_filter);
1197
1198         return 0;
1199 }
1200
1201 static int
1202 ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
1203                             const struct rte_eth_fdir_filter *fdir_filter,
1204                             struct ixgbe_fdir_rule *rule)
1205 {
1206         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1207         int err;
1208
1209         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1210
1211         err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
1212                                              &rule->ixgbe_fdir,
1213                                              fdir_mode);
1214         if (err)
1215                 return err;
1216
1217         rule->mode = fdir_mode;
1218         if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
1219                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1220         rule->queue = fdir_filter->action.rx_queue;
1221         rule->soft_id = fdir_filter->soft_id;
1222
1223         return 0;
1224 }
1225
1226 int
1227 ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
1228                           struct ixgbe_fdir_rule *rule,
1229                           bool del,
1230                           bool update)
1231 {
1232         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1233         uint32_t fdircmd_flags;
1234         uint32_t fdirhash;
1235         uint8_t queue;
1236         bool is_perfect = FALSE;
1237         int err;
1238         struct ixgbe_hw_fdir_info *info =
1239                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1240         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1241         struct ixgbe_fdir_filter *node;
1242         bool add_node = FALSE;
1243
1244         if (fdir_mode == RTE_FDIR_MODE_NONE ||
1245             fdir_mode != rule->mode)
1246                 return -ENOTSUP;
1247
1248         /*
1249          * Sanity check for x550.
1250          * When adding a new filter with flow type set to IPv4,
1251          * the flow director mask should be configed before,
1252          * and the L4 protocol and ports are masked.
1253          */
1254         if ((!del) &&
1255             (hw->mac.type == ixgbe_mac_X550 ||
1256              hw->mac.type == ixgbe_mac_X550EM_x ||
1257              hw->mac.type == ixgbe_mac_X550EM_a) &&
1258             (rule->ixgbe_fdir.formatted.flow_type ==
1259              IXGBE_ATR_FLOW_TYPE_IPV4 ||
1260              rule->ixgbe_fdir.formatted.flow_type ==
1261              IXGBE_ATR_FLOW_TYPE_IPV6) &&
1262             (info->mask.src_port_mask != 0 ||
1263              info->mask.dst_port_mask != 0) &&
1264             (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
1265              rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
1266                 PMD_DRV_LOG(ERR, "By this device,"
1267                             " IPv4 is not supported without"
1268                             " L4 protocol and ports masked!");
1269                 return -ENOTSUP;
1270         }
1271
1272         if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
1273             fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1274                 is_perfect = TRUE;
1275
1276         if (is_perfect) {
1277                 if (rule->ixgbe_fdir.formatted.flow_type &
1278                     IXGBE_ATR_L4TYPE_IPV6_MASK) {
1279                         PMD_DRV_LOG(ERR, "IPv6 is not supported in"
1280                                     " perfect mode!");
1281                         return -ENOTSUP;
1282                 }
1283                 fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
1284                                                           dev->data->dev_conf.fdir_conf.pballoc);
1285                 fdirhash |= rule->soft_id <<
1286                         IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1287         } else
1288                 fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
1289                                                       dev->data->dev_conf.fdir_conf.pballoc);
1290
1291         if (del) {
1292                 err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
1293                 if (err < 0)
1294                         return err;
1295
1296                 err = fdir_erase_filter_82599(hw, fdirhash);
1297                 if (err < 0)
1298                         PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
1299                 else
1300                         PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
1301                 return err;
1302         }
1303         /* add or update an fdir filter*/
1304         fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
1305         if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
1306                 if (is_perfect) {
1307                         queue = dev->data->dev_conf.fdir_conf.drop_queue;
1308                         fdircmd_flags |= IXGBE_FDIRCMD_DROP;
1309                 } else {
1310                         PMD_DRV_LOG(ERR, "Drop option is not supported in"
1311                                     " signature mode.");
1312                         return -EINVAL;
1313                 }
1314         } else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
1315                 queue = (uint8_t)rule->queue;
1316         else
1317                 return -EINVAL;
1318
1319         node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
1320         if (node) {
1321                 if (update) {
1322                         node->fdirflags = fdircmd_flags;
1323                         node->fdirhash = fdirhash;
1324                         node->queue = queue;
1325                 } else {
1326                         PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
1327                         return -EINVAL;
1328                 }
1329         } else {
1330                 add_node = TRUE;
1331                 node = rte_zmalloc("ixgbe_fdir",
1332                                    sizeof(struct ixgbe_fdir_filter),
1333                                    0);
1334                 if (!node)
1335                         return -ENOMEM;
1336                 rte_memcpy(&node->ixgbe_fdir,
1337                                  &rule->ixgbe_fdir,
1338                                  sizeof(union ixgbe_atr_input));
1339                 node->fdirflags = fdircmd_flags;
1340                 node->fdirhash = fdirhash;
1341                 node->queue = queue;
1342
1343                 err = ixgbe_insert_fdir_filter(info, node);
1344                 if (err < 0) {
1345                         rte_free(node);
1346                         return err;
1347                 }
1348         }
1349
1350         if (is_perfect) {
1351                 err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
1352                                                       queue, fdircmd_flags,
1353                                                       fdirhash, fdir_mode);
1354         } else {
1355                 err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
1356                                                       queue, fdircmd_flags,
1357                                                       fdirhash);
1358         }
1359         if (err < 0) {
1360                 PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
1361
1362                 if (add_node)
1363                         (void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
1364         } else {
1365                 PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
1366         }
1367
1368         return err;
1369 }
1370
1371 /* ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
1372  * @dev: pointer to the structure rte_eth_dev
1373  * @fdir_filter: fdir filter entry
1374  * @del: 1 - delete, 0 - add
1375  * @update: 1 - update
1376  */
1377 static int
1378 ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
1379                           const struct rte_eth_fdir_filter *fdir_filter,
1380                           bool del,
1381                           bool update)
1382 {
1383         struct ixgbe_fdir_rule rule;
1384         int err;
1385
1386         err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
1387
1388         if (err)
1389                 return err;
1390
1391         return ixgbe_fdir_filter_program(dev, &rule, del, update);
1392 }
1393
1394 static int
1395 ixgbe_fdir_flush(struct rte_eth_dev *dev)
1396 {
1397         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1398         struct ixgbe_hw_fdir_info *info =
1399                         IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1400         int ret;
1401
1402         ret = ixgbe_reinit_fdir_tables_82599(hw);
1403         if (ret < 0) {
1404                 PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
1405                 return ret;
1406         }
1407
1408         info->f_add = 0;
1409         info->f_remove = 0;
1410         info->add = 0;
1411         info->remove = 0;
1412
1413         return ret;
1414 }
1415
1416 #define FDIRENTRIES_NUM_SHIFT 10
1417 static void
1418 ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
1419 {
1420         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1421         struct ixgbe_hw_fdir_info *info =
1422                         IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1423         uint32_t fdirctrl, max_num, i;
1424         uint8_t offset;
1425
1426         fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1427         offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
1428                         IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);
1429
1430         fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
1431         max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
1432                         (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
1433         if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
1434             fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1435                 fdir_info->guarant_spc = max_num;
1436         else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
1437                 fdir_info->guarant_spc = max_num * 4;
1438
1439         fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
1440         fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
1441         fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
1442         IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
1443                         fdir_info->mask.ipv6_mask.src_ip);
1444         IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
1445                         fdir_info->mask.ipv6_mask.dst_ip);
1446         fdir_info->mask.src_port_mask = info->mask.src_port_mask;
1447         fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
1448         fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
1449         fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
1450         fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
1451         fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;
1452
1453         if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
1454             fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
1455                 fdir_info->flow_types_mask[0] = 0ULL;
1456         else
1457                 fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
1458         for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
1459                 fdir_info->flow_types_mask[i] = 0ULL;
1460
1461         fdir_info->flex_payload_unit = sizeof(uint16_t);
1462         fdir_info->max_flex_payload_segment_num = 1;
1463         fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
1464         fdir_info->flex_conf.nb_payloads = 1;
1465         fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
1466         fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
1467         fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
1468         fdir_info->flex_conf.nb_flexmasks = 1;
1469         fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
1470         fdir_info->flex_conf.flex_mask[0].mask[0] =
1471                         (uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
1472         fdir_info->flex_conf.flex_mask[0].mask[1] =
1473                         (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
1474 }
1475
1476 static void
1477 ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
1478 {
1479         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1480         struct ixgbe_hw_fdir_info *info =
1481                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1482         uint32_t reg, max_num;
1483         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1484
1485         /* Get the information from registers */
1486         reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
1487         info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
1488                                      IXGBE_FDIRFREE_COLL_SHIFT);
1489         info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
1490                                 IXGBE_FDIRFREE_FREE_SHIFT);
1491
1492         reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1493         info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
1494                                    IXGBE_FDIRLEN_MAXHASH_SHIFT);
1495         info->maxlen  = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
1496                                   IXGBE_FDIRLEN_MAXLEN_SHIFT);
1497
1498         reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1499         info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
1500                 IXGBE_FDIRUSTAT_REMOVE_SHIFT;
1501         info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
1502                 IXGBE_FDIRUSTAT_ADD_SHIFT;
1503
1504         reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
1505         info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
1506                 IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
1507         info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
1508                 IXGBE_FDIRFSTAT_FADD_SHIFT;
1509
1510         /*  Copy the new information in the fdir parameter */
1511         fdir_stats->collision = info->collision;
1512         fdir_stats->free = info->free;
1513         fdir_stats->maxhash = info->maxhash;
1514         fdir_stats->maxlen = info->maxlen;
1515         fdir_stats->remove = info->remove;
1516         fdir_stats->add = info->add;
1517         fdir_stats->f_remove = info->f_remove;
1518         fdir_stats->f_add = info->f_add;
1519
1520         reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1521         max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
1522                          (reg & FDIRCTRL_PBALLOC_MASK)));
1523         if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
1524             fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1525                 fdir_stats->guarant_cnt = max_num - fdir_stats->free;
1526         else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
1527                 fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
1528
1529 }
1530
/*
 * ixgbe_fdir_ctrl_func - handle all operations on the flow director.
 * @dev: pointer to the structure rte_eth_dev
 * @filter_op: operation to be performed
 * @arg: a pointer to the specific structure corresponding to the filter_op
 */
1537 int
1538 ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
1539                         enum rte_filter_op filter_op, void *arg)
1540 {
1541         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1542         int ret = 0;
1543
1544         if (hw->mac.type != ixgbe_mac_82599EB &&
1545                 hw->mac.type != ixgbe_mac_X540 &&
1546                 hw->mac.type != ixgbe_mac_X550 &&
1547                 hw->mac.type != ixgbe_mac_X550EM_x &&
1548                 hw->mac.type != ixgbe_mac_X550EM_a)
1549                 return -ENOTSUP;
1550
1551         if (filter_op == RTE_ETH_FILTER_NOP)
1552                 return 0;
1553
1554         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
1555                 return -EINVAL;
1556
1557         switch (filter_op) {
1558         case RTE_ETH_FILTER_ADD:
1559                 ret = ixgbe_add_del_fdir_filter(dev,
1560                         (struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
1561                 break;
1562         case RTE_ETH_FILTER_UPDATE:
1563                 ret = ixgbe_add_del_fdir_filter(dev,
1564                         (struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
1565                 break;
1566         case RTE_ETH_FILTER_DELETE:
1567                 ret = ixgbe_add_del_fdir_filter(dev,
1568                         (struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
1569                 break;
1570         case RTE_ETH_FILTER_FLUSH:
1571                 ret = ixgbe_fdir_flush(dev);
1572                 break;
1573         case RTE_ETH_FILTER_INFO:
1574                 ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
1575                 break;
1576         case RTE_ETH_FILTER_STATS:
1577                 ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
1578                 break;
1579         default:
1580                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
1581                 ret = -EINVAL;
1582                 break;
1583         }
1584         return ret;
1585 }
1586
1587 /* restore flow director filter */
1588 void
1589 ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
1590 {
1591         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1592         struct ixgbe_hw_fdir_info *fdir_info =
1593                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1594         struct ixgbe_fdir_filter *node;
1595         bool is_perfect = FALSE;
1596         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
1597
1598         if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
1599             fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
1600                 is_perfect = TRUE;
1601
1602         if (is_perfect) {
1603                 TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
1604                         (void)fdir_write_perfect_filter_82599(hw,
1605                                                               &node->ixgbe_fdir,
1606                                                               node->queue,
1607                                                               node->fdirflags,
1608                                                               node->fdirhash,
1609                                                               fdir_mode);
1610                 }
1611         } else {
1612                 TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
1613                         (void)fdir_add_signature_filter_82599(hw,
1614                                                               &node->ixgbe_fdir,
1615                                                               node->queue,
1616                                                               node->fdirflags,
1617                                                               node->fdirhash);
1618                 }
1619         }
1620 }
1621
1622 /* remove all the flow director filters */
1623 int
1624 ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
1625 {
1626         struct ixgbe_hw_fdir_info *fdir_info =
1627                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
1628         struct ixgbe_fdir_filter *fdir_filter;
1629         struct ixgbe_fdir_filter *filter_flag;
1630         int ret = 0;
1631
1632         /* flush flow director */
1633         rte_hash_reset(fdir_info->hash_handle);
1634         memset(fdir_info->hash_map, 0,
1635                sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
1636         filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
1637         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1638                 TAILQ_REMOVE(&fdir_info->fdir_list,
1639                              fdir_filter,
1640                              entries);
1641                 rte_free(fdir_filter);
1642         }
1643
1644         if (filter_flag != NULL)
1645                 ret = ixgbe_fdir_flush(dev);
1646
1647         return ret;
1648 }