net/i40e: fix flow director MSI-X resource allocation
[dpdk.git] / drivers / net / i40e / i40e_fdir.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
18 #include <rte_arp.h>
19 #include <rte_ip.h>
20 #include <rte_udp.h>
21 #include <rte_tcp.h>
22 #include <rte_sctp.h>
23 #include <rte_hash_crc.h>
24 #include <rte_bitmap.h>
25
26 #include "i40e_logs.h"
27 #include "base/i40e_type.h"
28 #include "base/i40e_prototype.h"
29 #include "i40e_ethdev.h"
30 #include "i40e_rxtx.h"
31
32 #define I40E_FDIR_MZ_NAME          "FDIR_MEMZONE"
33 #ifndef IPV6_ADDR_LEN
34 #define IPV6_ADDR_LEN              16
35 #endif
36
37 #ifndef IPPROTO_L2TP
38 #define IPPROTO_L2TP              115
39 #endif
40
41 #define I40E_FDIR_PKT_LEN                   512
42 #define I40E_FDIR_IP_DEFAULT_LEN            420
43 #define I40E_FDIR_IP_DEFAULT_TTL            0x40
44 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL    0x45
45 #define I40E_FDIR_TCP_DEFAULT_DATAOFF       0x50
46 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW     0x60000000
47
48 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
49 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
50 #define I40E_FDIR_UDP_DEFAULT_LEN           400
51 #define I40E_FDIR_GTP_DEFAULT_LEN           384
52 #define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
53 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN    344
54
55 #define I40E_FDIR_GTPC_DST_PORT             2123
56 #define I40E_FDIR_GTPU_DST_PORT             2152
57 #define I40E_FDIR_GTP_VER_FLAG_0X30         0x30
58 #define I40E_FDIR_GTP_VER_FLAG_0X32         0x32
59 #define I40E_FDIR_GTP_MSG_TYPE_0X01         0x01
60 #define I40E_FDIR_GTP_MSG_TYPE_0XFF         0xFF
61
62 #define I40E_FDIR_ESP_DST_PORT              4500
63
64 /* Wait time for fdir filter programming */
65 #define I40E_FDIR_MAX_WAIT_US 10000
66
67 /* Wait count and interval for fdir filter flush */
68 #define I40E_FDIR_FLUSH_RETRY       50
69 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
70
71 #define I40E_COUNTER_PF           2
72 /* Statistic counter index for one pf */
73 #define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
74
75 #define I40E_FDIR_FLOWS ( \
76         (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
77         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
78         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
79         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
80         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
81         (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
82         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
83         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
84         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
85         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
86         (1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
87
/* Forward declarations for filter-programming and software-table helpers
 * defined later in this file.
 */
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
			enum i40e_filter_pctype pctype,
			const struct rte_eth_fdir_filter *filter,
			bool add);
static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
			 struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
			const struct i40e_fdir_input *input);
static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
				   struct i40e_fdir_filter *filter);
static int
i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
				  enum i40e_filter_pctype pctype,
				  const struct i40e_fdir_filter_conf *filter,
				  bool add, bool wait_status);
104
105 static int
106 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
107 {
108         struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
109         struct i40e_hmc_obj_rxq rx_ctx;
110         int err = I40E_SUCCESS;
111
112         memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
113         /* Init the RX queue in hardware */
114         rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
115         rx_ctx.hbuff = 0;
116         rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
117         rx_ctx.qlen = rxq->nb_rx_desc;
118 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
119         rx_ctx.dsize = 1;
120 #endif
121         rx_ctx.dtype = i40e_header_split_none;
122         rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
123         rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
124         rx_ctx.tphrdesc_ena = 1;
125         rx_ctx.tphwdesc_ena = 1;
126         rx_ctx.tphdata_ena = 1;
127         rx_ctx.tphhead_ena = 1;
128         rx_ctx.lrxqthresh = 2;
129         rx_ctx.crcstrip = 0;
130         rx_ctx.l2tsel = 1;
131         rx_ctx.showiv = 0;
132         rx_ctx.prefena = 1;
133
134         err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
135         if (err != I40E_SUCCESS) {
136                 PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
137                 return err;
138         }
139         err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
140         if (err != I40E_SUCCESS) {
141                 PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
142                 return err;
143         }
144         rxq->qrx_tail = hw->hw_addr +
145                 I40E_QRX_TAIL(rxq->vsi->base_queue);
146
147         rte_wmb();
148         /* Init the RX tail regieter. */
149         I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
150
151         return err;
152 }
153
/*
 * i40e_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 *
 * Creates the dedicated FDIR VSI, sets up its single TX/RX queue pair in
 * software and hardware, enables the FDIR MSI-X interrupt, and reserves
 * the DMA memzone holding the filter-programming packet buffers.
 *
 * Returns I40E_SUCCESS on success or if already initialized; otherwise an
 * i40e error code, after releasing everything acquired so far.
 */
int
i40e_fdir_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int err = I40E_SUCCESS;
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	uint16_t i;

	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return I40E_NOT_SUPPORTED;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
			" num_filters_best_effort = %u.",
			hw->func_caps.fd_filters_guaranteed,
			hw->func_caps.fd_filters_best_effort);

	/* Already set up (e.g. a repeated call): nothing to do. */
	vsi = pf->fdir.fdir_vsi;
	if (vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return I40E_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}
	pf->fdir.fdir_vsi = vsi;

	/* FDIR TX queue setup (software resources) */
	err = i40e_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup (software resources) */
	err = i40e_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	/* Program the TX queue context in hardware. */
	err = i40e_tx_queue_init(pf->fdir.txq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
		goto fail_mem;
	}

	/* need switch on before dev start*/
	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
		goto fail_mem;
	}

	/* Init the rx queue in hardware */
	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
		goto fail_mem;
	}

	/* switch on rx queue */
	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
		goto fail_mem;
	}

	/*
	 * Enable the FDIR MSI-X interrupt. The FDIR VSI uses exactly one
	 * queue pair, so record that before binding queues to a vector.
	 */
	vsi->nb_used_qps = 1;
	i40e_vsi_queues_bind_intr(vsi, I40E_ITR_INDEX_NONE);
	i40e_vsi_enable_queues_intr(vsi);

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
			eth_dev->device->driver->name,
			I40E_FDIR_MZ_NAME,
			eth_dev->data->port_id);
	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN *
			I40E_FDIR_PRG_PKT_CNT, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
				 "flow director program packet.");
		err = I40E_ERR_NO_MEMORY;
		goto fail_mem;
	}

	/* Record virtual and DMA address of each programming packet slot. */
	for (i = 0; i < I40E_FDIR_PRG_PKT_CNT; i++) {
		pf->fdir.prg_pkt[i] = (uint8_t *)mz->addr +
			I40E_FDIR_PKT_LEN * i;
		pf->fdir.dma_addr[i] = mz->iova +
			I40E_FDIR_PKT_LEN * i;
	}

	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
	pf->fdir.fdir_actual_cnt = 0;
	pf->fdir.fdir_guarantee_free_space =
		pf->fdir.fdir_guarantee_total_space;

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return I40E_SUCCESS;

	/* Unwind in reverse order of acquisition. */
fail_mem:
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
280
281 /*
282  * i40e_fdir_teardown - release the Flow Director resources
283  * @pf: board private structure
284  */
285 void
286 i40e_fdir_teardown(struct i40e_pf *pf)
287 {
288         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
289         struct i40e_vsi *vsi;
290         struct rte_eth_dev *dev = pf->adapter->eth_dev;
291
292         vsi = pf->fdir.fdir_vsi;
293         if (!vsi)
294                 return;
295
296         /* disable FDIR MSIX interrupt */
297         i40e_vsi_queues_unbind_intr(vsi);
298         i40e_vsi_disable_queues_intr(vsi);
299
300         int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
301         if (err)
302                 PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
303         err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
304         if (err)
305                 PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
306
307         i40e_dev_rx_queue_release(pf->fdir.rxq);
308         rte_eth_dma_zone_free(dev, "fdir_rx_ring", pf->fdir.rxq->queue_id);
309         pf->fdir.rxq = NULL;
310         i40e_dev_tx_queue_release(pf->fdir.txq);
311         rte_eth_dma_zone_free(dev, "fdir_tx_ring", pf->fdir.txq->queue_id);
312         pf->fdir.txq = NULL;
313         i40e_vsi_release(vsi);
314         pf->fdir.fdir_vsi = NULL;
315 }
316
317 /* check whether the flow director table in empty */
318 static inline int
319 i40e_fdir_empty(struct i40e_hw *hw)
320 {
321         uint32_t guarant_cnt, best_cnt;
322
323         guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
324                                  I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
325                                  I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
326         best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
327                               I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
328                               I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
329         if (best_cnt + guarant_cnt > 0)
330                 return -1;
331
332         return 0;
333 }
334
/*
 * Initialize the configuration about bytes stream extracted as flexible
 * payload, and reset all flex masks.
 * @pf: board private structure
 */
static inline void
i40e_init_flx_pld(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint8_t pctype;
	int i, index;
	uint16_t flow_type;

	/*
	 * Define the bytes stream extracted as flexible payload in
	 * field vector. By default, select 8 words from the beginning
	 * of payload as flexible payload.
	 */
	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
		index = i * I40E_MAX_FLXPLD_FIED;
		pf->fdir.flex_set[index].src_offset = 0;
		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
		pf->fdir.flex_set[index].dst_offset = 0;
		/*
		 * Raw PIT register encodings; presumably the datasheet
		 * default extraction layout — TODO confirm against the
		 * X710 datasheet PRTQF_FLX_PIT description.
		 */
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
	}

	/* initialize the masks */
	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);

		/* Skip pctypes this adapter does not map to a flow type. */
		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
			continue;
		/* Clear both the software copy and the hardware registers. */
		pf->fdir.flex_mask[pctype].word_mask = 0;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
		}
	}
}
380
/*
 * Validate that two flex payload fields (already sorted by src_offset) do
 * not overlap: flex_pit2 must start at or after the end of flex_pit1
 * (previous src_offset + previous FSIZE).
 *
 * NOTE: expands to `return -EINVAL` on failure, so this macro may only be
 * used inside functions returning int.
 */
#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
	if ((flex_pit2).src_offset < \
		(flex_pit1).src_offset + (flex_pit1).size) { \
		PMD_DRV_LOG(ERR, "src_offset should not be" \
			" less than previous offset" \
			" + previous FSIZE."); \
		return -EINVAL; \
	} \
} while (0)
390
/*
 * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit
 * entries, one per run of consecutive source offsets; the resulting
 * flex_pit array is kept sorted by src_offset (insertion sort).
 * @src_offset: I40E_FDIR_MAX_FLEX_LEN per-byte source offsets
 * @flex_pit: output array; caller must provide room for up to
 *            I40E_FDIR_MAX_FLEX_LEN entries
 *
 * Returns the number of flex_pit entries produced.
 */
static inline uint16_t
i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
			struct i40e_fdir_flex_pit *flex_pit)
{
	uint16_t src_tmp, size, num = 0;
	uint16_t i, k, j = 0;

	while (j < I40E_FDIR_MAX_FLEX_LEN) {
		/* Measure the run of consecutive offsets ending at index j. */
		size = 1;
		for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
			if (src_offset[j + 1] == src_offset[j] + 1)
				size++;
			else
				break;
		}
		/* First source offset of the run. */
		src_tmp = src_offset[j] + 1 - size;
		/* the flex_pit need to be sorted by src_offset */
		for (i = 0; i < num; i++) {
			if (src_tmp < flex_pit[i].src_offset)
				break;
		}
		/* if insert required, move backward */
		for (k = num; k > i; k--)
			flex_pit[k] = flex_pit[k - 1];
		/* insert; dst_offset is the run's first index in src_offset */
		flex_pit[i].dst_offset = j + 1 - size;
		flex_pit[i].src_offset = src_tmp;
		flex_pit[i].size = size;
		j++;
		num++;
	}
	return num;
}
428
429 /* i40e_check_fdir_flex_payload -check flex payload configuration arguments */
430 static inline int
431 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
432 {
433         struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
434         uint16_t num, i;
435
436         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
437                 if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
438                         PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
439                         return -EINVAL;
440                 }
441         }
442
443         memset(flex_pit, 0, sizeof(flex_pit));
444         num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
445         if (num > I40E_MAX_FLXPLD_FIED) {
446                 PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
447                 return -EINVAL;
448         }
449         for (i = 0; i < num; i++) {
450                 if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
451                         flex_pit[i].src_offset & 0x01) {
452                         PMD_DRV_LOG(ERR, "flexpayload should be measured"
453                                 " in word");
454                         return -EINVAL;
455                 }
456                 if (i != num - 1)
457                         I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
458         }
459         return 0;
460 }
461
/*
 * i40e_check_fdir_flex_conf - check if the flex payload and mask
 * configuration arguments are valid
 * @adapter: i40e adapter, used to translate flow types to pctypes
 * @conf: flex payload/mask configuration supplied by the application
 *
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int
i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
			  const struct rte_eth_fdir_flex_conf *conf)
{
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint16_t mask_tmp;
	uint8_t nb_bitmask;
	uint16_t i, j;
	int ret = 0;
	enum i40e_filter_pctype pctype;

	if (conf == NULL) {
		PMD_DRV_LOG(INFO, "NULL pointer.");
		return -EINVAL;
	}
	/* check flexible payload setting configuration */
	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
		PMD_DRV_LOG(ERR, "invalid number of payload setting.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
			PMD_DRV_LOG(ERR, "invalid payload type.");
			return -EINVAL;
		}
		/* Delegate per-payload offset/alignment checks. */
		ret = i40e_check_fdir_flex_payload(flex_cfg);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
			return -EINVAL;
		}
	}

	/* check flex mask setting configuration */
	if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
			PMD_DRV_LOG(WARNING, "invalid flow type.");
			return -EINVAL;
		}
		nb_bitmask = 0;
		/*
		 * Count partially-masked words (neither all-zero nor
		 * all-ones): each consumes one HW bitmask slot, and only
		 * I40E_FDIR_BITMASK_NUM_WORD slots exist per pctype.
		 */
		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
			mask_tmp = I40E_WORD(flex_mask->mask[j],
					     flex_mask->mask[j + 1]);
			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
				nb_bitmask++;
				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
					PMD_DRV_LOG(ERR, " exceed maximal"
						" number of bitmasks.");
					return -EINVAL;
				}
			}
		}
	}
	return 0;
}
528
/*
 * i40e_set_flx_pld_cfg - configure the rule how bytes stream is extracted
 * as flexible payload
 * @pf: board private structure
 * @cfg: the rule how bytes stream is extracted as flexible payload
 */
static void
i40e_set_flx_pld_cfg(struct i40e_pf *pf,
			 const struct rte_eth_flex_payload_cfg *cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
	uint32_t flx_pit, flx_ort;
	uint16_t num, min_next_off;  /* in words */
	uint8_t field_idx = 0;
	uint8_t layer_idx = 0;
	uint16_t i;

	/* Map the payload type to the layer's field-vector bank. */
	if (cfg->type == RTE_ETH_L2_PAYLOAD)
		layer_idx = I40E_FLXPLD_L2_IDX;
	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
		layer_idx = I40E_FLXPLD_L3_IDX;
	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
		layer_idx = I40E_FLXPLD_L4_IDX;

	memset(flex_pit, 0, sizeof(flex_pit));
	/* Clamp to the number of PIT entries available per layer. */
	num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
		      RTE_DIM(flex_pit));

	if (num) {
		/*
		 * Global override table entry for this layer; written only
		 * when at least one field is configured. NOTE(review): the
		 * magic base 33 appears to be the ORT index base for flex
		 * payload — confirm against the datasheet.
		 */
		flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
			  (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
			  (layer_idx * I40E_MAX_FLXPLD_FIED);
		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
	}

	for (i = 0; i < num; i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		/* record the info in fdir structure (byte -> word units) */
		pf->fdir.flex_set[field_idx].src_offset =
			flex_pit[i].src_offset / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].size =
			flex_pit[i].size / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].dst_offset =
			flex_pit[i].dst_offset / sizeof(uint16_t);
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				pf->fdir.flex_set[field_idx].size,
				pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
	}
	/*
	 * Unused PIT entries must still carry monotonically increasing
	 * source offsets; start just past the last configured field.
	 */
	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
				pf->fdir.flex_set[field_idx].size;

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
			   NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
			flx_pit);
		min_next_off++;
	}
}
592
593 /*
594  * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
595  * @pf: board private structure
596  * @pctype: packet classify type
597  * @flex_masks: mask for flexible payload
598  */
599 static void
600 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
601                 enum i40e_filter_pctype pctype,
602                 const struct rte_eth_fdir_flex_mask *mask_cfg)
603 {
604         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
605         struct i40e_fdir_flex_mask *flex_mask;
606         uint32_t flxinset, fd_mask;
607         uint16_t mask_tmp;
608         uint8_t i, nb_bitmask = 0;
609
610         flex_mask = &pf->fdir.flex_mask[pctype];
611         memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
612         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
613                 mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
614                 if (mask_tmp != 0x0) {
615                         flex_mask->word_mask |=
616                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
617                         if (mask_tmp != UINT16_MAX) {
618                                 /* set bit mask */
619                                 flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
620                                 flex_mask->bitmask[nb_bitmask].offset =
621                                         i / sizeof(uint16_t);
622                                 nb_bitmask++;
623                         }
624                 }
625         }
626         /* write mask to hw */
627         flxinset = (flex_mask->word_mask <<
628                 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
629                 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
630         i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
631
632         for (i = 0; i < nb_bitmask; i++) {
633                 fd_mask = (flex_mask->bitmask[i].mask <<
634                         I40E_PRTQF_FD_MSK_MASK_SHIFT) &
635                         I40E_PRTQF_FD_MSK_MASK_MASK;
636                 fd_mask |= ((flex_mask->bitmask[i].offset +
637                         I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
638                         I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
639                         I40E_PRTQF_FD_MSK_OFFSET_MASK;
640                 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
641         }
642 }
643
644 /*
645  * Enable/disable flow director RX processing in vector routines.
646  */
647 void
648 i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on)
649 {
650         int32_t i;
651
652         for (i = 0; i < dev->data->nb_rx_queues; i++) {
653                 struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
654                 if (!rxq)
655                         continue;
656                 rxq->fdir_enabled = on;
657         }
658         PMD_DRV_LOG(DEBUG, "Flow Director processing on RX set to %d", on);
659 }
660
/*
 * Configure flow director related settings for @dev: flush any existing
 * filters, enable the FD feature in hardware, and program the flexible
 * payload extraction rules and masks from the device configuration.
 *
 * Returns 0 on success, a negative errno-style value otherwise.
 */
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_fdir_flex_conf *conf;
	enum i40e_filter_pctype pctype;
	uint32_t val;
	uint8_t i;
	int ret = 0;

	/*
	 * Configuration needs to be done before flow director filters
	 * are added. If filters exist, flush them.
	 */
	if (i40e_fdir_empty(hw) < 0) {
		ret = i40e_fdir_flush(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
			return ret;
		}
	}

	/* enable FDIR filter */
	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);

	i40e_init_flx_pld(pf); /* set flex config to default value */

	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
	ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, " invalid configuration arguments.");
		return -EINVAL;
	}

	/*
	 * Flex payload uses global registers; skip it when sharing the
	 * device with other drivers (support_multi_driver).
	 */
	if (!pf->support_multi_driver) {
		/* configure flex payload */
		for (i = 0; i < conf->nb_payloads; i++)
			i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
		/* configure flex mask*/
		for (i = 0; i < conf->nb_flexmasks; i++) {
			if (hw->mac.type == I40E_MAC_X722) {
				/*
				 * X722 remaps pctypes; read the effective
				 * value from the FD pctype register.
				 */
				pctype = (enum i40e_filter_pctype)
					  i40e_read_rx_ctl(hw,
						I40E_GLQF_FD_PCTYPES(
						(int)i40e_flowtype_to_pctype(
						pf->adapter,
						conf->flex_mask[i].flow_type)));
			} else {
				pctype = i40e_flowtype_to_pctype(pf->adapter,
						  conf->flex_mask[i].flow_type);
			}

			i40e_set_flex_mask_on_pctype(pf, pctype,
						     &conf->flex_mask[i]);
		}
	} else {
		PMD_DRV_LOG(ERR, "Not support flexible payload.");
	}

	/* Enable FDIR processing in RX routines */
	i40e_fdir_rx_proc_enable(dev, 1);

	return ret;
}
733
734 static inline int
735 i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
736                            unsigned char *raw_pkt,
737                            bool vlan)
738 {
739         static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
740         uint16_t *ether_type;
741         uint8_t len = 2 * sizeof(struct rte_ether_addr);
742         struct rte_ipv4_hdr *ip;
743         struct rte_ipv6_hdr *ip6;
744         static const uint8_t next_proto[] = {
745                 [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
746                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
747                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
748                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
749                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
750                 [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
751                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
752                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
753                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
754                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
755         };
756
757         raw_pkt += 2 * sizeof(struct rte_ether_addr);
758         if (vlan && fdir_input->flow_ext.vlan_tci) {
759                 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
760                 rte_memcpy(raw_pkt + sizeof(uint16_t),
761                            &fdir_input->flow_ext.vlan_tci,
762                            sizeof(uint16_t));
763                 raw_pkt += sizeof(vlan_frame);
764                 len += sizeof(vlan_frame);
765         }
766         ether_type = (uint16_t *)raw_pkt;
767         raw_pkt += sizeof(uint16_t);
768         len += sizeof(uint16_t);
769
770         switch (fdir_input->flow_type) {
771         case RTE_ETH_FLOW_L2_PAYLOAD:
772                 *ether_type = fdir_input->flow.l2_flow.ether_type;
773                 break;
774         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
775         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
776         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
777         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
778         case RTE_ETH_FLOW_FRAG_IPV4:
779                 ip = (struct rte_ipv4_hdr *)raw_pkt;
780
781                 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
782                 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
783                 /* set len to by default */
784                 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
785                 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
786                                         fdir_input->flow.ip4_flow.proto :
787                                         next_proto[fdir_input->flow_type];
788                 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
789                                         fdir_input->flow.ip4_flow.ttl :
790                                         I40E_FDIR_IP_DEFAULT_TTL;
791                 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
792                 /*
793                  * The source and destination fields in the transmitted packet
794                  * need to be presented in a reversed order with respect
795                  * to the expected received packets.
796                  */
797                 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
798                 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
799                 len += sizeof(struct rte_ipv4_hdr);
800                 break;
801         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
802         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
803         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
804         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
805         case RTE_ETH_FLOW_FRAG_IPV6:
806                 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
807
808                 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
809                 ip6->vtc_flow =
810                         rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
811                                          (fdir_input->flow.ipv6_flow.tc <<
812                                           I40E_FDIR_IPv6_TC_OFFSET));
813                 ip6->payload_len =
814                         rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
815                 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
816                                         fdir_input->flow.ipv6_flow.proto :
817                                         next_proto[fdir_input->flow_type];
818                 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
819                                         fdir_input->flow.ipv6_flow.hop_limits :
820                                         I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
821                 /*
822                  * The source and destination fields in the transmitted packet
823                  * need to be presented in a reversed order with respect
824                  * to the expected received packets.
825                  */
826                 rte_memcpy(&(ip6->src_addr),
827                            &(fdir_input->flow.ipv6_flow.dst_ip),
828                            IPV6_ADDR_LEN);
829                 rte_memcpy(&(ip6->dst_addr),
830                            &(fdir_input->flow.ipv6_flow.src_ip),
831                            IPV6_ADDR_LEN);
832                 len += sizeof(struct rte_ipv6_hdr);
833                 break;
834         default:
835                 PMD_DRV_LOG(ERR, "unknown flow type %u.",
836                             fdir_input->flow_type);
837                 return -1;
838         }
839         return len;
840 }
841
842
843 /*
844  * i40e_fdir_construct_pkt - construct packet based on fields in input
845  * @pf: board private structure
846  * @fdir_input: input set of the flow director entry
847  * @raw_pkt: a packet to be constructed
848  */
849 static int
850 i40e_fdir_construct_pkt(struct i40e_pf *pf,
851                              const struct rte_eth_fdir_input *fdir_input,
852                              unsigned char *raw_pkt)
853 {
854         unsigned char *payload, *ptr;
855         struct rte_udp_hdr *udp;
856         struct rte_tcp_hdr *tcp;
857         struct rte_sctp_hdr *sctp;
858         uint8_t size, dst = 0;
859         uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
860         int len;
861
862         /* fill the ethernet and IP head */
863         len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
864                                          !!fdir_input->flow_ext.vlan_tci);
865         if (len < 0)
866                 return -EINVAL;
867
868         /* fill the L4 head */
869         switch (fdir_input->flow_type) {
870         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
871                 udp = (struct rte_udp_hdr *)(raw_pkt + len);
872                 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
873                 /*
874                  * The source and destination fields in the transmitted packet
875                  * need to be presented in a reversed order with respect
876                  * to the expected received packets.
877                  */
878                 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
879                 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
880                 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
881                 break;
882
883         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
884                 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
885                 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
886                 /*
887                  * The source and destination fields in the transmitted packet
888                  * need to be presented in a reversed order with respect
889                  * to the expected received packets.
890                  */
891                 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
892                 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
893                 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
894                 break;
895
896         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
897                 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
898                 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
899                 /*
900                  * The source and destination fields in the transmitted packet
901                  * need to be presented in a reversed order with respect
902                  * to the expected received packets.
903                  */
904                 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
905                 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
906                 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
907                 break;
908
909         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
910         case RTE_ETH_FLOW_FRAG_IPV4:
911                 payload = raw_pkt + len;
912                 set_idx = I40E_FLXPLD_L3_IDX;
913                 break;
914
915         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
916                 udp = (struct rte_udp_hdr *)(raw_pkt + len);
917                 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
918                 /*
919                  * The source and destination fields in the transmitted packet
920                  * need to be presented in a reversed order with respect
921                  * to the expected received packets.
922                  */
923                 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
924                 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
925                 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
926                 break;
927
928         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
929                 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
930                 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
931                 /*
932                  * The source and destination fields in the transmitted packet
933                  * need to be presented in a reversed order with respect
934                  * to the expected received packets.
935                  */
936                 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
937                 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
938                 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
939                 break;
940
941         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
942                 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
943                 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
944                 /*
945                  * The source and destination fields in the transmitted packet
946                  * need to be presented in a reversed order with respect
947                  * to the expected received packets.
948                  */
949                 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
950                 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
951                 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
952                 break;
953
954         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
955         case RTE_ETH_FLOW_FRAG_IPV6:
956                 payload = raw_pkt + len;
957                 set_idx = I40E_FLXPLD_L3_IDX;
958                 break;
959         case RTE_ETH_FLOW_L2_PAYLOAD:
960                 payload = raw_pkt + len;
961                 /*
962                  * ARP packet is a special case on which the payload
963                  * starts after the whole ARP header
964                  */
965                 if (fdir_input->flow.l2_flow.ether_type ==
966                                 rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
967                         payload += sizeof(struct rte_arp_hdr);
968                 set_idx = I40E_FLXPLD_L2_IDX;
969                 break;
970         default:
971                 PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
972                 return -EINVAL;
973         }
974
975         /* fill the flexbytes to payload */
976         for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
977                 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
978                 size = pf->fdir.flex_set[pit_idx].size;
979                 if (size == 0)
980                         continue;
981                 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
982                 ptr = payload +
983                         pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
984                 rte_memcpy(ptr,
985                                  &fdir_input->flow_ext.flexbytes[dst],
986                                  size * sizeof(uint16_t));
987         }
988
989         return 0;
990 }
991
992 static struct i40e_customized_pctype *
993 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
994 {
995         struct i40e_customized_pctype *cus_pctype;
996         enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
997
998         for (; i < I40E_CUSTOMIZED_MAX; i++) {
999                 cus_pctype = &pf->customized_pctype[i];
1000                 if (pctype == cus_pctype->pctype)
1001                         return cus_pctype;
1002         }
1003         return NULL;
1004 }
1005
1006 static inline int
1007 fill_ip6_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
1008                 uint8_t next_proto, uint8_t len, uint16_t *ether_type)
1009 {
1010         struct rte_ipv6_hdr *ip6;
1011
1012         ip6 = (struct rte_ipv6_hdr *)raw_pkt;
1013
1014         *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1015         ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1016                 (fdir_input->flow.ipv6_flow.tc << I40E_FDIR_IPv6_TC_OFFSET));
1017         ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1018         ip6->proto = fdir_input->flow.ipv6_flow.proto ?
1019                 fdir_input->flow.ipv6_flow.proto : next_proto;
1020         ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
1021                 fdir_input->flow.ipv6_flow.hop_limits :
1022                 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1023         /**
1024          * The source and destination fields in the transmitted packet
1025          * need to be presented in a reversed order with respect
1026          * to the expected received packets.
1027          */
1028         rte_memcpy(&ip6->src_addr, &fdir_input->flow.ipv6_flow.dst_ip,
1029                 IPV6_ADDR_LEN);
1030         rte_memcpy(&ip6->dst_addr, &fdir_input->flow.ipv6_flow.src_ip,
1031                 IPV6_ADDR_LEN);
1032         len += sizeof(struct rte_ipv6_hdr);
1033
1034         return len;
1035 }
1036
1037 static inline int
1038 fill_ip4_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
1039                 uint8_t next_proto, uint8_t len, uint16_t *ether_type)
1040 {
1041         struct rte_ipv4_hdr *ip4;
1042
1043         ip4 = (struct rte_ipv4_hdr *)raw_pkt;
1044
1045         *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1046         ip4->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1047         /* set len to by default */
1048         ip4->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
1049         ip4->time_to_live = fdir_input->flow.ip4_flow.ttl ?
1050                 fdir_input->flow.ip4_flow.ttl :
1051                 I40E_FDIR_IP_DEFAULT_TTL;
1052         ip4->type_of_service = fdir_input->flow.ip4_flow.tos;
1053         ip4->next_proto_id = fdir_input->flow.ip4_flow.proto ?
1054                 fdir_input->flow.ip4_flow.proto : next_proto;
1055         /**
1056          * The source and destination fields in the transmitted packet
1057          * need to be presented in a reversed order with respect
1058          * to the expected received packets.
1059          */
1060         ip4->src_addr = fdir_input->flow.ip4_flow.dst_ip;
1061         ip4->dst_addr = fdir_input->flow.ip4_flow.src_ip;
1062         len += sizeof(struct rte_ipv4_hdr);
1063
1064         return len;
1065 }
1066
1067 static inline int
1068 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
1069                                 const struct i40e_fdir_input *fdir_input,
1070                                 unsigned char *raw_pkt,
1071                                 bool vlan)
1072 {
1073         struct i40e_customized_pctype *cus_pctype = NULL;
1074         static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
1075         uint16_t *ether_type;
1076         uint8_t len = 2 * sizeof(struct rte_ether_addr);
1077         uint8_t pctype = fdir_input->pctype;
1078         bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
1079         static const uint8_t next_proto[] = {
1080                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
1081                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
1082                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
1083                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
1084                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
1085                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
1086                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
1087                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
1088                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
1089                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
1090         };
1091
1092         rte_memcpy(raw_pkt, &fdir_input->flow.l2_flow.dst,
1093                 sizeof(struct rte_ether_addr));
1094         rte_memcpy(raw_pkt + sizeof(struct rte_ether_addr),
1095                 &fdir_input->flow.l2_flow.src,
1096                 sizeof(struct rte_ether_addr));
1097         raw_pkt += 2 * sizeof(struct rte_ether_addr);
1098
1099         if (vlan && fdir_input->flow_ext.vlan_tci) {
1100                 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
1101                 rte_memcpy(raw_pkt + sizeof(uint16_t),
1102                            &fdir_input->flow_ext.vlan_tci,
1103                            sizeof(uint16_t));
1104                 raw_pkt += sizeof(vlan_frame);
1105                 len += sizeof(vlan_frame);
1106         }
1107         ether_type = (uint16_t *)raw_pkt;
1108         raw_pkt += sizeof(uint16_t);
1109         len += sizeof(uint16_t);
1110
1111         if (is_customized_pctype) {
1112                 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1113                 if (!cus_pctype) {
1114                         PMD_DRV_LOG(ERR, "unknown pctype %u.",
1115                                     fdir_input->pctype);
1116                         return -1;
1117                 }
1118         }
1119
1120         if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
1121                 *ether_type = fdir_input->flow.l2_flow.ether_type;
1122         else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
1123                  pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
1124                  pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
1125                  pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1126                  pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
1127                  pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1128                  pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1129                  pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1130                  pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1131                  pctype == I40E_FILTER_PCTYPE_FRAG_IPV6 ||
1132                  is_customized_pctype) {
1133                 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
1134                         pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
1135                         pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
1136                         pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1137                         pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1138                         len = fill_ip4_head(fdir_input, raw_pkt,
1139                                         next_proto[pctype], len, ether_type);
1140                 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1141                         pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1142                         pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1143                         pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1144                         pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1145                         len = fill_ip6_head(fdir_input, raw_pkt,
1146                                         next_proto[pctype], len,
1147                                         ether_type);
1148                 } else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1149                          cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1150                          cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1151                          cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1152                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
1153                                         len, ether_type);
1154                 } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3) {
1155                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_L2TP,
1156                                         len, ether_type);
1157                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
1158                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_ESP,
1159                                         len, ether_type);
1160                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1161                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
1162                                         len, ether_type);
1163                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1164                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
1165                                         len, ether_type);
1166                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6)
1167                         len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_ESP,
1168                                         len, ether_type);
1169                 else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP)
1170                         len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_UDP,
1171                                         len, ether_type);
1172                 else if (cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3)
1173                         len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_L2TP,
1174                                         len, ether_type);
1175         } else {
1176                 PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
1177                 return -1;
1178         }
1179
1180         return len;
1181 }
1182
1183 /**
1184  * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
1185  * @pf: board private structure
1186  * @fdir_input: input set of the flow director entry
1187  * @raw_pkt: a packet to be constructed
1188  */
1189 static int
1190 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
1191                              const struct i40e_fdir_input *fdir_input,
1192                              unsigned char *raw_pkt)
1193 {
1194         unsigned char *payload = NULL;
1195         unsigned char *ptr;
1196         struct rte_udp_hdr *udp;
1197         struct rte_tcp_hdr *tcp;
1198         struct rte_sctp_hdr *sctp;
1199         struct rte_flow_item_gtp *gtp;
1200         struct rte_ipv4_hdr *gtp_ipv4;
1201         struct rte_ipv6_hdr *gtp_ipv6;
1202         struct rte_flow_item_l2tpv3oip *l2tpv3oip;
1203         struct rte_flow_item_esp *esp;
1204         struct rte_ipv4_hdr *esp_ipv4;
1205         struct rte_ipv6_hdr *esp_ipv6;
1206
1207         uint8_t size, dst = 0;
1208         uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
1209         int len;
1210         uint8_t pctype = fdir_input->pctype;
1211         struct i40e_customized_pctype *cus_pctype;
1212
1213         /* raw pcket template - just copy contents of the raw packet */
1214         if (fdir_input->flow_ext.pkt_template) {
1215                 memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
1216                        fdir_input->flow.raw_flow.length);
1217                 return 0;
1218         }
1219
1220         /* fill the ethernet and IP head */
1221         len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
1222                                               !!fdir_input->flow_ext.vlan_tci);
1223         if (len < 0)
1224                 return -EINVAL;
1225
1226         /* fill the L4 head */
1227         if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
1228                 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1229                 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1230                 /**
1231                  * The source and destination fields in the transmitted packet
1232                  * need to be presented in a reversed order with respect
1233                  * to the expected received packets.
1234                  */
1235                 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
1236                 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
1237                 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1238         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
1239                 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1240                 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1241                 /**
1242                  * The source and destination fields in the transmitted packet
1243                  * need to be presented in a reversed order with respect
1244                  * to the expected received packets.
1245                  */
1246                 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
1247                 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
1248                 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1249         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
1250                 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1251                 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1252                 /**
1253                  * The source and destination fields in the transmitted packet
1254                  * need to be presented in a reversed order with respect
1255                  * to the expected received packets.
1256                  */
1257                 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
1258                 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
1259                 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
1260         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1261                    pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1262                 payload = raw_pkt + len;
1263                 set_idx = I40E_FLXPLD_L3_IDX;
1264         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1265                 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1266                 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1267                 /**
1268                  * The source and destination fields in the transmitted packet
1269                  * need to be presented in a reversed order with respect
1270                  * to the expected received packets.
1271                  */
1272                 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1273                 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1274                 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1275         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1276                 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1277                 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1278                 /**
1279                  * The source and destination fields in the transmitted packet
1280                  * need to be presented in a reversed order with respect
1281                  * to the expected received packets.
1282                  */
1283                 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1284                 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
1285                 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
1286         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1287                 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1288                 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1289                 /**
1290                  * The source and destination fields in the transmitted packet
1291                  * need to be presented in a reversed order with respect
1292                  * to the expected received packets.
1293                  */
1294                 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1295                 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1296                 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1297         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1298                    pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1299                 payload = raw_pkt + len;
1300                 set_idx = I40E_FLXPLD_L3_IDX;
1301         } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1302                 payload = raw_pkt + len;
1303                 /**
1304                  * ARP packet is a special case on which the payload
1305                  * starts after the whole ARP header
1306                  */
1307                 if (fdir_input->flow.l2_flow.ether_type ==
1308                                 rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
1309                         payload += sizeof(struct rte_arp_hdr);
1310                 set_idx = I40E_FLXPLD_L2_IDX;
1311         } else if (fdir_input->flow_ext.customized_pctype) {
1312                 /* If customized pctype is used */
1313                 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1314                 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1315                     cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1316                     cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1317                     cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1318                         udp = (struct rte_udp_hdr *)(raw_pkt + len);
1319                         udp->dgram_len =
1320                                 rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1321
1322                         gtp = (struct rte_flow_item_gtp *)
1323                                 ((unsigned char *)udp +
1324                                         sizeof(struct rte_udp_hdr));
1325                         gtp->msg_len =
1326                                 rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1327                         gtp->teid = fdir_input->flow.gtp_flow.teid;
1328                         gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1329
1330                         /* GTP-C message type is not supported. */
1331                         if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1332                                 udp->dst_port =
1333                                       rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1334                                 gtp->v_pt_rsv_flags =
1335                                         I40E_FDIR_GTP_VER_FLAG_0X32;
1336                         } else {
1337                                 udp->dst_port =
1338                                       rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1339                                 gtp->v_pt_rsv_flags =
1340                                         I40E_FDIR_GTP_VER_FLAG_0X30;
1341                         }
1342
1343                         if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1344                                 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1345                                 gtp_ipv4 = (struct rte_ipv4_hdr *)
1346                                         ((unsigned char *)gtp +
1347                                          sizeof(struct rte_flow_item_gtp));
1348                                 gtp_ipv4->version_ihl =
1349                                         I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1350                                 gtp_ipv4->next_proto_id = IPPROTO_IP;
1351                                 gtp_ipv4->total_length =
1352                                         rte_cpu_to_be_16(
1353                                                 I40E_FDIR_INNER_IP_DEFAULT_LEN);
1354                                 payload = (unsigned char *)gtp_ipv4 +
1355                                         sizeof(struct rte_ipv4_hdr);
1356                         } else if (cus_pctype->index ==
1357                                    I40E_CUSTOMIZED_GTPU_IPV6) {
1358                                 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1359                                 gtp_ipv6 = (struct rte_ipv6_hdr *)
1360                                         ((unsigned char *)gtp +
1361                                          sizeof(struct rte_flow_item_gtp));
1362                                 gtp_ipv6->vtc_flow =
1363                                         rte_cpu_to_be_32(
1364                                                I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1365                                                (0 << I40E_FDIR_IPv6_TC_OFFSET));
1366                                 gtp_ipv6->proto = IPPROTO_NONE;
1367                                 gtp_ipv6->payload_len =
1368                                         rte_cpu_to_be_16(
1369                                               I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1370                                 gtp_ipv6->hop_limits =
1371                                         I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1372                                 payload = (unsigned char *)gtp_ipv6 +
1373                                         sizeof(struct rte_ipv6_hdr);
1374                         } else
1375                                 payload = (unsigned char *)gtp +
1376                                         sizeof(struct rte_flow_item_gtp);
1377                 } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3 ||
1378                            cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3) {
1379                         l2tpv3oip = (struct rte_flow_item_l2tpv3oip *)(raw_pkt
1380                                                                        + len);
1381
1382                         if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3)
1383                                 l2tpv3oip->session_id =
1384                                  fdir_input->flow.ip4_l2tpv3oip_flow.session_id;
1385                         else
1386                                 l2tpv3oip->session_id =
1387                                  fdir_input->flow.ip6_l2tpv3oip_flow.session_id;
1388                         payload = (unsigned char *)l2tpv3oip +
1389                                 sizeof(struct rte_flow_item_l2tpv3oip);
1390                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4 ||
1391                         cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6 ||
1392                         cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP ||
1393                         cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1394                         if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
1395                                 esp_ipv4 = (struct rte_ipv4_hdr *)
1396                                         (raw_pkt + len);
1397                                 esp = (struct rte_flow_item_esp *)esp_ipv4;
1398                                 esp->hdr.spi =
1399                                         fdir_input->flow.esp_ipv4_flow.spi;
1400                                 payload = (unsigned char *)esp +
1401                                         sizeof(struct rte_esp_hdr);
1402                                 len += sizeof(struct rte_esp_hdr);
1403                         } else if (cus_pctype->index ==
1404                                         I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1405                                 esp_ipv4 = (struct rte_ipv4_hdr *)
1406                                         (raw_pkt + len);
1407                                 udp = (struct rte_udp_hdr *)esp_ipv4;
1408                                 udp->dst_port = rte_cpu_to_be_16
1409                                         (I40E_FDIR_ESP_DST_PORT);
1410
1411                                 udp->dgram_len = rte_cpu_to_be_16
1412                                                 (I40E_FDIR_UDP_DEFAULT_LEN);
1413                                 esp = (struct rte_flow_item_esp *)
1414                                         ((unsigned char *)esp_ipv4 +
1415                                                 sizeof(struct rte_udp_hdr));
1416                                 esp->hdr.spi =
1417                                         fdir_input->flow.esp_ipv4_udp_flow.spi;
1418                                 payload = (unsigned char *)esp +
1419                                         sizeof(struct rte_esp_hdr);
1420                                 len += sizeof(struct rte_udp_hdr) +
1421                                                 sizeof(struct rte_esp_hdr);
1422                         } else if (cus_pctype->index ==
1423                                         I40E_CUSTOMIZED_ESP_IPV6) {
1424                                 esp_ipv6 = (struct rte_ipv6_hdr *)
1425                                         (raw_pkt + len);
1426                                 esp = (struct rte_flow_item_esp *)esp_ipv6;
1427                                 esp->hdr.spi =
1428                                         fdir_input->flow.esp_ipv6_flow.spi;
1429                                 payload = (unsigned char *)esp +
1430                                         sizeof(struct rte_esp_hdr);
1431                                 len += sizeof(struct rte_esp_hdr);
1432                         } else if (cus_pctype->index ==
1433                                         I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1434                                 esp_ipv6 = (struct rte_ipv6_hdr *)
1435                                         (raw_pkt + len);
1436                                 udp = (struct rte_udp_hdr *)esp_ipv6;
1437                                 udp->dst_port = rte_cpu_to_be_16
1438                                         (I40E_FDIR_ESP_DST_PORT);
1439
1440                                 udp->dgram_len = rte_cpu_to_be_16
1441                                         (I40E_FDIR_UDP_DEFAULT_LEN);
1442                                 esp = (struct rte_flow_item_esp *)
1443                                         ((unsigned char *)esp_ipv6 +
1444                                                 sizeof(struct rte_udp_hdr));
1445                                 esp->hdr.spi =
1446                                         fdir_input->flow.esp_ipv6_udp_flow.spi;
1447                                 payload = (unsigned char *)esp +
1448                                         sizeof(struct rte_esp_hdr);
1449                                 len += sizeof(struct rte_udp_hdr) +
1450                                                 sizeof(struct rte_esp_hdr);
1451                         }
1452                 }
1453         } else {
1454                 PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
1455                 return -1;
1456         }
1457
1458         /* fill the flexbytes to payload */
1459         for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1460                 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1461                 size = pf->fdir.flex_set[pit_idx].size;
1462                 if (size == 0)
1463                         continue;
1464                 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1465                 ptr = payload +
1466                       pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1467                 (void)rte_memcpy(ptr,
1468                                  &fdir_input->flow_ext.flexbytes[dst],
1469                                  size * sizeof(uint16_t));
1470         }
1471
1472         return 0;
1473 }
1474
1475 /* Construct the tx flags */
1476 static inline uint64_t
1477 i40e_build_ctob(uint32_t td_cmd,
1478                 uint32_t td_offset,
1479                 unsigned int size,
1480                 uint32_t td_tag)
1481 {
1482         return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1483                         ((uint64_t)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
1484                         ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1485                         ((uint64_t)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1486                         ((uint64_t)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
1487 }
1488
/*
 * check the programming status descriptor in rx queue.
 * done after Programming Flow Director is programmed on
 * tx queue
 *
 * Returns 0 when no error report (or no DD descriptor) is found,
 * -1 when the hardware reported an add/delete programming failure.
 * Consumes (clears and recycles) at most one rx descriptor per call.
 */
static inline int
i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
{
	volatile union i40e_rx_desc *rxdp;
	uint64_t qword1;
	uint32_t rx_status;
	uint32_t len, id;
	uint32_t error;
	int ret = 0;

	/* Peek at the next writeback descriptor on the FDIR rx ring. */
	rxdp = &rxq->rx_ring[rxq->rx_tail];
	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
			>> I40E_RXD_QW1_STATUS_SHIFT;

	/* Only a descriptor with DD set carries a valid status report. */
	if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
		len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
		id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
			    I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

		/* Must be a flow-director filter status report to decode. */
		if (len  == I40E_RX_PROG_STATUS_DESC_LENGTH &&
		    id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
			error = (qword1 &
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
			/* Filter table full: the add request failed. */
			if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			/* No matching entry: the delete request failed. */
			} else if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else
				PMD_DRV_LOG(ERR, "invalid programming status"
					    " reported, error = %u.", error);
		} else
			PMD_DRV_LOG(INFO, "unknown programming status"
				    " reported, len = %d, id = %u.", len, id);
		/* Recycle the descriptor: clear status and advance tail. */
		rxdp->wb.qword1.status_error_len = 0;
		rxq->rx_tail++;
		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
			rxq->rx_tail = 0;
		/* Hand the slot back to hardware (tail lags head by one). */
		if (rxq->rx_tail == 0)
			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
		else
			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
	}

	return ret;
}
1551
1552 static inline void
1553 i40e_fdir_programming_status_cleanup(struct i40e_rx_queue *rxq)
1554 {
1555         uint16_t retry_count = 0;
1556
1557         /* capture the previous error report(if any) from rx ring */
1558         while ((i40e_check_fdir_programming_status(rxq) < 0) &&
1559                         (++retry_count < I40E_FDIR_NUM_RX_DESC))
1560                 PMD_DRV_LOG(INFO, "error report captured.");
1561 }
1562
1563 static int
1564 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1565                          struct i40e_fdir_filter *filter)
1566 {
1567         rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
1568         if (input->input.flow_ext.pkt_template) {
1569                 filter->fdir.input.flow.raw_flow.packet = NULL;
1570                 filter->fdir.input.flow.raw_flow.length =
1571                         rte_hash_crc(input->input.flow.raw_flow.packet,
1572                                      input->input.flow.raw_flow.length,
1573                                      input->input.flow.raw_flow.pctype);
1574         }
1575         return 0;
1576 }
1577
1578 /* Check if there exists the flow director filter */
1579 static struct i40e_fdir_filter *
1580 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1581                         const struct i40e_fdir_input *input)
1582 {
1583         int ret;
1584
1585         if (input->flow_ext.pkt_template)
1586                 ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1587                                                 (const void *)input,
1588                                                 input->flow.raw_flow.length);
1589         else
1590                 ret = rte_hash_lookup(fdir_info->hash_table,
1591                                       (const void *)input);
1592         if (ret < 0)
1593                 return NULL;
1594
1595         return fdir_info->hash_map[ret];
1596 }
1597
1598 /* Add a flow director filter into the SW list */
1599 static int
1600 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1601 {
1602         struct i40e_fdir_info *fdir_info = &pf->fdir;
1603         struct i40e_fdir_filter *hash_filter;
1604         int ret;
1605
1606         if (filter->fdir.input.flow_ext.pkt_template)
1607                 ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1608                                  &filter->fdir.input,
1609                                  filter->fdir.input.flow.raw_flow.length);
1610         else
1611                 ret = rte_hash_add_key(fdir_info->hash_table,
1612                                        &filter->fdir.input);
1613         if (ret < 0) {
1614                 PMD_DRV_LOG(ERR,
1615                             "Failed to insert fdir filter to hash table %d!",
1616                             ret);
1617                 return ret;
1618         }
1619
1620         if (fdir_info->hash_map[ret])
1621                 return -1;
1622
1623         hash_filter = &fdir_info->fdir_filter_array[ret];
1624         rte_memcpy(hash_filter, filter, sizeof(*filter));
1625         fdir_info->hash_map[ret] = hash_filter;
1626         TAILQ_INSERT_TAIL(&fdir_info->fdir_list, hash_filter, rules);
1627
1628         return 0;
1629 }
1630
1631 /* Delete a flow director filter from the SW list */
1632 int
1633 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1634 {
1635         struct i40e_fdir_info *fdir_info = &pf->fdir;
1636         struct i40e_fdir_filter *filter;
1637         int ret;
1638
1639         if (input->flow_ext.pkt_template)
1640                 ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1641                                                  input,
1642                                                  input->flow.raw_flow.length);
1643         else
1644                 ret = rte_hash_del_key(fdir_info->hash_table, input);
1645         if (ret < 0) {
1646                 PMD_DRV_LOG(ERR,
1647                             "Failed to delete fdir filter to hash table %d!",
1648                             ret);
1649                 return ret;
1650         }
1651         filter = fdir_info->hash_map[ret];
1652         fdir_info->hash_map[ret] = NULL;
1653
1654         TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1655
1656         return 0;
1657 }
1658
1659 struct rte_flow *
1660 i40e_fdir_entry_pool_get(struct i40e_fdir_info *fdir_info)
1661 {
1662         struct rte_flow *flow = NULL;
1663         uint64_t slab = 0;
1664         uint32_t pos = 0;
1665         uint32_t i = 0;
1666         int ret;
1667
1668         if (fdir_info->fdir_actual_cnt >=
1669                         fdir_info->fdir_space_size) {
1670                 PMD_DRV_LOG(ERR, "Fdir space full");
1671                 return NULL;
1672         }
1673
1674         ret = rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos,
1675                         &slab);
1676
1677         /* normally this won't happen as the fdir_actual_cnt should be
1678          * same with the number of the set bits in fdir_flow_pool,
1679          * but anyway handle this error condition here for safe
1680          */
1681         if (ret == 0) {
1682                 PMD_DRV_LOG(ERR, "fdir_actual_cnt out of sync");
1683                 return NULL;
1684         }
1685
1686         i = rte_bsf64(slab);
1687         pos += i;
1688         rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
1689         flow = &fdir_info->fdir_flow_pool.pool[pos].flow;
1690
1691         memset(flow, 0, sizeof(struct rte_flow));
1692
1693         return flow;
1694 }
1695
1696 void
1697 i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
1698                 struct rte_flow *flow)
1699 {
1700         struct i40e_fdir_entry *f;
1701
1702         f = FLOW_TO_FLOW_BITMAP(flow);
1703         rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, f->idx);
1704 }
1705
1706 /*
1707  * i40e_add_del_fdir_filter - add or remove a flow director filter.
1708  * @pf: board private structure
1709  * @filter: fdir filter entry
1710  * @add: 0 - delete, 1 - add
1711  */
1712 int
1713 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
1714                          const struct rte_eth_fdir_filter *filter,
1715                          bool add)
1716 {
1717         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1718         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1719         unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt[0];
1720         enum i40e_filter_pctype pctype;
1721         int ret = 0;
1722
1723         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1724                 PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
1725                         " check the mode in fdir_conf.");
1726                 return -ENOTSUP;
1727         }
1728
1729         pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
1730         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
1731                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
1732                 return -EINVAL;
1733         }
1734         if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1735                 PMD_DRV_LOG(ERR, "Invalid queue ID");
1736                 return -EINVAL;
1737         }
1738         if (filter->input.flow_ext.is_vf &&
1739                 filter->input.flow_ext.dst_id >= pf->vf_num) {
1740                 PMD_DRV_LOG(ERR, "Invalid VF ID");
1741                 return -EINVAL;
1742         }
1743
1744         memset(pkt, 0, I40E_FDIR_PKT_LEN);
1745
1746         ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
1747         if (ret < 0) {
1748                 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1749                 return ret;
1750         }
1751
1752         if (hw->mac.type == I40E_MAC_X722) {
1753                 /* get translated pctype value in fd pctype register */
1754                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1755                         hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1756         }
1757
1758         ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
1759         if (ret < 0) {
1760                 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1761                             pctype);
1762                 return ret;
1763         }
1764
1765         return ret;
1766 }
1767
1768 static inline unsigned char *
1769 i40e_find_available_buffer(struct rte_eth_dev *dev)
1770 {
1771         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1772         struct i40e_fdir_info *fdir_info = &pf->fdir;
1773         struct i40e_tx_queue *txq = pf->fdir.txq;
1774
1775         /* no available buffer
1776          * search for more available buffers from the current
1777          * descriptor, until an unavailable one
1778          */
1779         if (fdir_info->txq_available_buf_count <= 0) {
1780                 uint16_t tmp_tail;
1781                 volatile struct i40e_tx_desc *tmp_txdp;
1782
1783                 tmp_tail = txq->tx_tail;
1784                 tmp_txdp = &txq->tx_ring[tmp_tail + 1];
1785
1786                 do {
1787                         if ((tmp_txdp->cmd_type_offset_bsz &
1788                                         rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1789                                         rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1790                                 fdir_info->txq_available_buf_count++;
1791                         else
1792                                 break;
1793
1794                         tmp_tail += 2;
1795                         if (tmp_tail >= txq->nb_tx_desc)
1796                                 tmp_tail = 0;
1797                 } while (tmp_tail != txq->tx_tail);
1798         }
1799
1800         if (fdir_info->txq_available_buf_count > 0)
1801                 fdir_info->txq_available_buf_count--;
1802         else
1803                 return NULL;
1804         return (unsigned char *)fdir_info->prg_pkt[txq->tx_tail >> 1];
1805 }
1806
/**
 * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
 * @pf: board private structure
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 *
 * The SW list is updated BEFORE the hardware is programmed; on any
 * hardware failure the SW change is rolled back under error_op.
 * Returns 0 on success, negative errno-style value on failure.
 */
int
i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
			      const struct i40e_fdir_filter_conf *filter,
			      bool add)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	unsigned char *pkt = NULL;
	enum i40e_filter_pctype pctype;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *node;
	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
	bool wait_status = true;
	int ret = 0;

	if (pf->fdir.fdir_vsi == NULL) {
		PMD_DRV_LOG(ERR, "FDIR is not enabled");
		return -ENOTSUP;
	}

	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}
	if (filter->input.flow_ext.is_vf &&
	    filter->input.flow_ext.dst_id >= pf->vf_num) {
		PMD_DRV_LOG(ERR, "Invalid VF ID");
		return -EINVAL;
	}
	/* Raw-packet templates must fit the programming buffer. */
	if (filter->input.flow_ext.pkt_template) {
		if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
		    !filter->input.flow.raw_flow.packet) {
			PMD_DRV_LOG(ERR, "Invalid raw packet template"
				" flow filter parameters!");
			return -EINVAL;
		}
		pctype = filter->input.flow.raw_flow.pctype;
	} else {
		pctype = filter->input.pctype;
	}

	/* Check if there is the filter in SW list */
	memset(&check_filter, 0, sizeof(check_filter));
	i40e_fdir_filter_convert(filter, &check_filter);

	if (add) {
		/* Insert first; a duplicate rule is rejected here. */
		ret = i40e_sw_fdir_filter_insert(pf, &check_filter);
		if (ret < 0) {
			PMD_DRV_LOG(ERR,
				    "Conflict with existing flow director rules!");
			return -EINVAL;
		}

		/* NOTE(review): with invalprio==1 and guaranteed space
		 * left, hardware completion is not awaited — presumably
		 * because the add cannot fail for lack of table space;
		 * confirm against fdir_invalprio semantics.
		 */
		if (fdir_info->fdir_invalprio == 1 &&
				fdir_info->fdir_guarantee_free_space > 0)
			wait_status = false;
	} else {
		node = i40e_sw_fdir_filter_lookup(fdir_info,
				&check_filter.fdir.input);
		if (!node) {
			PMD_DRV_LOG(ERR,
				    "There's no corresponding flow firector filter!");
			return -EINVAL;
		}

		ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
		if (ret < 0) {
			PMD_DRV_LOG(ERR,
					"Error deleting fdir rule from hash table!");
			return -EINVAL;
		}

		if (fdir_info->fdir_invalprio == 1)
			wait_status = false;
	}

	/* find a buffer to store the pkt */
	pkt = i40e_find_available_buffer(dev);
	if (pkt == NULL)
		goto error_op;

	/* Build the raw match packet the hardware parses for this rule. */
	memset(pkt, 0, I40E_FDIR_PKT_LEN);
	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
		goto error_op;
	}

	if (hw->mac.type == I40E_MAC_X722) {
		/* get translated pctype value in fd pctype register */
		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
			hw, I40E_GLQF_FD_PCTYPES((int)pctype));
	}

	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add,
			wait_status);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
			    pctype);
		goto error_op;
	}

	/* Hardware accepted the rule: update usage/guarantee accounting. */
	if (add) {
		fdir_info->fdir_actual_cnt++;
		if (fdir_info->fdir_invalprio == 1 &&
				fdir_info->fdir_guarantee_free_space > 0)
			fdir_info->fdir_guarantee_free_space--;
	} else {
		fdir_info->fdir_actual_cnt--;
		if (fdir_info->fdir_invalprio == 1 &&
				fdir_info->fdir_guarantee_free_space <
				fdir_info->fdir_guarantee_total_space)
			fdir_info->fdir_guarantee_free_space++;
	}

	return ret;

error_op:
	/* roll back the SW-list change made above */
	if (add)
		i40e_sw_fdir_filter_del(pf, &check_filter.fdir.input);
	else
		i40e_sw_fdir_filter_insert(pf, &check_filter);

	return ret;
}
1939
/*
 * i40e_fdir_filter_programming - Program a flow director filter rule.
 * Is done by Flow Director Programming Descriptor followed by packet
 * structure that contains the filter fields need to match.
 * @pf: board private structure
 * @pctype: pctype
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 *
 * Fills a 2-descriptor slot on the FDIR tx queue (programming
 * descriptor + dummy data descriptor pointing at the raw packet),
 * rings the doorbell, then busy-waits for DD on the tx side and a
 * programming status report on the rx side (up to
 * I40E_FDIR_MAX_WAIT_US microseconds in total).
 * Returns 0 on success, -EINVAL for a bad action, -ETIMEDOUT when the
 * hardware never confirms.
 */
static int
i40e_fdir_filter_programming(struct i40e_pf *pf,
			enum i40e_filter_pctype pctype,
			const struct rte_eth_fdir_filter *filter,
			bool add)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct rte_eth_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id, i;
	uint8_t dest;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	/* First descriptor of the pair is the programming descriptor. */
	fdirdp = (volatile struct i40e_filter_program_desc *)
			(&(txq->tx_ring[txq->tx_tail]));

	/* QW0: target rx queue index for matched packets. */
	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					  I40E_TXD_FLTR_QW0_QINDEX_MASK);

	/* QW0: offset of the flexible payload word to report. */
	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

	/* QW0: packet classification type this rule applies to. */
	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	/* QW0: destination VSI — the target VF's VSI, or the LAN VSI. */
	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
		rte_cpu_to_le_32(((uint32_t)vsi_id <<
				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	/* QW1: program command — add/update vs remove. */
	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	/* QW1: map the fdir action to a hardware destination code. */
	if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	/* QW1: how hardware should report filter match status. */
	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status<<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	/* QW1: enable the match counter and select its index. */
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(
			((uint32_t)pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);

	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	/* Second descriptor carries the raw match packet as dummy data. */
	txdp = &(txq->tx_ring[txq->tx_tail + 1]);
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[0]);
	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS  |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
	/* Poll for DD on the data descriptor: hardware consumed the pair. */
	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
		rte_delay_us(1);
	}
	if (i >= I40E_FDIR_MAX_WAIT_US) {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
	/* totally delay 10 ms to check programming status*/
	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if (i40e_check_fdir_programming_status(rxq) >= 0)
			return 0;
		rte_delay_us(1);
	}
	PMD_DRV_LOG(ERR,
		"Failed to program FDIR filter: programming status reported.");
	return -ETIMEDOUT;
}
2074
/*
 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
 * Programming is done by writing a Flow Director Programming Descriptor,
 * followed by a transmit descriptor pointing at the pre-built dummy packet
 * that carries the filter fields to match, onto the FDIR TX queue.
 * @pf: board private structure
 * @pctype: pctype
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 * @wait_status: true - poll TX descriptor DD bit and RX programming status
 *               before returning; false - return right after the tail bump
 */
static int
i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
				  enum i40e_filter_pctype pctype,
				  const struct i40e_fdir_filter_conf *filter,
				  bool add, bool wait_status)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct i40e_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id;
	uint8_t dest;
	uint32_t i;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	/* First descriptor of the pair holds the filter programming fields. */
	fdirdp = (volatile struct i40e_filter_program_desc *)
				(&txq->tx_ring[txq->tx_tail]);

	/* RX queue index that matched packets are steered to. */
	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					  I40E_TXD_FLTR_QW0_QINDEX_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
		rte_cpu_to_le_32(((uint32_t)vsi_id <<
				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	/* PCMD selects whether this descriptor adds or removes the rule. */
	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	/* Map the requested behavior onto the HW destination field. */
	if (fdir_action->behavior == I40E_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status <<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	/* Enable the per-filter match counter and select its index. */
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(
			((uint32_t)pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);

	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	/* Second descriptor points at the dummy packet buffer for this slot;
	 * each filter consumes two descriptors, hence the tx_tail >> 1 index.
	 */
	txdp = &txq->tx_ring[txq->tx_tail + 1];
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[txq->tx_tail >> 1]);

	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS  |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();

	/* fdir program rx queue cleanup */
	i40e_fdir_programming_status_cleanup(rxq);

	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	if (wait_status) {
		/* Busy-wait for the TX descriptor's done (DD) indication. */
		for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
			if ((txdp->cmd_type_offset_bsz &
					rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
					rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
				break;
			rte_delay_us(1);
		}
		if (i >= I40E_FDIR_MAX_WAIT_US) {
			PMD_DRV_LOG(ERR,
			    "Failed to program FDIR filter: time out to get DD on tx queue.");
			return -ETIMEDOUT;
		}
		/* totally delay 10 ms to check programming status*/
		rte_delay_us(I40E_FDIR_MAX_WAIT_US);
		if (i40e_check_fdir_programming_status(rxq) < 0) {
			PMD_DRV_LOG(ERR,
			    "Failed to program FDIR filter: programming status reported.");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
2217
2218 /*
2219  * i40e_fdir_flush - clear all filters of Flow Director table
2220  * @pf: board private structure
2221  */
2222 int
2223 i40e_fdir_flush(struct rte_eth_dev *dev)
2224 {
2225         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2226         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2227         uint32_t reg;
2228         uint16_t guarant_cnt, best_cnt;
2229         uint16_t i;
2230
2231         I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
2232         I40E_WRITE_FLUSH(hw);
2233
2234         for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
2235                 rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
2236                 reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
2237                 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
2238                         break;
2239         }
2240         if (i >= I40E_FDIR_FLUSH_RETRY) {
2241                 PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
2242                 return -ETIMEDOUT;
2243         }
2244         guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
2245                                 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2246                                 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2247         best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
2248                                 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2249                                 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2250         if (guarant_cnt != 0 || best_cnt != 0) {
2251                 PMD_DRV_LOG(ERR, "Failed to flush FD table.");
2252                 return -ENOSYS;
2253         } else
2254                 PMD_DRV_LOG(INFO, "FD table Flush success.");
2255         return 0;
2256 }
2257
2258 static inline void
2259 i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
2260                         struct rte_eth_flex_payload_cfg *flex_set,
2261                         uint16_t *num)
2262 {
2263         struct i40e_fdir_flex_pit *flex_pit;
2264         struct rte_eth_flex_payload_cfg *ptr = flex_set;
2265         uint16_t src, dst, size, j, k;
2266         uint8_t i, layer_idx;
2267
2268         for (layer_idx = I40E_FLXPLD_L2_IDX;
2269              layer_idx <= I40E_FLXPLD_L4_IDX;
2270              layer_idx++) {
2271                 if (layer_idx == I40E_FLXPLD_L2_IDX)
2272                         ptr->type = RTE_ETH_L2_PAYLOAD;
2273                 else if (layer_idx == I40E_FLXPLD_L3_IDX)
2274                         ptr->type = RTE_ETH_L3_PAYLOAD;
2275                 else if (layer_idx == I40E_FLXPLD_L4_IDX)
2276                         ptr->type = RTE_ETH_L4_PAYLOAD;
2277
2278                 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
2279                         flex_pit = &pf->fdir.flex_set[layer_idx *
2280                                 I40E_MAX_FLXPLD_FIED + i];
2281                         if (flex_pit->size == 0)
2282                                 continue;
2283                         src = flex_pit->src_offset * sizeof(uint16_t);
2284                         dst = flex_pit->dst_offset * sizeof(uint16_t);
2285                         size = flex_pit->size * sizeof(uint16_t);
2286                         for (j = src, k = dst; j < src + size; j++, k++)
2287                                 ptr->src_offset[k] = j;
2288                 }
2289                 (*num)++;
2290                 ptr++;
2291         }
2292 }
2293
2294 static inline void
2295 i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
2296                         struct rte_eth_fdir_flex_mask *flex_mask,
2297                         uint16_t *num)
2298 {
2299         struct i40e_fdir_flex_mask *mask;
2300         struct rte_eth_fdir_flex_mask *ptr = flex_mask;
2301         uint16_t flow_type;
2302         uint8_t i, j;
2303         uint16_t off_bytes, mask_tmp;
2304
2305         for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2306              i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
2307              i++) {
2308                 mask =  &pf->fdir.flex_mask[i];
2309                 flow_type = i40e_pctype_to_flowtype(pf->adapter,
2310                                                     (enum i40e_filter_pctype)i);
2311                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
2312                         continue;
2313
2314                 for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
2315                         if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
2316                                 ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
2317                                 ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
2318                         } else {
2319                                 ptr->mask[j * sizeof(uint16_t)] = 0x0;
2320                                 ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
2321                         }
2322                 }
2323                 for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
2324                         off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
2325                         mask_tmp = ~mask->bitmask[j].mask;
2326                         ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
2327                         ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
2328                 }
2329                 ptr->flow_type = flow_type;
2330                 ptr++;
2331                 (*num)++;
2332         }
2333 }
2334
2335 /*
2336  * i40e_fdir_info_get - get information of Flow Director
2337  * @pf: ethernet device to get info from
2338  * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
2339  *    the flow director information.
2340  */
2341 void
2342 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
2343 {
2344         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2345         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2346         uint16_t num_flex_set = 0;
2347         uint16_t num_flex_mask = 0;
2348         uint16_t i;
2349
2350         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
2351                 fdir->mode = RTE_FDIR_MODE_PERFECT;
2352         else
2353                 fdir->mode = RTE_FDIR_MODE_NONE;
2354
2355         fdir->guarant_spc =
2356                 (uint32_t)hw->func_caps.fd_filters_guaranteed;
2357         fdir->best_spc =
2358                 (uint32_t)hw->func_caps.fd_filters_best_effort;
2359         fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
2360         fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
2361         for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
2362                 fdir->flow_types_mask[i] = 0ULL;
2363         fdir->flex_payload_unit = sizeof(uint16_t);
2364         fdir->flex_bitmask_unit = sizeof(uint16_t);
2365         fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
2366         fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
2367         fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
2368
2369         i40e_fdir_info_get_flex_set(pf,
2370                                 fdir->flex_conf.flex_set,
2371                                 &num_flex_set);
2372         i40e_fdir_info_get_flex_mask(pf,
2373                                 fdir->flex_conf.flex_mask,
2374                                 &num_flex_mask);
2375
2376         fdir->flex_conf.nb_payloads = num_flex_set;
2377         fdir->flex_conf.nb_flexmasks = num_flex_mask;
2378 }
2379
2380 /*
2381  * i40e_fdir_stat_get - get statistics of Flow Director
2382  * @pf: ethernet device to get info from
2383  * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
2384  *    the flow director statistics.
2385  */
2386 void
2387 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
2388 {
2389         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2390         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2391         uint32_t fdstat;
2392
2393         fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2394         stat->guarant_cnt =
2395                 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2396                             I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2397         stat->best_cnt =
2398                 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2399                             I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2400 }
2401
2402 static int
2403 i40e_fdir_filter_set(struct rte_eth_dev *dev,
2404                      struct rte_eth_fdir_filter_info *info)
2405 {
2406         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2407         int ret = 0;
2408
2409         if (!info) {
2410                 PMD_DRV_LOG(ERR, "Invalid pointer");
2411                 return -EFAULT;
2412         }
2413
2414         switch (info->info_type) {
2415         case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
2416                 ret = i40e_fdir_filter_inset_select(pf,
2417                                 &(info->info.input_set_conf));
2418                 break;
2419         default:
2420                 PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
2421                             info->info_type);
2422                 return -EINVAL;
2423         }
2424
2425         return ret;
2426 }
2427
2428 /*
2429  * i40e_fdir_ctrl_func - deal with all operations on flow director.
2430  * @pf: board private structure
2431  * @filter_op:operation will be taken.
2432  * @arg: a pointer to specific structure corresponding to the filter_op
2433  */
2434 int
2435 i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
2436                        enum rte_filter_op filter_op,
2437                        void *arg)
2438 {
2439         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2440         int ret = 0;
2441
2442         if ((pf->flags & I40E_FLAG_FDIR) == 0)
2443                 return -ENOTSUP;
2444
2445         if (filter_op == RTE_ETH_FILTER_NOP)
2446                 return 0;
2447
2448         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2449                 return -EINVAL;
2450
2451         switch (filter_op) {
2452         case RTE_ETH_FILTER_ADD:
2453                 ret = i40e_add_del_fdir_filter(dev,
2454                         (struct rte_eth_fdir_filter *)arg,
2455                         TRUE);
2456                 break;
2457         case RTE_ETH_FILTER_DELETE:
2458                 ret = i40e_add_del_fdir_filter(dev,
2459                         (struct rte_eth_fdir_filter *)arg,
2460                         FALSE);
2461                 break;
2462         case RTE_ETH_FILTER_FLUSH:
2463                 ret = i40e_fdir_flush(dev);
2464                 break;
2465         case RTE_ETH_FILTER_INFO:
2466                 i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
2467                 break;
2468         case RTE_ETH_FILTER_SET:
2469                 ret = i40e_fdir_filter_set(dev,
2470                         (struct rte_eth_fdir_filter_info *)arg);
2471                 break;
2472         case RTE_ETH_FILTER_STATS:
2473                 i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
2474                 break;
2475         default:
2476                 PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
2477                 ret = -EINVAL;
2478                 break;
2479         }
2480         return ret;
2481 }
2482
2483 /* Restore flow director filter */
2484 void
2485 i40e_fdir_filter_restore(struct i40e_pf *pf)
2486 {
2487         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
2488         struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
2489         struct i40e_fdir_filter *f;
2490         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2491         uint32_t fdstat;
2492         uint32_t guarant_cnt;  /**< Number of filters in guaranteed spaces. */
2493         uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
2494
2495         TAILQ_FOREACH(f, fdir_list, rules)
2496                 i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
2497
2498         fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2499         guarant_cnt =
2500                 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2501                            I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2502         best_cnt =
2503                 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2504                            I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2505
2506         PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d,  Best count: %d",
2507                     guarant_cnt, best_cnt);
2508 }