drivers/net: delete HW rings while freeing queues
[dpdk.git] / drivers / net / i40e / i40e_fdir.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_log.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
18 #include <rte_arp.h>
19 #include <rte_ip.h>
20 #include <rte_udp.h>
21 #include <rte_tcp.h>
22 #include <rte_sctp.h>
23 #include <rte_hash_crc.h>
24
25 #include "i40e_logs.h"
26 #include "base/i40e_type.h"
27 #include "base/i40e_prototype.h"
28 #include "i40e_ethdev.h"
29 #include "i40e_rxtx.h"
30
31 #define I40E_FDIR_MZ_NAME          "FDIR_MEMZONE"
32 #ifndef IPV6_ADDR_LEN
33 #define IPV6_ADDR_LEN              16
34 #endif
35
36 #ifndef IPPROTO_L2TP
37 #define IPPROTO_L2TP              115
38 #endif
39
40 #define I40E_FDIR_PKT_LEN                   512
41 #define I40E_FDIR_IP_DEFAULT_LEN            420
42 #define I40E_FDIR_IP_DEFAULT_TTL            0x40
43 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL    0x45
44 #define I40E_FDIR_TCP_DEFAULT_DATAOFF       0x50
45 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW     0x60000000
46
47 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
48 #define I40E_FDIR_IPv6_PAYLOAD_LEN          380
49 #define I40E_FDIR_UDP_DEFAULT_LEN           400
50 #define I40E_FDIR_GTP_DEFAULT_LEN           384
51 #define I40E_FDIR_INNER_IP_DEFAULT_LEN      384
52 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN    344
53
54 #define I40E_FDIR_GTPC_DST_PORT             2123
55 #define I40E_FDIR_GTPU_DST_PORT             2152
56 #define I40E_FDIR_GTP_VER_FLAG_0X30         0x30
57 #define I40E_FDIR_GTP_VER_FLAG_0X32         0x32
58 #define I40E_FDIR_GTP_MSG_TYPE_0X01         0x01
59 #define I40E_FDIR_GTP_MSG_TYPE_0XFF         0xFF
60
61 #define I40E_FDIR_ESP_DST_PORT              4500
62
63 /* Wait time for fdir filter programming */
64 #define I40E_FDIR_MAX_WAIT_US 10000
65
66 /* Wait count and interval for fdir filter flush */
67 #define I40E_FDIR_FLUSH_RETRY       50
68 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
69
70 #define I40E_COUNTER_PF           2
71 /* Statistic counter index for one pf */
72 #define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
73
74 #define I40E_FDIR_FLOWS ( \
75         (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
76         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
77         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
78         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
79         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
80         (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
81         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
82         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
83         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
84         (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
85         (1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
86
87 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
88                         enum i40e_filter_pctype pctype,
89                         const struct rte_eth_fdir_filter *filter,
90                         bool add);
91 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
92                          struct i40e_fdir_filter *filter);
93 static struct i40e_fdir_filter *
94 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
95                         const struct i40e_fdir_input *input);
96 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
97                                    struct i40e_fdir_filter *filter);
98 static int
99 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
100                                   enum i40e_filter_pctype pctype,
101                                   const struct i40e_fdir_filter_conf *filter,
102                                   bool add);
103
/*
 * i40e_fdir_rx_queue_init - program the FDIR RX queue context in hardware
 * @rxq: RX queue used for flow director programming status reports
 *
 * Clears any stale LAN RX queue context, writes a fresh context describing
 * the FDIR ring (1024B data buffers, no header split, 32B descriptors
 * unless 16B descriptors are compiled in), then arms the tail register.
 *
 * Returns I40E_SUCCESS, or the error code from the context helpers.
 */
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
	struct i40e_hmc_obj_rxq rx_ctx;
	int err = I40E_SUCCESS;

	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	/* Init the RX queue in hardware */
	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = 0;
	/* Ring base is programmed in I40E_QUEUE_BASE_ADDR_UNIT units. */
	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif
	rx_ctx.dtype = i40e_header_split_none;
	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
	rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 0;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 0;
	rx_ctx.prefena = 1;

	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
		return err;
	}
	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
		return err;
	}
	rxq->qrx_tail = hw->hw_addr +
		I40E_QRX_TAIL(rxq->vsi->base_queue);

	/* Ensure the context writes are visible before arming the tail. */
	rte_wmb();
	/* Init the RX tail register. */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return err;
}
152
/*
 * i40e_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 *
 * Creates the dedicated FDIR VSI, allocates and initializes its TX/RX
 * programming queues, switches them on in hardware, and reserves the
 * memzone that holds the FDIR programming packet. On failure, releases
 * everything acquired so far via the goto-cleanup chain below.
 *
 * Returns I40E_SUCCESS, or an i40e error code.
 */
int
i40e_fdir_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int err = I40E_SUCCESS;
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;

	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return I40E_NOT_SUPPORTED;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
			" num_filters_best_effort = %u.",
			hw->func_caps.fd_filters_guaranteed,
			hw->func_caps.fd_filters_best_effort);

	vsi = pf->fdir.fdir_vsi;
	if (vsi) {
		/* Setup is idempotent: a second call is a no-op. */
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return I40E_SUCCESS;
	}
	/* make new FDIR VSI */
	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}
	pf->fdir.fdir_vsi = vsi;

	/*Fdir tx queue setup*/
	err = i40e_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/*Fdir rx queue setup*/
	err = i40e_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = i40e_tx_queue_init(pf->fdir.txq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
		goto fail_mem;
	}

	/* need switch on before dev start*/
	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
		goto fail_mem;
	}

	/* Init the rx queue in hardware */
	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
		goto fail_mem;
	}

	/* switch on rx queue */
	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
			eth_dev->device->driver->name,
			I40E_FDIR_MZ_NAME,
			eth_dev->data->port_id);
	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
				 "flow director program packet.");
		err = I40E_ERR_NO_MEMORY;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;

	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return I40E_SUCCESS;

	/* Unwind in reverse order of acquisition. */
fail_mem:
	/* NOTE(review): the queue release callbacks do not free the
	 * "fdir_rx_ring"/"fdir_tx_ring" DMA zones reserved by the
	 * setup helpers above -- confirm they are reclaimed elsewhere
	 * (e.g. in teardown or on device close).
	 */
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
262
263 /*
264  * i40e_fdir_teardown - release the Flow Director resources
265  * @pf: board private structure
266  */
267 void
268 i40e_fdir_teardown(struct i40e_pf *pf)
269 {
270         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
271         struct i40e_vsi *vsi;
272         struct rte_eth_dev *dev = pf->adapter->eth_dev;
273
274         vsi = pf->fdir.fdir_vsi;
275         if (!vsi)
276                 return;
277         int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
278         if (err)
279                 PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
280         err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
281         if (err)
282                 PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
283         i40e_dev_rx_queue_release(pf->fdir.rxq);
284         rte_eth_dma_zone_free(dev, "fdir_rx_ring", pf->fdir.rxq->queue_id);
285         pf->fdir.rxq = NULL;
286         i40e_dev_tx_queue_release(pf->fdir.txq);
287         rte_eth_dma_zone_free(dev, "fdir_tx_ring", pf->fdir.txq->queue_id);
288         pf->fdir.txq = NULL;
289         i40e_vsi_release(vsi);
290         pf->fdir.fdir_vsi = NULL;
291 }
292
293 /* check whether the flow director table in empty */
294 static inline int
295 i40e_fdir_empty(struct i40e_hw *hw)
296 {
297         uint32_t guarant_cnt, best_cnt;
298
299         guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
300                                  I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
301                                  I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
302         best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
303                               I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
304                               I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
305         if (best_cnt + guarant_cnt > 0)
306                 return -1;
307
308         return 0;
309 }
310
/*
 * Initialize the configuration about bytes stream extracted as flexible payload
 * and mask setting
 *
 * Programs default flex-payload extraction (8 words from the start of the
 * payload for each of the L2/L3/L4 layers) and clears all flex masks for
 * every supported packet classification type.
 */
static inline void
i40e_init_flx_pld(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint8_t pctype;
	int i, index;
	uint16_t flow_type;

	/*
	 * Define the bytes stream extracted as flexible payload in
	 * field vector. By default, select 8 words from the beginning
	 * of payload as flexible payload.
	 */
	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
		index = i * I40E_MAX_FLXPLD_FIED;
		pf->fdir.flex_set[index].src_offset = 0;
		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
		pf->fdir.flex_set[index].dst_offset = 0;
		/* NOTE(review): raw PRTQF_FLX_PIT encodings -- presumably
		 * src-offset/size/dst-offset packed per the datasheet field
		 * layout; verify against the register definition.
		 */
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
	}

	/* initialize the masks */
	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);

		/* Skip pctypes this adapter does not map to a flow type. */
		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
			continue;
		pf->fdir.flex_mask[pctype].word_mask = 0;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
		}
	}
}
356
/*
 * Validate that two flex-payload sections do not overlap: the second
 * section must start at or after the end of the first
 * (src_offset + size). On violation, logs an error and makes the
 * enclosing function return -EINVAL.
 */
#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
	if ((flex_pit2).src_offset < \
		(flex_pit1).src_offset + (flex_pit1).size) { \
		PMD_DRV_LOG(ERR, "src_offset should be not" \
			" less than previous offset" \
			" + previous FSIZE."); \
		return -EINVAL; \
	} \
} while (0)
366
/*
 * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure,
 * and the flex_pit will be sorted by it's src_offset value
 *
 * Groups consecutive source offsets into runs (one flex_pit entry per run
 * of adjacent byte offsets) and inserts each entry into @flex_pit keeping
 * the array ordered by src_offset (insertion sort). dst_offset records the
 * position of the run within the original src_offset array.
 *
 * Returns the number of flex_pit entries produced. The caller must size
 * @flex_pit for up to I40E_FDIR_MAX_FLEX_LEN entries.
 */
static inline uint16_t
i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
			struct i40e_fdir_flex_pit *flex_pit)
{
	uint16_t src_tmp, size, num = 0;
	uint16_t i, k, j = 0;

	while (j < I40E_FDIR_MAX_FLEX_LEN) {
		size = 1;
		/* Extend the current run while offsets stay consecutive. */
		for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
			if (src_offset[j + 1] == src_offset[j] + 1)
				size++;
			else
				break;
		}
		/* First source offset of the run. */
		src_tmp = src_offset[j] + 1 - size;
		/* the flex_pit need to be sort by src_offset */
		for (i = 0; i < num; i++) {
			if (src_tmp < flex_pit[i].src_offset)
				break;
		}
		/* if insert required, move backward */
		for (k = num; k > i; k--)
			flex_pit[k] = flex_pit[k - 1];
		/* insert */
		flex_pit[i].dst_offset = j + 1 - size;
		flex_pit[i].src_offset = src_tmp;
		flex_pit[i].size = size;
		j++;
		num++;
	}
	return num;
}
404
405 /* i40e_check_fdir_flex_payload -check flex payload configuration arguments */
406 static inline int
407 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
408 {
409         struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
410         uint16_t num, i;
411
412         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
413                 if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
414                         PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
415                         return -EINVAL;
416                 }
417         }
418
419         memset(flex_pit, 0, sizeof(flex_pit));
420         num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
421         if (num > I40E_MAX_FLXPLD_FIED) {
422                 PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
423                 return -EINVAL;
424         }
425         for (i = 0; i < num; i++) {
426                 if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
427                         flex_pit[i].src_offset & 0x01) {
428                         PMD_DRV_LOG(ERR, "flexpayload should be measured"
429                                 " in word");
430                         return -EINVAL;
431                 }
432                 if (i != num - 1)
433                         I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
434         }
435         return 0;
436 }
437
/*
 * i40e_check_fdir_flex_conf -check if the flex payload and mask configuration
 * arguments are valid
 *
 * Validates @conf in two passes: each flexible payload setting is bounded
 * and checked via i40e_check_fdir_flex_payload(), then each flex mask is
 * checked against a known flow type and against the per-pctype limit of
 * I40E_FDIR_BITMASK_NUM_WORD partial-word masks.
 *
 * Returns 0 when the configuration is valid, -EINVAL otherwise.
 */
static int
i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
			  const struct rte_eth_fdir_flex_conf *conf)
{
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint16_t mask_tmp;
	uint8_t nb_bitmask;
	uint16_t i, j;
	int ret = 0;
	enum i40e_filter_pctype pctype;

	if (conf == NULL) {
		PMD_DRV_LOG(INFO, "NULL pointer.");
		return -EINVAL;
	}
	/* check flexible payload setting configuration */
	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
		PMD_DRV_LOG(ERR, "invalid number of payload setting.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
			PMD_DRV_LOG(ERR, "invalid payload type.");
			return -EINVAL;
		}
		ret = i40e_check_fdir_flex_payload(flex_cfg);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
			return -EINVAL;
		}
	}

	/* check flex mask setting configuration */
	if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
			PMD_DRV_LOG(WARNING, "invalid flow type.");
			return -EINVAL;
		}
		nb_bitmask = 0;
		/* Count partial-word masks: a word mask of all-zeros or
		 * all-ones does not consume a hardware bitmask slot.
		 */
		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
			mask_tmp = I40E_WORD(flex_mask->mask[j],
					     flex_mask->mask[j + 1]);
			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
				nb_bitmask++;
				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
					PMD_DRV_LOG(ERR, " exceed maximal"
						" number of bitmasks.");
					return -EINVAL;
				}
			}
		}
	}
	return 0;
}
504
/*
 * i40e_set_flx_pld_cfg -configure the rule how bytes stream is extracted as flexible payload
 * @pf: board private structure
 * @cfg: the rule how bytes stream is extracted as flexible payload
 *
 * Converts the byte-level src_offset configuration into word-based
 * flex-field entries, records them in pf->fdir.flex_set, and programs the
 * PRTQF_FLX_PIT registers for the selected payload layer. Unused register
 * slots are filled with non-overlapping dummy entries as the hardware
 * requires.
 */
static void
i40e_set_flx_pld_cfg(struct i40e_pf *pf,
			 const struct rte_eth_flex_payload_cfg *cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
	uint32_t flx_pit, flx_ort;
	uint16_t num, min_next_off;  /* in words */
	uint8_t field_idx = 0;
	uint8_t layer_idx = 0;
	uint16_t i;

	/* Map the payload type onto the hardware layer index (L2/L3/L4). */
	if (cfg->type == RTE_ETH_L2_PAYLOAD)
		layer_idx = I40E_FLXPLD_L2_IDX;
	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
		layer_idx = I40E_FLXPLD_L3_IDX;
	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
		layer_idx = I40E_FLXPLD_L4_IDX;

	memset(flex_pit, 0, sizeof(flex_pit));
	/* Clamp to the per-layer field count the hardware supports. */
	num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
		      RTE_DIM(flex_pit));

	if (num) {
		flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
			  (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
			  (layer_idx * I40E_MAX_FLXPLD_FIED);
		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
	}

	for (i = 0; i < num; i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		/* record the info in fdir structure */
		pf->fdir.flex_set[field_idx].src_offset =
			flex_pit[i].src_offset / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].size =
			flex_pit[i].size / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].dst_offset =
			flex_pit[i].dst_offset / sizeof(uint16_t);
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				pf->fdir.flex_set[field_idx].size,
				pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
	}
	/* NOTE(review): if num == 0, field_idx is still 0 here, so this
	 * reads the L2-layer entry regardless of layer_idx -- confirm
	 * callers never pass an empty src_offset configuration.
	 */
	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
				pf->fdir.flex_set[field_idx].size;

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
			   NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
			flx_pit);
		min_next_off++;
	}
}
568
/*
 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
 * @pf: board private structure
 * @pctype: packet classify type
 * @flex_masks: mask for flexible payload
 *
 * Rebuilds pf->fdir.flex_mask[pctype] from @mask_cfg (a per-word mask over
 * the flex payload bytes) and programs the PRTQF_FD_FLXINSET and
 * PRTQF_FD_MSK registers accordingly: full words go into the word mask,
 * partially-masked words consume one hardware bitmask slot each.
 */
static void
i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
		enum i40e_filter_pctype pctype,
		const struct rte_eth_fdir_flex_mask *mask_cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint16_t mask_tmp;
	uint8_t i, nb_bitmask = 0;

	flex_mask = &pf->fdir.flex_mask[pctype];
	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
		if (mask_tmp != 0x0) {
			flex_mask->word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
			if (mask_tmp != UINT16_MAX) {
				/* set bit mask; hardware takes the inverted
				 * (don't-care) form of the mask.
				 */
				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
				flex_mask->bitmask[nb_bitmask].offset =
					i / sizeof(uint16_t);
				nb_bitmask++;
			}
		}
	}
	/* write mask to hw */
	flxinset = (flex_mask->word_mask <<
		I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	for (i = 0; i < nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			I40E_PRTQF_FD_MSK_MASK_MASK;
		fd_mask |= ((flex_mask->bitmask[i].offset +
			I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}
}
619
620 /*
621  * Enable/disable flow director RX processing in vector routines.
622  */
623 void
624 i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on)
625 {
626         int32_t i;
627
628         for (i = 0; i < dev->data->nb_rx_queues; i++) {
629                 struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
630                 if (!rxq)
631                         continue;
632                 rxq->fdir_enabled = on;
633         }
634         PMD_DRV_LOG(DEBUG, "Flow Director processing on RX set to %d", on);
635 }
636
/*
 * Configure flow director related setting
 *
 * Flushes any existing filters (configuration must precede filter adds),
 * enables the FDIR block in PFQF_CTL_0, resets flex payload extraction to
 * defaults, then applies the user's flex payload/mask configuration from
 * dev_conf (not supported in multi-driver mode). Finally enables FDIR
 * processing in the RX routines.
 *
 * Returns 0 on success, or a negative error code.
 */
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_fdir_flex_conf *conf;
	enum i40e_filter_pctype pctype;
	uint32_t val;
	uint8_t i;
	int ret = 0;

	/*
	* configuration need to be done before
	* flow director filters are added
	* If filters exist, flush them.
	*/
	if (i40e_fdir_empty(hw) < 0) {
		ret = i40e_fdir_flush(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
			return ret;
		}
	}

	/* enable FDIR filter */
	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);

	i40e_init_flx_pld(pf); /* set flex config to default value */

	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
	ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, " invalid configuration arguments.");
		return -EINVAL;
	}

	if (!pf->support_multi_driver) {
		/* configure flex payload */
		for (i = 0; i < conf->nb_payloads; i++)
			i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
		/* configure flex mask*/
		for (i = 0; i < conf->nb_flexmasks; i++) {
			if (hw->mac.type == I40E_MAC_X722) {
				/* get pctype value in fd pctype register;
				 * X722 translates pctypes through
				 * GLQF_FD_PCTYPES.
				 */
				pctype = (enum i40e_filter_pctype)
					  i40e_read_rx_ctl(hw,
						I40E_GLQF_FD_PCTYPES(
						(int)i40e_flowtype_to_pctype(
						pf->adapter,
						conf->flex_mask[i].flow_type)));
			} else {
				pctype = i40e_flowtype_to_pctype(pf->adapter,
						  conf->flex_mask[i].flow_type);
			}

			i40e_set_flex_mask_on_pctype(pf, pctype,
						     &conf->flex_mask[i]);
		}
	} else {
		PMD_DRV_LOG(ERR, "Not support flexible payload.");
	}

	/* Enable FDIR processing in RX routines */
	i40e_fdir_rx_proc_enable(dev, 1);

	return ret;
}
709
710 static inline int
711 i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
712                            unsigned char *raw_pkt,
713                            bool vlan)
714 {
715         static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
716         uint16_t *ether_type;
717         uint8_t len = 2 * sizeof(struct rte_ether_addr);
718         struct rte_ipv4_hdr *ip;
719         struct rte_ipv6_hdr *ip6;
720         static const uint8_t next_proto[] = {
721                 [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
722                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
723                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
724                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
725                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
726                 [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
727                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
728                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
729                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
730                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
731         };
732
733         raw_pkt += 2 * sizeof(struct rte_ether_addr);
734         if (vlan && fdir_input->flow_ext.vlan_tci) {
735                 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
736                 rte_memcpy(raw_pkt + sizeof(uint16_t),
737                            &fdir_input->flow_ext.vlan_tci,
738                            sizeof(uint16_t));
739                 raw_pkt += sizeof(vlan_frame);
740                 len += sizeof(vlan_frame);
741         }
742         ether_type = (uint16_t *)raw_pkt;
743         raw_pkt += sizeof(uint16_t);
744         len += sizeof(uint16_t);
745
746         switch (fdir_input->flow_type) {
747         case RTE_ETH_FLOW_L2_PAYLOAD:
748                 *ether_type = fdir_input->flow.l2_flow.ether_type;
749                 break;
750         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
751         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
752         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
753         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
754         case RTE_ETH_FLOW_FRAG_IPV4:
755                 ip = (struct rte_ipv4_hdr *)raw_pkt;
756
757                 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
758                 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
759                 /* set len to by default */
760                 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
761                 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
762                                         fdir_input->flow.ip4_flow.proto :
763                                         next_proto[fdir_input->flow_type];
764                 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
765                                         fdir_input->flow.ip4_flow.ttl :
766                                         I40E_FDIR_IP_DEFAULT_TTL;
767                 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
768                 /*
769                  * The source and destination fields in the transmitted packet
770                  * need to be presented in a reversed order with respect
771                  * to the expected received packets.
772                  */
773                 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
774                 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
775                 len += sizeof(struct rte_ipv4_hdr);
776                 break;
777         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
778         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
779         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
780         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
781         case RTE_ETH_FLOW_FRAG_IPV6:
782                 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
783
784                 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
785                 ip6->vtc_flow =
786                         rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
787                                          (fdir_input->flow.ipv6_flow.tc <<
788                                           I40E_FDIR_IPv6_TC_OFFSET));
789                 ip6->payload_len =
790                         rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
791                 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
792                                         fdir_input->flow.ipv6_flow.proto :
793                                         next_proto[fdir_input->flow_type];
794                 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
795                                         fdir_input->flow.ipv6_flow.hop_limits :
796                                         I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
797                 /*
798                  * The source and destination fields in the transmitted packet
799                  * need to be presented in a reversed order with respect
800                  * to the expected received packets.
801                  */
802                 rte_memcpy(&(ip6->src_addr),
803                            &(fdir_input->flow.ipv6_flow.dst_ip),
804                            IPV6_ADDR_LEN);
805                 rte_memcpy(&(ip6->dst_addr),
806                            &(fdir_input->flow.ipv6_flow.src_ip),
807                            IPV6_ADDR_LEN);
808                 len += sizeof(struct rte_ipv6_hdr);
809                 break;
810         default:
811                 PMD_DRV_LOG(ERR, "unknown flow type %u.",
812                             fdir_input->flow_type);
813                 return -1;
814         }
815         return len;
816 }
817
818
819 /*
820  * i40e_fdir_construct_pkt - construct packet based on fields in input
821  * @pf: board private structure
822  * @fdir_input: input set of the flow director entry
823  * @raw_pkt: a packet to be constructed
824  */
825 static int
826 i40e_fdir_construct_pkt(struct i40e_pf *pf,
827                              const struct rte_eth_fdir_input *fdir_input,
828                              unsigned char *raw_pkt)
829 {
830         unsigned char *payload, *ptr;
831         struct rte_udp_hdr *udp;
832         struct rte_tcp_hdr *tcp;
833         struct rte_sctp_hdr *sctp;
834         uint8_t size, dst = 0;
835         uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
836         int len;
837
838         /* fill the ethernet and IP head */
839         len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
840                                          !!fdir_input->flow_ext.vlan_tci);
841         if (len < 0)
842                 return -EINVAL;
843
844         /* fill the L4 head */
845         switch (fdir_input->flow_type) {
846         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
847                 udp = (struct rte_udp_hdr *)(raw_pkt + len);
848                 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
849                 /*
850                  * The source and destination fields in the transmitted packet
851                  * need to be presented in a reversed order with respect
852                  * to the expected received packets.
853                  */
854                 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
855                 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
856                 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
857                 break;
858
859         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
860                 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
861                 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
862                 /*
863                  * The source and destination fields in the transmitted packet
864                  * need to be presented in a reversed order with respect
865                  * to the expected received packets.
866                  */
867                 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
868                 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
869                 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
870                 break;
871
872         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
873                 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
874                 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
875                 /*
876                  * The source and destination fields in the transmitted packet
877                  * need to be presented in a reversed order with respect
878                  * to the expected received packets.
879                  */
880                 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
881                 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
882                 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
883                 break;
884
885         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
886         case RTE_ETH_FLOW_FRAG_IPV4:
887                 payload = raw_pkt + len;
888                 set_idx = I40E_FLXPLD_L3_IDX;
889                 break;
890
891         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
892                 udp = (struct rte_udp_hdr *)(raw_pkt + len);
893                 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
894                 /*
895                  * The source and destination fields in the transmitted packet
896                  * need to be presented in a reversed order with respect
897                  * to the expected received packets.
898                  */
899                 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
900                 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
901                 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
902                 break;
903
904         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
905                 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
906                 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
907                 /*
908                  * The source and destination fields in the transmitted packet
909                  * need to be presented in a reversed order with respect
910                  * to the expected received packets.
911                  */
912                 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
913                 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
914                 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
915                 break;
916
917         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
918                 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
919                 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
920                 /*
921                  * The source and destination fields in the transmitted packet
922                  * need to be presented in a reversed order with respect
923                  * to the expected received packets.
924                  */
925                 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
926                 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
927                 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
928                 break;
929
930         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
931         case RTE_ETH_FLOW_FRAG_IPV6:
932                 payload = raw_pkt + len;
933                 set_idx = I40E_FLXPLD_L3_IDX;
934                 break;
935         case RTE_ETH_FLOW_L2_PAYLOAD:
936                 payload = raw_pkt + len;
937                 /*
938                  * ARP packet is a special case on which the payload
939                  * starts after the whole ARP header
940                  */
941                 if (fdir_input->flow.l2_flow.ether_type ==
942                                 rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
943                         payload += sizeof(struct rte_arp_hdr);
944                 set_idx = I40E_FLXPLD_L2_IDX;
945                 break;
946         default:
947                 PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
948                 return -EINVAL;
949         }
950
951         /* fill the flexbytes to payload */
952         for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
953                 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
954                 size = pf->fdir.flex_set[pit_idx].size;
955                 if (size == 0)
956                         continue;
957                 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
958                 ptr = payload +
959                         pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
960                 rte_memcpy(ptr,
961                                  &fdir_input->flow_ext.flexbytes[dst],
962                                  size * sizeof(uint16_t));
963         }
964
965         return 0;
966 }
967
968 static struct i40e_customized_pctype *
969 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
970 {
971         struct i40e_customized_pctype *cus_pctype;
972         enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
973
974         for (; i < I40E_CUSTOMIZED_MAX; i++) {
975                 cus_pctype = &pf->customized_pctype[i];
976                 if (pctype == cus_pctype->pctype)
977                         return cus_pctype;
978         }
979         return NULL;
980 }
981
982 static inline int
983 fill_ip6_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
984                 uint8_t next_proto, uint8_t len, uint16_t *ether_type)
985 {
986         struct rte_ipv6_hdr *ip6;
987
988         ip6 = (struct rte_ipv6_hdr *)raw_pkt;
989
990         *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
991         ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
992                 (fdir_input->flow.ipv6_flow.tc << I40E_FDIR_IPv6_TC_OFFSET));
993         ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
994         ip6->proto = fdir_input->flow.ipv6_flow.proto ?
995                 fdir_input->flow.ipv6_flow.proto : next_proto;
996         ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
997                 fdir_input->flow.ipv6_flow.hop_limits :
998                 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
999         /**
1000          * The source and destination fields in the transmitted packet
1001          * need to be presented in a reversed order with respect
1002          * to the expected received packets.
1003          */
1004         rte_memcpy(&ip6->src_addr, &fdir_input->flow.ipv6_flow.dst_ip,
1005                 IPV6_ADDR_LEN);
1006         rte_memcpy(&ip6->dst_addr, &fdir_input->flow.ipv6_flow.src_ip,
1007                 IPV6_ADDR_LEN);
1008         len += sizeof(struct rte_ipv6_hdr);
1009
1010         return len;
1011 }
1012
1013 static inline int
1014 fill_ip4_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
1015                 uint8_t next_proto, uint8_t len, uint16_t *ether_type)
1016 {
1017         struct rte_ipv4_hdr *ip4;
1018
1019         ip4 = (struct rte_ipv4_hdr *)raw_pkt;
1020
1021         *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1022         ip4->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1023         /* set len to by default */
1024         ip4->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
1025         ip4->time_to_live = fdir_input->flow.ip4_flow.ttl ?
1026                 fdir_input->flow.ip4_flow.ttl :
1027                 I40E_FDIR_IP_DEFAULT_TTL;
1028         ip4->type_of_service = fdir_input->flow.ip4_flow.tos;
1029         ip4->next_proto_id = fdir_input->flow.ip4_flow.proto ?
1030                 fdir_input->flow.ip4_flow.proto : next_proto;
1031         /**
1032          * The source and destination fields in the transmitted packet
1033          * need to be presented in a reversed order with respect
1034          * to the expected received packets.
1035          */
1036         ip4->src_addr = fdir_input->flow.ip4_flow.dst_ip;
1037         ip4->dst_addr = fdir_input->flow.ip4_flow.src_ip;
1038         len += sizeof(struct rte_ipv4_hdr);
1039
1040         return len;
1041 }
1042
1043 static inline int
1044 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
1045                                 const struct i40e_fdir_input *fdir_input,
1046                                 unsigned char *raw_pkt,
1047                                 bool vlan)
1048 {
1049         struct i40e_customized_pctype *cus_pctype = NULL;
1050         static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
1051         uint16_t *ether_type;
1052         uint8_t len = 2 * sizeof(struct rte_ether_addr);
1053         uint8_t pctype = fdir_input->pctype;
1054         bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
1055         static const uint8_t next_proto[] = {
1056                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
1057                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
1058                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
1059                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
1060                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
1061                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
1062                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
1063                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
1064                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
1065                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
1066         };
1067
1068         rte_memcpy(raw_pkt, &fdir_input->flow.l2_flow.dst,
1069                 sizeof(struct rte_ether_addr));
1070         rte_memcpy(raw_pkt + sizeof(struct rte_ether_addr),
1071                 &fdir_input->flow.l2_flow.src,
1072                 sizeof(struct rte_ether_addr));
1073         raw_pkt += 2 * sizeof(struct rte_ether_addr);
1074
1075         if (vlan && fdir_input->flow_ext.vlan_tci) {
1076                 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
1077                 rte_memcpy(raw_pkt + sizeof(uint16_t),
1078                            &fdir_input->flow_ext.vlan_tci,
1079                            sizeof(uint16_t));
1080                 raw_pkt += sizeof(vlan_frame);
1081                 len += sizeof(vlan_frame);
1082         }
1083         ether_type = (uint16_t *)raw_pkt;
1084         raw_pkt += sizeof(uint16_t);
1085         len += sizeof(uint16_t);
1086
1087         if (is_customized_pctype) {
1088                 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1089                 if (!cus_pctype) {
1090                         PMD_DRV_LOG(ERR, "unknown pctype %u.",
1091                                     fdir_input->pctype);
1092                         return -1;
1093                 }
1094         }
1095
1096         if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
1097                 *ether_type = fdir_input->flow.l2_flow.ether_type;
1098         else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
1099                  pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
1100                  pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
1101                  pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1102                  pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
1103                  pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1104                  pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1105                  pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1106                  pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1107                  pctype == I40E_FILTER_PCTYPE_FRAG_IPV6 ||
1108                  is_customized_pctype) {
1109                 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
1110                         pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
1111                         pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
1112                         pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1113                         pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1114                         len = fill_ip4_head(fdir_input, raw_pkt,
1115                                         next_proto[pctype], len, ether_type);
1116                 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1117                         pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1118                         pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1119                         pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1120                         pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1121                         len = fill_ip6_head(fdir_input, raw_pkt,
1122                                         next_proto[pctype], len,
1123                                         ether_type);
1124                 } else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1125                          cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1126                          cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1127                          cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1128                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
1129                                         len, ether_type);
1130                 } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3) {
1131                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_L2TP,
1132                                         len, ether_type);
1133                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
1134                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_ESP,
1135                                         len, ether_type);
1136                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1137                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
1138                                         len, ether_type);
1139                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1140                         len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
1141                                         len, ether_type);
1142                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6)
1143                         len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_ESP,
1144                                         len, ether_type);
1145                 else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP)
1146                         len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_UDP,
1147                                         len, ether_type);
1148                 else if (cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3)
1149                         len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_L2TP,
1150                                         len, ether_type);
1151         } else {
1152                 PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
1153                 return -1;
1154         }
1155
1156         return len;
1157 }
1158
1159 /**
1160  * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
1161  * @pf: board private structure
1162  * @fdir_input: input set of the flow director entry
1163  * @raw_pkt: a packet to be constructed
1164  */
1165 static int
1166 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
1167                              const struct i40e_fdir_input *fdir_input,
1168                              unsigned char *raw_pkt)
1169 {
1170         unsigned char *payload = NULL;
1171         unsigned char *ptr;
1172         struct rte_udp_hdr *udp;
1173         struct rte_tcp_hdr *tcp;
1174         struct rte_sctp_hdr *sctp;
1175         struct rte_flow_item_gtp *gtp;
1176         struct rte_ipv4_hdr *gtp_ipv4;
1177         struct rte_ipv6_hdr *gtp_ipv6;
1178         struct rte_flow_item_l2tpv3oip *l2tpv3oip;
1179         struct rte_flow_item_esp *esp;
1180         struct rte_ipv4_hdr *esp_ipv4;
1181         struct rte_ipv6_hdr *esp_ipv6;
1182
1183         uint8_t size, dst = 0;
1184         uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
1185         int len;
1186         uint8_t pctype = fdir_input->pctype;
1187         struct i40e_customized_pctype *cus_pctype;
1188
1189         /* raw pcket template - just copy contents of the raw packet */
1190         if (fdir_input->flow_ext.pkt_template) {
1191                 memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
1192                        fdir_input->flow.raw_flow.length);
1193                 return 0;
1194         }
1195
1196         /* fill the ethernet and IP head */
1197         len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
1198                                               !!fdir_input->flow_ext.vlan_tci);
1199         if (len < 0)
1200                 return -EINVAL;
1201
1202         /* fill the L4 head */
1203         if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
1204                 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1205                 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1206                 /**
1207                  * The source and destination fields in the transmitted packet
1208                  * need to be presented in a reversed order with respect
1209                  * to the expected received packets.
1210                  */
1211                 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
1212                 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
1213                 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1214         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
1215                 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1216                 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1217                 /**
1218                  * The source and destination fields in the transmitted packet
1219                  * need to be presented in a reversed order with respect
1220                  * to the expected received packets.
1221                  */
1222                 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
1223                 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
1224                 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1225         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
1226                 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1227                 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1228                 /**
1229                  * The source and destination fields in the transmitted packet
1230                  * need to be presented in a reversed order with respect
1231                  * to the expected received packets.
1232                  */
1233                 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
1234                 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
1235                 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
1236         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1237                    pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1238                 payload = raw_pkt + len;
1239                 set_idx = I40E_FLXPLD_L3_IDX;
1240         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1241                 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1242                 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1243                 /**
1244                  * The source and destination fields in the transmitted packet
1245                  * need to be presented in a reversed order with respect
1246                  * to the expected received packets.
1247                  */
1248                 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1249                 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1250                 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1251         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1252                 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1253                 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1254                 /**
1255                  * The source and destination fields in the transmitted packet
1256                  * need to be presented in a reversed order with respect
1257                  * to the expected received packets.
1258                  */
1259                 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1260                 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
1261                 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
1262         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1263                 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1264                 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1265                 /**
1266                  * The source and destination fields in the transmitted packet
1267                  * need to be presented in a reversed order with respect
1268                  * to the expected received packets.
1269                  */
1270                 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1271                 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1272                 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1273         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1274                    pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1275                 payload = raw_pkt + len;
1276                 set_idx = I40E_FLXPLD_L3_IDX;
1277         } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1278                 payload = raw_pkt + len;
1279                 /**
1280                  * ARP packet is a special case on which the payload
1281                  * starts after the whole ARP header
1282                  */
1283                 if (fdir_input->flow.l2_flow.ether_type ==
1284                                 rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
1285                         payload += sizeof(struct rte_arp_hdr);
1286                 set_idx = I40E_FLXPLD_L2_IDX;
1287         } else if (fdir_input->flow_ext.customized_pctype) {
1288                 /* If customized pctype is used */
1289                 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1290                 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1291                     cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1292                     cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1293                     cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1294                         udp = (struct rte_udp_hdr *)(raw_pkt + len);
1295                         udp->dgram_len =
1296                                 rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1297
1298                         gtp = (struct rte_flow_item_gtp *)
1299                                 ((unsigned char *)udp +
1300                                         sizeof(struct rte_udp_hdr));
1301                         gtp->msg_len =
1302                                 rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1303                         gtp->teid = fdir_input->flow.gtp_flow.teid;
1304                         gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1305
1306                         /* GTP-C message type is not supported. */
1307                         if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1308                                 udp->dst_port =
1309                                       rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1310                                 gtp->v_pt_rsv_flags =
1311                                         I40E_FDIR_GTP_VER_FLAG_0X32;
1312                         } else {
1313                                 udp->dst_port =
1314                                       rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1315                                 gtp->v_pt_rsv_flags =
1316                                         I40E_FDIR_GTP_VER_FLAG_0X30;
1317                         }
1318
1319                         if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1320                                 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1321                                 gtp_ipv4 = (struct rte_ipv4_hdr *)
1322                                         ((unsigned char *)gtp +
1323                                          sizeof(struct rte_flow_item_gtp));
1324                                 gtp_ipv4->version_ihl =
1325                                         I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1326                                 gtp_ipv4->next_proto_id = IPPROTO_IP;
1327                                 gtp_ipv4->total_length =
1328                                         rte_cpu_to_be_16(
1329                                                 I40E_FDIR_INNER_IP_DEFAULT_LEN);
1330                                 payload = (unsigned char *)gtp_ipv4 +
1331                                         sizeof(struct rte_ipv4_hdr);
1332                         } else if (cus_pctype->index ==
1333                                    I40E_CUSTOMIZED_GTPU_IPV6) {
1334                                 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1335                                 gtp_ipv6 = (struct rte_ipv6_hdr *)
1336                                         ((unsigned char *)gtp +
1337                                          sizeof(struct rte_flow_item_gtp));
1338                                 gtp_ipv6->vtc_flow =
1339                                         rte_cpu_to_be_32(
1340                                                I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1341                                                (0 << I40E_FDIR_IPv6_TC_OFFSET));
1342                                 gtp_ipv6->proto = IPPROTO_NONE;
1343                                 gtp_ipv6->payload_len =
1344                                         rte_cpu_to_be_16(
1345                                               I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1346                                 gtp_ipv6->hop_limits =
1347                                         I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1348                                 payload = (unsigned char *)gtp_ipv6 +
1349                                         sizeof(struct rte_ipv6_hdr);
1350                         } else
1351                                 payload = (unsigned char *)gtp +
1352                                         sizeof(struct rte_flow_item_gtp);
1353                 } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3 ||
1354                            cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3) {
1355                         l2tpv3oip = (struct rte_flow_item_l2tpv3oip *)(raw_pkt
1356                                                                        + len);
1357
1358                         if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3)
1359                                 l2tpv3oip->session_id =
1360                                  fdir_input->flow.ip4_l2tpv3oip_flow.session_id;
1361                         else
1362                                 l2tpv3oip->session_id =
1363                                  fdir_input->flow.ip6_l2tpv3oip_flow.session_id;
1364                         payload = (unsigned char *)l2tpv3oip +
1365                                 sizeof(struct rte_flow_item_l2tpv3oip);
1366                 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4 ||
1367                         cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6 ||
1368                         cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP ||
1369                         cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1370                         if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
1371                                 esp_ipv4 = (struct rte_ipv4_hdr *)
1372                                         (raw_pkt + len);
1373                                 esp = (struct rte_flow_item_esp *)esp_ipv4;
1374                                 esp->hdr.spi =
1375                                         fdir_input->flow.esp_ipv4_flow.spi;
1376                                 payload = (unsigned char *)esp +
1377                                         sizeof(struct rte_esp_hdr);
1378                                 len += sizeof(struct rte_esp_hdr);
1379                         } else if (cus_pctype->index ==
1380                                         I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1381                                 esp_ipv4 = (struct rte_ipv4_hdr *)
1382                                         (raw_pkt + len);
1383                                 udp = (struct rte_udp_hdr *)esp_ipv4;
1384                                 udp->dst_port = rte_cpu_to_be_16
1385                                         (I40E_FDIR_ESP_DST_PORT);
1386
1387                                 udp->dgram_len = rte_cpu_to_be_16
1388                                                 (I40E_FDIR_UDP_DEFAULT_LEN);
1389                                 esp = (struct rte_flow_item_esp *)
1390                                         ((unsigned char *)esp_ipv4 +
1391                                                 sizeof(struct rte_udp_hdr));
1392                                 esp->hdr.spi =
1393                                         fdir_input->flow.esp_ipv4_udp_flow.spi;
1394                                 payload = (unsigned char *)esp +
1395                                         sizeof(struct rte_esp_hdr);
1396                                 len += sizeof(struct rte_udp_hdr) +
1397                                                 sizeof(struct rte_esp_hdr);
1398                         } else if (cus_pctype->index ==
1399                                         I40E_CUSTOMIZED_ESP_IPV6) {
1400                                 esp_ipv6 = (struct rte_ipv6_hdr *)
1401                                         (raw_pkt + len);
1402                                 esp = (struct rte_flow_item_esp *)esp_ipv6;
1403                                 esp->hdr.spi =
1404                                         fdir_input->flow.esp_ipv6_flow.spi;
1405                                 payload = (unsigned char *)esp +
1406                                         sizeof(struct rte_esp_hdr);
1407                                 len += sizeof(struct rte_esp_hdr);
1408                         } else if (cus_pctype->index ==
1409                                         I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1410                                 esp_ipv6 = (struct rte_ipv6_hdr *)
1411                                         (raw_pkt + len);
1412                                 udp = (struct rte_udp_hdr *)esp_ipv6;
1413                                 udp->dst_port = rte_cpu_to_be_16
1414                                         (I40E_FDIR_ESP_DST_PORT);
1415
1416                                 udp->dgram_len = rte_cpu_to_be_16
1417                                         (I40E_FDIR_UDP_DEFAULT_LEN);
1418                                 esp = (struct rte_flow_item_esp *)
1419                                         ((unsigned char *)esp_ipv6 +
1420                                                 sizeof(struct rte_udp_hdr));
1421                                 esp->hdr.spi =
1422                                         fdir_input->flow.esp_ipv6_udp_flow.spi;
1423                                 payload = (unsigned char *)esp +
1424                                         sizeof(struct rte_esp_hdr);
1425                                 len += sizeof(struct rte_udp_hdr) +
1426                                                 sizeof(struct rte_esp_hdr);
1427                         }
1428                 }
1429         } else {
1430                 PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
1431                 return -1;
1432         }
1433
1434         /* fill the flexbytes to payload */
1435         for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1436                 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1437                 size = pf->fdir.flex_set[pit_idx].size;
1438                 if (size == 0)
1439                         continue;
1440                 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1441                 ptr = payload +
1442                       pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1443                 (void)rte_memcpy(ptr,
1444                                  &fdir_input->flow_ext.flexbytes[dst],
1445                                  size * sizeof(uint16_t));
1446         }
1447
1448         return 0;
1449 }
1450
1451 /* Construct the tx flags */
1452 static inline uint64_t
1453 i40e_build_ctob(uint32_t td_cmd,
1454                 uint32_t td_offset,
1455                 unsigned int size,
1456                 uint32_t td_tag)
1457 {
1458         return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1459                         ((uint64_t)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
1460                         ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1461                         ((uint64_t)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1462                         ((uint64_t)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
1463 }
1464
/*
 * check the programming status descriptor in rx queue.
 * done after Programming Flow Director is programmed on
 * tx queue
 *
 * Reads the descriptor at rx_tail; if HW has completed it (DD bit set),
 * decodes it as a flow-director programming status report and recycles
 * the descriptor back to the ring.
 *
 * Return: 0 when no completed descriptor is pending or the programming
 * succeeded; -1 when HW reported a programming error (FD table full on
 * add, or no matching entry on delete).
 */
static inline int
i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
{
	volatile union i40e_rx_desc *rxdp;
	uint64_t qword1;
	uint32_t rx_status;
	uint32_t len, id;
	uint32_t error;
	int ret = 0;

	rxdp = &rxq->rx_ring[rxq->rx_tail];
	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
			>> I40E_RXD_QW1_STATUS_SHIFT;

	if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
		len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
		id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
			    I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

		if (len  == I40E_RX_PROG_STATUS_DESC_LENGTH &&
		    id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
			error = (qword1 &
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
			/* FD_TBL_FULL: an add failed, filter table exhausted */
			if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			/* NO_FD_ENTRY: a delete failed, no such filter */
			} else if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else
				PMD_DRV_LOG(ERR, "invalid programming status"
					    " reported, error = %u.", error);
		} else
			PMD_DRV_LOG(INFO, "unknown programming status"
				    " reported, len = %d, id = %u.", len, id);
		/* Clear the descriptor, advance SW tail (with wrap), and
		 * write the HW tail one entry behind the SW position.
		 */
		rxdp->wb.qword1.status_error_len = 0;
		rxq->rx_tail++;
		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
			rxq->rx_tail = 0;
		if (rxq->rx_tail == 0)
			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
		else
			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
	}

	return ret;
}
1527
1528 static int
1529 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1530                          struct i40e_fdir_filter *filter)
1531 {
1532         rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
1533         if (input->input.flow_ext.pkt_template) {
1534                 filter->fdir.input.flow.raw_flow.packet = NULL;
1535                 filter->fdir.input.flow.raw_flow.length =
1536                         rte_hash_crc(input->input.flow.raw_flow.packet,
1537                                      input->input.flow.raw_flow.length,
1538                                      input->input.flow.raw_flow.pctype);
1539         }
1540         return 0;
1541 }
1542
1543 /* Check if there exists the flow director filter */
1544 static struct i40e_fdir_filter *
1545 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1546                         const struct i40e_fdir_input *input)
1547 {
1548         int ret;
1549
1550         if (input->flow_ext.pkt_template)
1551                 ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1552                                                 (const void *)input,
1553                                                 input->flow.raw_flow.length);
1554         else
1555                 ret = rte_hash_lookup(fdir_info->hash_table,
1556                                       (const void *)input);
1557         if (ret < 0)
1558                 return NULL;
1559
1560         return fdir_info->hash_map[ret];
1561 }
1562
1563 /* Add a flow director filter into the SW list */
1564 static int
1565 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1566 {
1567         struct i40e_fdir_info *fdir_info = &pf->fdir;
1568         int ret;
1569
1570         if (filter->fdir.input.flow_ext.pkt_template)
1571                 ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1572                                  &filter->fdir.input,
1573                                  filter->fdir.input.flow.raw_flow.length);
1574         else
1575                 ret = rte_hash_add_key(fdir_info->hash_table,
1576                                        &filter->fdir.input);
1577         if (ret < 0) {
1578                 PMD_DRV_LOG(ERR,
1579                             "Failed to insert fdir filter to hash table %d!",
1580                             ret);
1581                 return ret;
1582         }
1583         fdir_info->hash_map[ret] = filter;
1584
1585         TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
1586
1587         return 0;
1588 }
1589
1590 /* Delete a flow director filter from the SW list */
1591 int
1592 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1593 {
1594         struct i40e_fdir_info *fdir_info = &pf->fdir;
1595         struct i40e_fdir_filter *filter;
1596         int ret;
1597
1598         if (input->flow_ext.pkt_template)
1599                 ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1600                                                  input,
1601                                                  input->flow.raw_flow.length);
1602         else
1603                 ret = rte_hash_del_key(fdir_info->hash_table, input);
1604         if (ret < 0) {
1605                 PMD_DRV_LOG(ERR,
1606                             "Failed to delete fdir filter to hash table %d!",
1607                             ret);
1608                 return ret;
1609         }
1610         filter = fdir_info->hash_map[ret];
1611         fdir_info->hash_map[ret] = NULL;
1612
1613         TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1614         rte_free(filter);
1615
1616         return 0;
1617 }
1618
1619 /*
1620  * i40e_add_del_fdir_filter - add or remove a flow director filter.
1621  * @pf: board private structure
1622  * @filter: fdir filter entry
1623  * @add: 0 - delete, 1 - add
1624  */
1625 int
1626 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
1627                          const struct rte_eth_fdir_filter *filter,
1628                          bool add)
1629 {
1630         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1631         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1632         unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1633         enum i40e_filter_pctype pctype;
1634         int ret = 0;
1635
1636         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1637                 PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
1638                         " check the mode in fdir_conf.");
1639                 return -ENOTSUP;
1640         }
1641
1642         pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
1643         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
1644                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
1645                 return -EINVAL;
1646         }
1647         if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1648                 PMD_DRV_LOG(ERR, "Invalid queue ID");
1649                 return -EINVAL;
1650         }
1651         if (filter->input.flow_ext.is_vf &&
1652                 filter->input.flow_ext.dst_id >= pf->vf_num) {
1653                 PMD_DRV_LOG(ERR, "Invalid VF ID");
1654                 return -EINVAL;
1655         }
1656
1657         memset(pkt, 0, I40E_FDIR_PKT_LEN);
1658
1659         ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
1660         if (ret < 0) {
1661                 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1662                 return ret;
1663         }
1664
1665         if (hw->mac.type == I40E_MAC_X722) {
1666                 /* get translated pctype value in fd pctype register */
1667                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1668                         hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1669         }
1670
1671         ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
1672         if (ret < 0) {
1673                 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1674                             pctype);
1675                 return ret;
1676         }
1677
1678         return ret;
1679 }
1680
1681 /**
1682  * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1683  * @pf: board private structure
1684  * @filter: fdir filter entry
1685  * @add: 0 - delete, 1 - add
1686  */
1687 int
1688 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1689                               const struct i40e_fdir_filter_conf *filter,
1690                               bool add)
1691 {
1692         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1693         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1694         unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1695         enum i40e_filter_pctype pctype;
1696         struct i40e_fdir_info *fdir_info = &pf->fdir;
1697         struct i40e_fdir_filter *fdir_filter, *node;
1698         struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1699         int ret = 0;
1700
1701         if (pf->fdir.fdir_vsi == NULL) {
1702                 PMD_DRV_LOG(ERR, "FDIR is not enabled");
1703                 return -ENOTSUP;
1704         }
1705
1706         if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1707                 PMD_DRV_LOG(ERR, "Invalid queue ID");
1708                 return -EINVAL;
1709         }
1710         if (filter->input.flow_ext.is_vf &&
1711             filter->input.flow_ext.dst_id >= pf->vf_num) {
1712                 PMD_DRV_LOG(ERR, "Invalid VF ID");
1713                 return -EINVAL;
1714         }
1715         if (filter->input.flow_ext.pkt_template) {
1716                 if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
1717                     !filter->input.flow.raw_flow.packet) {
1718                         PMD_DRV_LOG(ERR, "Invalid raw packet template"
1719                                 " flow filter parameters!");
1720                         return -EINVAL;
1721                 }
1722                 pctype = filter->input.flow.raw_flow.pctype;
1723         } else {
1724                 pctype = filter->input.pctype;
1725         }
1726
1727         /* Check if there is the filter in SW list */
1728         memset(&check_filter, 0, sizeof(check_filter));
1729         i40e_fdir_filter_convert(filter, &check_filter);
1730         node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
1731         if (add && node) {
1732                 PMD_DRV_LOG(ERR,
1733                             "Conflict with existing flow director rules!");
1734                 return -EINVAL;
1735         }
1736
1737         if (!add && !node) {
1738                 PMD_DRV_LOG(ERR,
1739                             "There's no corresponding flow firector filter!");
1740                 return -EINVAL;
1741         }
1742
1743         memset(pkt, 0, I40E_FDIR_PKT_LEN);
1744
1745         ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1746         if (ret < 0) {
1747                 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1748                 return ret;
1749         }
1750
1751         if (hw->mac.type == I40E_MAC_X722) {
1752                 /* get translated pctype value in fd pctype register */
1753                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1754                         hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1755         }
1756
1757         ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
1758         if (ret < 0) {
1759                 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1760                             pctype);
1761                 return ret;
1762         }
1763
1764         if (add) {
1765                 fdir_filter = rte_zmalloc("fdir_filter",
1766                                           sizeof(*fdir_filter), 0);
1767                 if (fdir_filter == NULL) {
1768                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
1769                         return -ENOMEM;
1770                 }
1771
1772                 rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
1773                 ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
1774                 if (ret < 0)
1775                         rte_free(fdir_filter);
1776         } else {
1777                 ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1778         }
1779
1780         return ret;
1781 }
1782
/*
 * i40e_fdir_filter_programming - Program a flow director filter rule.
 * Is done by Flow Director Programming Descriptor followed by packet
 * structure that contains the filter fields need to match.
 * @pf: board private structure
 * @pctype: pctype
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 *
 * Consumes two slots on the dedicated FDIR tx queue (the programming
 * descriptor plus a data descriptor pointing at the pre-built dummy
 * packet), then busy-waits for tx completion followed by the rx
 * programming status. Returns 0 on success, -EINVAL for an unsupported
 * behavior, -ETIMEDOUT when HW does not confirm in time.
 */
static int
i40e_fdir_filter_programming(struct i40e_pf *pf,
			enum i40e_filter_pctype pctype,
			const struct rte_eth_fdir_filter *filter,
			bool add)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct rte_eth_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id, i;
	uint8_t dest;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	/* The programming descriptor reuses a regular tx ring slot. */
	fdirdp = (volatile struct i40e_filter_program_desc *)
			(&(txq->tx_ring[txq->tx_tail]));

	/* QW0: target rx queue, flex payload offset, pctype, dest VSI. */
	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					  I40E_TXD_FLTR_QW0_QINDEX_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
		rte_cpu_to_le_32(((uint32_t)vsi_id <<
				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

	/* QW1: add/remove command, destination, status report, counter. */
	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status<<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	/* Enable the match counter selected at FDIR setup time. */
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(
			((uint32_t)pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);

	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	/* Second descriptor carries the dummy match packet (DMA region). */
	txdp = &(txq->tx_ring[txq->tx_tail + 1]);
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS  |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
	/* Busy-wait for HW to mark the data descriptor done. */
	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
		rte_delay_us(1);
	}
	if (i >= I40E_FDIR_MAX_WAIT_US) {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
	/* totally delay 10 ms to check programming status*/
	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if (i40e_check_fdir_programming_status(rxq) >= 0)
			return 0;
		rte_delay_us(1);
	}
	PMD_DRV_LOG(ERR,
		"Failed to program FDIR filter: programming status reported.");
	return -ETIMEDOUT;
}
1917
/*
 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
 * Is done by Flow Director Programming Descriptor followed by packet
 * structure that contains the filter fields need to match.
 * @pf: board private structure
 * @pctype: pctype
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 *
 * rte_flow flavored twin of i40e_fdir_filter_programming(): submits a
 * programming descriptor plus a dummy-packet data descriptor on the
 * FDIR tx queue, then waits for tx completion and finally checks the
 * rx programming status once after a fixed delay. Returns 0 on
 * success, -EINVAL for an unsupported behavior, -ETIMEDOUT when HW
 * does not confirm in time.
 */
static int
i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
				  enum i40e_filter_pctype pctype,
				  const struct i40e_fdir_filter_conf *filter,
				  bool add)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct i40e_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id, i;
	uint8_t dest;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	/* The programming descriptor reuses a regular tx ring slot. */
	fdirdp = (volatile struct i40e_filter_program_desc *)
				(&txq->tx_ring[txq->tx_tail]);

	/* QW0: target rx queue, flex payload offset, pctype, dest VSI. */
	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					  I40E_TXD_FLTR_QW0_QINDEX_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
		rte_cpu_to_le_32(((uint32_t)vsi_id <<
				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

	/* QW1: add/remove command, destination, status report, counter. */
	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	if (fdir_action->behavior == I40E_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status <<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	/* Enable the match counter selected at FDIR setup time. */
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(
			((uint32_t)pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);

	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	/* Second descriptor carries the dummy match packet (DMA region). */
	txdp = &txq->tx_ring[txq->tx_tail + 1];
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS  |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
	/* Busy-wait for HW to mark the data descriptor done. */
	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
		rte_delay_us(1);
	}
	if (i >= I40E_FDIR_MAX_WAIT_US) {
		PMD_DRV_LOG(ERR,
		    "Failed to program FDIR filter: time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
	/* totally delay 10 ms to check programming status*/
	rte_delay_us(I40E_FDIR_MAX_WAIT_US);
	if (i40e_check_fdir_programming_status(rxq) < 0) {
		PMD_DRV_LOG(ERR,
		    "Failed to program FDIR filter: programming status reported.");
		return -ETIMEDOUT;
	}

	return 0;
}
2051
2052 /*
2053  * i40e_fdir_flush - clear all filters of Flow Director table
2054  * @pf: board private structure
2055  */
2056 int
2057 i40e_fdir_flush(struct rte_eth_dev *dev)
2058 {
2059         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2060         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2061         uint32_t reg;
2062         uint16_t guarant_cnt, best_cnt;
2063         uint16_t i;
2064
2065         I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
2066         I40E_WRITE_FLUSH(hw);
2067
2068         for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
2069                 rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
2070                 reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
2071                 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
2072                         break;
2073         }
2074         if (i >= I40E_FDIR_FLUSH_RETRY) {
2075                 PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
2076                 return -ETIMEDOUT;
2077         }
2078         guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
2079                                 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2080                                 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2081         best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
2082                                 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2083                                 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2084         if (guarant_cnt != 0 || best_cnt != 0) {
2085                 PMD_DRV_LOG(ERR, "Failed to flush FD table.");
2086                 return -ENOSYS;
2087         } else
2088                 PMD_DRV_LOG(INFO, "FD table Flush success.");
2089         return 0;
2090 }
2091
2092 static inline void
2093 i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
2094                         struct rte_eth_flex_payload_cfg *flex_set,
2095                         uint16_t *num)
2096 {
2097         struct i40e_fdir_flex_pit *flex_pit;
2098         struct rte_eth_flex_payload_cfg *ptr = flex_set;
2099         uint16_t src, dst, size, j, k;
2100         uint8_t i, layer_idx;
2101
2102         for (layer_idx = I40E_FLXPLD_L2_IDX;
2103              layer_idx <= I40E_FLXPLD_L4_IDX;
2104              layer_idx++) {
2105                 if (layer_idx == I40E_FLXPLD_L2_IDX)
2106                         ptr->type = RTE_ETH_L2_PAYLOAD;
2107                 else if (layer_idx == I40E_FLXPLD_L3_IDX)
2108                         ptr->type = RTE_ETH_L3_PAYLOAD;
2109                 else if (layer_idx == I40E_FLXPLD_L4_IDX)
2110                         ptr->type = RTE_ETH_L4_PAYLOAD;
2111
2112                 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
2113                         flex_pit = &pf->fdir.flex_set[layer_idx *
2114                                 I40E_MAX_FLXPLD_FIED + i];
2115                         if (flex_pit->size == 0)
2116                                 continue;
2117                         src = flex_pit->src_offset * sizeof(uint16_t);
2118                         dst = flex_pit->dst_offset * sizeof(uint16_t);
2119                         size = flex_pit->size * sizeof(uint16_t);
2120                         for (j = src, k = dst; j < src + size; j++, k++)
2121                                 ptr->src_offset[k] = j;
2122                 }
2123                 (*num)++;
2124                 ptr++;
2125         }
2126 }
2127
2128 static inline void
2129 i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
2130                         struct rte_eth_fdir_flex_mask *flex_mask,
2131                         uint16_t *num)
2132 {
2133         struct i40e_fdir_flex_mask *mask;
2134         struct rte_eth_fdir_flex_mask *ptr = flex_mask;
2135         uint16_t flow_type;
2136         uint8_t i, j;
2137         uint16_t off_bytes, mask_tmp;
2138
2139         for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2140              i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
2141              i++) {
2142                 mask =  &pf->fdir.flex_mask[i];
2143                 flow_type = i40e_pctype_to_flowtype(pf->adapter,
2144                                                     (enum i40e_filter_pctype)i);
2145                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
2146                         continue;
2147
2148                 for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
2149                         if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
2150                                 ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
2151                                 ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
2152                         } else {
2153                                 ptr->mask[j * sizeof(uint16_t)] = 0x0;
2154                                 ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
2155                         }
2156                 }
2157                 for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
2158                         off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
2159                         mask_tmp = ~mask->bitmask[j].mask;
2160                         ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
2161                         ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
2162                 }
2163                 ptr->flow_type = flow_type;
2164                 ptr++;
2165                 (*num)++;
2166         }
2167 }
2168
2169 /*
2170  * i40e_fdir_info_get - get information of Flow Director
2171  * @pf: ethernet device to get info from
2172  * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
2173  *    the flow director information.
2174  */
2175 void
2176 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
2177 {
2178         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2179         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2180         uint16_t num_flex_set = 0;
2181         uint16_t num_flex_mask = 0;
2182         uint16_t i;
2183
2184         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
2185                 fdir->mode = RTE_FDIR_MODE_PERFECT;
2186         else
2187                 fdir->mode = RTE_FDIR_MODE_NONE;
2188
2189         fdir->guarant_spc =
2190                 (uint32_t)hw->func_caps.fd_filters_guaranteed;
2191         fdir->best_spc =
2192                 (uint32_t)hw->func_caps.fd_filters_best_effort;
2193         fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
2194         fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
2195         for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
2196                 fdir->flow_types_mask[i] = 0ULL;
2197         fdir->flex_payload_unit = sizeof(uint16_t);
2198         fdir->flex_bitmask_unit = sizeof(uint16_t);
2199         fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
2200         fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
2201         fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
2202
2203         i40e_fdir_info_get_flex_set(pf,
2204                                 fdir->flex_conf.flex_set,
2205                                 &num_flex_set);
2206         i40e_fdir_info_get_flex_mask(pf,
2207                                 fdir->flex_conf.flex_mask,
2208                                 &num_flex_mask);
2209
2210         fdir->flex_conf.nb_payloads = num_flex_set;
2211         fdir->flex_conf.nb_flexmasks = num_flex_mask;
2212 }
2213
2214 /*
2215  * i40e_fdir_stat_get - get statistics of Flow Director
2216  * @pf: ethernet device to get info from
2217  * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
2218  *    the flow director statistics.
2219  */
2220 void
2221 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
2222 {
2223         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2224         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2225         uint32_t fdstat;
2226
2227         fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2228         stat->guarant_cnt =
2229                 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2230                             I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2231         stat->best_cnt =
2232                 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2233                             I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2234 }
2235
2236 static int
2237 i40e_fdir_filter_set(struct rte_eth_dev *dev,
2238                      struct rte_eth_fdir_filter_info *info)
2239 {
2240         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2241         int ret = 0;
2242
2243         if (!info) {
2244                 PMD_DRV_LOG(ERR, "Invalid pointer");
2245                 return -EFAULT;
2246         }
2247
2248         switch (info->info_type) {
2249         case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
2250                 ret = i40e_fdir_filter_inset_select(pf,
2251                                 &(info->info.input_set_conf));
2252                 break;
2253         default:
2254                 PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
2255                             info->info_type);
2256                 return -EINVAL;
2257         }
2258
2259         return ret;
2260 }
2261
2262 /*
2263  * i40e_fdir_ctrl_func - deal with all operations on flow director.
2264  * @pf: board private structure
2265  * @filter_op:operation will be taken.
2266  * @arg: a pointer to specific structure corresponding to the filter_op
2267  */
2268 int
2269 i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
2270                        enum rte_filter_op filter_op,
2271                        void *arg)
2272 {
2273         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2274         int ret = 0;
2275
2276         if ((pf->flags & I40E_FLAG_FDIR) == 0)
2277                 return -ENOTSUP;
2278
2279         if (filter_op == RTE_ETH_FILTER_NOP)
2280                 return 0;
2281
2282         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2283                 return -EINVAL;
2284
2285         switch (filter_op) {
2286         case RTE_ETH_FILTER_ADD:
2287                 ret = i40e_add_del_fdir_filter(dev,
2288                         (struct rte_eth_fdir_filter *)arg,
2289                         TRUE);
2290                 break;
2291         case RTE_ETH_FILTER_DELETE:
2292                 ret = i40e_add_del_fdir_filter(dev,
2293                         (struct rte_eth_fdir_filter *)arg,
2294                         FALSE);
2295                 break;
2296         case RTE_ETH_FILTER_FLUSH:
2297                 ret = i40e_fdir_flush(dev);
2298                 break;
2299         case RTE_ETH_FILTER_INFO:
2300                 i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
2301                 break;
2302         case RTE_ETH_FILTER_SET:
2303                 ret = i40e_fdir_filter_set(dev,
2304                         (struct rte_eth_fdir_filter_info *)arg);
2305                 break;
2306         case RTE_ETH_FILTER_STATS:
2307                 i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
2308                 break;
2309         default:
2310                 PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
2311                 ret = -EINVAL;
2312                 break;
2313         }
2314         return ret;
2315 }
2316
2317 /* Restore flow director filter */
2318 void
2319 i40e_fdir_filter_restore(struct i40e_pf *pf)
2320 {
2321         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
2322         struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
2323         struct i40e_fdir_filter *f;
2324         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2325         uint32_t fdstat;
2326         uint32_t guarant_cnt;  /**< Number of filters in guaranteed spaces. */
2327         uint32_t best_cnt;     /**< Number of filters in best effort spaces. */
2328
2329         TAILQ_FOREACH(f, fdir_list, rules)
2330                 i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
2331
2332         fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2333         guarant_cnt =
2334                 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2335                            I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2336         best_cnt =
2337                 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2338                            I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2339
2340         PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d,  Best count: %d",
2341                     guarant_cnt, best_cnt);
2342 }