1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
23 #include <rte_hash_crc.h>
25 #include "i40e_logs.h"
26 #include "base/i40e_type.h"
27 #include "base/i40e_prototype.h"
28 #include "i40e_ethdev.h"
29 #include "i40e_rxtx.h"
31 #define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
33 #define IPV6_ADDR_LEN 16
37 #define IPPROTO_L2TP 115
40 #define I40E_FDIR_PKT_LEN 512
41 #define I40E_FDIR_IP_DEFAULT_LEN 420
42 #define I40E_FDIR_IP_DEFAULT_TTL 0x40
43 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
44 #define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
45 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
47 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
48 #define I40E_FDIR_IPv6_PAYLOAD_LEN 380
49 #define I40E_FDIR_UDP_DEFAULT_LEN 400
50 #define I40E_FDIR_GTP_DEFAULT_LEN 384
51 #define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
52 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
54 #define I40E_FDIR_GTPC_DST_PORT 2123
55 #define I40E_FDIR_GTPU_DST_PORT 2152
56 #define I40E_FDIR_GTP_VER_FLAG_0X30 0x30
57 #define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
58 #define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
59 #define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
61 #define I40E_FDIR_ESP_DST_PORT 4500
63 /* Wait time for fdir filter programming */
64 #define I40E_FDIR_MAX_WAIT_US 10000
66 /* Wait count and interval for fdir filter flush */
67 #define I40E_FDIR_FLUSH_RETRY 50
68 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
70 #define I40E_COUNTER_PF 2
71 /* Statistics counter index for one PF */
72 #define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
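/*
 * Worked example: with I40E_COUNTER_PF = 2, PF 0 gets counter index 0
 * and PF 1 gets counter index 2, i.e. each PF owns a block of two
 * statistics counters and FDIR matches use the first one in its block.
 */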
74 #define I40E_FDIR_FLOWS ( \
75 (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
76 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
77 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
78 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
79 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
80 (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
81 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
82 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
83 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
84 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
85 (1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
87 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
88 enum i40e_filter_pctype pctype,
89 const struct rte_eth_fdir_filter *filter,
91 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
92 struct i40e_fdir_filter *filter);
93 static struct i40e_fdir_filter *
94 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
95 const struct i40e_fdir_input *input);
96 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
97 struct i40e_fdir_filter *filter);
99 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
100 enum i40e_filter_pctype pctype,
101 const struct i40e_fdir_filter_conf *filter,
105 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
107 struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
108 struct i40e_hmc_obj_rxq rx_ctx;
109 int err = I40E_SUCCESS;
111 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
112 /* Init the RX queue in hardware */
113 rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
115 rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
116 rx_ctx.qlen = rxq->nb_rx_desc;
117 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
120 rx_ctx.dtype = i40e_header_split_none;
121 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
122 rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
123 rx_ctx.tphrdesc_ena = 1;
124 rx_ctx.tphwdesc_ena = 1;
125 rx_ctx.tphdata_ena = 1;
126 rx_ctx.tphhead_ena = 1;
127 rx_ctx.lrxqthresh = 2;
133 err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
134 if (err != I40E_SUCCESS) {
135 PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
138 err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
139 if (err != I40E_SUCCESS) {
140 PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
143 rxq->qrx_tail = hw->hw_addr +
144 I40E_QRX_TAIL(rxq->vsi->base_queue);
147 /* Init the RX tail register. */
148 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
154 * i40e_fdir_setup - reserve and initialize the Flow Director resources
155 * @pf: board private structure
158 i40e_fdir_setup(struct i40e_pf *pf)
160 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
161 struct i40e_vsi *vsi;
162 int err = I40E_SUCCESS;
163 char z_name[RTE_MEMZONE_NAMESIZE];
164 const struct rte_memzone *mz = NULL;
165 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
167 if ((pf->flags & I40E_FLAG_FDIR) == 0) {
168 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
169 return I40E_NOT_SUPPORTED;
172 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
173 " num_filters_best_effort = %u.",
174 hw->func_caps.fd_filters_guaranteed,
175 hw->func_caps.fd_filters_best_effort);
177 vsi = pf->fdir.fdir_vsi;
179 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
182 /* make new FDIR VSI */
183 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
185 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
186 return I40E_ERR_NO_AVAILABLE_VSI;
188 pf->fdir.fdir_vsi = vsi;
190 /* FDIR TX queue setup */
191 err = i40e_fdir_setup_tx_resources(pf);
193 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
197 /* FDIR RX queue setup */
198 err = i40e_fdir_setup_rx_resources(pf);
200 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
204 err = i40e_tx_queue_init(pf->fdir.txq);
206 PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
210 /* need to switch on before dev start */
211 err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
213 PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
217 /* Init the rx queue in hardware */
218 err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
220 PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
224 /* switch on rx queue */
225 err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
227 PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
231 /* reserve memory for the fdir programming packet */
232 snprintf(z_name, sizeof(z_name), "%s_%s_%d",
233 eth_dev->device->driver->name,
235 eth_dev->data->port_id);
236 mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
238 PMD_DRV_LOG(ERR, "Cannot init memzone for "
239 "flow director program packet.");
240 err = I40E_ERR_NO_MEMORY;
243 pf->fdir.prg_pkt = mz->addr;
244 pf->fdir.dma_addr = mz->iova;
246 pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
247 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
252 i40e_dev_rx_queue_release(pf->fdir.rxq);
255 i40e_dev_tx_queue_release(pf->fdir.txq);
258 i40e_vsi_release(vsi);
259 pf->fdir.fdir_vsi = NULL;
264 * i40e_fdir_teardown - release the Flow Director resources
265 * @pf: board private structure
268 i40e_fdir_teardown(struct i40e_pf *pf)
270 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
271 struct i40e_vsi *vsi;
273 vsi = pf->fdir.fdir_vsi;
276 int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
278 PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
279 err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
281 PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
282 i40e_dev_rx_queue_release(pf->fdir.rxq);
284 i40e_dev_tx_queue_release(pf->fdir.txq);
286 i40e_vsi_release(vsi);
287 pf->fdir.fdir_vsi = NULL;
290 /* check whether the flow director table is empty */
292 i40e_fdir_empty(struct i40e_hw *hw)
294 uint32_t guarant_cnt, best_cnt;
296 guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
297 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
298 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
299 best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
300 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
301 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
302 if (best_cnt + guarant_cnt > 0)
309 * Initialize the configuration for the byte stream extracted as flexible payload
313 i40e_init_flx_pld(struct i40e_pf *pf)
315 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
321 * Define the byte stream extracted as flexible payload in the
322 * field vector. By default, select 8 words from the beginning
323 * of the payload as flexible payload.
325 for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
326 index = i * I40E_MAX_FLXPLD_FIED;
327 pf->fdir.flex_set[index].src_offset = 0;
328 pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
329 pf->fdir.flex_set[index].dst_offset = 0;
330 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
332 I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/* unused */
334 I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/* unused */
337 /* initialize the masks */
338 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
339 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
340 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
342 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
344 pf->fdir.flex_mask[pctype].word_mask = 0;
345 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
346 for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
347 pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
348 pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
349 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
354 #define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
355 if ((flex_pit2).src_offset < \
356 (flex_pit1).src_offset + (flex_pit1).size) { \
357 PMD_DRV_LOG(ERR, "src_offset should be not" \
358 " less than than previous offset" \
359 " + previous FSIZE."); \
365 * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit
366 * structures, sorted by their src_offset values
368 static inline uint16_t
369 i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
370 struct i40e_fdir_flex_pit *flex_pit)
372 uint16_t src_tmp, size, num = 0;
373 uint16_t i, k, j = 0;
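/*
 * Worked example (illustrative): offsets {4, 5, 6, 10, 11} form two
 * contiguous runs and would produce
 *   flex_pit[0] = { .src_offset = 4,  .size = 3, .dst_offset = 0 }
 *   flex_pit[1] = { .src_offset = 10, .size = 2, .dst_offset = 3 },
 * with entries kept sorted by src_offset via the insertion loop below.
 */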
375 while (j < I40E_FDIR_MAX_FLEX_LEN) {
377 for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
378 if (src_offset[j + 1] == src_offset[j] + 1)
383 src_tmp = src_offset[j] + 1 - size;
384 /* the flex_pit entries need to be sorted by src_offset */
385 for (i = 0; i < num; i++) {
386 if (src_tmp < flex_pit[i].src_offset)
389 /* if an insert is required, shift entries backward */
390 for (k = num; k > i; k--)
391 flex_pit[k] = flex_pit[k - 1];
393 flex_pit[i].dst_offset = j + 1 - size;
394 flex_pit[i].src_offset = src_tmp;
395 flex_pit[i].size = size;
402 /* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
404 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
406 struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
409 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
410 if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
411 PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
416 memset(flex_pit, 0, sizeof(flex_pit));
417 num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
418 if (num > I40E_MAX_FLXPLD_FIED) {
419 PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
422 for (i = 0; i < num; i++) {
423 if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
424 flex_pit[i].src_offset & 0x01) {
425 PMD_DRV_LOG(ERR, "flexpayload should be measured"
430 I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
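/*
 * Example: with flex_pit[i] = { .src_offset = 0, .size = 4 }, the
 * following entry must start at src_offset >= 4; otherwise the two
 * extracted ranges would overlap and the check above fails.
 */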
436 * i40e_check_fdir_flex_conf - check whether the flex payload and mask
437 * configuration arguments are valid
440 i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
441 const struct rte_eth_fdir_flex_conf *conf)
443 const struct rte_eth_flex_payload_cfg *flex_cfg;
444 const struct rte_eth_fdir_flex_mask *flex_mask;
449 enum i40e_filter_pctype pctype;
452 PMD_DRV_LOG(INFO, "NULL pointer.");
455 /* check flexible payload setting configuration */
456 if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
457 PMD_DRV_LOG(ERR, "invalid number of payload setting.");
460 for (i = 0; i < conf->nb_payloads; i++) {
461 flex_cfg = &conf->flex_set[i];
462 if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
463 PMD_DRV_LOG(ERR, "invalid payload type.");
466 ret = i40e_check_fdir_flex_payload(flex_cfg);
468 PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
473 /* check flex mask setting configuration */
474 if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
475 PMD_DRV_LOG(ERR, "invalid number of flex masks.");
478 for (i = 0; i < conf->nb_flexmasks; i++) {
479 flex_mask = &conf->flex_mask[i];
480 pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
481 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
482 PMD_DRV_LOG(WARNING, "invalid flow type.");
486 for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
487 mask_tmp = I40E_WORD(flex_mask->mask[j],
488 flex_mask->mask[j + 1]);
489 if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
491 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
492 PMD_DRV_LOG(ERR, " exceed maximal"
493 " number of bitmasks.");
503 * i40e_set_flx_pld_cfg - configure how the byte stream is extracted as flexible payload
504 * @pf: board private structure
505 * @cfg: the rule describing how the byte stream is extracted as flexible payload
508 i40e_set_flx_pld_cfg(struct i40e_pf *pf,
509 const struct rte_eth_flex_payload_cfg *cfg)
511 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
512 struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
513 uint32_t flx_pit, flx_ort;
514 uint16_t num, min_next_off; /* in words */
515 uint8_t field_idx = 0;
516 uint8_t layer_idx = 0;
519 if (cfg->type == RTE_ETH_L2_PAYLOAD)
520 layer_idx = I40E_FLXPLD_L2_IDX;
521 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
522 layer_idx = I40E_FLXPLD_L3_IDX;
523 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
524 layer_idx = I40E_FLXPLD_L4_IDX;
526 memset(flex_pit, 0, sizeof(flex_pit));
527 num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
531 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
532 (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
533 (layer_idx * I40E_MAX_FLXPLD_FIED);
534 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
537 for (i = 0; i < num; i++) {
538 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
539 /* record the info in fdir structure */
540 pf->fdir.flex_set[field_idx].src_offset =
541 flex_pit[i].src_offset / sizeof(uint16_t);
542 pf->fdir.flex_set[field_idx].size =
543 flex_pit[i].size / sizeof(uint16_t);
544 pf->fdir.flex_set[field_idx].dst_offset =
545 flex_pit[i].dst_offset / sizeof(uint16_t);
546 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
547 pf->fdir.flex_set[field_idx].size,
548 pf->fdir.flex_set[field_idx].dst_offset);
550 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
552 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
553 pf->fdir.flex_set[field_idx].size;
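/*
 * min_next_off now points one word past the last used field. The loop
 * below programs the remaining PIT registers for this layer with that
 * offset and the reserved FSIZE/DEST_OFF values, so hardware treats
 * them as no-ops while the ordering constraint between PIT entries
 * (each src_offset >= end of the previous field) still holds.
 */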
555 for (; i < I40E_MAX_FLXPLD_FIED; i++) {
556 /* set the unused registers, obeying the register's constraints */
557 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
558 NONUSE_FLX_PIT_DEST_OFF);
560 I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
567 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
568 * @pf: board private structure
569 * @pctype: packet classify type
570 * @flex_masks: mask for flexible payload
573 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
574 enum i40e_filter_pctype pctype,
575 const struct rte_eth_fdir_flex_mask *mask_cfg)
577 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
578 struct i40e_fdir_flex_mask *flex_mask;
579 uint32_t flxinset, fd_mask;
581 uint8_t i, nb_bitmask = 0;
583 flex_mask = &pf->fdir.flex_mask[pctype];
584 memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
585 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
586 mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
587 if (mask_tmp != 0x0) {
588 flex_mask->word_mask |=
589 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
590 if (mask_tmp != UINT16_MAX) {
592 flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
593 flex_mask->bitmask[nb_bitmask].offset =
594 i / sizeof(uint16_t);
599 /* write mask to hw */
600 flxinset = (flex_mask->word_mask <<
601 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
602 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
603 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
605 for (i = 0; i < nb_bitmask; i++) {
606 fd_mask = (flex_mask->bitmask[i].mask <<
607 I40E_PRTQF_FD_MSK_MASK_SHIFT) &
608 I40E_PRTQF_FD_MSK_MASK_MASK;
609 fd_mask |= ((flex_mask->bitmask[i].offset +
610 I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
611 I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
612 I40E_PRTQF_FD_MSK_OFFSET_MASK;
613 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
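/*
 * Worked example: a per-word mask of 0xFFFF only sets the word_mask
 * bit (full-word compare), while a partial mask such as 0x00F0 also
 * consumes one of the I40E_FDIR_BITMASK_NUM_WORD bitmask slots and is
 * stored inverted (~0x00F0) before being written to I40E_PRTQF_FD_MSK
 * above.
 */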
618 * Enable/disable flow director RX processing in vector routines.
621 i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on)
625 for (i = 0; i < dev->data->nb_rx_queues; i++) {
626 struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
629 rxq->fdir_enabled = on;
631 PMD_DRV_LOG(DEBUG, "Flow Director processing on RX set to %d", on);
635 * Configure flow director related settings
638 i40e_fdir_configure(struct rte_eth_dev *dev)
640 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
641 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
642 struct rte_eth_fdir_flex_conf *conf;
643 enum i40e_filter_pctype pctype;
649 * configuration needs to be done before
650 * flow director filters are added.
651 * If filters exist, flush them.
653 if (i40e_fdir_empty(hw) < 0) {
654 ret = i40e_fdir_flush(dev);
656 PMD_DRV_LOG(ERR, "failed to flush fdir table.");
661 /* enable FDIR filter */
662 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
663 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
664 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
666 i40e_init_flx_pld(pf); /* set flex config to default value */
668 conf = &dev->data->dev_conf.fdir_conf.flex_conf;
669 ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
671 PMD_DRV_LOG(ERR, " invalid configuration arguments.");
675 if (!pf->support_multi_driver) {
676 /* configure flex payload */
677 for (i = 0; i < conf->nb_payloads; i++)
678 i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
679 /* configure flex mask */
680 for (i = 0; i < conf->nb_flexmasks; i++) {
681 if (hw->mac.type == I40E_MAC_X722) {
682 /* get pctype value in fd pctype register */
683 pctype = (enum i40e_filter_pctype)
685 I40E_GLQF_FD_PCTYPES(
686 (int)i40e_flowtype_to_pctype(
688 conf->flex_mask[i].flow_type)));
690 pctype = i40e_flowtype_to_pctype(pf->adapter,
691 conf->flex_mask[i].flow_type);
694 i40e_set_flex_mask_on_pctype(pf, pctype,
695 &conf->flex_mask[i]);
698 PMD_DRV_LOG(ERR, "Not support flexible payload.");
701 /* Enable FDIR processing in RX routines */
702 i40e_fdir_rx_proc_enable(dev, 1);
708 i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
709 unsigned char *raw_pkt,
712 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
713 uint16_t *ether_type;
714 uint8_t len = 2 * sizeof(struct rte_ether_addr);
715 struct rte_ipv4_hdr *ip;
716 struct rte_ipv6_hdr *ip6;
717 static const uint8_t next_proto[] = {
718 [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
719 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
720 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
721 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
722 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
723 [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
724 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
725 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
726 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
727 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
730 raw_pkt += 2 * sizeof(struct rte_ether_addr);
731 if (vlan && fdir_input->flow_ext.vlan_tci) {
732 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
733 rte_memcpy(raw_pkt + sizeof(uint16_t),
734 &fdir_input->flow_ext.vlan_tci,
736 raw_pkt += sizeof(vlan_frame);
737 len += sizeof(vlan_frame);
739 ether_type = (uint16_t *)raw_pkt;
740 raw_pkt += sizeof(uint16_t);
741 len += sizeof(uint16_t);
743 switch (fdir_input->flow_type) {
744 case RTE_ETH_FLOW_L2_PAYLOAD:
745 *ether_type = fdir_input->flow.l2_flow.ether_type;
747 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
748 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
749 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
750 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
751 case RTE_ETH_FLOW_FRAG_IPV4:
752 ip = (struct rte_ipv4_hdr *)raw_pkt;
754 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
755 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
756 /* set length to the default value */
757 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
758 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
759 fdir_input->flow.ip4_flow.proto :
760 next_proto[fdir_input->flow_type];
761 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
762 fdir_input->flow.ip4_flow.ttl :
763 I40E_FDIR_IP_DEFAULT_TTL;
764 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
766 * The source and destination fields in the transmitted packet
767 * need to be presented in a reversed order with respect
768 * to the expected received packets.
770 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
771 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
772 len += sizeof(struct rte_ipv4_hdr);
774 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
775 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
776 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
777 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
778 case RTE_ETH_FLOW_FRAG_IPV6:
779 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
781 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
783 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
784 (fdir_input->flow.ipv6_flow.tc <<
785 I40E_FDIR_IPv6_TC_OFFSET));
787 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
788 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
789 fdir_input->flow.ipv6_flow.proto :
790 next_proto[fdir_input->flow_type];
791 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
792 fdir_input->flow.ipv6_flow.hop_limits :
793 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
795 * The source and destination fields in the transmitted packet
796 * need to be presented in a reversed order with respect
797 * to the expected received packets.
799 rte_memcpy(&(ip6->src_addr),
800 &(fdir_input->flow.ipv6_flow.dst_ip),
802 rte_memcpy(&(ip6->dst_addr),
803 &(fdir_input->flow.ipv6_flow.src_ip),
805 len += sizeof(struct rte_ipv6_hdr);
808 PMD_DRV_LOG(ERR, "unknown flow type %u.",
809 fdir_input->flow_type);
817 * i40e_fdir_construct_pkt - construct packet based on fields in input
818 * @pf: board private structure
819 * @fdir_input: input set of the flow director entry
820 * @raw_pkt: a packet to be constructed
823 i40e_fdir_construct_pkt(struct i40e_pf *pf,
824 const struct rte_eth_fdir_input *fdir_input,
825 unsigned char *raw_pkt)
827 unsigned char *payload, *ptr;
828 struct rte_udp_hdr *udp;
829 struct rte_tcp_hdr *tcp;
830 struct rte_sctp_hdr *sctp;
831 uint8_t size, dst = 0;
832 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use L4 by default */
835 /* fill the ethernet and IP head */
836 len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
837 !!fdir_input->flow_ext.vlan_tci);
841 /* fill the L4 head */
842 switch (fdir_input->flow_type) {
843 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
844 udp = (struct rte_udp_hdr *)(raw_pkt + len);
845 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
847 * The source and destination fields in the transmitted packet
848 * need to be presented in a reversed order with respect
849 * to the expected received packets.
851 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
852 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
853 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
856 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
857 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
858 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
860 * The source and destination fields in the transmitted packet
861 * need to be presented in a reversed order with respect
862 * to the expected received packets.
864 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
865 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
866 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
869 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
870 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
871 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
873 * The source and destination fields in the transmitted packet
874 * need to be presented in a reversed order with respect
875 * to the expected received packets.
877 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
878 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
879 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
882 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
883 case RTE_ETH_FLOW_FRAG_IPV4:
884 payload = raw_pkt + len;
885 set_idx = I40E_FLXPLD_L3_IDX;
888 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
889 udp = (struct rte_udp_hdr *)(raw_pkt + len);
890 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
892 * The source and destination fields in the transmitted packet
893 * need to be presented in a reversed order with respect
894 * to the expected received packets.
896 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
897 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
898 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
901 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
902 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
903 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
905 * The source and destination fields in the transmitted packet
906 * need to be presented in a reversed order with respect
907 * to the expected received packets.
909 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
910 tcp->src_port = fdir_input->flow.tcp6_flow.dst_port;
911 tcp->dst_port = fdir_input->flow.tcp6_flow.src_port;
914 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
915 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
916 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
918 * The source and destination fields in the transmitted packet
919 * need to be presented in a reversed order with respect
920 * to the expected received packets.
922 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
923 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
924 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
927 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
928 case RTE_ETH_FLOW_FRAG_IPV6:
929 payload = raw_pkt + len;
930 set_idx = I40E_FLXPLD_L3_IDX;
932 case RTE_ETH_FLOW_L2_PAYLOAD:
933 payload = raw_pkt + len;
935 * ARP packet is a special case for which the payload
936 * starts after the whole ARP header
938 if (fdir_input->flow.l2_flow.ether_type ==
939 rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
940 payload += sizeof(struct rte_arp_hdr);
941 set_idx = I40E_FLXPLD_L2_IDX;
944 PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
948 /* fill the flexbytes into the payload */
949 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
950 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
951 size = pf->fdir.flex_set[pit_idx].size;
954 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
956 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
958 &fdir_input->flow_ext.flexbytes[dst],
959 size * sizeof(uint16_t));
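/*
 * Worked example: flex_set[pit_idx] = { .src_offset = 2, .size = 2,
 * .dst_offset = 0 } copies 4 bytes from flexbytes[0] into the payload
 * at byte offset 4, since all three fields are measured in 2-byte
 * words.
 */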
965 static struct i40e_customized_pctype *
966 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
968 struct i40e_customized_pctype *cus_pctype;
969 enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
971 for (; i < I40E_CUSTOMIZED_MAX; i++) {
972 cus_pctype = &pf->customized_pctype[i];
973 if (pctype == cus_pctype->pctype)
980 fill_ip6_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
981 uint8_t next_proto, uint8_t len, uint16_t *ether_type)
983 struct rte_ipv6_hdr *ip6;
985 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
987 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
988 ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
989 (fdir_input->flow.ipv6_flow.tc << I40E_FDIR_IPv6_TC_OFFSET));
990 ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
991 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
992 fdir_input->flow.ipv6_flow.proto : next_proto;
993 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
994 fdir_input->flow.ipv6_flow.hop_limits :
995 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
997 * The source and destination fields in the transmitted packet
998 * need to be presented in a reversed order with respect
999 * to the expected received packets.
1001 rte_memcpy(&ip6->src_addr, &fdir_input->flow.ipv6_flow.dst_ip,
1003 rte_memcpy(&ip6->dst_addr, &fdir_input->flow.ipv6_flow.src_ip,
1005 len += sizeof(struct rte_ipv6_hdr);
1011 fill_ip4_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
1012 uint8_t next_proto, uint8_t len, uint16_t *ether_type)
1014 struct rte_ipv4_hdr *ip4;
1016 ip4 = (struct rte_ipv4_hdr *)raw_pkt;
1018 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1019 ip4->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1020 /* set length to the default value */
1021 ip4->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
1022 ip4->time_to_live = fdir_input->flow.ip4_flow.ttl ?
1023 fdir_input->flow.ip4_flow.ttl :
1024 I40E_FDIR_IP_DEFAULT_TTL;
1025 ip4->type_of_service = fdir_input->flow.ip4_flow.tos;
1026 ip4->next_proto_id = fdir_input->flow.ip4_flow.proto ?
1027 fdir_input->flow.ip4_flow.proto : next_proto;
1029 * The source and destination fields in the transmitted packet
1030 * need to be presented in a reversed order with respect
1031 * to the expected received packets.
1033 ip4->src_addr = fdir_input->flow.ip4_flow.dst_ip;
1034 ip4->dst_addr = fdir_input->flow.ip4_flow.src_ip;
1035 len += sizeof(struct rte_ipv4_hdr);
1041 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
1042 const struct i40e_fdir_input *fdir_input,
1043 unsigned char *raw_pkt,
1046 struct i40e_customized_pctype *cus_pctype = NULL;
1047 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
1048 uint16_t *ether_type;
1049 uint8_t len = 2 * sizeof(struct rte_ether_addr);
1050 uint8_t pctype = fdir_input->pctype;
1051 bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
1052 static const uint8_t next_proto[] = {
1053 [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
1054 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
1055 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
1056 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
1057 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
1058 [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
1059 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
1060 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
1061 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
1062 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
1065 rte_memcpy(raw_pkt, &fdir_input->flow.l2_flow.dst,
1066 sizeof(struct rte_ether_addr));
1067 rte_memcpy(raw_pkt + sizeof(struct rte_ether_addr),
1068 &fdir_input->flow.l2_flow.src,
1069 sizeof(struct rte_ether_addr));
1070 raw_pkt += 2 * sizeof(struct rte_ether_addr);
1072 if (vlan && fdir_input->flow_ext.vlan_tci) {
1073 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
1074 rte_memcpy(raw_pkt + sizeof(uint16_t),
1075 &fdir_input->flow_ext.vlan_tci,
1077 raw_pkt += sizeof(vlan_frame);
1078 len += sizeof(vlan_frame);
1080 ether_type = (uint16_t *)raw_pkt;
1081 raw_pkt += sizeof(uint16_t);
1082 len += sizeof(uint16_t);
1084 if (is_customized_pctype) {
1085 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1087 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1088 fdir_input->pctype);
1093 if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
1094 *ether_type = fdir_input->flow.l2_flow.ether_type;
1095 else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
1096 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
1097 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
1098 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1099 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
1100 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1101 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1102 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1103 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1104 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6 ||
1105 is_customized_pctype) {
1106 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
1107 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
1108 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
1109 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1110 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1111 len = fill_ip4_head(fdir_input, raw_pkt,
1112 next_proto[pctype], len, ether_type);
1113 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1114 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1115 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1116 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1117 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1118 len = fill_ip6_head(fdir_input, raw_pkt,
1119 next_proto[pctype], len,
1121 } else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1122 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1123 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1124 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1125 len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
1127 } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3) {
1128 len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_L2TP,
1130 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
1131 len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_ESP,
1133 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1134 len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
1139 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6)
1140 len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_ESP,
1142 else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP)
1143 len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_UDP,
1145 else if (cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3)
1146 len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_L2TP,
1149 PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
1157 * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
1158 * @pf: board private structure
1159 * @fdir_input: input set of the flow director entry
1160 * @raw_pkt: a packet to be constructed
1163 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
1164 const struct i40e_fdir_input *fdir_input,
1165 unsigned char *raw_pkt)
1167 unsigned char *payload = NULL;
1169 struct rte_udp_hdr *udp;
1170 struct rte_tcp_hdr *tcp;
1171 struct rte_sctp_hdr *sctp;
1172 struct rte_flow_item_gtp *gtp;
1173 struct rte_ipv4_hdr *gtp_ipv4;
1174 struct rte_ipv6_hdr *gtp_ipv6;
1175 struct rte_flow_item_l2tpv3oip *l2tpv3oip;
1176 struct rte_flow_item_esp *esp;
1177 struct rte_ipv4_hdr *esp_ipv4;
1178 struct rte_ipv6_hdr *esp_ipv6;
1180 uint8_t size, dst = 0;
1181 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use L4 by default */
1183 uint8_t pctype = fdir_input->pctype;
1184 struct i40e_customized_pctype *cus_pctype;
1186 /* raw packet template - just copy contents of the raw packet */
1187 if (fdir_input->flow_ext.pkt_template) {
1188 memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
1189 fdir_input->flow.raw_flow.length);
1193 /* fill the ethernet and IP head */
1194 len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
1195 !!fdir_input->flow_ext.vlan_tci);
1199 /* fill the L4 head */
1200 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
1201 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1202 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1204 * The source and destination fields in the transmitted packet
1205 * need to be presented in a reversed order with respect
1206 * to the expected received packets.
1208 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
1209 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
1210 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1211 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
1212 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1213 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1215 * The source and destination fields in the transmitted packet
1216 * need to be presented in a reversed order with respect
1217 * to the expected received packets.
1219 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
1220 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
1221 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1222 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
1223 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1224 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1226 * The source and destination fields in the transmitted packet
1227 * need to be presented in a reversed order with respect
1228 * to the expected received packets.
1230 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
1231 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
1232 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
1233 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1234 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1235 payload = raw_pkt + len;
1236 set_idx = I40E_FLXPLD_L3_IDX;
1237 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1238 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1239 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1241 * The source and destination fields in the transmitted packet
1242 * need to be presented in a reversed order with respect
1243 * to the expected received packets.
1245 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1246 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1247 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1248 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1249 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1250 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1252 * The source and destination fields in the transmitted packet
1253 * need to be presented in a reversed order with respect
1254 * to the expected received packets.
1256 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1257 tcp->src_port = fdir_input->flow.tcp6_flow.dst_port;
1258 tcp->dst_port = fdir_input->flow.tcp6_flow.src_port;
1259 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1260 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1261 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1263 * The source and destination fields in the transmitted packet
1264 * need to be presented in a reversed order with respect
1265 * to the expected received packets.
1267 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1268 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1269 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1270 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1271 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1272 payload = raw_pkt + len;
1273 set_idx = I40E_FLXPLD_L3_IDX;
1274 } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1275 payload = raw_pkt + len;
1277 * ARP packet is a special case for which the payload
1278 * starts after the whole ARP header
1280 if (fdir_input->flow.l2_flow.ether_type ==
1281 rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
1282 payload += sizeof(struct rte_arp_hdr);
1283 set_idx = I40E_FLXPLD_L2_IDX;
1284 } else if (fdir_input->flow_ext.customized_pctype) {
1285 /* If customized pctype is used */
1286 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1287 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1288 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1289 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1290 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1291 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1293 rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1295 gtp = (struct rte_flow_item_gtp *)
1296 ((unsigned char *)udp +
1297 sizeof(struct rte_udp_hdr));
1299 rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1300 gtp->teid = fdir_input->flow.gtp_flow.teid;
1301 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1303 /* GTP-C message type is not supported. */
1304 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1306 rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1307 gtp->v_pt_rsv_flags =
1308 I40E_FDIR_GTP_VER_FLAG_0X32;
1311 rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1312 gtp->v_pt_rsv_flags =
1313 I40E_FDIR_GTP_VER_FLAG_0X30;
1316 if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1317 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1318 gtp_ipv4 = (struct rte_ipv4_hdr *)
1319 ((unsigned char *)gtp +
1320 sizeof(struct rte_flow_item_gtp));
1321 gtp_ipv4->version_ihl =
1322 I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1323 gtp_ipv4->next_proto_id = IPPROTO_IP;
1324 gtp_ipv4->total_length =
1326 I40E_FDIR_INNER_IP_DEFAULT_LEN);
1327 payload = (unsigned char *)gtp_ipv4 +
1328 sizeof(struct rte_ipv4_hdr);
1329 } else if (cus_pctype->index ==
1330 I40E_CUSTOMIZED_GTPU_IPV6) {
1331 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1332 gtp_ipv6 = (struct rte_ipv6_hdr *)
1333 ((unsigned char *)gtp +
1334 sizeof(struct rte_flow_item_gtp));
1335 gtp_ipv6->vtc_flow =
1337 I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1338 (0 << I40E_FDIR_IPv6_TC_OFFSET));
1339 gtp_ipv6->proto = IPPROTO_NONE;
1340 gtp_ipv6->payload_len =
1342 I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1343 gtp_ipv6->hop_limits =
1344 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1345 payload = (unsigned char *)gtp_ipv6 +
1346 sizeof(struct rte_ipv6_hdr);
1348 payload = (unsigned char *)gtp +
1349 sizeof(struct rte_flow_item_gtp);
1350 } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3 ||
1351 cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3) {
1352 l2tpv3oip = (struct rte_flow_item_l2tpv3oip *)(raw_pkt
1355 if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3)
1356 l2tpv3oip->session_id =
1357 fdir_input->flow.ip4_l2tpv3oip_flow.session_id;
1359 l2tpv3oip->session_id =
1360 fdir_input->flow.ip6_l2tpv3oip_flow.session_id;
1361 payload = (unsigned char *)l2tpv3oip +
1362 sizeof(struct rte_flow_item_l2tpv3oip);
1363 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4 ||
1364 cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6 ||
1365 cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP ||
1366 cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1367 if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
1368 esp_ipv4 = (struct rte_ipv4_hdr *)
1370 esp = (struct rte_flow_item_esp *)esp_ipv4;
1372 fdir_input->flow.esp_ipv4_flow.spi;
1373 payload = (unsigned char *)esp +
1374 sizeof(struct rte_esp_hdr);
1375 len += sizeof(struct rte_esp_hdr);
1376 } else if (cus_pctype->index ==
1377 I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1378 esp_ipv4 = (struct rte_ipv4_hdr *)
1380 udp = (struct rte_udp_hdr *)esp_ipv4;
1381 udp->dst_port = rte_cpu_to_be_16
1382 (I40E_FDIR_ESP_DST_PORT);
1384 udp->dgram_len = rte_cpu_to_be_16
1385 (I40E_FDIR_UDP_DEFAULT_LEN);
1386 esp = (struct rte_flow_item_esp *)
1387 ((unsigned char *)esp_ipv4 +
1388 sizeof(struct rte_udp_hdr));
1390 fdir_input->flow.esp_ipv4_udp_flow.spi;
1391 payload = (unsigned char *)esp +
1392 sizeof(struct rte_esp_hdr);
1393 len += sizeof(struct rte_udp_hdr) +
1394 sizeof(struct rte_esp_hdr);
1395 } else if (cus_pctype->index ==
1396 I40E_CUSTOMIZED_ESP_IPV6) {
1397 esp_ipv6 = (struct rte_ipv6_hdr *)
1399 esp = (struct rte_flow_item_esp *)esp_ipv6;
1401 fdir_input->flow.esp_ipv6_flow.spi;
1402 payload = (unsigned char *)esp +
1403 sizeof(struct rte_esp_hdr);
1404 len += sizeof(struct rte_esp_hdr);
1405 } else if (cus_pctype->index ==
1406 I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1407 esp_ipv6 = (struct rte_ipv6_hdr *)
1409 udp = (struct rte_udp_hdr *)esp_ipv6;
1410 udp->dst_port = rte_cpu_to_be_16
1411 (I40E_FDIR_ESP_DST_PORT);
1413 udp->dgram_len = rte_cpu_to_be_16
1414 (I40E_FDIR_UDP_DEFAULT_LEN);
1415 esp = (struct rte_flow_item_esp *)
1416 ((unsigned char *)esp_ipv6 +
1417 sizeof(struct rte_udp_hdr));
1419 fdir_input->flow.esp_ipv6_udp_flow.spi;
1420 payload = (unsigned char *)esp +
1421 sizeof(struct rte_esp_hdr);
1422 len += sizeof(struct rte_udp_hdr) +
1423 sizeof(struct rte_esp_hdr);
1427 PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
1431 /* fill the flexbytes into the payload */
1432 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1433 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1434 size = pf->fdir.flex_set[pit_idx].size;
1437 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1439 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1440 (void)rte_memcpy(ptr,
1441 &fdir_input->flow_ext.flexbytes[dst],
1442 size * sizeof(uint16_t));
1448 /* Construct the TX descriptor cmd_type_offset_bsz qword */
1449 static inline uint64_t
1450 i40e_build_ctob(uint32_t td_cmd,
1455 return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1456 ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
1457 ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1458 ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1459 ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
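/*
 * Worked example: i40e_build_ctob(I40E_TX_DESC_CMD_EOP, 0,
 * I40E_FDIR_PKT_LEN, 0) yields one little-endian qword carrying
 * DTYPE_DATA, the command bits, a zero offset, the 512-byte buffer
 * size and a zero L2TAG1, as written to cmd_type_offset_bsz by the
 * programming routines below.
 */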
1463 * check the programming status descriptor in the RX queue.
1464 * done after a Flow Director filter has been programmed on
1468 i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
1470 volatile union i40e_rx_desc *rxdp;
1477 rxdp = &rxq->rx_ring[rxq->rx_tail];
1478 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1479 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1480 >> I40E_RXD_QW1_STATUS_SHIFT;
1482 if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1483 len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
1484 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1485 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1487 if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
1488 id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
1490 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
1491 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
1492 if (error == (0x1 <<
1493 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
1494 PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
1495 " (FD_ID %u): programming status"
1497 rxdp->wb.qword0.hi_dword.fd_id);
1499 } else if (error == (0x1 <<
1500 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
1501 PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
1502 " (FD_ID %u): programming status"
1504 rxdp->wb.qword0.hi_dword.fd_id);
1507 PMD_DRV_LOG(ERR, "invalid programming status"
1508 " reported, error = %u.", error);
1510 PMD_DRV_LOG(INFO, "unknown programming status"
1511 " reported, len = %d, id = %u.", len, id);
1512 rxdp->wb.qword1.status_error_len = 0;
1514 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
1516 if (rxq->rx_tail == 0)
1517 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1519 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
1526 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1527 struct i40e_fdir_filter *filter)
1529 rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
1530 if (input->input.flow_ext.pkt_template) {
1531 filter->fdir.input.flow.raw_flow.packet = NULL;
1532 filter->fdir.input.flow.raw_flow.length =
1533 rte_hash_crc(input->input.flow.raw_flow.packet,
1534 input->input.flow.raw_flow.length,
1535 input->input.flow.raw_flow.pctype);
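/*
 * For raw packet templates the packet pointer itself is not stored;
 * a CRC over the template bytes (seeded with the pctype) replaces
 * raw_flow.length and is passed as the precomputed hash signature by
 * the lookup/insert/delete helpers below.
 */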
1540 /* Check whether the flow director filter exists */
1541 static struct i40e_fdir_filter *
1542 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1543 const struct i40e_fdir_input *input)
1547 if (input->flow_ext.pkt_template)
1548 ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1549 (const void *)input,
1550 input->flow.raw_flow.length);
1552 ret = rte_hash_lookup(fdir_info->hash_table,
1553 (const void *)input);
1557 return fdir_info->hash_map[ret];
1560 /* Add a flow director filter into the SW list */
1562 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1564 struct i40e_fdir_info *fdir_info = &pf->fdir;
1567 if (filter->fdir.input.flow_ext.pkt_template)
1568 ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1569 &filter->fdir.input,
1570 filter->fdir.input.flow.raw_flow.length);
1572 ret = rte_hash_add_key(fdir_info->hash_table,
1573 &filter->fdir.input);
1576 "Failed to insert fdir filter to hash table %d!",
1580 fdir_info->hash_map[ret] = filter;
1582 TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
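/*
 * rte_hash_add_key() returns the slot index of the stored key; it is
 * reused as the index into hash_map[] so a later lookup can go
 * straight from hash slot to filter object.
 */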
1587 /* Delete a flow director filter from the SW list */
1589 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1591 struct i40e_fdir_info *fdir_info = &pf->fdir;
1592 struct i40e_fdir_filter *filter;
1595 if (input->flow_ext.pkt_template)
1596 ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1598 input->flow.raw_flow.length);
1600 ret = rte_hash_del_key(fdir_info->hash_table, input);
1603 "Failed to delete fdir filter to hash table %d!",
1607 filter = fdir_info->hash_map[ret];
1608 fdir_info->hash_map[ret] = NULL;
1610 TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1617 * i40e_add_del_fdir_filter - add or remove a flow director filter.
1618 * @pf: board private structure
1619 * @filter: fdir filter entry
1620 * @add: 0 - delete, 1 - add
1623 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
1624 const struct rte_eth_fdir_filter *filter,
1627 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1628 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1629 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1630 enum i40e_filter_pctype pctype;
1633 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1634 PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
1635 " check the mode in fdir_conf.");
1639 pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
1640 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
1641 PMD_DRV_LOG(ERR, "invalid flow_type input.");
1644 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1645 PMD_DRV_LOG(ERR, "Invalid queue ID");
1648 if (filter->input.flow_ext.is_vf &&
1649 filter->input.flow_ext.dst_id >= pf->vf_num) {
1650 PMD_DRV_LOG(ERR, "Invalid VF ID");
1654 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1656 ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
1658 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1662 if (hw->mac.type == I40E_MAC_X722) {
1663 /* get translated pctype value in fd pctype register */
1664 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1665 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1668 ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
1670 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1679 * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1680 * @pf: board private structure
1681 * @filter: fdir filter entry
1682 * @add: 0 - delete, 1 - add
1685 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1686 const struct i40e_fdir_filter_conf *filter,
1689 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1690 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1691 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1692 enum i40e_filter_pctype pctype;
1693 struct i40e_fdir_info *fdir_info = &pf->fdir;
1694 struct i40e_fdir_filter *fdir_filter, *node;
1695 struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1698 if (pf->fdir.fdir_vsi == NULL) {
1699 PMD_DRV_LOG(ERR, "FDIR is not enabled");
1703 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1704 PMD_DRV_LOG(ERR, "Invalid queue ID");
1707 if (filter->input.flow_ext.is_vf &&
1708 filter->input.flow_ext.dst_id >= pf->vf_num) {
1709 PMD_DRV_LOG(ERR, "Invalid VF ID");
1712 if (filter->input.flow_ext.pkt_template) {
1713 if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
1714 !filter->input.flow.raw_flow.packet) {
1715 PMD_DRV_LOG(ERR, "Invalid raw packet template"
1716 " flow filter parameters!");
1719 pctype = filter->input.flow.raw_flow.pctype;
1721 pctype = filter->input.pctype;
1724 /* Check whether the filter exists in the SW list */
1725 memset(&check_filter, 0, sizeof(check_filter));
1726 i40e_fdir_filter_convert(filter, &check_filter);
1727 node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
1730 "Conflict with existing flow director rules!");
1734 if (!add && !node) {
1736 "There's no corresponding flow firector filter!");
1740 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1742 ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1744 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1748 if (hw->mac.type == I40E_MAC_X722) {
1749 /* get translated pctype value in fd pctype register */
1750 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1751 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1754 ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
1756 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1762 fdir_filter = rte_zmalloc("fdir_filter",
1763 sizeof(*fdir_filter), 0);
1764 if (fdir_filter == NULL) {
1765 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
1769 rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
1770 ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
1772 rte_free(fdir_filter);
1774 ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1781 * i40e_fdir_filter_programming - Program a flow director filter rule.
1782 * This is done with a Flow Director programming descriptor followed by a
1783 * packet structure that contains the filter fields that need to match.
1784 * @pf: board private structure
1786 * @filter: fdir filter entry
1787 * @add: 0 - delete, 1 - add
1790 i40e_fdir_filter_programming(struct i40e_pf *pf,
1791 enum i40e_filter_pctype pctype,
1792 const struct rte_eth_fdir_filter *filter,
1795 struct i40e_tx_queue *txq = pf->fdir.txq;
1796 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1797 const struct rte_eth_fdir_action *fdir_action = &filter->action;
1798 volatile struct i40e_tx_desc *txdp;
1799 volatile struct i40e_filter_program_desc *fdirdp;
1804 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1805 fdirdp = (volatile struct i40e_filter_program_desc *)
1806 (&(txq->tx_ring[txq->tx_tail]));
1808 fdirdp->qindex_flex_ptype_vsi =
1809 rte_cpu_to_le_32((fdir_action->rx_queue <<
1810 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1811 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1813 fdirdp->qindex_flex_ptype_vsi |=
1814 rte_cpu_to_le_32((fdir_action->flex_off <<
1815 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1816 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1818 fdirdp->qindex_flex_ptype_vsi |=
1819 rte_cpu_to_le_32((pctype <<
1820 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1821 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1823 if (filter->input.flow_ext.is_vf)
1824 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1826 /* Use LAN VSI Id by default */
1827 vsi_id = pf->main_vsi->vsi_id;
1828 fdirdp->qindex_flex_ptype_vsi |=
1829 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1830 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1831 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1833 fdirdp->dtype_cmd_cntindex =
1834 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1837 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1838 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1839 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1841 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1842 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1843 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1845 if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
1846 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1847 else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
1848 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1849 else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
1850 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1852 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1853 " unsupported fdir behavior.");
1857 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1858 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1859 I40E_TXD_FLTR_QW1_DEST_MASK);
1861 fdirdp->dtype_cmd_cntindex |=
1862 rte_cpu_to_le_32((fdir_action->report_status<<
1863 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1864 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1866 fdirdp->dtype_cmd_cntindex |=
1867 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1868 fdirdp->dtype_cmd_cntindex |=
1870 ((uint32_t)pf->fdir.match_counter_index <<
1871 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1872 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1874 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1876 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1877 txdp = &(txq->tx_ring[txq->tx_tail + 1]);
1878 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1879 td_cmd = I40E_TX_DESC_CMD_EOP |
1880 I40E_TX_DESC_CMD_RS |
1881 I40E_TX_DESC_CMD_DUMMY;
1883 txdp->cmd_type_offset_bsz =
1884 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1886 txq->tx_tail += 2; /* two descriptors were filled above: fdirdp and txdp */
1887 if (txq->tx_tail >= txq->nb_tx_desc)
1889 /* Update the tx tail register */
1891 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
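/*
 * The DD wait below and the subsequent programming status check share
 * a single I40E_FDIR_MAX_WAIT_US (10 ms) budget: the loop counter i
 * carries over from the first loop into the second.
 */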
1892 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1893 if ((txdp->cmd_type_offset_bsz &
1894 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1895 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1899 if (i >= I40E_FDIR_MAX_WAIT_US) {
1900 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1901 " time out to get DD on tx queue.");
1904 /* delay up to 10 ms in total while checking programming status */
1905 for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
1906 if (i40e_check_fdir_programming_status(rxq) >= 0)
1911 "Failed to program FDIR filter: programming status reported.");
1916 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
1917 * This is done with a Flow Director programming descriptor followed by a
1918 * packet structure that contains the filter fields that need to match.
1919 * @pf: board private structure
1921 * @filter: fdir filter entry
1922 * @add: 0 - delete, 1 - add
i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
				  enum i40e_filter_pctype pctype,
				  const struct i40e_fdir_filter_conf *filter,
				  bool add)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct i40e_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id;
	uint8_t dest;
	uint32_t i;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	fdirdp = (volatile struct i40e_filter_program_desc *)
				(&txq->tx_ring[txq->tx_tail]);

	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					I40E_TXD_FLTR_QW0_QINDEX_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32(((uint32_t)vsi_id <<
					I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
					I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
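
	/*
	 * The rest mirrors i40e_fdir_filter_programming() above, but takes
	 * its action and behavior from the rte_flow-based
	 * i40e_fdir_filter_conf.
	 */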
	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	if (fdir_action->behavior == I40E_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR,
			"Failed to program FDIR filter: unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status <<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(
			((uint32_t)pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	txdp = &txq->tx_ring[txq->tx_tail + 1];
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
		rte_delay_us(1);
	}
	if (i >= I40E_FDIR_MAX_WAIT_US) {
		PMD_DRV_LOG(ERR,
			"Failed to program FDIR filter: time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
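
	/*
	 * Note: unlike the legacy path above, which polls
	 * i40e_check_fdir_programming_status() in a loop, this path sleeps
	 * once for the whole budget and checks the status a single time.
	 */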
	/* wait 10 ms in total before reading the programming status */
	rte_delay_us(I40E_FDIR_MAX_WAIT_US);
	if (i40e_check_fdir_programming_status(rxq) < 0) {
		PMD_DRV_LOG(ERR,
			"Failed to program FDIR filter: programming status reported.");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * i40e_fdir_flush - clear all filters of the Flow Director table
 * @dev: ethernet device whose Flow Director table is cleared
 */
int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t reg;
	uint16_t guarant_cnt, best_cnt;
	uint16_t i;

	I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
		rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
		reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	}
	if (i >= I40E_FDIR_FLUSH_RETRY) {
		PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
		return -ETIMEDOUT;
	}
	guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
				I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
				I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (guarant_cnt != 0 || best_cnt != 0) {
		PMD_DRV_LOG(ERR, "Failed to flush FD table.");
		return -ENOSYS;
	}

	PMD_DRV_LOG(INFO, "FD table Flush success.");
	return 0;
}
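
/*
 * Helpers that translate the driver's internal flex payload layout and
 * flex masks back into the rte_eth_fdir_info representation returned to
 * applications by i40e_fdir_info_get().
 */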
static inline void
i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
			struct rte_eth_flex_payload_cfg *flex_set,
			uint16_t *num)
{
	struct i40e_fdir_flex_pit *flex_pit;
	struct rte_eth_flex_payload_cfg *ptr = flex_set;
	uint16_t src, dst, size, j, k;
	uint8_t i, layer_idx;

	for (layer_idx = I40E_FLXPLD_L2_IDX;
	     layer_idx <= I40E_FLXPLD_L4_IDX;
	     layer_idx++) {
		if (layer_idx == I40E_FLXPLD_L2_IDX)
			ptr->type = RTE_ETH_L2_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L3_IDX)
			ptr->type = RTE_ETH_L3_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L4_IDX)
			ptr->type = RTE_ETH_L4_PAYLOAD;

		for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
			flex_pit = &pf->fdir.flex_set[layer_idx *
				I40E_MAX_FLXPLD_FIED + i];
			if (flex_pit->size == 0)
				continue;
			src = flex_pit->src_offset * sizeof(uint16_t);
			dst = flex_pit->dst_offset * sizeof(uint16_t);
			size = flex_pit->size * sizeof(uint16_t);
			for (j = src, k = dst; j < src + size; j++, k++)
				ptr->src_offset[k] = j;
		}
		(*num)++;
		ptr++;
	}
}
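
/*
 * For each supported pctype, expand the driver's word-level flex masks into
 * the byte-level mask array of rte_eth_fdir_flex_mask: words selected by
 * word_mask are marked fully compared (0xff) first, then the per-word
 * bitmasks clear the bits excluded from the match.
 */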
static inline void
i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
			struct rte_eth_fdir_flex_mask *flex_mask,
			uint16_t *num)
{
	struct i40e_fdir_flex_mask *mask;
	struct rte_eth_fdir_flex_mask *ptr = flex_mask;
	uint16_t flow_type;
	uint8_t i, j;
	uint16_t off_bytes, mask_tmp;

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
	     i++) {
		mask = &pf->fdir.flex_mask[i];
		flow_type = i40e_pctype_to_flowtype(pf->adapter,
						    (enum i40e_filter_pctype)i);
		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
			continue;

		for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
			if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
				ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
				ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
			} else {
				ptr->mask[j * sizeof(uint16_t)] = 0x0;
				ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
			}
		}
		for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
			off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
			mask_tmp = ~mask->bitmask[j].mask;
			ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
			ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
		}
		ptr->flow_type = flow_type;
		ptr++;
		(*num)++;
	}
}

/*
 * i40e_fdir_info_get - get information about the Flow Director
 * @dev: ethernet device to get info from
 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled
 *    with the flow director information.
 */
static void
i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t num_flex_set = 0;
	uint16_t num_flex_mask = 0;
	uint16_t i;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
		fdir->mode = RTE_FDIR_MODE_PERFECT;
	else
		fdir->mode = RTE_FDIR_MODE_NONE;

	fdir->guarant_spc =
		(uint32_t)hw->func_caps.fd_filters_guaranteed;
	fdir->best_spc =
		(uint32_t)hw->func_caps.fd_filters_best_effort;
	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
		fdir->flow_types_mask[i] = 0ULL;
	fdir->flex_payload_unit = sizeof(uint16_t);
	fdir->flex_bitmask_unit = sizeof(uint16_t);
	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
	fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
	fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;

	i40e_fdir_info_get_flex_set(pf,
				fdir->flex_conf.flex_set,
				&num_flex_set);
	i40e_fdir_info_get_flex_mask(pf,
				fdir->flex_conf.flex_mask,
				&num_flex_mask);

	fdir->flex_conf.nb_payloads = num_flex_set;
	fdir->flex_conf.nb_flexmasks = num_flex_mask;
}

/*
 * i40e_fdir_stats_get - get statistics of the Flow Director
 * @dev: ethernet device to get statistics from
 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled
 *    with the flow director statistics.
 */
static void
i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t fdstat;

	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	stat->guarant_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			    I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	stat->best_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			    I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
}
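
/*
 * i40e_fdir_filter_set - configure Flow Director filter settings; currently
 * only input set selection (RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT) is
 * supported.
 * @dev: ethernet device to be configured
 * @info: filter info, carrying the info type and the corresponding payload
 */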
static int
i40e_fdir_filter_set(struct rte_eth_dev *dev,
		     struct rte_eth_fdir_filter_info *info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if (!info) {
		PMD_DRV_LOG(ERR, "Invalid pointer");
		return -EFAULT;
	}

	switch (info->info_type) {
	case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
		ret = i40e_fdir_filter_inset_select(pf,
				&(info->info.input_set_conf));
		break;
	default:
		PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
			    info->info_type);
		return -EINVAL;
	}

	return ret;
}

/*
 * i40e_fdir_ctrl_func - handle all operations on the flow director.
 * @dev: ethernet device the operation applies to
 * @filter_op: operation to be performed
 * @arg: a pointer to the structure corresponding to the filter_op
 */
int
i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if ((pf->flags & I40E_FLAG_FDIR) == 0)
		return -ENOTSUP;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_FLUSH:
		ret = i40e_fdir_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
		break;
	case RTE_ETH_FILTER_SET:
		ret = i40e_fdir_filter_set(dev,
			(struct rte_eth_fdir_filter_info *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* Restore flow director filters */
void
i40e_fdir_filter_restore(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
	struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
	struct i40e_fdir_filter *f;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t fdstat;
	uint32_t guarant_cnt;  /**< Number of filters in guaranteed spaces. */
	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */

	TAILQ_FOREACH(f, fdir_list, rules)
		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);

	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	guarant_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);

	PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
		    guarant_cnt, best_cnt);
}