1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
23 #include <rte_hash_crc.h>
25 #include "i40e_logs.h"
26 #include "base/i40e_type.h"
27 #include "base/i40e_prototype.h"
28 #include "i40e_ethdev.h"
29 #include "i40e_rxtx.h"
31 #define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
33 #define IPV6_ADDR_LEN 16
37 #define IPPROTO_L2TP 115
40 #define I40E_FDIR_PKT_LEN 512
41 #define I40E_FDIR_IP_DEFAULT_LEN 420
42 #define I40E_FDIR_IP_DEFAULT_TTL 0x40
43 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
44 #define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
45 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
47 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
48 #define I40E_FDIR_IPv6_PAYLOAD_LEN 380
49 #define I40E_FDIR_UDP_DEFAULT_LEN 400
50 #define I40E_FDIR_GTP_DEFAULT_LEN 384
51 #define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
52 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
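/*
 * Default GTP header values used when building the FDIR programming packet.
 * 2123 and 2152 are the standard GTP-C and GTP-U UDP destination ports.
 */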
54 #define I40E_FDIR_GTPC_DST_PORT 2123
55 #define I40E_FDIR_GTPU_DST_PORT 2152
56 #define I40E_FDIR_GTP_VER_FLAG_0X30 0x30
57 #define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
58 #define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
59 #define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
61 /* Wait time for fdir filter programming */
62 #define I40E_FDIR_MAX_WAIT_US 10000
64 /* Wait count and interval for fdir filter flush */
65 #define I40E_FDIR_FLUSH_RETRY 50
66 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
68 #define I40E_COUNTER_PF 2
69 /* Statistic counter index for one pf */
70 #define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
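/*
 * Each PF owns a block of I40E_COUNTER_PF statistic counters; the FDIR match
 * counter of a PF is the first counter of its block.
 */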
72 #define I40E_FDIR_FLOWS ( \
73 (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
74 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
75 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
76 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
77 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
78 (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
79 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
80 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
81 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
82 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
83 (1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
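/* Bitmap of the flow types supported by the flow director */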
85 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
86 enum i40e_filter_pctype pctype,
87 const struct rte_eth_fdir_filter *filter,
89 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
90 struct i40e_fdir_filter *filter);
91 static struct i40e_fdir_filter *
92 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
93 const struct i40e_fdir_input *input);
94 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
95 struct i40e_fdir_filter *filter);
97 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
98 enum i40e_filter_pctype pctype,
99 const struct i40e_fdir_filter_conf *filter,
103 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
105 struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
106 struct i40e_hmc_obj_rxq rx_ctx;
107 int err = I40E_SUCCESS;
109 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
110 /* Init the RX queue in hardware */
111 rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
113 rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
114 rx_ctx.qlen = rxq->nb_rx_desc;
115 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
118 rx_ctx.dtype = i40e_header_split_none;
119 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
120 rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
121 rx_ctx.tphrdesc_ena = 1;
122 rx_ctx.tphwdesc_ena = 1;
123 rx_ctx.tphdata_ena = 1;
124 rx_ctx.tphhead_ena = 1;
125 rx_ctx.lrxqthresh = 2;
131 err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
132 if (err != I40E_SUCCESS) {
133 PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
136 err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
137 if (err != I40E_SUCCESS) {
138 PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
141 rxq->qrx_tail = hw->hw_addr +
142 I40E_QRX_TAIL(rxq->vsi->base_queue);
145 /* Init the RX tail register. */
146 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
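/*
 * Hardware uses this ring only to write back Flow Director programming
 * status descriptors; they are polled and consumed in
 * i40e_check_fdir_programming_status().
 */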
152 * i40e_fdir_setup - reserve and initialize the Flow Director resources
153 * @pf: board private structure
156 i40e_fdir_setup(struct i40e_pf *pf)
158 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
159 struct i40e_vsi *vsi;
160 int err = I40E_SUCCESS;
161 char z_name[RTE_MEMZONE_NAMESIZE];
162 const struct rte_memzone *mz = NULL;
163 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
165 if ((pf->flags & I40E_FLAG_FDIR) == 0) {
166 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
167 return I40E_NOT_SUPPORTED;
170 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
171 " num_filters_best_effort = %u.",
172 hw->func_caps.fd_filters_guaranteed,
173 hw->func_caps.fd_filters_best_effort);
175 vsi = pf->fdir.fdir_vsi;
177 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
180 /* make new FDIR VSI */
181 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
183 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
184 return I40E_ERR_NO_AVAILABLE_VSI;
186 pf->fdir.fdir_vsi = vsi;
188 /* Fdir tx queue setup */
189 err = i40e_fdir_setup_tx_resources(pf);
191 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
195 /* Fdir rx queue setup */
196 err = i40e_fdir_setup_rx_resources(pf);
198 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
202 err = i40e_tx_queue_init(pf->fdir.txq);
204 PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
208 /* need switch on before dev start */
209 err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
211 PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
215 /* Init the rx queue in hardware */
216 err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
218 PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
222 /* switch on rx queue */
223 err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
225 PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
229 /* reserve memory for the fdir programming packet */
230 snprintf(z_name, sizeof(z_name), "%s_%s_%d",
231 eth_dev->device->driver->name,
233 eth_dev->data->port_id);
234 mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
236 PMD_DRV_LOG(ERR, "Cannot init memzone for "
237 "flow director program packet.");
238 err = I40E_ERR_NO_MEMORY;
241 pf->fdir.prg_pkt = mz->addr;
242 pf->fdir.dma_addr = mz->iova;
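/*
 * prg_pkt is rebuilt for every filter operation and referenced by the data
 * descriptor that follows each programming descriptor.
 */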
244 pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
245 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
250 i40e_dev_rx_queue_release(pf->fdir.rxq);
253 i40e_dev_tx_queue_release(pf->fdir.txq);
256 i40e_vsi_release(vsi);
257 pf->fdir.fdir_vsi = NULL;
262 * i40e_fdir_teardown - release the Flow Director resources
263 * @pf: board private structure
266 i40e_fdir_teardown(struct i40e_pf *pf)
268 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
269 struct i40e_vsi *vsi;
271 vsi = pf->fdir.fdir_vsi;
274 int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
276 PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
277 err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
279 PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
280 i40e_dev_rx_queue_release(pf->fdir.rxq);
282 i40e_dev_tx_queue_release(pf->fdir.txq);
284 i40e_vsi_release(vsi);
285 pf->fdir.fdir_vsi = NULL;
288 /* check whether the flow director table is empty */
290 i40e_fdir_empty(struct i40e_hw *hw)
292 uint32_t guarant_cnt, best_cnt;
294 guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
295 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
296 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
297 best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
298 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
299 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
300 if (best_cnt + guarant_cnt > 0)
307 * Initialize the configuration of the bytes stream extracted as flexible payload
311 i40e_init_flx_pld(struct i40e_pf *pf)
313 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
319 * Define the bytes stream extracted as flexible payload in the
320 * field vector. By default, select 8 words from the beginning
321 * of the payload as flexible payload.
323 for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
324 index = i * I40E_MAX_FLXPLD_FIED;
325 pf->fdir.flex_set[index].src_offset = 0;
326 pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
327 pf->fdir.flex_set[index].dst_offset = 0;
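/*
 * Field 0 of each layer extracts 8 words starting at payload offset 0;
 * the remaining two fields of the layer are left unused.
 */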
328 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
330 I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/* unused */
332 I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/* unused */
335 /* initialize the masks */
336 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
337 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
338 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
340 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
342 pf->fdir.flex_mask[pctype].word_mask = 0;
343 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
344 for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
345 pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
346 pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
347 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
352 #define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
353 if ((flex_pit2).src_offset < \
354 (flex_pit1).src_offset + (flex_pit1).size) { \
355 PMD_DRV_LOG(ERR, "src_offset should be not" \
356 " less than than previous offset" \
357 " + previous FSIZE."); \
363 * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit
364 * structures; the flex_pit entries are sorted by their src_offset values
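 * Consecutive byte offsets in src_offset[] are coalesced into
 * (src_offset, size, dst_offset) entries; the number of entries is returned.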
366 static inline uint16_t
367 i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
368 struct i40e_fdir_flex_pit *flex_pit)
370 uint16_t src_tmp, size, num = 0;
371 uint16_t i, k, j = 0;
373 while (j < I40E_FDIR_MAX_FLEX_LEN) {
375 for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
376 if (src_offset[j + 1] == src_offset[j] + 1)
381 src_tmp = src_offset[j] + 1 - size;
382 /* the flex_pit entries need to be sorted by src_offset */
383 for (i = 0; i < num; i++) {
384 if (src_tmp < flex_pit[i].src_offset)
387 /* if insert required, move backward */
388 for (k = num; k > i; k--)
389 flex_pit[k] = flex_pit[k - 1];
391 flex_pit[i].dst_offset = j + 1 - size;
392 flex_pit[i].src_offset = src_tmp;
393 flex_pit[i].size = size;
400 /* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
402 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
404 struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
407 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
408 if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
409 PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
414 memset(flex_pit, 0, sizeof(flex_pit));
415 num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
416 if (num > I40E_MAX_FLXPLD_FIED) {
417 PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
420 for (i = 0; i < num; i++) {
421 if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
422 flex_pit[i].src_offset & 0x01) {
423 PMD_DRV_LOG(ERR, "flexpayload should be measured"
428 I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
434 * i40e_check_fdir_flex_conf - check if the flex payload and mask configuration
435 * arguments are valid
438 i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
439 const struct rte_eth_fdir_flex_conf *conf)
441 const struct rte_eth_flex_payload_cfg *flex_cfg;
442 const struct rte_eth_fdir_flex_mask *flex_mask;
447 enum i40e_filter_pctype pctype;
450 PMD_DRV_LOG(INFO, "NULL pointer.");
453 /* check flexible payload setting configuration */
454 if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
455 PMD_DRV_LOG(ERR, "invalid number of payload setting.");
458 for (i = 0; i < conf->nb_payloads; i++) {
459 flex_cfg = &conf->flex_set[i];
460 if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
461 PMD_DRV_LOG(ERR, "invalid payload type.");
464 ret = i40e_check_fdir_flex_payload(flex_cfg);
466 PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
471 /* check flex mask setting configuration */
472 if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
473 PMD_DRV_LOG(ERR, "invalid number of flex masks.");
476 for (i = 0; i < conf->nb_flexmasks; i++) {
477 flex_mask = &conf->flex_mask[i];
478 pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
479 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
480 PMD_DRV_LOG(WARNING, "invalid flow type.");
484 for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
485 mask_tmp = I40E_WORD(flex_mask->mask[j],
486 flex_mask->mask[j + 1]);
487 if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
489 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
490 PMD_DRV_LOG(ERR, " exceed maximal"
491 " number of bitmasks.");
501 * i40e_set_flx_pld_cfg - configure how the bytes stream is extracted as flexible payload
502 * @pf: board private structure
503 * @cfg: the rule describing how the bytes stream is extracted as flexible payload
506 i40e_set_flx_pld_cfg(struct i40e_pf *pf,
507 const struct rte_eth_flex_payload_cfg *cfg)
509 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
510 struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
511 uint32_t flx_pit, flx_ort;
512 uint16_t num, min_next_off; /* in words */
513 uint8_t field_idx = 0;
514 uint8_t layer_idx = 0;
517 if (cfg->type == RTE_ETH_L2_PAYLOAD)
518 layer_idx = I40E_FLXPLD_L2_IDX;
519 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
520 layer_idx = I40E_FLXPLD_L3_IDX;
521 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
522 layer_idx = I40E_FLXPLD_L4_IDX;
524 memset(flex_pit, 0, sizeof(flex_pit));
525 num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
529 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
530 (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
531 (layer_idx * I40E_MAX_FLXPLD_FIED);
532 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
535 for (i = 0; i < num; i++) {
536 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
537 /* record the info in fdir structure */
538 pf->fdir.flex_set[field_idx].src_offset =
539 flex_pit[i].src_offset / sizeof(uint16_t);
540 pf->fdir.flex_set[field_idx].size =
541 flex_pit[i].size / sizeof(uint16_t);
542 pf->fdir.flex_set[field_idx].dst_offset =
543 flex_pit[i].dst_offset / sizeof(uint16_t);
544 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
545 pf->fdir.flex_set[field_idx].size,
546 pf->fdir.flex_set[field_idx].dst_offset);
548 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
550 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
551 pf->fdir.flex_set[field_idx].size;
553 for (; i < I40E_MAX_FLXPLD_FIED; i++) {
554 /* set the unused registers, obeying the registers' constraints */
555 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
556 NONUSE_FLX_PIT_DEST_OFF);
558 I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
565 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
566 * @pf: board private structure
567 * @pctype: packet classify type
568 * @flex_masks: mask for flexible payload
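 *
 * Every flex word with a non-zero mask is recorded in word_mask; words with
 * a partial (non-0xFFFF) mask additionally get an inverted entry in
 * bitmask[], which is programmed into the per-pctype FD_MSK registers.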
571 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
572 enum i40e_filter_pctype pctype,
573 const struct rte_eth_fdir_flex_mask *mask_cfg)
575 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
576 struct i40e_fdir_flex_mask *flex_mask;
577 uint32_t flxinset, fd_mask;
579 uint8_t i, nb_bitmask = 0;
581 flex_mask = &pf->fdir.flex_mask[pctype];
582 memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
583 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
584 mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
585 if (mask_tmp != 0x0) {
586 flex_mask->word_mask |=
587 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
588 if (mask_tmp != UINT16_MAX) {
590 flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
591 flex_mask->bitmask[nb_bitmask].offset =
592 i / sizeof(uint16_t);
597 /* write mask to hw */
598 flxinset = (flex_mask->word_mask <<
599 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
600 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
601 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
603 for (i = 0; i < nb_bitmask; i++) {
604 fd_mask = (flex_mask->bitmask[i].mask <<
605 I40E_PRTQF_FD_MSK_MASK_SHIFT) &
606 I40E_PRTQF_FD_MSK_MASK_MASK;
607 fd_mask |= ((flex_mask->bitmask[i].offset +
608 I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
609 I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
610 I40E_PRTQF_FD_MSK_OFFSET_MASK;
611 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
616 * Enable/disable flow director RX processing in vector routines.
619 i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on)
623 for (i = 0; i < dev->data->nb_rx_queues; i++) {
624 struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
627 rxq->fdir_enabled = on;
629 PMD_DRV_LOG(DEBUG, "Flow Director processing on RX set to %d", on);
633 * Configure flow director related setting
636 i40e_fdir_configure(struct rte_eth_dev *dev)
638 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
639 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
640 struct rte_eth_fdir_flex_conf *conf;
641 enum i40e_filter_pctype pctype;
647 * configuration needs to be done before
648 * flow director filters are added.
649 * If filters exist, flush them.
651 if (i40e_fdir_empty(hw) < 0) {
652 ret = i40e_fdir_flush(dev);
654 PMD_DRV_LOG(ERR, "failed to flush fdir table.");
659 /* enable FDIR filter */
660 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
661 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
662 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
664 i40e_init_flx_pld(pf); /* set flex config to default value */
666 conf = &dev->data->dev_conf.fdir_conf.flex_conf;
667 ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
669 PMD_DRV_LOG(ERR, " invalid configuration arguments.");
673 if (!pf->support_multi_driver) {
674 /* configure flex payload */
675 for (i = 0; i < conf->nb_payloads; i++)
676 i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
677 /* configure flex mask*/
678 for (i = 0; i < conf->nb_flexmasks; i++) {
679 if (hw->mac.type == I40E_MAC_X722) {
680 /* get pctype value in fd pctype register */
681 pctype = (enum i40e_filter_pctype)
683 I40E_GLQF_FD_PCTYPES(
684 (int)i40e_flowtype_to_pctype(
686 conf->flex_mask[i].flow_type)));
688 pctype = i40e_flowtype_to_pctype(pf->adapter,
689 conf->flex_mask[i].flow_type);
692 i40e_set_flex_mask_on_pctype(pf, pctype,
693 &conf->flex_mask[i]);
696 PMD_DRV_LOG(ERR, "Not support flexible payload.");
699 /* Enable FDIR processing in RX routines */
700 i40e_fdir_rx_proc_enable(dev, 1);
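/*
 * i40e_fdir_fill_eth_ip_head - fill the Ethernet and IPv4/IPv6 headers of the
 * programming packet and return the number of bytes filled so far.
 */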
706 i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
707 unsigned char *raw_pkt,
710 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
711 uint16_t *ether_type;
712 uint8_t len = 2 * sizeof(struct rte_ether_addr);
713 struct rte_ipv4_hdr *ip;
714 struct rte_ipv6_hdr *ip6;
715 static const uint8_t next_proto[] = {
716 [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
717 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
718 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
719 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
720 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
721 [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
722 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
723 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
724 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
725 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
728 raw_pkt += 2 * sizeof(struct rte_ether_addr);
729 if (vlan && fdir_input->flow_ext.vlan_tci) {
730 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
731 rte_memcpy(raw_pkt + sizeof(uint16_t),
732 &fdir_input->flow_ext.vlan_tci,
734 raw_pkt += sizeof(vlan_frame);
735 len += sizeof(vlan_frame);
737 ether_type = (uint16_t *)raw_pkt;
738 raw_pkt += sizeof(uint16_t);
739 len += sizeof(uint16_t);
741 switch (fdir_input->flow_type) {
742 case RTE_ETH_FLOW_L2_PAYLOAD:
743 *ether_type = fdir_input->flow.l2_flow.ether_type;
745 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
746 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
747 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
748 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
749 case RTE_ETH_FLOW_FRAG_IPV4:
750 ip = (struct rte_ipv4_hdr *)raw_pkt;
752 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
753 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
754 /* set total length to the default value */
755 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
756 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
757 fdir_input->flow.ip4_flow.proto :
758 next_proto[fdir_input->flow_type];
759 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
760 fdir_input->flow.ip4_flow.ttl :
761 I40E_FDIR_IP_DEFAULT_TTL;
762 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
764 * The source and destination fields in the transmitted packet
765 * need to be presented in a reversed order with respect
766 * to the expected received packets.
768 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
769 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
770 len += sizeof(struct rte_ipv4_hdr);
772 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
773 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
774 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
775 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
776 case RTE_ETH_FLOW_FRAG_IPV6:
777 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
779 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
781 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
782 (fdir_input->flow.ipv6_flow.tc <<
783 I40E_FDIR_IPv6_TC_OFFSET));
785 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
786 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
787 fdir_input->flow.ipv6_flow.proto :
788 next_proto[fdir_input->flow_type];
789 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
790 fdir_input->flow.ipv6_flow.hop_limits :
791 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
793 * The source and destination fields in the transmitted packet
794 * need to be presented in a reversed order with respect
795 * to the expected received packets.
797 rte_memcpy(&(ip6->src_addr),
798 &(fdir_input->flow.ipv6_flow.dst_ip),
800 rte_memcpy(&(ip6->dst_addr),
801 &(fdir_input->flow.ipv6_flow.src_ip),
803 len += sizeof(struct rte_ipv6_hdr);
806 PMD_DRV_LOG(ERR, "unknown flow type %u.",
807 fdir_input->flow_type);
815 * i40e_fdir_construct_pkt - construct packet based on fields in input
816 * @pf: board private structure
817 * @fdir_input: input set of the flow director entry
818 * @raw_pkt: a packet to be constructed
821 i40e_fdir_construct_pkt(struct i40e_pf *pf,
822 const struct rte_eth_fdir_input *fdir_input,
823 unsigned char *raw_pkt)
825 unsigned char *payload, *ptr;
826 struct rte_udp_hdr *udp;
827 struct rte_tcp_hdr *tcp;
828 struct rte_sctp_hdr *sctp;
829 uint8_t size, dst = 0;
830 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
833 /* fill the ethernet and IP head */
834 len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
835 !!fdir_input->flow_ext.vlan_tci);
839 /* fill the L4 head */
840 switch (fdir_input->flow_type) {
841 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
842 udp = (struct rte_udp_hdr *)(raw_pkt + len);
843 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
845 * The source and destination fields in the transmitted packet
846 * need to be presented in a reversed order with respect
847 * to the expected received packets.
849 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
850 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
851 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
854 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
855 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
856 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
858 * The source and destination fields in the transmitted packet
859 * need to be presented in a reversed order with respect
860 * to the expected received packets.
862 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
863 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
864 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
867 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
868 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
869 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
871 * The source and destination fields in the transmitted packet
872 * need to be presented in a reversed order with respect
873 * to the expected received packets.
875 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
876 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
877 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
880 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
881 case RTE_ETH_FLOW_FRAG_IPV4:
882 payload = raw_pkt + len;
883 set_idx = I40E_FLXPLD_L3_IDX;
886 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
887 udp = (struct rte_udp_hdr *)(raw_pkt + len);
888 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
890 * The source and destination fields in the transmitted packet
891 * need to be presented in a reversed order with respect
892 * to the expected received packets.
894 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
895 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
896 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
899 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
900 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
901 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
903 * The source and destination fields in the transmitted packet
904 * need to be presented in a reversed order with respect
905 * to the expected received packets.
907 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
908 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
909 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
912 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
913 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
914 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
916 * The source and destination fields in the transmitted packet
917 * need to be presented in a reversed order with respect
918 * to the expected received packets.
920 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
921 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
922 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
925 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
926 case RTE_ETH_FLOW_FRAG_IPV6:
927 payload = raw_pkt + len;
928 set_idx = I40E_FLXPLD_L3_IDX;
930 case RTE_ETH_FLOW_L2_PAYLOAD:
931 payload = raw_pkt + len;
933 * ARP packet is a special case in which the payload
934 * starts after the whole ARP header
936 if (fdir_input->flow.l2_flow.ether_type ==
937 rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
938 payload += sizeof(struct rte_arp_hdr);
939 set_idx = I40E_FLXPLD_L2_IDX;
942 PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
946 /* fill the flexbytes to payload */
947 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
948 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
949 size = pf->fdir.flex_set[pit_idx].size;
952 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
954 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
956 &fdir_input->flow_ext.flexbytes[dst],
957 size * sizeof(uint16_t));
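/*
 * i40e_flow_fdir_find_customized_pctype - look up the customized pctype entry
 * (GTP-C/GTP-U/L2TPv3, etc.) matching a hardware pctype value.
 */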
963 static struct i40e_customized_pctype *
964 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
966 struct i40e_customized_pctype *cus_pctype;
967 enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
969 for (; i < I40E_CUSTOMIZED_MAX; i++) {
970 cus_pctype = &pf->customized_pctype[i];
971 if (pctype == cus_pctype->pctype)
978 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
979 const struct i40e_fdir_input *fdir_input,
980 unsigned char *raw_pkt,
983 struct i40e_customized_pctype *cus_pctype = NULL;
984 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
985 uint16_t *ether_type;
986 uint8_t len = 2 * sizeof(struct rte_ether_addr);
987 struct rte_ipv4_hdr *ip;
988 struct rte_ipv6_hdr *ip6;
989 uint8_t pctype = fdir_input->pctype;
990 bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
991 static const uint8_t next_proto[] = {
992 [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
993 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
994 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
995 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
996 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
997 [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
998 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
999 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
1000 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
1001 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
1004 raw_pkt += 2 * sizeof(struct rte_ether_addr);
1005 if (vlan && fdir_input->flow_ext.vlan_tci) {
1006 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
1007 rte_memcpy(raw_pkt + sizeof(uint16_t),
1008 &fdir_input->flow_ext.vlan_tci,
1010 raw_pkt += sizeof(vlan_frame);
1011 len += sizeof(vlan_frame);
1013 ether_type = (uint16_t *)raw_pkt;
1014 raw_pkt += sizeof(uint16_t);
1015 len += sizeof(uint16_t);
1017 if (is_customized_pctype) {
1018 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1020 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1021 fdir_input->pctype);
1026 if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
1027 *ether_type = fdir_input->flow.l2_flow.ether_type;
1028 else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
1029 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
1030 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
1031 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1032 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
1033 ((is_customized_pctype) &&
1034 ((cus_pctype->index == I40E_CUSTOMIZED_GTPC) ||
1035 (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) ||
1036 (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6) ||
1037 (cus_pctype->index == I40E_CUSTOMIZED_GTPU) ||
1038 (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3)))) {
1039 ip = (struct rte_ipv4_hdr *)raw_pkt;
1041 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1042 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1043 /* set total length to the default value */
1044 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
1045 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
1046 fdir_input->flow.ip4_flow.ttl :
1047 I40E_FDIR_IP_DEFAULT_TTL;
1048 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
1050 * The source and destination fields in the transmitted packet
1051 * need to be presented in a reversed order with respect
1052 * to the expected received packets.
1054 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
1055 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
1057 if (!is_customized_pctype)
1058 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
1059 fdir_input->flow.ip4_flow.proto :
1060 next_proto[fdir_input->pctype];
1061 else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1062 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1063 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1064 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
1065 ip->next_proto_id = IPPROTO_UDP;
1066 else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3)
1067 ip->next_proto_id = IPPROTO_L2TP;
1068 len += sizeof(struct rte_ipv4_hdr);
1069 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1070 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1071 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1072 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1073 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6 ||
1074 ((is_customized_pctype) &&
1075 (cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3))) {
1076 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
1078 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1080 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1081 (fdir_input->flow.ipv6_flow.tc <<
1082 I40E_FDIR_IPv6_TC_OFFSET));
1084 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1085 if (!is_customized_pctype)
1086 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
1087 fdir_input->flow.ipv6_flow.proto :
1088 next_proto[fdir_input->pctype];
1089 else if (cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3)
1090 ip6->proto = IPPROTO_L2TP;
1091 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
1092 fdir_input->flow.ipv6_flow.hop_limits :
1093 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1095 * The source and destination fields in the transmitted packet
1096 * need to be presented in a reversed order with respect
1097 * to the expected received packets.
1099 rte_memcpy(&ip6->src_addr,
1100 &fdir_input->flow.ipv6_flow.dst_ip,
1102 rte_memcpy(&ip6->dst_addr,
1103 &fdir_input->flow.ipv6_flow.src_ip,
1105 len += sizeof(struct rte_ipv6_hdr);
1107 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1108 fdir_input->pctype);
1116 * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
1117 * @pf: board private structure
1118 * @fdir_input: input set of the flow director entry
1119 * @raw_pkt: a packet to be constructed
1122 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
1123 const struct i40e_fdir_input *fdir_input,
1124 unsigned char *raw_pkt)
1126 unsigned char *payload = NULL;
1128 struct rte_udp_hdr *udp;
1129 struct rte_tcp_hdr *tcp;
1130 struct rte_sctp_hdr *sctp;
1131 struct rte_flow_item_gtp *gtp;
1132 struct rte_ipv4_hdr *gtp_ipv4;
1133 struct rte_ipv6_hdr *gtp_ipv6;
1134 struct rte_flow_item_l2tpv3oip *l2tpv3oip;
1135 uint8_t size, dst = 0;
1136 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
1138 uint8_t pctype = fdir_input->pctype;
1139 struct i40e_customized_pctype *cus_pctype;
1141 /* raw packet template - just copy the contents of the raw packet */
1142 if (fdir_input->flow_ext.pkt_template) {
1143 memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
1144 fdir_input->flow.raw_flow.length);
1148 /* fill the ethernet and IP head */
1149 len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
1150 !!fdir_input->flow_ext.vlan_tci);
1154 /* fill the L4 head */
1155 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
1156 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1157 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1159 * The source and destination fields in the transmitted packet
1160 * need to be presented in a reversed order with respect
1161 * to the expected received packets.
1163 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
1164 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
1165 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1166 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
1167 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1168 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1170 * The source and destination fields in the transmitted packet
1171 * need to be presented in a reversed order with respect
1172 * to the expected received packets.
1174 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
1175 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
1176 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1177 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
1178 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1179 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1181 * The source and destination fields in the transmitted packet
1182 * need to be presented in a reversed order with respect
1183 * to the expected received packets.
1185 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
1186 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
1187 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
1188 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1189 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1190 payload = raw_pkt + len;
1191 set_idx = I40E_FLXPLD_L3_IDX;
1192 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1193 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1194 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1196 * The source and destination fields in the transmitted packet
1197 * need to be presented in a reversed order with respect
1198 * to the expected received packets.
1200 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1201 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1202 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1203 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1204 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1205 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1207 * The source and destination fields in the transmitted packet
1208 * need to be presented in a reversed order with respect
1209 * to the expected received packets.
1211 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1212 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
1213 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
1214 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1215 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1216 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1218 * The source and destination fields in the transmitted packet
1219 * need to be presented in a reversed order with respect
1220 * to the expected received packets.
1222 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1223 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1224 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1225 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1226 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1227 payload = raw_pkt + len;
1228 set_idx = I40E_FLXPLD_L3_IDX;
1229 } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1230 payload = raw_pkt + len;
1232 * ARP packet is a special case in which the payload
1233 * starts after the whole ARP header
1235 if (fdir_input->flow.l2_flow.ether_type ==
1236 rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
1237 payload += sizeof(struct rte_arp_hdr);
1238 set_idx = I40E_FLXPLD_L2_IDX;
1239 } else if (fdir_input->flow_ext.customized_pctype) {
1240 /* If customized pctype is used */
1241 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1242 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1243 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1244 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1245 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1246 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1248 rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1250 gtp = (struct rte_flow_item_gtp *)
1251 ((unsigned char *)udp +
1252 sizeof(struct rte_udp_hdr));
1254 rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1255 gtp->teid = fdir_input->flow.gtp_flow.teid;
1256 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1258 /* GTP-C message type is not supported. */
1259 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1261 rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1262 gtp->v_pt_rsv_flags =
1263 I40E_FDIR_GTP_VER_FLAG_0X32;
1266 rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1267 gtp->v_pt_rsv_flags =
1268 I40E_FDIR_GTP_VER_FLAG_0X30;
1271 if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1272 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1273 gtp_ipv4 = (struct rte_ipv4_hdr *)
1274 ((unsigned char *)gtp +
1275 sizeof(struct rte_flow_item_gtp));
1276 gtp_ipv4->version_ihl =
1277 I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1278 gtp_ipv4->next_proto_id = IPPROTO_IP;
1279 gtp_ipv4->total_length =
1281 I40E_FDIR_INNER_IP_DEFAULT_LEN);
1282 payload = (unsigned char *)gtp_ipv4 +
1283 sizeof(struct rte_ipv4_hdr);
1284 } else if (cus_pctype->index ==
1285 I40E_CUSTOMIZED_GTPU_IPV6) {
1286 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1287 gtp_ipv6 = (struct rte_ipv6_hdr *)
1288 ((unsigned char *)gtp +
1289 sizeof(struct rte_flow_item_gtp));
1290 gtp_ipv6->vtc_flow =
1292 I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1293 (0 << I40E_FDIR_IPv6_TC_OFFSET));
1294 gtp_ipv6->proto = IPPROTO_NONE;
1295 gtp_ipv6->payload_len =
1297 I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1298 gtp_ipv6->hop_limits =
1299 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1300 payload = (unsigned char *)gtp_ipv6 +
1301 sizeof(struct rte_ipv6_hdr);
1303 payload = (unsigned char *)gtp +
1304 sizeof(struct rte_flow_item_gtp);
1305 } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3 ||
1306 cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3) {
1307 l2tpv3oip = (struct rte_flow_item_l2tpv3oip *)(raw_pkt
1310 if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3)
1311 l2tpv3oip->session_id =
1312 fdir_input->flow.ip4_l2tpv3oip_flow.session_id;
1314 l2tpv3oip->session_id =
1315 fdir_input->flow.ip6_l2tpv3oip_flow.session_id;
1316 payload = (unsigned char *)l2tpv3oip +
1317 sizeof(struct rte_flow_item_l2tpv3oip);
1320 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1321 fdir_input->pctype);
1325 /* fill the flexbytes to payload */
1326 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1327 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1328 size = pf->fdir.flex_set[pit_idx].size;
1331 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1333 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1334 (void)rte_memcpy(ptr,
1335 &fdir_input->flow_ext.flexbytes[dst],
1336 size * sizeof(uint16_t));
1342 /* Construct the tx flags */
1343 static inline uint64_t
1344 i40e_build_ctob(uint32_t td_cmd,
1349 return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1350 ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
1351 ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1352 ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1353 ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
1357 * check the programming status descriptor in the rx queue,
1358 * done after a Flow Director filter is programmed on
1362 i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
1364 volatile union i40e_rx_desc *rxdp;
1371 rxdp = &rxq->rx_ring[rxq->rx_tail];
1372 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1373 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1374 >> I40E_RXD_QW1_STATUS_SHIFT;
1376 if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1377 len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
1378 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1379 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1381 if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
1382 id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
1384 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
1385 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
1386 if (error == (0x1 <<
1387 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
1388 PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
1389 " (FD_ID %u): programming status"
1391 rxdp->wb.qword0.hi_dword.fd_id);
1393 } else if (error == (0x1 <<
1394 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
1395 PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
1396 " (FD_ID %u): programming status"
1398 rxdp->wb.qword0.hi_dword.fd_id);
1401 PMD_DRV_LOG(ERR, "invalid programming status"
1402 " reported, error = %u.", error);
1404 PMD_DRV_LOG(INFO, "unknown programming status"
1405 " reported, len = %d, id = %u.", len, id);
1406 rxdp->wb.qword1.status_error_len = 0;
1408 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
1410 if (rxq->rx_tail == 0)
1411 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1413 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
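/*
 * i40e_fdir_filter_convert - convert a filter configuration into the internal
 * SW representation. For raw packet templates the packet pointer is dropped
 * and a CRC hash of the packet contents is stored in raw_flow.length, which
 * then serves as the hash signature of the rule.
 */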
1420 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1421 struct i40e_fdir_filter *filter)
1423 rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
1424 if (input->input.flow_ext.pkt_template) {
1425 filter->fdir.input.flow.raw_flow.packet = NULL;
1426 filter->fdir.input.flow.raw_flow.length =
1427 rte_hash_crc(input->input.flow.raw_flow.packet,
1428 input->input.flow.raw_flow.length,
1429 input->input.flow.raw_flow.pctype);
1434 /* Check whether the flow director filter exists */
1435 static struct i40e_fdir_filter *
1436 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1437 const struct i40e_fdir_input *input)
1441 if (input->flow_ext.pkt_template)
1442 ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1443 (const void *)input,
1444 input->flow.raw_flow.length);
1446 ret = rte_hash_lookup(fdir_info->hash_table,
1447 (const void *)input);
1451 return fdir_info->hash_map[ret];
1454 /* Add a flow director filter into the SW list */
1456 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1458 struct i40e_fdir_info *fdir_info = &pf->fdir;
1461 if (filter->fdir.input.flow_ext.pkt_template)
1462 ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1463 &filter->fdir.input,
1464 filter->fdir.input.flow.raw_flow.length);
1466 ret = rte_hash_add_key(fdir_info->hash_table,
1467 &filter->fdir.input);
1470 "Failed to insert fdir filter to hash table %d!",
1474 fdir_info->hash_map[ret] = filter;
1476 TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
1481 /* Delete a flow director filter from the SW list */
1483 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1485 struct i40e_fdir_info *fdir_info = &pf->fdir;
1486 struct i40e_fdir_filter *filter;
1489 if (input->flow_ext.pkt_template)
1490 ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1492 input->flow.raw_flow.length);
1494 ret = rte_hash_del_key(fdir_info->hash_table, input);
1497 "Failed to delete fdir filter to hash table %d!",
1501 filter = fdir_info->hash_map[ret];
1502 fdir_info->hash_map[ret] = NULL;
1504 TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1511 * i40e_add_del_fdir_filter - add or remove a flow director filter.
1512 * @pf: board private structure
1513 * @filter: fdir filter entry
1514 * @add: 0 - delete, 1 - add
1517 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
1518 const struct rte_eth_fdir_filter *filter,
1521 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1522 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1523 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1524 enum i40e_filter_pctype pctype;
1527 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1528 PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
1529 " check the mode in fdir_conf.");
1533 pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
1534 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
1535 PMD_DRV_LOG(ERR, "invalid flow_type input.");
1538 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1539 PMD_DRV_LOG(ERR, "Invalid queue ID");
1542 if (filter->input.flow_ext.is_vf &&
1543 filter->input.flow_ext.dst_id >= pf->vf_num) {
1544 PMD_DRV_LOG(ERR, "Invalid VF ID");
1548 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1550 ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
1552 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1556 if (hw->mac.type == I40E_MAC_X722) {
1557 /* get translated pctype value in fd pctype register */
1558 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1559 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1562 ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
1564 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1573 * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1574 * @pf: board private structure
1575 * @filter: fdir filter entry
1576 * @add: 0 - delete, 1 - add
1579 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1580 const struct i40e_fdir_filter_conf *filter,
1583 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1584 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1585 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1586 enum i40e_filter_pctype pctype;
1587 struct i40e_fdir_info *fdir_info = &pf->fdir;
1588 struct i40e_fdir_filter *fdir_filter, *node;
1589 struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1592 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1593 PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
1597 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1598 PMD_DRV_LOG(ERR, "Invalid queue ID");
1601 if (filter->input.flow_ext.is_vf &&
1602 filter->input.flow_ext.dst_id >= pf->vf_num) {
1603 PMD_DRV_LOG(ERR, "Invalid VF ID");
1606 if (filter->input.flow_ext.pkt_template) {
1607 if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
1608 !filter->input.flow.raw_flow.packet) {
1609 PMD_DRV_LOG(ERR, "Invalid raw packet template"
1610 " flow filter parameters!");
1613 pctype = filter->input.flow.raw_flow.pctype;
1615 pctype = filter->input.pctype;
1618 /* Check if there is the filter in SW list */
1619 memset(&check_filter, 0, sizeof(check_filter));
1620 i40e_fdir_filter_convert(filter, &check_filter);
1621 node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
1624 "Conflict with existing flow director rules!");
1628 if (!add && !node) {
1630 "There's no corresponding flow firector filter!");
1634 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1636 ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1638 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1642 if (hw->mac.type == I40E_MAC_X722) {
1643 /* get translated pctype value in fd pctype register */
1644 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1645 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1648 ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
1650 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1656 fdir_filter = rte_zmalloc("fdir_filter",
1657 sizeof(*fdir_filter), 0);
1658 if (fdir_filter == NULL) {
1659 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
1663 rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
1664 ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
1666 rte_free(fdir_filter);
1668 ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1675 * i40e_fdir_filter_programming - Program a flow director filter rule.
1676 * This is done via a Flow Director programming descriptor followed by the
1677 * packet structure that contains the filter fields that need to match.
1678 * @pf: board private structure
1680 * @filter: fdir filter entry
1681 * @add: 0 - delete, 1 - add
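 *
 * Two descriptors are consumed per operation: the programming descriptor at
 * tx_tail and a data descriptor at tx_tail + 1 that points to the pre-built
 * dummy packet (pf->fdir.prg_pkt).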
1684 i40e_fdir_filter_programming(struct i40e_pf *pf,
1685 enum i40e_filter_pctype pctype,
1686 const struct rte_eth_fdir_filter *filter,
1689 struct i40e_tx_queue *txq = pf->fdir.txq;
1690 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1691 const struct rte_eth_fdir_action *fdir_action = &filter->action;
1692 volatile struct i40e_tx_desc *txdp;
1693 volatile struct i40e_filter_program_desc *fdirdp;
1698 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1699 fdirdp = (volatile struct i40e_filter_program_desc *)
1700 (&(txq->tx_ring[txq->tx_tail]));
1702 fdirdp->qindex_flex_ptype_vsi =
1703 rte_cpu_to_le_32((fdir_action->rx_queue <<
1704 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1705 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1707 fdirdp->qindex_flex_ptype_vsi |=
1708 rte_cpu_to_le_32((fdir_action->flex_off <<
1709 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1710 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1712 fdirdp->qindex_flex_ptype_vsi |=
1713 rte_cpu_to_le_32((pctype <<
1714 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1715 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1717 if (filter->input.flow_ext.is_vf)
1718 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1720 /* Use LAN VSI Id by default */
1721 vsi_id = pf->main_vsi->vsi_id;
1722 fdirdp->qindex_flex_ptype_vsi |=
1723 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1724 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1725 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1727 fdirdp->dtype_cmd_cntindex =
1728 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1731 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1732 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1733 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1735 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1736 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1737 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1739 if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
1740 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1741 else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
1742 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1743 else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
1744 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1746 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1747 " unsupported fdir behavior.");
1751 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1752 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1753 I40E_TXD_FLTR_QW1_DEST_MASK);
1755 fdirdp->dtype_cmd_cntindex |=
1756 rte_cpu_to_le_32((fdir_action->report_status<<
1757 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1758 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1760 fdirdp->dtype_cmd_cntindex |=
1761 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1762 fdirdp->dtype_cmd_cntindex |=
1764 ((uint32_t)pf->fdir.match_counter_index <<
1765 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1766 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1768 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1770 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1771 txdp = &(txq->tx_ring[txq->tx_tail + 1]);
1772 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1773 td_cmd = I40E_TX_DESC_CMD_EOP |
1774 I40E_TX_DESC_CMD_RS |
1775 I40E_TX_DESC_CMD_DUMMY;
1777 txdp->cmd_type_offset_bsz =
1778 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1780 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1781 if (txq->tx_tail >= txq->nb_tx_desc)
1783 /* Update the tx tail register */
1785 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1786 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1787 if ((txdp->cmd_type_offset_bsz &
1788 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1789 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1793 if (i >= I40E_FDIR_MAX_WAIT_US) {
1794 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1795 " time out to get DD on tx queue.");
1798 /* delay up to 10 ms in total while checking the programming status */
1799 for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
1800 if (i40e_check_fdir_programming_status(rxq) >= 0)
1805 "Failed to program FDIR filter: programming status reported.");
1810 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
1811 * This is done via a Flow Director programming descriptor followed by the
1812 * packet structure that contains the filter fields that need to match.
1813 * @pf: board private structure
1815 * @filter: fdir filter entry
1816 * @add: 0 - delete, 1 - add
1819 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
1820 enum i40e_filter_pctype pctype,
1821 const struct i40e_fdir_filter_conf *filter,
1824 struct i40e_tx_queue *txq = pf->fdir.txq;
1825 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1826 const struct i40e_fdir_action *fdir_action = &filter->action;
1827 volatile struct i40e_tx_desc *txdp;
1828 volatile struct i40e_filter_program_desc *fdirdp;
1833 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1834 fdirdp = (volatile struct i40e_filter_program_desc *)
1835 (&txq->tx_ring[txq->tx_tail]);
1837 fdirdp->qindex_flex_ptype_vsi =
1838 rte_cpu_to_le_32((fdir_action->rx_queue <<
1839 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1840 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1842 fdirdp->qindex_flex_ptype_vsi |=
1843 rte_cpu_to_le_32((fdir_action->flex_off <<
1844 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1845 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1847 fdirdp->qindex_flex_ptype_vsi |=
1848 rte_cpu_to_le_32((pctype <<
1849 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1850 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1852 if (filter->input.flow_ext.is_vf)
1853 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1855 /* Use LAN VSI Id by default */
1856 vsi_id = pf->main_vsi->vsi_id;
1857 fdirdp->qindex_flex_ptype_vsi |=
1858 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1859 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1860 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1862 fdirdp->dtype_cmd_cntindex =
1863 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1866 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1867 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1868 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1870 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1871 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1872 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1874 if (fdir_action->behavior == I40E_FDIR_REJECT)
1875 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1876 else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
1877 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1878 else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
1879 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1881 PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
1885 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1886 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1887 I40E_TXD_FLTR_QW1_DEST_MASK);
1889 fdirdp->dtype_cmd_cntindex |=
1890 rte_cpu_to_le_32((fdir_action->report_status <<
1891 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1892 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1894 fdirdp->dtype_cmd_cntindex |=
1895 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1896 fdirdp->dtype_cmd_cntindex |=
1898 ((uint32_t)pf->fdir.match_counter_index <<
1899 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1900 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1902 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1904 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1905 txdp = &txq->tx_ring[txq->tx_tail + 1];
1906 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1907 td_cmd = I40E_TX_DESC_CMD_EOP |
1908 I40E_TX_DESC_CMD_RS |
1909 I40E_TX_DESC_CMD_DUMMY;
1911 txdp->cmd_type_offset_bsz =
1912 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
		rte_delay_us(1);
	}
	if (i >= I40E_FDIR_MAX_WAIT_US) {
		PMD_DRV_LOG(ERR,
		    "Failed to program FDIR filter: time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
	/* wait an extra 10 ms before checking the programming status */
	rte_delay_us(I40E_FDIR_MAX_WAIT_US);
	if (i40e_check_fdir_programming_status(rxq) < 0) {
		PMD_DRV_LOG(ERR,
		    "Failed to program FDIR filter: programming status reported.");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * i40e_fdir_flush - clear all filters of Flow Director table
 * @dev: ethernet device whose Flow Director table is flushed
 */
int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t reg;
	uint16_t guarant_cnt, best_cnt;
	uint16_t i;

	I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	I40E_WRITE_FLUSH(hw);
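
	/*
	 * Hardware clears the CLEARFDTABLE bit once the table flush has
	 * completed; poll for that for up to
	 * I40E_FDIR_FLUSH_RETRY * I40E_FDIR_FLUSH_INTERVAL_MS milliseconds.
	 */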
	for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
		rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
		reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	}
	if (i >= I40E_FDIR_FLUSH_RETRY) {
		PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
		return -ETIMEDOUT;
	}
	guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
				I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
				I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (guarant_cnt != 0 || best_cnt != 0) {
		PMD_DRV_LOG(ERR, "Failed to flush FD table.");
		return -ENOSYS;
	}

	PMD_DRV_LOG(INFO, "FD table Flush success.");
	return 0;
}
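
/*
 * i40e_fdir_info_get_flex_set - fill in the configured flexible payload
 * layout for the L2/L3/L4 layers and report the number of entries written
 * through *num.
 */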
static inline void
i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
			struct rte_eth_flex_payload_cfg *flex_set,
			uint16_t *num)
{
	struct i40e_fdir_flex_pit *flex_pit;
	struct rte_eth_flex_payload_cfg *ptr = flex_set;
	uint16_t src, dst, size, j, k;
	uint8_t i, layer_idx;

	for (layer_idx = I40E_FLXPLD_L2_IDX;
	     layer_idx <= I40E_FLXPLD_L4_IDX;
	     layer_idx++) {
		if (layer_idx == I40E_FLXPLD_L2_IDX)
			ptr->type = RTE_ETH_L2_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L3_IDX)
			ptr->type = RTE_ETH_L3_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L4_IDX)
			ptr->type = RTE_ETH_L4_PAYLOAD;

		for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
			flex_pit = &pf->fdir.flex_set[layer_idx *
				I40E_MAX_FLXPLD_FIED + i];
			if (flex_pit->size == 0)
				continue;
			src = flex_pit->src_offset * sizeof(uint16_t);
			dst = flex_pit->dst_offset * sizeof(uint16_t);
			size = flex_pit->size * sizeof(uint16_t);
			for (j = src, k = dst; j < src + size; j++, k++)
				ptr->src_offset[k] = j;
		}
		(*num)++;
		ptr++;
	}
}
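
/*
 * i40e_fdir_info_get_flex_mask - fill in the configured flexible payload
 * masks per flow type and report the number of entries written through *num.
 */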
static inline void
i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
			struct rte_eth_fdir_flex_mask *flex_mask,
			uint16_t *num)
{
	struct i40e_fdir_flex_mask *mask;
	struct rte_eth_fdir_flex_mask *ptr = flex_mask;
	uint16_t flow_type;
	uint8_t i, j;
	uint16_t off_bytes, mask_tmp;

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
	     i++) {
		mask = &pf->fdir.flex_mask[i];
		flow_type = i40e_pctype_to_flowtype(pf->adapter,
						    (enum i40e_filter_pctype)i);
		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
			continue;

		for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
			if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
				ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
				ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
			} else {
				ptr->mask[j * sizeof(uint16_t)] = 0x0;
				ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
			}
		}
		for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
			off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
			mask_tmp = ~mask->bitmask[j].mask;
			ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
			ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
		}
		ptr->flow_type = flow_type;
		ptr++;
		(*num)++;
	}
}
/*
 * i40e_fdir_info_get - get information of Flow Director
 * @dev: ethernet device to get info from
 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled
 *	  with the flow director information.
 */
static void
i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t num_flex_set = 0;
	uint16_t num_flex_mask = 0;
	uint16_t i;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
		fdir->mode = RTE_FDIR_MODE_PERFECT;
	else
		fdir->mode = RTE_FDIR_MODE_NONE;

	fdir->guarant_spc =
		(uint32_t)hw->func_caps.fd_filters_guaranteed;
	fdir->best_spc =
		(uint32_t)hw->func_caps.fd_filters_best_effort;
	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
	for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
		fdir->flow_types_mask[i] = 0ULL;
	fdir->flex_payload_unit = sizeof(uint16_t);
	fdir->flex_bitmask_unit = sizeof(uint16_t);
	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
	fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
	fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
	i40e_fdir_info_get_flex_set(pf,
				fdir->flex_conf.flex_set,
				&num_flex_set);
	i40e_fdir_info_get_flex_mask(pf,
				fdir->flex_conf.flex_mask,
				&num_flex_mask);

	fdir->flex_conf.nb_payloads = num_flex_set;
	fdir->flex_conf.nb_flexmasks = num_flex_mask;
}
/*
 * i40e_fdir_stats_get - get statistics of Flow Director
 * @dev: ethernet device to get statistics from
 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled
 *	  with the flow director statistics.
 */
static void
i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t fdstat;

	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	stat->guarant_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	stat->best_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
}
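
/*
 * i40e_fdir_filter_set - configure Flow Director filter settings; currently
 * only input set selection (RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT) is handled.
 */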
static int
i40e_fdir_filter_set(struct rte_eth_dev *dev,
		     struct rte_eth_fdir_filter_info *info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if (!info) {
		PMD_DRV_LOG(ERR, "Invalid pointer");
		return -EFAULT;
	}

	switch (info->info_type) {
	case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
		ret = i40e_fdir_filter_inset_select(pf,
			&(info->info.input_set_conf));
		break;
	default:
		PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
			    info->info_type);
		return -EINVAL;
	}

	return ret;
}
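
/*
 * Illustrative sketch (not part of the driver): applications typically reach
 * the handler below through the legacy filter-ctrl API, for example:
 *
 *	struct rte_eth_fdir_stats stats;
 *	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *					  RTE_ETH_FILTER_STATS, &stats);
 *
 * The exact call site and error handling depend on the application.
 */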
/*
 * i40e_fdir_ctrl_func - deal with all operations on flow director.
 * @dev: ethernet device the operation applies to
 * @filter_op: operation to be taken
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if ((pf->flags & I40E_FLAG_FDIR) == 0)
		return -ENOTSUP;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_FLUSH:
		ret = i40e_fdir_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
		break;
	case RTE_ETH_FILTER_SET:
		ret = i40e_fdir_filter_set(dev,
			(struct rte_eth_fdir_filter_info *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Restore flow director filter */
void
i40e_fdir_filter_restore(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
	struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
	struct i40e_fdir_filter *f;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t fdstat;
	uint32_t guarant_cnt;  /**< Number of filters in guaranteed spaces. */
	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */

	TAILQ_FOREACH(f, fdir_list, rules)
		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);

	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	guarant_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);

	PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d,  Best count: %d",
		    guarant_cnt, best_cnt);
}