1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
23 #include <rte_hash_crc.h>
25 #include "i40e_logs.h"
26 #include "base/i40e_type.h"
27 #include "base/i40e_prototype.h"
28 #include "i40e_ethdev.h"
29 #include "i40e_rxtx.h"
31 #define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
33 #define IPV6_ADDR_LEN 16
36 #define I40E_FDIR_PKT_LEN 512
37 #define I40E_FDIR_IP_DEFAULT_LEN 420
38 #define I40E_FDIR_IP_DEFAULT_TTL 0x40
39 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
40 #define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
41 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
43 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
44 #define I40E_FDIR_IPv6_PAYLOAD_LEN 380
45 #define I40E_FDIR_UDP_DEFAULT_LEN 400
46 #define I40E_FDIR_GTP_DEFAULT_LEN 384
47 #define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
48 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
50 #define I40E_FDIR_GTPC_DST_PORT 2123
51 #define I40E_FDIR_GTPU_DST_PORT 2152
52 #define I40E_FDIR_GTP_VER_FLAG_0X30 0x30
53 #define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
54 #define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
55 #define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
57 /* Wait time for fdir filter programming */
58 #define I40E_FDIR_MAX_WAIT_US 10000
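/*
 * The DD-poll loops below iterate up to this many times; assuming the
 * elided per-iteration delay is 1 us, this bounds the wait at ~10 ms
 * (matching the "delay 10 ms" comments near the polls).
 */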
60 /* Wait count and interval for fdir filter flush */
61 #define I40E_FDIR_FLUSH_RETRY 50
62 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
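/* Worst case, the flush poll in i40e_fdir_flush() below waits
 * I40E_FDIR_FLUSH_RETRY * I40E_FDIR_FLUSH_INTERVAL_MS = 250 ms in total.
 */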
64 #define I40E_COUNTER_PF 2
65 /* Statistics counter index for one PF */
66 #define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
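/*
 * For illustration: with I40E_COUNTER_PF = 2 counters per PF, pf_id 0
 * maps to counter index 0, pf_id 1 to index 2, pf_id 3 to index 6.
 */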
68 #define I40E_FDIR_FLOWS ( \
69 (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
70 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
71 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
72 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
73 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
74 (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
75 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
76 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
77 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
78 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
79 (1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
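/*
 * Usage sketch: this mask is reported via fdir->flow_types_mask[0] in
 * i40e_fdir_info_get(), so an application can test support with e.g.
 * (I40E_FDIR_FLOWS & (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)).
 */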
81 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
82 enum i40e_filter_pctype pctype,
83 const struct rte_eth_fdir_filter *filter,
85 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
86 struct i40e_fdir_filter *filter);
87 static struct i40e_fdir_filter *
88 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
89 const struct i40e_fdir_input *input);
90 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
91 struct i40e_fdir_filter *filter);
93 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
94 enum i40e_filter_pctype pctype,
95 const struct i40e_fdir_filter_conf *filter,
99 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
101 struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
102 struct i40e_hmc_obj_rxq rx_ctx;
103 int err = I40E_SUCCESS;
105 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
106 /* Init the RX queue in hardware */
107 rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
109 rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
110 rx_ctx.qlen = rxq->nb_rx_desc;
111 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
114 rx_ctx.dtype = i40e_header_split_none;
115 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
116 rx_ctx.rxmax = ETHER_MAX_LEN;
117 rx_ctx.tphrdesc_ena = 1;
118 rx_ctx.tphwdesc_ena = 1;
119 rx_ctx.tphdata_ena = 1;
120 rx_ctx.tphhead_ena = 1;
121 rx_ctx.lrxqthresh = 2;
127 err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
128 if (err != I40E_SUCCESS) {
129 PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
132 err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
133 if (err != I40E_SUCCESS) {
134 PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
137 rxq->qrx_tail = hw->hw_addr +
138 I40E_QRX_TAIL(rxq->vsi->base_queue);
141 /* Init the RX tail register. */
142 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
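/*
 * Note (common ring convention, not confirmed here): initializing the
 * tail to nb_rx_desc - 1 hands all but one descriptor to the hardware;
 * this queue only carries programming status descriptors, polled in
 * i40e_check_fdir_programming_status().
 */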
148 * i40e_fdir_setup - reserve and initialize the Flow Director resources
149 * @pf: board private structure
152 i40e_fdir_setup(struct i40e_pf *pf)
154 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
155 struct i40e_vsi *vsi;
156 int err = I40E_SUCCESS;
157 char z_name[RTE_MEMZONE_NAMESIZE];
158 const struct rte_memzone *mz = NULL;
159 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
161 if ((pf->flags & I40E_FLAG_FDIR) == 0) {
162 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
163 return I40E_NOT_SUPPORTED;
166 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
167 " num_filters_best_effort = %u.",
168 hw->func_caps.fd_filters_guaranteed,
169 hw->func_caps.fd_filters_best_effort);
171 vsi = pf->fdir.fdir_vsi;
173 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
176 /* make new FDIR VSI */
177 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
179 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
180 return I40E_ERR_NO_AVAILABLE_VSI;
182 pf->fdir.fdir_vsi = vsi;
184 /* FDIR TX queue setup */
185 err = i40e_fdir_setup_tx_resources(pf);
187 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
191 /* FDIR RX queue setup */
192 err = i40e_fdir_setup_rx_resources(pf);
194 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
198 err = i40e_tx_queue_init(pf->fdir.txq);
200 PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
204 /* need to switch on TX queue before dev start */
205 err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
207 PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
211 /* Init the rx queue in hardware */
212 err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
214 PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
218 /* switch on rx queue */
219 err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
221 PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
225 /* reserve memory for the fdir programming packet */
226 snprintf(z_name, sizeof(z_name), "%s_%s_%d",
227 eth_dev->device->driver->name,
229 eth_dev->data->port_id);
230 mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
232 PMD_DRV_LOG(ERR, "Cannot init memzone for "
233 "flow director program packet.");
234 err = I40E_ERR_NO_MEMORY;
237 pf->fdir.prg_pkt = mz->addr;
238 pf->fdir.dma_addr = mz->iova;
240 pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
241 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
246 i40e_dev_rx_queue_release(pf->fdir.rxq);
249 i40e_dev_tx_queue_release(pf->fdir.txq);
252 i40e_vsi_release(vsi);
253 pf->fdir.fdir_vsi = NULL;
258 * i40e_fdir_teardown - release the Flow Director resources
259 * @pf: board private structure
262 i40e_fdir_teardown(struct i40e_pf *pf)
264 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
265 struct i40e_vsi *vsi;
267 vsi = pf->fdir.fdir_vsi;
270 int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
272 PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
273 err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
275 PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
276 i40e_dev_rx_queue_release(pf->fdir.rxq);
278 i40e_dev_tx_queue_release(pf->fdir.txq);
280 i40e_vsi_release(vsi);
281 pf->fdir.fdir_vsi = NULL;
284 /* check whether the flow director table is empty */
286 i40e_fdir_empty(struct i40e_hw *hw)
288 uint32_t guarant_cnt, best_cnt;
290 guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
291 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
292 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
293 best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
294 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
295 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
296 if (best_cnt + guarant_cnt > 0)
303 * Initialize the configuration for the byte stream extracted as flexible payload
307 i40e_init_flx_pld(struct i40e_pf *pf)
309 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
315 * Define the byte stream extracted as flexible payload in
316 * the field vector. By default, select 8 words from the beginning
317 * of the payload as flexible payload.
319 for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
320 index = i * I40E_MAX_FLXPLD_FIED;
321 pf->fdir.flex_set[index].src_offset = 0;
322 pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
323 pf->fdir.flex_set[index].dst_offset = 0;
324 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
326 I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29); /* not used */
328 I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A); /* not used */
331 /* initialize the masks */
332 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
333 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
334 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
336 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
338 pf->fdir.flex_mask[pctype].word_mask = 0;
339 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
340 for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
341 pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
342 pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
343 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
348 #define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
349 if ((flex_pit2).src_offset < \
350 (flex_pit1).src_offset + (flex_pit1).size) { \
351 PMD_DRV_LOG(ERR, "src_offset should be not" \
352 " less than than previous offset" \
353 " + previous FSIZE."); \
359 * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit
360 * structures, sorted by their src_offset values
362 static inline uint16_t
363 i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
364 struct i40e_fdir_flex_pit *flex_pit)
366 uint16_t src_tmp, size, num = 0;
367 uint16_t i, k, j = 0;
369 while (j < I40E_FDIR_MAX_FLEX_LEN) {
371 for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
372 if (src_offset[j + 1] == src_offset[j] + 1)
377 src_tmp = src_offset[j] + 1 - size;
378 /* the flex_pit entries need to be sorted by src_offset */
379 for (i = 0; i < num; i++) {
380 if (src_tmp < flex_pit[i].src_offset)
383 /* if insert required, move backward */
384 for (k = num; k > i; k--)
385 flex_pit[k] = flex_pit[k - 1];
387 flex_pit[i].dst_offset = j + 1 - size;
388 flex_pit[i].src_offset = src_tmp;
389 flex_pit[i].size = size;
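/*
 * Worked example (the run-length accounting lines are elided above and
 * assumed to count consecutive offsets): src_offset = {4, 5, 6, 20, 21}
 * collapses into two sorted flex_pit entries:
 *   {src_offset = 4,  size = 3, dst_offset = 0}
 *   {src_offset = 20, size = 2, dst_offset = 3}
 */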
396 /* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
398 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
400 struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
403 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
404 if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
405 PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
410 memset(flex_pit, 0, sizeof(flex_pit));
411 num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
412 if (num > I40E_MAX_FLXPLD_FIED) {
413 PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
416 for (i = 0; i < num; i++) {
417 if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
418 flex_pit[i].src_offset & 0x01) {
419 PMD_DRV_LOG(ERR, "flex payload should be measured"
424 I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
430 * i40e_check_fdir_flex_conf - check if the flex payload and mask configuration
431 * arguments are valid
434 i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
435 const struct rte_eth_fdir_flex_conf *conf)
437 const struct rte_eth_flex_payload_cfg *flex_cfg;
438 const struct rte_eth_fdir_flex_mask *flex_mask;
443 enum i40e_filter_pctype pctype;
446 PMD_DRV_LOG(INFO, "NULL pointer.");
449 /* check flexible payload setting configuration */
450 if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
451 PMD_DRV_LOG(ERR, "invalid number of payload setting.");
454 for (i = 0; i < conf->nb_payloads; i++) {
455 flex_cfg = &conf->flex_set[i];
456 if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
457 PMD_DRV_LOG(ERR, "invalid payload type.");
460 ret = i40e_check_fdir_flex_payload(flex_cfg);
462 PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
467 /* check flex mask setting configuration */
468 if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
469 PMD_DRV_LOG(ERR, "invalid number of flex masks.");
472 for (i = 0; i < conf->nb_flexmasks; i++) {
473 flex_mask = &conf->flex_mask[i];
474 pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
475 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
476 PMD_DRV_LOG(WARNING, "invalid flow type.");
480 for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
481 mask_tmp = I40E_WORD(flex_mask->mask[j],
482 flex_mask->mask[j + 1]);
483 if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
485 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
486 PMD_DRV_LOG(ERR, " exceed maximal"
487 " number of bitmasks.");
497 * i40e_set_flx_pld_cfg - configure the rule for how the byte stream is extracted as flexible payload
498 * @pf: board private structure
499 * @cfg: the rule for how the byte stream is extracted as flexible payload
502 i40e_set_flx_pld_cfg(struct i40e_pf *pf,
503 const struct rte_eth_flex_payload_cfg *cfg)
505 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
506 struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
507 uint32_t flx_pit, flx_ort;
508 uint16_t num, min_next_off; /* in words */
509 uint8_t field_idx = 0;
510 uint8_t layer_idx = 0;
513 if (cfg->type == RTE_ETH_L2_PAYLOAD)
514 layer_idx = I40E_FLXPLD_L2_IDX;
515 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
516 layer_idx = I40E_FLXPLD_L3_IDX;
517 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
518 layer_idx = I40E_FLXPLD_L4_IDX;
520 memset(flex_pit, 0, sizeof(flex_pit));
521 num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
525 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
526 (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
527 (layer_idx * I40E_MAX_FLXPLD_FIED);
528 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
531 for (i = 0; i < num; i++) {
532 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
533 /* record the info in fdir structure */
534 pf->fdir.flex_set[field_idx].src_offset =
535 flex_pit[i].src_offset / sizeof(uint16_t);
536 pf->fdir.flex_set[field_idx].size =
537 flex_pit[i].size / sizeof(uint16_t);
538 pf->fdir.flex_set[field_idx].dst_offset =
539 flex_pit[i].dst_offset / sizeof(uint16_t);
540 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
541 pf->fdir.flex_set[field_idx].size,
542 pf->fdir.flex_set[field_idx].dst_offset);
544 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
546 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
547 pf->fdir.flex_set[field_idx].size;
549 for (; i < I40E_MAX_FLXPLD_FIED; i++) {
550 /* set the unused registers, obeying the register constraints */
551 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
552 NONUSE_FLX_PIT_DEST_OFF);
554 I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
561 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
562 * @pf: board private structure
563 * @pctype: packet classify type
564 * @flex_masks: mask for flexible payload
567 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
568 enum i40e_filter_pctype pctype,
569 const struct rte_eth_fdir_flex_mask *mask_cfg)
571 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
572 struct i40e_fdir_flex_mask *flex_mask;
573 uint32_t flxinset, fd_mask;
575 uint8_t i, nb_bitmask = 0;
577 flex_mask = &pf->fdir.flex_mask[pctype];
578 memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
579 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
580 mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
581 if (mask_tmp != 0x0) {
582 flex_mask->word_mask |=
583 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
584 if (mask_tmp != UINT16_MAX) {
586 flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
587 flex_mask->bitmask[nb_bitmask].offset =
588 i / sizeof(uint16_t);
593 /* write mask to hw */
594 flxinset = (flex_mask->word_mask <<
595 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
596 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
597 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
599 for (i = 0; i < nb_bitmask; i++) {
600 fd_mask = (flex_mask->bitmask[i].mask <<
601 I40E_PRTQF_FD_MSK_MASK_SHIFT) &
602 I40E_PRTQF_FD_MSK_MASK_MASK;
603 fd_mask |= ((flex_mask->bitmask[i].offset +
604 I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
605 I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
606 I40E_PRTQF_FD_MSK_OFFSET_MASK;
607 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
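/*
 * Worked example: a 16-bit mask of 0x00FF on flex word 3 sets bit 3 of
 * word_mask, and because the mask is partial it also consumes one
 * FD_MSK slot with offset = 3 (plus I40E_FLX_OFFSET_IN_FIELD_VECTOR in
 * the register) and mask = ~0x00FF = 0xFF00, i.e. stored inverted.
 */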
612 * Configure flow director related setting
615 i40e_fdir_configure(struct rte_eth_dev *dev)
617 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
618 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
619 struct rte_eth_fdir_flex_conf *conf;
620 enum i40e_filter_pctype pctype;
626 * configuration needs to be done before
627 * flow director filters are added.
628 * If filters exist, flush them.
630 if (i40e_fdir_empty(hw) < 0) {
631 ret = i40e_fdir_flush(dev);
633 PMD_DRV_LOG(ERR, "failed to flush fdir table.");
638 /* enable FDIR filter */
639 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
640 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
641 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
643 i40e_init_flx_pld(pf); /* set flex config to default value */
645 conf = &dev->data->dev_conf.fdir_conf.flex_conf;
646 ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
648 PMD_DRV_LOG(ERR, " invalid configuration arguments.");
652 if (!pf->support_multi_driver) {
653 /* configure flex payload */
654 for (i = 0; i < conf->nb_payloads; i++)
655 i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
656 /* configure flex mask */
657 for (i = 0; i < conf->nb_flexmasks; i++) {
658 if (hw->mac.type == I40E_MAC_X722) {
659 /* get pctype value in fd pctype register */
660 pctype = (enum i40e_filter_pctype)
662 I40E_GLQF_FD_PCTYPES(
663 (int)i40e_flowtype_to_pctype(
665 conf->flex_mask[i].flow_type)));
667 pctype = i40e_flowtype_to_pctype(pf->adapter,
668 conf->flex_mask[i].flow_type);
671 i40e_set_flex_mask_on_pctype(pf, pctype,
672 &conf->flex_mask[i]);
675 PMD_DRV_LOG(ERR, "Flexible payload is not supported.");
682 i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
683 unsigned char *raw_pkt,
686 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
687 uint16_t *ether_type;
688 uint8_t len = 2 * sizeof(struct ether_addr);
690 struct ipv6_hdr *ip6;
691 static const uint8_t next_proto[] = {
692 [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
693 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
694 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
695 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
696 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
697 [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
698 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
699 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
700 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
701 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
704 raw_pkt += 2 * sizeof(struct ether_addr);
705 if (vlan && fdir_input->flow_ext.vlan_tci) {
706 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
707 rte_memcpy(raw_pkt + sizeof(uint16_t),
708 &fdir_input->flow_ext.vlan_tci,
710 raw_pkt += sizeof(vlan_frame);
711 len += sizeof(vlan_frame);
713 ether_type = (uint16_t *)raw_pkt;
714 raw_pkt += sizeof(uint16_t);
715 len += sizeof(uint16_t);
717 switch (fdir_input->flow_type) {
718 case RTE_ETH_FLOW_L2_PAYLOAD:
719 *ether_type = fdir_input->flow.l2_flow.ether_type;
721 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
722 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
723 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
724 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
725 case RTE_ETH_FLOW_FRAG_IPV4:
726 ip = (struct ipv4_hdr *)raw_pkt;
728 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
729 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
730 /* set to default length */
731 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
732 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
733 fdir_input->flow.ip4_flow.proto :
734 next_proto[fdir_input->flow_type];
735 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
736 fdir_input->flow.ip4_flow.ttl :
737 I40E_FDIR_IP_DEFAULT_TTL;
738 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
740 * The source and destination fields in the transmitted packet
741 * need to be presented in a reversed order with respect
742 * to the expected received packets.
744 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
745 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
746 len += sizeof(struct ipv4_hdr);
748 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
749 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
750 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
751 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
752 case RTE_ETH_FLOW_FRAG_IPV6:
753 ip6 = (struct ipv6_hdr *)raw_pkt;
755 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
757 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
758 (fdir_input->flow.ipv6_flow.tc <<
759 I40E_FDIR_IPv6_TC_OFFSET));
761 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
762 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
763 fdir_input->flow.ipv6_flow.proto :
764 next_proto[fdir_input->flow_type];
765 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
766 fdir_input->flow.ipv6_flow.hop_limits :
767 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
769 * The source and destination fields in the transmitted packet
770 * need to be presented in a reversed order with respect
771 * to the expected received packets.
773 rte_memcpy(&(ip6->src_addr),
774 &(fdir_input->flow.ipv6_flow.dst_ip),
776 rte_memcpy(&(ip6->dst_addr),
777 &(fdir_input->flow.ipv6_flow.src_ip),
779 len += sizeof(struct ipv6_hdr);
782 PMD_DRV_LOG(ERR, "unknown flow type %u.",
783 fdir_input->flow_type);
791 * i40e_fdir_construct_pkt - construct packet based on fields in input
792 * @pf: board private structure
793 * @fdir_input: input set of the flow director entry
794 * @raw_pkt: a packet to be constructed
797 i40e_fdir_construct_pkt(struct i40e_pf *pf,
798 const struct rte_eth_fdir_input *fdir_input,
799 unsigned char *raw_pkt)
801 unsigned char *payload, *ptr;
804 struct sctp_hdr *sctp;
805 uint8_t size, dst = 0;
806 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
809 /* fill the ethernet and IP head */
810 len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
811 !!fdir_input->flow_ext.vlan_tci);
815 /* fill the L4 head */
816 switch (fdir_input->flow_type) {
817 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
818 udp = (struct udp_hdr *)(raw_pkt + len);
819 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
821 * The source and destination fields in the transmitted packet
822 * need to be presented in a reversed order with respect
823 * to the expected received packets.
825 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
826 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
827 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
830 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
831 tcp = (struct tcp_hdr *)(raw_pkt + len);
832 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
834 * The source and destination fields in the transmitted packet
835 * need to be presented in a reversed order with respect
836 * to the expected received packets.
838 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
839 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
840 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
843 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
844 sctp = (struct sctp_hdr *)(raw_pkt + len);
845 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
847 * The source and destination fields in the transmitted packet
848 * need to be presented in a reversed order with respect
849 * to the expected received packets.
851 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
852 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
853 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
856 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
857 case RTE_ETH_FLOW_FRAG_IPV4:
858 payload = raw_pkt + len;
859 set_idx = I40E_FLXPLD_L3_IDX;
862 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
863 udp = (struct udp_hdr *)(raw_pkt + len);
864 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
866 * The source and destination fields in the transmitted packet
867 * need to be presented in a reversed order with respect
868 * to the expected received packets.
870 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
871 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
872 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
875 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
876 tcp = (struct tcp_hdr *)(raw_pkt + len);
877 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
879 * The source and destination fields in the transmitted packet
880 * need to be presented in a reversed order with respect
881 * to the expected received packets.
883 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
884 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
885 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
888 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
889 sctp = (struct sctp_hdr *)(raw_pkt + len);
890 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
892 * The source and destination fields in the transmitted packet
893 * need to be presented in a reversed order with respect
894 * to the expected received packets.
896 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
897 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
898 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
901 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
902 case RTE_ETH_FLOW_FRAG_IPV6:
903 payload = raw_pkt + len;
904 set_idx = I40E_FLXPLD_L3_IDX;
906 case RTE_ETH_FLOW_L2_PAYLOAD:
907 payload = raw_pkt + len;
909 * ARP packets are a special case, in which the payload
910 * starts after the whole ARP header
912 if (fdir_input->flow.l2_flow.ether_type ==
913 rte_cpu_to_be_16(ETHER_TYPE_ARP))
914 payload += sizeof(struct arp_hdr);
915 set_idx = I40E_FLXPLD_L2_IDX;
918 PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
922 /* fill the flexbytes to payload */
923 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
924 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
925 size = pf->fdir.flex_set[pit_idx].size;
928 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
930 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
932 &fdir_input->flow_ext.flexbytes[dst],
933 size * sizeof(uint16_t));
939 static struct i40e_customized_pctype *
940 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
942 struct i40e_customized_pctype *cus_pctype;
943 enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
945 for (; i < I40E_CUSTOMIZED_MAX; i++) {
946 cus_pctype = &pf->customized_pctype[i];
947 if (pctype == cus_pctype->pctype)
954 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
955 const struct i40e_fdir_input *fdir_input,
956 unsigned char *raw_pkt,
959 struct i40e_customized_pctype *cus_pctype = NULL;
960 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
961 uint16_t *ether_type;
962 uint8_t len = 2 * sizeof(struct ether_addr);
964 struct ipv6_hdr *ip6;
965 uint8_t pctype = fdir_input->pctype;
966 bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
967 static const uint8_t next_proto[] = {
968 [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
969 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
970 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
971 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
972 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
973 [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
974 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
975 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
976 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
977 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
980 raw_pkt += 2 * sizeof(struct ether_addr);
981 if (vlan && fdir_input->flow_ext.vlan_tci) {
982 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
983 rte_memcpy(raw_pkt + sizeof(uint16_t),
984 &fdir_input->flow_ext.vlan_tci,
986 raw_pkt += sizeof(vlan_frame);
987 len += sizeof(vlan_frame);
989 ether_type = (uint16_t *)raw_pkt;
990 raw_pkt += sizeof(uint16_t);
991 len += sizeof(uint16_t);
993 if (is_customized_pctype) {
994 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
996 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1002 if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
1003 *ether_type = fdir_input->flow.l2_flow.ether_type;
1004 else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
1005 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
1006 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
1007 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1008 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
1009 is_customized_pctype) {
1010 ip = (struct ipv4_hdr *)raw_pkt;
1012 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
1013 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1014 /* set to default length */
1015 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
1016 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
1017 fdir_input->flow.ip4_flow.ttl :
1018 I40E_FDIR_IP_DEFAULT_TTL;
1019 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
1021 * The source and destination fields in the transmitted packet
1022 * need to be presented in a reversed order with respect
1023 * to the expected received packets.
1025 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
1026 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
1028 if (!is_customized_pctype)
1029 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
1030 fdir_input->flow.ip4_flow.proto :
1031 next_proto[fdir_input->pctype];
1032 else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1033 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1034 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1035 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
1036 ip->next_proto_id = IPPROTO_UDP;
1037 len += sizeof(struct ipv4_hdr);
1038 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1039 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1040 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1041 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1042 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1043 ip6 = (struct ipv6_hdr *)raw_pkt;
1045 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
1047 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1048 (fdir_input->flow.ipv6_flow.tc <<
1049 I40E_FDIR_IPv6_TC_OFFSET));
1051 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1052 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
1053 fdir_input->flow.ipv6_flow.proto :
1054 next_proto[fdir_input->pctype];
1055 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
1056 fdir_input->flow.ipv6_flow.hop_limits :
1057 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1059 * The source and destination fields in the transmitted packet
1060 * need to be presented in a reversed order with respect
1061 * to the expected received packets.
1063 rte_memcpy(&ip6->src_addr,
1064 &fdir_input->flow.ipv6_flow.dst_ip,
1066 rte_memcpy(&ip6->dst_addr,
1067 &fdir_input->flow.ipv6_flow.src_ip,
1069 len += sizeof(struct ipv6_hdr);
1071 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1072 fdir_input->pctype);
1080 * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
1081 * @pf: board private structure
1082 * @fdir_input: input set of the flow director entry
1083 * @raw_pkt: a packet to be constructed
1086 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
1087 const struct i40e_fdir_input *fdir_input,
1088 unsigned char *raw_pkt)
1090 unsigned char *payload = NULL;
1092 struct udp_hdr *udp;
1093 struct tcp_hdr *tcp;
1094 struct sctp_hdr *sctp;
1095 struct rte_flow_item_gtp *gtp;
1096 struct ipv4_hdr *gtp_ipv4;
1097 struct ipv6_hdr *gtp_ipv6;
1098 uint8_t size, dst = 0;
1099 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
1101 uint8_t pctype = fdir_input->pctype;
1102 struct i40e_customized_pctype *cus_pctype;
1104 /* raw packet template - just copy contents of the raw packet */
1105 if (fdir_input->flow_ext.pkt_template) {
1106 memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
1107 fdir_input->flow.raw_flow.length);
1111 /* fill the ethernet and IP head */
1112 len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
1113 !!fdir_input->flow_ext.vlan_tci);
1117 /* fill the L4 head */
1118 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
1119 udp = (struct udp_hdr *)(raw_pkt + len);
1120 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
1122 * The source and destination fields in the transmitted packet
1123 * need to be presented in a reversed order with respect
1124 * to the expected received packets.
1126 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
1127 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
1128 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1129 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
1130 tcp = (struct tcp_hdr *)(raw_pkt + len);
1131 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
1133 * The source and destination fields in the transmitted packet
1134 * need to be presented in a reversed order with respect
1135 * to the expected received packets.
1137 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
1138 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
1139 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1140 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
1141 sctp = (struct sctp_hdr *)(raw_pkt + len);
1142 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
1144 * The source and destination fields in the transmitted packet
1145 * need to be presented in a reversed order with respect
1146 * to the expected received packets.
1148 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
1149 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
1150 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
1151 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1152 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1153 payload = raw_pkt + len;
1154 set_idx = I40E_FLXPLD_L3_IDX;
1155 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1156 udp = (struct udp_hdr *)(raw_pkt + len);
1157 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
1159 * The source and destination fields in the transmitted packet
1160 * need to be presented in a reversed order with respect
1161 * to the expected received packets.
1163 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1164 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1165 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1166 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1167 tcp = (struct tcp_hdr *)(raw_pkt + len);
1168 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
1170 * The source and destination fields in the transmitted packet
1171 * need to be presented in a reversed order with respect
1172 * to the expected received packets.
1174 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1175 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
1176 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
1177 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1178 sctp = (struct sctp_hdr *)(raw_pkt + len);
1179 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
1181 * The source and destination fields in the transmitted packet
1182 * need to be presented in a reversed order with respect
1183 * to the expected received packets.
1185 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1186 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1187 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1188 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1189 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1190 payload = raw_pkt + len;
1191 set_idx = I40E_FLXPLD_L3_IDX;
1192 } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1193 payload = raw_pkt + len;
1195 * ARP packets are a special case, in which the payload
1196 * starts after the whole ARP header
1198 if (fdir_input->flow.l2_flow.ether_type ==
1199 rte_cpu_to_be_16(ETHER_TYPE_ARP))
1200 payload += sizeof(struct arp_hdr);
1201 set_idx = I40E_FLXPLD_L2_IDX;
1202 } else if (fdir_input->flow_ext.customized_pctype) {
1203 /* If customized pctype is used */
1204 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1205 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1206 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1207 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1208 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1209 udp = (struct udp_hdr *)(raw_pkt + len);
1211 rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1213 gtp = (struct rte_flow_item_gtp *)
1214 ((unsigned char *)udp + sizeof(struct udp_hdr));
1216 rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1217 gtp->teid = fdir_input->flow.gtp_flow.teid;
1218 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1220 /* GTP-C message type is not supported. */
1221 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1223 rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1224 gtp->v_pt_rsv_flags =
1225 I40E_FDIR_GTP_VER_FLAG_0X32;
1228 rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1229 gtp->v_pt_rsv_flags =
1230 I40E_FDIR_GTP_VER_FLAG_0X30;
1233 if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1234 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1235 gtp_ipv4 = (struct ipv4_hdr *)
1236 ((unsigned char *)gtp +
1237 sizeof(struct rte_flow_item_gtp));
1238 gtp_ipv4->version_ihl =
1239 I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1240 gtp_ipv4->next_proto_id = IPPROTO_IP;
1241 gtp_ipv4->total_length =
1243 I40E_FDIR_INNER_IP_DEFAULT_LEN);
1244 payload = (unsigned char *)gtp_ipv4 +
1245 sizeof(struct ipv4_hdr);
1246 } else if (cus_pctype->index ==
1247 I40E_CUSTOMIZED_GTPU_IPV6) {
1248 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1249 gtp_ipv6 = (struct ipv6_hdr *)
1250 ((unsigned char *)gtp +
1251 sizeof(struct rte_flow_item_gtp));
1252 gtp_ipv6->vtc_flow =
1254 I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1255 (0 << I40E_FDIR_IPv6_TC_OFFSET));
1256 gtp_ipv6->proto = IPPROTO_NONE;
1257 gtp_ipv6->payload_len =
1259 I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1260 gtp_ipv6->hop_limits =
1261 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1262 payload = (unsigned char *)gtp_ipv6 +
1263 sizeof(struct ipv6_hdr);
1265 payload = (unsigned char *)gtp +
1266 sizeof(struct rte_flow_item_gtp);
1269 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1270 fdir_input->pctype);
1274 /* fill the flexbytes to payload */
1275 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1276 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1277 size = pf->fdir.flex_set[pit_idx].size;
1280 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1282 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1283 (void)rte_memcpy(ptr,
1284 &fdir_input->flow_ext.flexbytes[dst],
1285 size * sizeof(uint16_t));
1291 /* Construct the tx flags */
1292 static inline uint64_t
1293 i40e_build_ctob(uint32_t td_cmd,
1298 return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1299 ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
1300 ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1301 ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1302 ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
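/*
 * For illustration, the dummy-packet descriptor written by the
 * programming functions below is
 *   i40e_build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS |
 *                   I40E_TX_DESC_CMD_DUMMY, 0, I40E_FDIR_PKT_LEN, 0);
 * packing DTYPE_DATA, the command bits, a zero offset, the buffer size
 * and a zero L2 tag into one little-endian quad word.
 */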
1306 * Check the programming status descriptor in the rx queue.
1307 * Done after a Flow Director filter has been programmed on
1311 i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
1313 volatile union i40e_rx_desc *rxdp;
1320 rxdp = &rxq->rx_ring[rxq->rx_tail];
1321 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1322 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1323 >> I40E_RXD_QW1_STATUS_SHIFT;
1325 if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1326 len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
1327 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1328 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1330 if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
1331 id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
1333 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
1334 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
1335 if (error == (0x1 <<
1336 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
1337 PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
1338 " (FD_ID %u): programming status"
1340 rxdp->wb.qword0.hi_dword.fd_id);
1342 } else if (error == (0x1 <<
1343 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
1344 PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
1345 " (FD_ID %u): programming status"
1347 rxdp->wb.qword0.hi_dword.fd_id);
1350 PMD_DRV_LOG(ERR, "invalid programming status"
1351 " reported, error = %u.", error);
1353 PMD_DRV_LOG(INFO, "unknown programming status"
1354 " reported, len = %d, id = %u.", len, id);
1355 rxdp->wb.qword1.status_error_len = 0;
1357 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
1359 if (rxq->rx_tail == 0)
1360 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1362 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
1369 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1370 struct i40e_fdir_filter *filter)
1372 rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
1373 if (input->input.flow_ext.pkt_template) {
1374 filter->fdir.input.flow.raw_flow.packet = NULL;
1375 filter->fdir.input.flow.raw_flow.length =
1376 rte_hash_crc(input->input.flow.raw_flow.packet,
1377 input->input.flow.raw_flow.length,
1378 input->input.flow.raw_flow.pctype);
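/*
 * For raw packet templates the packet pointer itself cannot be hashed,
 * so a CRC of the packet contents is stored in raw_flow.length and is
 * later passed as the precomputed signature to
 * rte_hash_lookup_with_hash()/rte_hash_add_key_with_hash().
 */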
1383 /* Check if the flow director filter exists */
1384 static struct i40e_fdir_filter *
1385 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1386 const struct i40e_fdir_input *input)
1390 if (input->flow_ext.pkt_template)
1391 ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1392 (const void *)input,
1393 input->flow.raw_flow.length);
1395 ret = rte_hash_lookup(fdir_info->hash_table,
1396 (const void *)input);
1400 return fdir_info->hash_map[ret];
1403 /* Add a flow director filter into the SW list */
1405 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1407 struct i40e_fdir_info *fdir_info = &pf->fdir;
1410 if (filter->fdir.input.flow_ext.pkt_template)
1411 ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1412 &filter->fdir.input,
1413 filter->fdir.input.flow.raw_flow.length);
1415 ret = rte_hash_add_key(fdir_info->hash_table,
1416 &filter->fdir.input);
1419 "Failed to insert fdir filter to hash table %d!",
1423 fdir_info->hash_map[ret] = filter;
1425 TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
1430 /* Delete a flow director filter from the SW list */
1432 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1434 struct i40e_fdir_info *fdir_info = &pf->fdir;
1435 struct i40e_fdir_filter *filter;
1438 if (input->flow_ext.pkt_template)
1439 ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1441 input->flow.raw_flow.length);
1443 ret = rte_hash_del_key(fdir_info->hash_table, input);
1446 "Failed to delete fdir filter to hash table %d!",
1450 filter = fdir_info->hash_map[ret];
1451 fdir_info->hash_map[ret] = NULL;
1453 TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1460 * i40e_add_del_fdir_filter - add or remove a flow director filter.
1461 * @pf: board private structure
1462 * @filter: fdir filter entry
1463 * @add: 0 - delete, 1 - add
1466 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
1467 const struct rte_eth_fdir_filter *filter,
1470 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1471 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1472 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1473 enum i40e_filter_pctype pctype;
1476 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1477 PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
1478 " check the mode in fdir_conf.");
1482 pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
1483 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
1484 PMD_DRV_LOG(ERR, "invalid flow_type input.");
1487 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1488 PMD_DRV_LOG(ERR, "Invalid queue ID");
1491 if (filter->input.flow_ext.is_vf &&
1492 filter->input.flow_ext.dst_id >= pf->vf_num) {
1493 PMD_DRV_LOG(ERR, "Invalid VF ID");
1497 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1499 ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
1501 PMD_DRV_LOG(ERR, "failed to construct packet for fdir.");
1505 if (hw->mac.type == I40E_MAC_X722) {
1506 /* get translated pctype value in fd pctype register */
1507 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1508 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1511 ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
1513 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1522 * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1523 * @pf: board private structure
1524 * @filter: fdir filter entry
1525 * @add: 0 - delete, 1 - add
1528 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1529 const struct i40e_fdir_filter_conf *filter,
1532 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1533 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1534 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1535 enum i40e_filter_pctype pctype;
1536 struct i40e_fdir_info *fdir_info = &pf->fdir;
1537 struct i40e_fdir_filter *fdir_filter, *node;
1538 struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1541 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1542 PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
1546 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1547 PMD_DRV_LOG(ERR, "Invalid queue ID");
1550 if (filter->input.flow_ext.is_vf &&
1551 filter->input.flow_ext.dst_id >= pf->vf_num) {
1552 PMD_DRV_LOG(ERR, "Invalid VF ID");
1555 if (filter->input.flow_ext.pkt_template) {
1556 if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
1557 !filter->input.flow.raw_flow.packet) {
1558 PMD_DRV_LOG(ERR, "Invalid raw packet template"
1559 " flow filter parameters!");
1562 pctype = filter->input.flow.raw_flow.pctype;
1564 pctype = filter->input.pctype;
1567 /* Check if there is the filter in SW list */
1568 memset(&check_filter, 0, sizeof(check_filter));
1569 i40e_fdir_filter_convert(filter, &check_filter);
1570 node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
1573 "Conflict with existing flow director rules!");
1577 if (!add && !node) {
1579 "There's no corresponding flow firector filter!");
1583 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1585 ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1587 PMD_DRV_LOG(ERR, "failed to construct packet for fdir.");
1591 if (hw->mac.type == I40E_MAC_X722) {
1592 /* get translated pctype value in fd pctype register */
1593 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1594 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1597 ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
1599 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1605 fdir_filter = rte_zmalloc("fdir_filter",
1606 sizeof(*fdir_filter), 0);
1607 if (fdir_filter == NULL) {
1608 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
1612 rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
1613 ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
1615 rte_free(fdir_filter);
1617 ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1624 * i40e_fdir_filter_programming - Program a flow director filter rule.
1625 * This is done via a Flow Director Programming Descriptor followed by a
1626 * packet structure containing the filter fields that need to match.
1627 * @pf: board private structure
1629 * @filter: fdir filter entry
1630 * @add: 0 - delete, 1 - add
1633 i40e_fdir_filter_programming(struct i40e_pf *pf,
1634 enum i40e_filter_pctype pctype,
1635 const struct rte_eth_fdir_filter *filter,
1638 struct i40e_tx_queue *txq = pf->fdir.txq;
1639 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1640 const struct rte_eth_fdir_action *fdir_action = &filter->action;
1641 volatile struct i40e_tx_desc *txdp;
1642 volatile struct i40e_filter_program_desc *fdirdp;
1647 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1648 fdirdp = (volatile struct i40e_filter_program_desc *)
1649 (&(txq->tx_ring[txq->tx_tail]));
1651 fdirdp->qindex_flex_ptype_vsi =
1652 rte_cpu_to_le_32((fdir_action->rx_queue <<
1653 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1654 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1656 fdirdp->qindex_flex_ptype_vsi |=
1657 rte_cpu_to_le_32((fdir_action->flex_off <<
1658 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1659 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1661 fdirdp->qindex_flex_ptype_vsi |=
1662 rte_cpu_to_le_32((pctype <<
1663 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1664 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1666 if (filter->input.flow_ext.is_vf)
1667 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1669 /* Use LAN VSI Id by default */
1670 vsi_id = pf->main_vsi->vsi_id;
1671 fdirdp->qindex_flex_ptype_vsi |=
1672 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1673 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1674 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1676 fdirdp->dtype_cmd_cntindex =
1677 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1680 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1681 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1682 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1684 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1685 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1686 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1688 if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
1689 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1690 else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
1691 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1692 else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
1693 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1695 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1696 " unsupported fdir behavior.");
1700 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1701 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1702 I40E_TXD_FLTR_QW1_DEST_MASK);
1704 fdirdp->dtype_cmd_cntindex |=
1705 rte_cpu_to_le_32((fdir_action->report_status <<
1706 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1707 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1709 fdirdp->dtype_cmd_cntindex |=
1710 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1711 fdirdp->dtype_cmd_cntindex |=
1713 ((uint32_t)pf->fdir.match_counter_index <<
1714 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1715 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1717 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
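/*
 * Note: this soft_id appears to be what the hardware echoes back as
 * FD_ID (wb.qword0.hi_dword.fd_id) in the programming status
 * descriptor checked by i40e_check_fdir_programming_status().
 */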
1719 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1720 txdp = &(txq->tx_ring[txq->tx_tail + 1]);
1721 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1722 td_cmd = I40E_TX_DESC_CMD_EOP |
1723 I40E_TX_DESC_CMD_RS |
1724 I40E_TX_DESC_CMD_DUMMY;
1726 txdp->cmd_type_offset_bsz =
1727 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1729 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1730 if (txq->tx_tail >= txq->nb_tx_desc)
1732 /* Update the tx tail register */
1734 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1735 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1736 if ((txdp->cmd_type_offset_bsz &
1737 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1738 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1742 if (i >= I40E_FDIR_MAX_WAIT_US) {
1743 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1744 " timed out waiting for DD on tx queue.");
1747 /* delay a total of 10 ms to check the programming status */
1748 for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
1749 if (i40e_check_fdir_programming_status(rxq) >= 0)
1754 "Failed to program FDIR filter: programming status reported.");
1759 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
1760 * This is done via a Flow Director Programming Descriptor followed by a
1761 * packet structure containing the filter fields that need to match.
1762 * @pf: board private structure
1764 * @filter: fdir filter entry
1765 * @add: 0 - delete, 1 - add
1768 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
1769 enum i40e_filter_pctype pctype,
1770 const struct i40e_fdir_filter_conf *filter,
1773 struct i40e_tx_queue *txq = pf->fdir.txq;
1774 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1775 const struct i40e_fdir_action *fdir_action = &filter->action;
1776 volatile struct i40e_tx_desc *txdp;
1777 volatile struct i40e_filter_program_desc *fdirdp;
1782 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1783 fdirdp = (volatile struct i40e_filter_program_desc *)
1784 (&txq->tx_ring[txq->tx_tail]);
1786 fdirdp->qindex_flex_ptype_vsi =
1787 rte_cpu_to_le_32((fdir_action->rx_queue <<
1788 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1789 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1791 fdirdp->qindex_flex_ptype_vsi |=
1792 rte_cpu_to_le_32((fdir_action->flex_off <<
1793 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1794 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1796 fdirdp->qindex_flex_ptype_vsi |=
1797 rte_cpu_to_le_32((pctype <<
1798 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1799 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1801 if (filter->input.flow_ext.is_vf)
1802 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1804 /* Use LAN VSI Id by default */
1805 vsi_id = pf->main_vsi->vsi_id;
1806 fdirdp->qindex_flex_ptype_vsi |=
1807 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1808 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1809 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1811 fdirdp->dtype_cmd_cntindex =
1812 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1815 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1816 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1817 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1819 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1820 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1821 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1823 if (fdir_action->behavior == I40E_FDIR_REJECT)
1824 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1825 else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
1826 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1827 else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
1828 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1830 PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
1834 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1835 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1836 I40E_TXD_FLTR_QW1_DEST_MASK);
1838 fdirdp->dtype_cmd_cntindex |=
1839 rte_cpu_to_le_32((fdir_action->report_status <<
1840 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1841 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1843 fdirdp->dtype_cmd_cntindex |=
1844 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1845 fdirdp->dtype_cmd_cntindex |=
1847 ((uint32_t)pf->fdir.match_counter_index <<
1848 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1849 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1851 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1853 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1854 txdp = &txq->tx_ring[txq->tx_tail + 1];
1855 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1856 td_cmd = I40E_TX_DESC_CMD_EOP |
1857 I40E_TX_DESC_CMD_RS |
1858 I40E_TX_DESC_CMD_DUMMY;
1860 txdp->cmd_type_offset_bsz =
1861 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1863 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1864 if (txq->tx_tail >= txq->nb_tx_desc)
1866 /* Update the tx tail register */
1868 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1869 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1870 if ((txdp->cmd_type_offset_bsz &
1871 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1872 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1876 if (i >= I40E_FDIR_MAX_WAIT_US) {
1878 "Failed to program FDIR filter: time out to get DD on tx queue.");
1881 /* delay a total of 10 ms to check the programming status */
1882 rte_delay_us(I40E_FDIR_MAX_WAIT_US);
1883 if (i40e_check_fdir_programming_status(rxq) < 0) {
1885 "Failed to program FDIR filter: programming status reported.");
1893 * i40e_fdir_flush - clear all filters of Flow Director table
1894 * @pf: board private structure
1897 i40e_fdir_flush(struct rte_eth_dev *dev)
1899 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1900 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1902 uint16_t guarant_cnt, best_cnt;
1905 I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
1906 I40E_WRITE_FLUSH(hw);
1908 for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
1909 rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
1910 reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
1911 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
1914 if (i >= I40E_FDIR_FLUSH_RETRY) {
1915 PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
1918 guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1919 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
1920 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
1921 best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1922 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
1923 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
1924 if (guarant_cnt != 0 || best_cnt != 0) {
1925 PMD_DRV_LOG(ERR, "Failed to flush FD table.");
1928 PMD_DRV_LOG(INFO, "FD table Flush success.");
1933 i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
1934 struct rte_eth_flex_payload_cfg *flex_set,
1937 struct i40e_fdir_flex_pit *flex_pit;
1938 struct rte_eth_flex_payload_cfg *ptr = flex_set;
1939 uint16_t src, dst, size, j, k;
1940 uint8_t i, layer_idx;
1942 for (layer_idx = I40E_FLXPLD_L2_IDX;
1943 layer_idx <= I40E_FLXPLD_L4_IDX;
1945 if (layer_idx == I40E_FLXPLD_L2_IDX)
1946 ptr->type = RTE_ETH_L2_PAYLOAD;
1947 else if (layer_idx == I40E_FLXPLD_L3_IDX)
1948 ptr->type = RTE_ETH_L3_PAYLOAD;
1949 else if (layer_idx == I40E_FLXPLD_L4_IDX)
1950 ptr->type = RTE_ETH_L4_PAYLOAD;
1952 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1953 flex_pit = &pf->fdir.flex_set[layer_idx *
1954 I40E_MAX_FLXPLD_FIED + i];
1955 if (flex_pit->size == 0)
1957 src = flex_pit->src_offset * sizeof(uint16_t);
1958 dst = flex_pit->dst_offset * sizeof(uint16_t);
1959 size = flex_pit->size * sizeof(uint16_t);
1960 for (j = src, k = dst; j < src + size; j++, k++)
1961 ptr->src_offset[k] = j;
1969 i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
1970 struct rte_eth_fdir_flex_mask *flex_mask,
1973 struct i40e_fdir_flex_mask *mask;
1974 struct rte_eth_fdir_flex_mask *ptr = flex_mask;
1977 uint16_t off_bytes, mask_tmp;
1979 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
1980 i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
1982 mask = &pf->fdir.flex_mask[i];
1983 flow_type = i40e_pctype_to_flowtype(pf->adapter,
1984 (enum i40e_filter_pctype)i);
1985 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
1988 for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
1989 if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
1990 ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
1991 ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
1993 ptr->mask[j * sizeof(uint16_t)] = 0x0;
1994 ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
1997 for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
1998 off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
1999 mask_tmp = ~mask->bitmask[j].mask;
2000 ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
2001 ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
2003 ptr->flow_type = flow_type;
2010 * i40e_fdir_info_get - get information of Flow Director
2011 * @pf: ethernet device to get info from
2012 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
2013 * the flow director information.
2016 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
2018 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2019 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2020 uint16_t num_flex_set = 0;
2021 uint16_t num_flex_mask = 0;
2024 if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
2025 fdir->mode = RTE_FDIR_MODE_PERFECT;
2027 fdir->mode = RTE_FDIR_MODE_NONE;
2030 (uint32_t)hw->func_caps.fd_filters_guaranteed;
2032 (uint32_t)hw->func_caps.fd_filters_best_effort;
2033 fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
2034 fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
2035 for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
2036 fdir->flow_types_mask[i] = 0ULL;
2037 fdir->flex_payload_unit = sizeof(uint16_t);
2038 fdir->flex_bitmask_unit = sizeof(uint16_t);
2039 fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
2040 fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
2041 fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
2043 i40e_fdir_info_get_flex_set(pf,
2044 fdir->flex_conf.flex_set,
2046 i40e_fdir_info_get_flex_mask(pf,
2047 fdir->flex_conf.flex_mask,
2050 fdir->flex_conf.nb_payloads = num_flex_set;
2051 fdir->flex_conf.nb_flexmasks = num_flex_mask;
2055 * i40e_fdir_stat_get - get statistics of Flow Director
2056 * @pf: ethernet device to get info from
2057 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
2058 * the flow director statistics.
2061 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
2063 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2064 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2067 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2069 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2070 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2072 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2073 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2077 i40e_fdir_filter_set(struct rte_eth_dev *dev,
2078 struct rte_eth_fdir_filter_info *info)
2080 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2084 PMD_DRV_LOG(ERR, "Invalid pointer");
2088 switch (info->info_type) {
2089 case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
2090 ret = i40e_fdir_filter_inset_select(pf,
2091 &(info->info.input_set_conf));
2094 PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
2103 * i40e_fdir_ctrl_func - deal with all operations on flow director.
2104 * @pf: board private structure
2105 * @filter_op: the operation to be taken.
2106 * @arg: a pointer to specific structure corresponding to the filter_op
2109 i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
2110 enum rte_filter_op filter_op,
2113 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2116 if ((pf->flags & I40E_FLAG_FDIR) == 0)
2119 if (filter_op == RTE_ETH_FILTER_NOP)
2122 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2125 switch (filter_op) {
2126 case RTE_ETH_FILTER_ADD:
2127 ret = i40e_add_del_fdir_filter(dev,
2128 (struct rte_eth_fdir_filter *)arg,
2131 case RTE_ETH_FILTER_DELETE:
2132 ret = i40e_add_del_fdir_filter(dev,
2133 (struct rte_eth_fdir_filter *)arg,
2136 case RTE_ETH_FILTER_FLUSH:
2137 ret = i40e_fdir_flush(dev);
2139 case RTE_ETH_FILTER_INFO:
2140 i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
2142 case RTE_ETH_FILTER_SET:
2143 ret = i40e_fdir_filter_set(dev,
2144 (struct rte_eth_fdir_filter_info *)arg);
2146 case RTE_ETH_FILTER_STATS:
2147 i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
2150 PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
2157 /* Restore flow director filter */
2159 i40e_fdir_filter_restore(struct i40e_pf *pf)
2161 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
2162 struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
2163 struct i40e_fdir_filter *f;
2164 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2166 uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
2167 uint32_t best_cnt; /**< Number of filters in best effort spaces. */
2169 TAILQ_FOREACH(f, fdir_list, rules)
2170 i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
2172 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2174 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2175 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2177 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2178 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2180 PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
2181 guarant_cnt, best_cnt);